Diffstat (limited to 'drivers')
-rw-r--r--  drivers/bios_emulator/atibios.c     |    1
-rw-r--r--  drivers/mmc/Kconfig                 |   23
-rw-r--r--  drivers/mmc/atmel_sdhci.c           |    4
-rw-r--r--  drivers/mmc/bcm2835_sdhci.c         |    4
-rw-r--r--  drivers/mmc/bcmstb_sdhci.c          |    3
-rw-r--r--  drivers/mmc/dw_mmc.c                |   16
-rw-r--r--  drivers/mmc/msm_sdhci.c             |    4
-rw-r--r--  drivers/mmc/pci_mmc.c               |    4
-rw-r--r--  drivers/mmc/s5p_sdhci.c             |    4
-rw-r--r--  drivers/mmc/sdhci-cadence.c         |    3
-rw-r--r--  drivers/mmc/tangier_sdhci.c         |    3
-rw-r--r--  drivers/mmc/zynq_sdhci.c            |    7
-rw-r--r--  drivers/ram/rockchip/sdram_rk3328.c | 1018
-rw-r--r--  drivers/spi/ich.c                   |  258
-rw-r--r--  drivers/spi/ich.h                   |    9
-rw-r--r--  drivers/watchdog/Kconfig            |    9
-rw-r--r--  drivers/watchdog/Makefile           |    1
-rw-r--r--  drivers/watchdog/bcm2835_wdt.c      |   34
-rw-r--r--  drivers/watchdog/mtk_wdt.c          |   25
19 files changed, 1175 insertions, 255 deletions
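Most of the MMC hunks below apply the same ordering fix in each driver's probe(): host->mmc and host->mmc->dev are now assigned before sdhci_setup_cfg() is called, so the setup code already has a valid mmc/device link. A minimal sketch of that probe order, assuming a hypothetical my_sdhci driver (my_sdhci_plat, MY_FMAX and MY_FMIN are placeholders, not symbols from this diff):

#include <common.h>
#include <dm.h>
#include <mmc.h>
#include <sdhci.h>

#define MY_FMAX	52000000	/* placeholder max clock */
#define MY_FMIN	400000		/* placeholder min clock */

/* Hypothetical platform data, following the layout used by the drivers below */
struct my_sdhci_plat {
	struct mmc_config cfg;
	struct mmc mmc;
};

static int my_sdhci_probe(struct udevice *dev)
{
	struct mmc_uclass_priv *upriv = dev_get_uclass_priv(dev);
	struct my_sdhci_plat *plat = dev_get_platdata(dev);
	struct sdhci_host *host = dev_get_priv(dev);
	int ret;

	/* Link host and mmc before sdhci_setup_cfg() so it can reach the device */
	host->mmc = &plat->mmc;
	host->mmc->dev = dev;

	ret = sdhci_setup_cfg(&plat->cfg, host, MY_FMAX, MY_FMIN);
	if (ret)
		return ret;

	host->mmc->priv = host;
	upriv->mmc = host->mmc;

	return sdhci_probe(dev);
}

The per-driver hunks that follow only move those two assignments above the sdhci_setup_cfg() call; the rest of each probe() is unchanged.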
diff --git a/drivers/bios_emulator/atibios.c b/drivers/bios_emulator/atibios.c index 4f362d1453..fb806b53d4 100644 --- a/drivers/bios_emulator/atibios.c +++ b/drivers/bios_emulator/atibios.c @@ -136,7 +136,6 @@ static int atibios_debug_mode(BE_VGAInfo *vga_info, RMREGS *regs, bool linear_ok; int attr; - break; debug("Mode %x: ", mode); memset(buffer, '\0', sizeof(struct vbe_mode_info)); regs->e.eax = VESA_GET_MODE_INFO; diff --git a/drivers/mmc/Kconfig b/drivers/mmc/Kconfig index 4cdae41b59..c6812f6517 100644 --- a/drivers/mmc/Kconfig +++ b/drivers/mmc/Kconfig @@ -156,6 +156,7 @@ config MMC_HS400_SUPPORT config SPL_MMC_HS400_SUPPORT bool "enable HS400 support in SPL" + select SPL_MMC_HS200_SUPPORT help The HS400 mode is support by some eMMC. The bus frequency is up to 200MHz. This mode requires tuning the IO. @@ -591,6 +592,17 @@ config MMC_SDHCI_TEGRA If unsure, say N. +config TEGRA124_MMC_DISABLE_EXT_LOOPBACK + bool "Disable external clock loopback" + depends on MMC_SDHCI_TEGRA && TEGRA124 + help + Disable the external clock loopback and use the internal one on SDMMC3 + as per the SDMMC_VENDOR_MISC_CNTRL_0 register's SDMMC_SPARE1 bits + being set to 0xfffd according to the TRM. + + TODO(marcel.ziswiler@toradex.com): Move to device tree controlled + approach once proper kernel integration made it mainline. + config MMC_SDHCI_ZYNQ bool "Arasan SDHCI controller support" depends on ARCH_ZYNQ || ARCH_ZYNQMP || ARCH_VERSAL @@ -670,17 +682,6 @@ config MMC_MTK endif -config TEGRA124_MMC_DISABLE_EXT_LOOPBACK - bool "Disable external clock loopback" - depends on MMC_SDHCI_TEGRA && TEGRA124 - help - Disable the external clock loopback and use the internal one on SDMMC3 - as per the SDMMC_VENDOR_MISC_CNTRL_0 register's SDMMC_SPARE1 bits - being set to 0xfffd according to the TRM. - - TODO(marcel.ziswiler@toradex.com): Move to device tree controlled - approach once proper kernel integration made it mainline. 
- config FSL_ESDHC bool "Freescale/NXP eSDHC controller support" help diff --git a/drivers/mmc/atmel_sdhci.c b/drivers/mmc/atmel_sdhci.c index 4be47ba75e..d930ed8da0 100644 --- a/drivers/mmc/atmel_sdhci.c +++ b/drivers/mmc/atmel_sdhci.c @@ -88,13 +88,13 @@ static int atmel_sdhci_probe(struct udevice *dev) return -EINVAL; host->max_clk = max_clk; + host->mmc = &plat->mmc; + host->mmc->dev = dev; ret = sdhci_setup_cfg(&plat->cfg, host, 0, ATMEL_SDHC_MIN_FREQ); if (ret) return ret; - host->mmc = &plat->mmc; - host->mmc->dev = dev; host->mmc->priv = host; upriv->mmc = host->mmc; diff --git a/drivers/mmc/bcm2835_sdhci.c b/drivers/mmc/bcm2835_sdhci.c index 08bddd410e..bf3304c4dc 100644 --- a/drivers/mmc/bcm2835_sdhci.c +++ b/drivers/mmc/bcm2835_sdhci.c @@ -214,6 +214,9 @@ static int bcm2835_sdhci_probe(struct udevice *dev) host->voltages = MMC_VDD_32_33 | MMC_VDD_33_34 | MMC_VDD_165_195; host->ops = &bcm2835_ops; + host->mmc = &plat->mmc; + host->mmc->dev = dev; + ret = sdhci_setup_cfg(&plat->cfg, host, emmc_freq, MIN_FREQ); if (ret) { debug("%s: Failed to setup SDHCI (err=%d)\n", __func__, ret); @@ -221,7 +224,6 @@ static int bcm2835_sdhci_probe(struct udevice *dev) } upriv->mmc = &plat->mmc; - host->mmc = &plat->mmc; host->mmc->priv = host; return sdhci_probe(dev); diff --git a/drivers/mmc/bcmstb_sdhci.c b/drivers/mmc/bcmstb_sdhci.c index eef46f3af1..c14f8289e6 100644 --- a/drivers/mmc/bcmstb_sdhci.c +++ b/drivers/mmc/bcmstb_sdhci.c @@ -73,6 +73,8 @@ static int sdhci_bcmstb_probe(struct udevice *dev) if (ret) return ret; + host->mmc = &plat->mmc; + host->mmc->dev = dev; ret = sdhci_setup_cfg(&plat->cfg, host, BCMSTB_SDHCI_MAXIMUM_CLOCK_FREQUENCY, BCMSTB_SDHCI_MINIMUM_CLOCK_FREQUENCY); @@ -80,7 +82,6 @@ static int sdhci_bcmstb_probe(struct udevice *dev) return ret; upriv->mmc = &plat->mmc; - host->mmc = &plat->mmc; host->mmc->priv = host; return sdhci_probe(dev); diff --git a/drivers/mmc/dw_mmc.c b/drivers/mmc/dw_mmc.c index 1992d61182..22f6c7eefd 100644 --- a/drivers/mmc/dw_mmc.c +++ b/drivers/mmc/dw_mmc.c @@ -13,6 +13,7 @@ #include <mmc.h> #include <dwmmc.h> #include <wait_bit.h> +#include <power/regulator.h> #define PAGE_SIZE 4096 @@ -493,6 +494,21 @@ static int dwmci_set_ios(struct mmc *mmc) if (host->clksel) host->clksel(host); +#if CONFIG_IS_ENABLED(DM_REGULATOR) + if (mmc->vqmmc_supply) { + int ret; + + if (mmc->signal_voltage == MMC_SIGNAL_VOLTAGE_180) + regulator_set_value(mmc->vqmmc_supply, 1800000); + else + regulator_set_value(mmc->vqmmc_supply, 3300000); + + ret = regulator_set_enable_if_allowed(mmc->vqmmc_supply, true); + if (ret) + return ret; + } +#endif + return 0; } diff --git a/drivers/mmc/msm_sdhci.c b/drivers/mmc/msm_sdhci.c index 51f9e0ec05..cae42ec4ac 100644 --- a/drivers/mmc/msm_sdhci.c +++ b/drivers/mmc/msm_sdhci.c @@ -141,12 +141,12 @@ static int msm_sdc_probe(struct udevice *dev) writel(caps, host->ioaddr + SDHCI_VENDOR_SPEC_CAPABILITIES0); } - ret = sdhci_setup_cfg(&plat->cfg, host, 0, 0); host->mmc = &plat->mmc; + host->mmc->dev = dev; + ret = sdhci_setup_cfg(&plat->cfg, host, 0, 0); if (ret) return ret; host->mmc->priv = &prv->host; - host->mmc->dev = dev; upriv->mmc = host->mmc; return sdhci_probe(dev); diff --git a/drivers/mmc/pci_mmc.c b/drivers/mmc/pci_mmc.c index 182d41637f..404264a697 100644 --- a/drivers/mmc/pci_mmc.c +++ b/drivers/mmc/pci_mmc.c @@ -33,12 +33,12 @@ static int pci_mmc_probe(struct udevice *dev) host->ioaddr = (void *)dm_pci_map_bar(dev, PCI_BASE_ADDRESS_0, PCI_REGION_MEM); host->name = dev->name; + host->mmc = &plat->mmc; + host->mmc->dev = 
dev; ret = sdhci_setup_cfg(&plat->cfg, host, 0, 0); if (ret) return ret; - host->mmc = &plat->mmc; host->mmc->priv = &priv->host; - host->mmc->dev = dev; upriv->mmc = host->mmc; return sdhci_probe(dev); diff --git a/drivers/mmc/s5p_sdhci.c b/drivers/mmc/s5p_sdhci.c index 9dd0b865eb..53efa968cf 100644 --- a/drivers/mmc/s5p_sdhci.c +++ b/drivers/mmc/s5p_sdhci.c @@ -204,13 +204,13 @@ static int s5p_sdhci_probe(struct udevice *dev) if (ret) return ret; + host->mmc = &plat->mmc; + host->mmc->dev = dev; ret = sdhci_setup_cfg(&plat->cfg, host, 0, 400000); if (ret) return ret; - host->mmc = &plat->mmc; host->mmc->priv = host; - host->mmc->dev = dev; upriv->mmc = host->mmc; return sdhci_probe(dev); diff --git a/drivers/mmc/sdhci-cadence.c b/drivers/mmc/sdhci-cadence.c index 4f9338f733..4736263bf2 100644 --- a/drivers/mmc/sdhci-cadence.c +++ b/drivers/mmc/sdhci-cadence.c @@ -269,12 +269,13 @@ static int sdhci_cdns_probe(struct udevice *dev) if (ret) return ret; + host->mmc = &plat->mmc; + host->mmc->dev = dev; ret = sdhci_setup_cfg(&plat->cfg, host, 0, 0); if (ret) return ret; upriv->mmc = &plat->mmc; - host->mmc = &plat->mmc; host->mmc->priv = host; return sdhci_probe(dev); diff --git a/drivers/mmc/tangier_sdhci.c b/drivers/mmc/tangier_sdhci.c index 4c33356b9f..0d6e5d6246 100644 --- a/drivers/mmc/tangier_sdhci.c +++ b/drivers/mmc/tangier_sdhci.c @@ -51,13 +51,14 @@ static int sdhci_tangier_probe(struct udevice *dev) /* MMC_VDD_32_33 | MMC_VDD_33_34 | MMC_VDD_165_195 */ host->voltages = MMC_VDD_165_195; + host->mmc = &plat->mmc; + host->mmc->dev = dev; ret = sdhci_setup_cfg(&plat->cfg, host, SDHCI_TANGIER_FMAX, SDHCI_TANGIER_FMIN); if (ret) return ret; upriv->mmc = &plat->mmc; - host->mmc = &plat->mmc; host->mmc->priv = host; return sdhci_probe(dev); diff --git a/drivers/mmc/zynq_sdhci.c b/drivers/mmc/zynq_sdhci.c index c525084250..3225a7ac93 100644 --- a/drivers/mmc/zynq_sdhci.c +++ b/drivers/mmc/zynq_sdhci.c @@ -242,13 +242,14 @@ static int arasan_sdhci_probe(struct udevice *dev) host->max_clk = clock; + host->mmc = &plat->mmc; + host->mmc->dev = dev; + host->mmc->priv = host; + ret = sdhci_setup_cfg(&plat->cfg, host, plat->f_max, CONFIG_ZYNQ_SDHCI_MIN_FREQ); - host->mmc = &plat->mmc; if (ret) return ret; - host->mmc->priv = host; - host->mmc->dev = dev; upriv->mmc = host->mmc; return sdhci_probe(dev); diff --git a/drivers/ram/rockchip/sdram_rk3328.c b/drivers/ram/rockchip/sdram_rk3328.c index f4e0b18447..656696ac3c 100644 --- a/drivers/ram/rockchip/sdram_rk3328.c +++ b/drivers/ram/rockchip/sdram_rk3328.c @@ -2,22 +2,1029 @@ /* * (C) Copyright 2017 Rockchip Electronics Co., Ltd. 
*/ - #include <common.h> +#include <clk.h> +#include <debug_uart.h> #include <dm.h> +#include <dt-structs.h> #include <ram.h> +#include <regmap.h> #include <syscon.h> +#include <asm/io.h> #include <asm/arch-rockchip/clock.h> +#include <asm/arch-rockchip/cru_rk3328.h> #include <asm/arch-rockchip/grf_rk3328.h> #include <asm/arch-rockchip/sdram_common.h> +#include <asm/arch-rockchip/sdram_rk3328.h> +#include <asm/arch-rockchip/uart.h> struct dram_info { +#ifdef CONFIG_TPL_BUILD + struct rk3328_ddr_pctl_regs *pctl; + struct rk3328_ddr_phy_regs *phy; + struct clk ddr_clk; + struct rk3328_cru *cru; + struct rk3328_msch_regs *msch; + struct rk3328_ddr_grf_regs *ddr_grf; +#endif struct ram_info info; struct rk3328_grf_regs *grf; }; +#ifdef CONFIG_TPL_BUILD + +struct rk3328_sdram_channel sdram_ch; + +struct rockchip_dmc_plat { +#if CONFIG_IS_ENABLED(OF_PLATDATA) + struct dtd_rockchip_rk3328_dmc dtplat; +#else + struct rk3328_sdram_params sdram_params; +#endif + struct regmap *map; +}; + +#if CONFIG_IS_ENABLED(OF_PLATDATA) +static int conv_of_platdata(struct udevice *dev) +{ + struct rockchip_dmc_plat *plat = dev_get_platdata(dev); + struct dtd_rockchip_rk3328_dmc *dtplat = &plat->dtplat; + int ret; + + ret = regmap_init_mem_platdata(dev, dtplat->reg, + ARRAY_SIZE(dtplat->reg) / 2, + &plat->map); + if (ret) + return ret; + + return 0; +} +#endif + +static void rkclk_ddr_reset(struct dram_info *dram, + u32 ctl_srstn, u32 ctl_psrstn, + u32 phy_srstn, u32 phy_psrstn) +{ + writel(ddrctrl_srstn_req(ctl_srstn) | ddrctrl_psrstn_req(ctl_psrstn) | + ddrphy_srstn_req(phy_srstn) | ddrphy_psrstn_req(phy_psrstn), + &dram->cru->softrst_con[5]); + writel(ddrctrl_asrstn_req(ctl_srstn), &dram->cru->softrst_con[9]); +} + +static void rkclk_set_dpll(struct dram_info *dram, unsigned int mhz) +{ + unsigned int refdiv, postdiv1, postdiv2, fbdiv; + int delay = 1000; + + refdiv = 1; + if (mhz <= 300) { + postdiv1 = 4; + postdiv2 = 2; + } else if (mhz <= 400) { + postdiv1 = 6; + postdiv2 = 1; + } else if (mhz <= 600) { + postdiv1 = 4; + postdiv2 = 1; + } else if (mhz <= 800) { + postdiv1 = 3; + postdiv2 = 1; + } else if (mhz <= 1600) { + postdiv1 = 2; + postdiv2 = 1; + } else { + postdiv1 = 1; + postdiv2 = 1; + } + fbdiv = (mhz * refdiv * postdiv1 * postdiv2) / 24; + + writel(((0x1 << 4) << 16) | (0 << 4), &dram->cru->mode_con); + writel(POSTDIV1(postdiv1) | FBDIV(fbdiv), &dram->cru->dpll_con[0]); + writel(DSMPD(1) | POSTDIV2(postdiv2) | REFDIV(refdiv), + &dram->cru->dpll_con[1]); + + while (delay > 0) { + udelay(1); + if (LOCK(readl(&dram->cru->dpll_con[1]))) + break; + delay--; + } + + writel(((0x1 << 4) << 16) | (1 << 4), &dram->cru->mode_con); +} + +static void rkclk_configure_ddr(struct dram_info *dram, + struct rk3328_sdram_params *sdram_params) +{ + void __iomem *phy_base = dram->phy; + + /* choose DPLL for ddr clk source */ + clrbits_le32(PHY_REG(phy_base, 0xef), 1 << 7); + + /* for inno ddr phy need 2*freq */ + rkclk_set_dpll(dram, sdram_params->ddr_freq * 2); +} + +static void phy_soft_reset(struct dram_info *dram) +{ + void __iomem *phy_base = dram->phy; + + clrbits_le32(PHY_REG(phy_base, 0), 0x3 << 2); + udelay(1); + setbits_le32(PHY_REG(phy_base, 0), ANALOG_DERESET); + udelay(5); + setbits_le32(PHY_REG(phy_base, 0), DIGITAL_DERESET); + udelay(1); +} + +static int pctl_cfg(struct dram_info *dram, + struct rk3328_sdram_params *sdram_params) +{ + u32 i; + void __iomem *pctl_base = dram->pctl; + + for (i = 0; sdram_params->pctl_regs.pctl[i][0] != 0xFFFFFFFF; i++) { + writel(sdram_params->pctl_regs.pctl[i][1], + 
pctl_base + sdram_params->pctl_regs.pctl[i][0]); + } + clrsetbits_le32(pctl_base + DDR_PCTL2_PWRTMG, + (0xff << 16) | 0x1f, + ((SR_IDLE & 0xff) << 16) | (PD_IDLE & 0x1f)); + /* + * dfi_lp_en_pd=1,dfi_lp_wakeup_pd=2 + * hw_lp_idle_x32=1 + */ + if (sdram_params->dramtype == LPDDR3) { + setbits_le32(pctl_base + DDR_PCTL2_DFILPCFG0, 1); + clrsetbits_le32(pctl_base + DDR_PCTL2_DFILPCFG0, + 0xf << 4, + 2 << 4); + } + clrsetbits_le32(pctl_base + DDR_PCTL2_HWLPCTL, + 0xfff << 16, + 1 << 16); + /* disable zqcs */ + setbits_le32(pctl_base + DDR_PCTL2_ZQCTL0, 1u << 31); + setbits_le32(pctl_base + 0x2000 + DDR_PCTL2_ZQCTL0, 1u << 31); + + return 0; +} + +/* return ddrconfig value + * (-1), find ddrconfig fail + * other, the ddrconfig value + * only support cs0_row >= cs1_row + */ +static unsigned int calculate_ddrconfig(struct rk3328_sdram_params *sdram_params) +{ + static const u16 ddr_cfg_2_rbc[] = { + /*************************** + * [5:4] row(13+n) + * [3] cs(0:0 cs, 1:2 cs) + * [2] bank(0:0bank,1:8bank) + * [1:0] col(11+n) + ****************************/ + /* row, cs, bank, col */ + ((3 << 4) | (0 << 3) | (1 << 2) | 0), + ((3 << 4) | (0 << 3) | (1 << 2) | 1), + ((2 << 4) | (0 << 3) | (1 << 2) | 2), + ((3 << 4) | (0 << 3) | (1 << 2) | 2), + ((2 << 4) | (0 << 3) | (1 << 2) | 3), + ((3 << 4) | (1 << 3) | (1 << 2) | 0), + ((3 << 4) | (1 << 3) | (1 << 2) | 1), + ((2 << 4) | (1 << 3) | (1 << 2) | 2), + ((3 << 4) | (0 << 3) | (0 << 2) | 1), + ((2 << 4) | (0 << 3) | (1 << 2) | 1), + }; + + static const u16 ddr4_cfg_2_rbc[] = { + /*************************** + * [6] cs 0:0cs 1:2 cs + * [5:3] row(13+n) + * [2] cs(0:0 cs, 1:2 cs) + * [1] bw 0: 16bit 1:32bit + * [0] diebw 0:8bit 1:16bit + ***************************/ + /* cs, row, cs, bw, diebw */ + ((0 << 6) | (3 << 3) | (0 << 2) | (1 << 1) | 0), + ((1 << 6) | (2 << 3) | (0 << 2) | (1 << 1) | 0), + ((0 << 6) | (4 << 3) | (0 << 2) | (0 << 1) | 0), + ((1 << 6) | (3 << 3) | (0 << 2) | (0 << 1) | 0), + ((0 << 6) | (4 << 3) | (0 << 2) | (1 << 1) | 1), + ((1 << 6) | (3 << 3) | (0 << 2) | (1 << 1) | 1), + ((1 << 6) | (4 << 3) | (0 << 2) | (0 << 1) | 1), + ((0 << 6) | (2 << 3) | (1 << 2) | (1 << 1) | 0), + ((0 << 6) | (3 << 3) | (1 << 2) | (0 << 1) | 0), + ((0 << 6) | (3 << 3) | (1 << 2) | (1 << 1) | 1), + ((0 << 6) | (4 << 3) | (1 << 2) | (0 << 1) | 1), + }; + + u32 cs, bw, die_bw, col, row, bank; + u32 i, tmp; + u32 ddrconf = -1; + + cs = sdram_ch.rank; + bw = sdram_ch.bw; + die_bw = sdram_ch.dbw; + col = sdram_ch.col; + row = sdram_ch.cs0_row; + bank = sdram_ch.bk; + + if (sdram_params->dramtype == DDR4) { + tmp = ((cs - 1) << 6) | ((row - 13) << 3) | (bw & 0x2) | die_bw; + for (i = 10; i < 17; i++) { + if (((tmp & 0x7) == (ddr4_cfg_2_rbc[i - 10] & 0x7)) && + ((tmp & 0x3c) <= (ddr4_cfg_2_rbc[i - 10] & 0x3c)) && + ((tmp & 0x40) <= (ddr4_cfg_2_rbc[i - 10] & 0x40))) { + ddrconf = i; + goto out; + } + } + } else { + if (bank == 2) { + ddrconf = 8; + goto out; + } + + tmp = ((row - 13) << 4) | (1 << 2) | ((bw + col - 11) << 0); + for (i = 0; i < 5; i++) + if (((tmp & 0xf) == (ddr_cfg_2_rbc[i] & 0xf)) && + ((tmp & 0x30) <= (ddr_cfg_2_rbc[i] & 0x30))) { + ddrconf = i; + goto out; + } + } + +out: + if (ddrconf > 20) + printf("calculate_ddrconfig error\n"); + + return ddrconf; +} + +/* n: Unit bytes */ +static void copy_to_reg(u32 *dest, u32 *src, u32 n) +{ + int i; + + for (i = 0; i < n / sizeof(u32); i++) { + writel(*src, dest); + src++; + dest++; + } +} + +/******* + * calculate controller dram address map, and setting to register. 
+ * argument sdram_ch.ddrconf must be right value before + * call this function. + *******/ +static void set_ctl_address_map(struct dram_info *dram, + struct rk3328_sdram_params *sdram_params) +{ + void __iomem *pctl_base = dram->pctl; + + copy_to_reg((u32 *)(pctl_base + DDR_PCTL2_ADDRMAP0), + &addrmap[sdram_ch.ddrconfig][0], 9 * 4); + if (sdram_params->dramtype == LPDDR3 && sdram_ch.row_3_4) + setbits_le32(pctl_base + DDR_PCTL2_ADDRMAP6, 1 << 31); + if (sdram_params->dramtype == DDR4 && sdram_ch.bw == 0x1) + setbits_le32(pctl_base + DDR_PCTL2_PCCFG, 1 << 8); + + if (sdram_ch.rank == 1) + clrsetbits_le32(pctl_base + DDR_PCTL2_ADDRMAP0, 0x1f, 0x1f); +} + +static void phy_dll_bypass_set(struct dram_info *dram, u32 freq) +{ + u32 tmp; + void __iomem *phy_base = dram->phy; + + setbits_le32(PHY_REG(phy_base, 0x13), 1 << 4); + clrbits_le32(PHY_REG(phy_base, 0x14), 1 << 3); + setbits_le32(PHY_REG(phy_base, 0x26), 1 << 4); + clrbits_le32(PHY_REG(phy_base, 0x27), 1 << 3); + setbits_le32(PHY_REG(phy_base, 0x36), 1 << 4); + clrbits_le32(PHY_REG(phy_base, 0x37), 1 << 3); + setbits_le32(PHY_REG(phy_base, 0x46), 1 << 4); + clrbits_le32(PHY_REG(phy_base, 0x47), 1 << 3); + setbits_le32(PHY_REG(phy_base, 0x56), 1 << 4); + clrbits_le32(PHY_REG(phy_base, 0x57), 1 << 3); + + if (freq <= (400 * MHz)) + /* DLL bypass */ + setbits_le32(PHY_REG(phy_base, 0xa4), 0x1f); + else + clrbits_le32(PHY_REG(phy_base, 0xa4), 0x1f); + if (freq <= (680 * MHz)) + tmp = 2; + else + tmp = 1; + writel(tmp, PHY_REG(phy_base, 0x28)); + writel(tmp, PHY_REG(phy_base, 0x38)); + writel(tmp, PHY_REG(phy_base, 0x48)); + writel(tmp, PHY_REG(phy_base, 0x58)); +} + +static void set_ds_odt(struct dram_info *dram, + struct rk3328_sdram_params *sdram_params) +{ + u32 cmd_drv, clk_drv, dqs_drv, dqs_odt; + void __iomem *phy_base = dram->phy; + + if (sdram_params->dramtype == DDR3) { + cmd_drv = PHY_DDR3_RON_RTT_34ohm; + clk_drv = PHY_DDR3_RON_RTT_45ohm; + dqs_drv = PHY_DDR3_RON_RTT_34ohm; + dqs_odt = PHY_DDR3_RON_RTT_225ohm; + } else { + cmd_drv = PHY_DDR4_LPDDR3_RON_RTT_34ohm; + clk_drv = PHY_DDR4_LPDDR3_RON_RTT_43ohm; + dqs_drv = PHY_DDR4_LPDDR3_RON_RTT_34ohm; + dqs_odt = PHY_DDR4_LPDDR3_RON_RTT_240ohm; + } + /* DS */ + writel(cmd_drv, PHY_REG(phy_base, 0x11)); + clrsetbits_le32(PHY_REG(phy_base, 0x12), 0x1f << 3, cmd_drv << 3); + writel(clk_drv, PHY_REG(phy_base, 0x16)); + writel(clk_drv, PHY_REG(phy_base, 0x18)); + writel(dqs_drv, PHY_REG(phy_base, 0x20)); + writel(dqs_drv, PHY_REG(phy_base, 0x2f)); + writel(dqs_drv, PHY_REG(phy_base, 0x30)); + writel(dqs_drv, PHY_REG(phy_base, 0x3f)); + writel(dqs_drv, PHY_REG(phy_base, 0x40)); + writel(dqs_drv, PHY_REG(phy_base, 0x4f)); + writel(dqs_drv, PHY_REG(phy_base, 0x50)); + writel(dqs_drv, PHY_REG(phy_base, 0x5f)); + /* ODT */ + writel(dqs_odt, PHY_REG(phy_base, 0x21)); + writel(dqs_odt, PHY_REG(phy_base, 0x2e)); + writel(dqs_odt, PHY_REG(phy_base, 0x31)); + writel(dqs_odt, PHY_REG(phy_base, 0x3e)); + writel(dqs_odt, PHY_REG(phy_base, 0x41)); + writel(dqs_odt, PHY_REG(phy_base, 0x4e)); + writel(dqs_odt, PHY_REG(phy_base, 0x51)); + writel(dqs_odt, PHY_REG(phy_base, 0x5e)); +} + +static void phy_cfg(struct dram_info *dram, + struct rk3328_sdram_params *sdram_params) +{ + u32 i; + void __iomem *phy_base = dram->phy; + + phy_dll_bypass_set(dram, sdram_params->ddr_freq); + for (i = 0; sdram_params->phy_regs.phy[i][0] != 0xFFFFFFFF; i++) { + writel(sdram_params->phy_regs.phy[i][1], + phy_base + sdram_params->phy_regs.phy[i][0]); + } + if (sdram_ch.bw == 2) { + clrsetbits_le32(PHY_REG(phy_base, 0), 0xf << 
4, 0xf << 4); + } else { + clrsetbits_le32(PHY_REG(phy_base, 0), 0xf << 4, 3 << 4); + /* disable DQS2,DQS3 tx dll for saving power */ + clrbits_le32(PHY_REG(phy_base, 0x46), 1 << 3); + clrbits_le32(PHY_REG(phy_base, 0x56), 1 << 3); + } + set_ds_odt(dram, sdram_params); + /* deskew */ + setbits_le32(PHY_REG(phy_base, 2), 8); + copy_to_reg(PHY_REG(phy_base, 0xb0), + &sdram_params->skew.a0_a1_skew[0], 15 * 4); + copy_to_reg(PHY_REG(phy_base, 0x70), + &sdram_params->skew.cs0_dm0_skew[0], 44 * 4); + copy_to_reg(PHY_REG(phy_base, 0xc0), + &sdram_params->skew.cs0_dm1_skew[0], 44 * 4); +} + +static int update_refresh_reg(struct dram_info *dram) +{ + void __iomem *pctl_base = dram->pctl; + u32 ret; + + ret = readl(pctl_base + DDR_PCTL2_RFSHCTL3) ^ (1 << 1); + writel(ret, pctl_base + DDR_PCTL2_RFSHCTL3); + + return 0; +} + +static int data_training(struct dram_info *dram, u32 cs, u32 dramtype) +{ + u32 ret; + u32 dis_auto_zq = 0; + void __iomem *pctl_base = dram->pctl; + void __iomem *phy_base = dram->phy; + + /* disable zqcs */ + if (!(readl(pctl_base + DDR_PCTL2_ZQCTL0) & + (1ul << 31))) { + dis_auto_zq = 1; + setbits_le32(pctl_base + DDR_PCTL2_ZQCTL0, 1 << 31); + } + /* disable auto refresh */ + setbits_le32(pctl_base + DDR_PCTL2_RFSHCTL3, 1); + update_refresh_reg(dram); + + if (dramtype == DDR4) { + clrsetbits_le32(PHY_REG(phy_base, 0x29), 0x3, 0); + clrsetbits_le32(PHY_REG(phy_base, 0x39), 0x3, 0); + clrsetbits_le32(PHY_REG(phy_base, 0x49), 0x3, 0); + clrsetbits_le32(PHY_REG(phy_base, 0x59), 0x3, 0); + } + /* choose training cs */ + clrsetbits_le32(PHY_REG(phy_base, 2), 0x33, (0x20 >> cs)); + /* enable gate training */ + clrsetbits_le32(PHY_REG(phy_base, 2), 0x33, (0x20 >> cs) | 1); + udelay(50); + ret = readl(PHY_REG(phy_base, 0xff)); + /* disable gate training */ + clrsetbits_le32(PHY_REG(phy_base, 2), 0x33, (0x20 >> cs) | 0); + /* restore zqcs */ + if (dis_auto_zq) + clrbits_le32(pctl_base + DDR_PCTL2_ZQCTL0, 1 << 31); + /* restore auto refresh */ + clrbits_le32(pctl_base + DDR_PCTL2_RFSHCTL3, 1); + update_refresh_reg(dram); + + if (dramtype == DDR4) { + clrsetbits_le32(PHY_REG(phy_base, 0x29), 0x3, 0x2); + clrsetbits_le32(PHY_REG(phy_base, 0x39), 0x3, 0x2); + clrsetbits_le32(PHY_REG(phy_base, 0x49), 0x3, 0x2); + clrsetbits_le32(PHY_REG(phy_base, 0x59), 0x3, 0x2); + } + + if (ret & 0x10) { + ret = -1; + } else { + ret = (ret & 0xf) ^ (readl(PHY_REG(phy_base, 0)) >> 4); + ret = (ret == 0) ? 
0 : -1; + } + return ret; +} + +/* rank = 1: cs0 + * rank = 2: cs1 + * rank = 3: cs0 & cs1 + * note: be careful of keep mr original val + */ +static int write_mr(struct dram_info *dram, u32 rank, u32 mr_num, u32 arg, + u32 dramtype) +{ + void __iomem *pctl_base = dram->pctl; + + while (readl(pctl_base + DDR_PCTL2_MRSTAT) & MR_WR_BUSY) + continue; + if (dramtype == DDR3 || dramtype == DDR4) { + writel((mr_num << 12) | (rank << 4) | (0 << 0), + pctl_base + DDR_PCTL2_MRCTRL0); + writel(arg, pctl_base + DDR_PCTL2_MRCTRL1); + } else { + writel((rank << 4) | (0 << 0), + pctl_base + DDR_PCTL2_MRCTRL0); + writel((mr_num << 8) | (arg & 0xff), + pctl_base + DDR_PCTL2_MRCTRL1); + } + + setbits_le32(pctl_base + DDR_PCTL2_MRCTRL0, 1u << 31); + while (readl(pctl_base + DDR_PCTL2_MRCTRL0) & (1u << 31)) + continue; + while (readl(pctl_base + DDR_PCTL2_MRSTAT) & MR_WR_BUSY) + continue; + + return 0; +} + +/* + * rank : 1:cs0, 2:cs1, 3:cs0&cs1 + * vrefrate: 4500: 45%, + */ +static int write_vrefdq(struct dram_info *dram, u32 rank, u32 vrefrate, + u32 dramtype) +{ + u32 tccd_l, value; + u32 dis_auto_zq = 0; + void __iomem *pctl_base = dram->pctl; + + if (dramtype != DDR4 || vrefrate < 4500 || vrefrate > 9200) + return -1; + + tccd_l = (readl(pctl_base + DDR_PCTL2_DRAMTMG4) >> 16) & 0xf; + tccd_l = (tccd_l - 4) << 10; + + if (vrefrate > 7500) { + /* range 1 */ + value = ((vrefrate - 6000) / 65) | tccd_l; + } else { + /* range 2 */ + value = ((vrefrate - 4500) / 65) | tccd_l | (1 << 6); + } + + /* disable zqcs */ + if (!(readl(pctl_base + DDR_PCTL2_ZQCTL0) & + (1ul << 31))) { + dis_auto_zq = 1; + setbits_le32(pctl_base + DDR_PCTL2_ZQCTL0, 1 << 31); + } + /* disable auto refresh */ + setbits_le32(pctl_base + DDR_PCTL2_RFSHCTL3, 1); + update_refresh_reg(dram); + + /* enable vrefdq calibratin */ + write_mr(dram, rank, 6, value | (1 << 7), dramtype); + udelay(1);/* tvrefdqe */ + /* write vrefdq value */ + write_mr(dram, rank, 6, value | (1 << 7), dramtype); + udelay(1);/* tvref_time */ + write_mr(dram, rank, 6, value | (0 << 7), dramtype); + udelay(1);/* tvrefdqx */ + + /* restore zqcs */ + if (dis_auto_zq) + clrbits_le32(pctl_base + DDR_PCTL2_ZQCTL0, 1 << 31); + /* restore auto refresh */ + clrbits_le32(pctl_base + DDR_PCTL2_RFSHCTL3, 1); + update_refresh_reg(dram); + + return 0; +} + +#define _MAX_(x, y) ((x) > (y) ? (x) : (y)) + +static void rx_deskew_switch_adjust(struct dram_info *dram) +{ + u32 i, deskew_val; + u32 gate_val = 0; + void __iomem *phy_base = dram->phy; + + for (i = 0; i < 4; i++) + gate_val = _MAX_(readl(PHY_REG(phy_base, 0xfb + i)), gate_val); + + deskew_val = (gate_val >> 3) + 1; + deskew_val = (deskew_val > 0x1f) ? 
0x1f : deskew_val; + clrsetbits_le32(PHY_REG(phy_base, 0x6e), 0xc, (deskew_val & 0x3) << 2); + clrsetbits_le32(PHY_REG(phy_base, 0x6f), 0x7 << 4, + (deskew_val & 0x1c) << 2); +} + +#undef _MAX_ + +static void tx_deskew_switch_adjust(struct dram_info *dram) +{ + void __iomem *phy_base = dram->phy; + + clrsetbits_le32(PHY_REG(phy_base, 0x6e), 0x3, 1); +} + +static void set_ddrconfig(struct dram_info *dram, u32 ddrconfig) +{ + writel(ddrconfig, &dram->msch->ddrconf); +} + +static void dram_all_config(struct dram_info *dram, + struct rk3328_sdram_params *sdram_params) +{ + u32 sys_reg = 0, tmp = 0; + + set_ddrconfig(dram, sdram_ch.ddrconfig); + + sys_reg |= SYS_REG_ENC_DDRTYPE(sdram_params->dramtype); + sys_reg |= SYS_REG_ENC_ROW_3_4(sdram_ch.row_3_4, 0); + sys_reg |= SYS_REG_ENC_RANK(sdram_ch.rank, 0); + sys_reg |= SYS_REG_ENC_COL(sdram_ch.col, 0); + sys_reg |= SYS_REG_ENC_BK(sdram_ch.bk, 0); + SYS_REG_ENC_CS0_ROW(sdram_ch.cs0_row, sys_reg, tmp, 0); + if (sdram_ch.cs1_row) + SYS_REG_ENC_CS1_ROW(sdram_ch.cs1_row, sys_reg, tmp, 0); + sys_reg |= SYS_REG_ENC_BW(sdram_ch.bw, 0); + sys_reg |= SYS_REG_ENC_DBW(sdram_ch.dbw, 0); + + writel(sys_reg, &dram->grf->os_reg[2]); + + writel(sdram_ch.noc_timings.ddrtiming.d32, &dram->msch->ddrtiming); + + writel(sdram_ch.noc_timings.ddrmode.d32, &dram->msch->ddrmode); + writel(sdram_ch.noc_timings.readlatency, &dram->msch->readlatency); + + writel(sdram_ch.noc_timings.activate.d32, &dram->msch->activate); + writel(sdram_ch.noc_timings.devtodev.d32, &dram->msch->devtodev); + writel(sdram_ch.noc_timings.ddr4timing.d32, &dram->msch->ddr4_timing); + writel(sdram_ch.noc_timings.agingx0, &dram->msch->aging0); + writel(sdram_ch.noc_timings.agingx0, &dram->msch->aging1); + writel(sdram_ch.noc_timings.agingx0, &dram->msch->aging2); + writel(sdram_ch.noc_timings.agingx0, &dram->msch->aging3); + writel(sdram_ch.noc_timings.agingx0, &dram->msch->aging4); + writel(sdram_ch.noc_timings.agingx0, &dram->msch->aging5); +} + +static void enable_low_power(struct dram_info *dram, + struct rk3328_sdram_params *sdram_params) +{ + void __iomem *pctl_base = dram->pctl; + + /* enable upctl2 axi clock auto gating */ + writel(0x00800000, &dram->ddr_grf->ddr_grf_con[0]); + writel(0x20012001, &dram->ddr_grf->ddr_grf_con[2]); + /* enable upctl2 core clock auto gating */ + writel(0x001e001a, &dram->ddr_grf->ddr_grf_con[2]); + /* enable sr, pd */ + if (PD_IDLE == 0) + clrbits_le32(pctl_base + DDR_PCTL2_PWRCTL, (1 << 1)); + else + setbits_le32(pctl_base + DDR_PCTL2_PWRCTL, (1 << 1)); + if (SR_IDLE == 0) + clrbits_le32(pctl_base + DDR_PCTL2_PWRCTL, 1); + else + setbits_le32(pctl_base + DDR_PCTL2_PWRCTL, 1); + setbits_le32(pctl_base + DDR_PCTL2_PWRCTL, (1 << 3)); +} + +static int sdram_init(struct dram_info *dram, + struct rk3328_sdram_params *sdram_params, u32 pre_init) +{ + void __iomem *pctl_base = dram->pctl; + + rkclk_ddr_reset(dram, 1, 1, 1, 1); + udelay(10); + /* + * dereset ddr phy psrstn to config pll, + * if using phy pll psrstn must be dereset + * before config pll + */ + rkclk_ddr_reset(dram, 1, 1, 1, 0); + rkclk_configure_ddr(dram, sdram_params); + if (pre_init == 0) { + switch (sdram_params->dramtype) { + case DDR3: + printf("DDR3\n"); + break; + case DDR4: + printf("DDR4\n"); + break; + case LPDDR3: + default: + printf("LPDDR3\n"); + break; + } + } + /* release phy srst to provide clk to ctrl */ + rkclk_ddr_reset(dram, 1, 1, 0, 0); + udelay(10); + phy_soft_reset(dram); + /* release ctrl presetn, and config ctl registers */ + rkclk_ddr_reset(dram, 1, 0, 0, 0); + pctl_cfg(dram, 
sdram_params); + sdram_ch.ddrconfig = calculate_ddrconfig(sdram_params); + set_ctl_address_map(dram, sdram_params); + phy_cfg(dram, sdram_params); + + /* enable dfi_init_start to init phy after ctl srstn deassert */ + setbits_le32(pctl_base + DDR_PCTL2_DFIMISC, (1 << 5) | (1 << 4)); + rkclk_ddr_reset(dram, 0, 0, 0, 0); + /* wait for dfi_init_done and dram init complete */ + while ((readl(pctl_base + DDR_PCTL2_STAT) & 0x7) == 0) + continue; + + /* do ddr gate training */ + if (data_training(dram, 0, sdram_params->dramtype) != 0) { + printf("data training error\n"); + return -1; + } + + if (sdram_params->dramtype == DDR4) + write_vrefdq(dram, 0x3, 5670, sdram_params->dramtype); + + if (pre_init == 0) { + rx_deskew_switch_adjust(dram); + tx_deskew_switch_adjust(dram); + } + + dram_all_config(dram, sdram_params); + enable_low_power(dram, sdram_params); + + return 0; +} + +static u64 dram_detect_cap(struct dram_info *dram, + struct rk3328_sdram_params *sdram_params, + unsigned char channel) +{ + void __iomem *pctl_base = dram->pctl; + + /* + * for ddr3: ddrconf = 3 + * for ddr4: ddrconf = 12 + * for lpddr3: ddrconf = 3 + * default bw = 1 + */ + u32 bk, bktmp; + u32 col, coltmp; + u32 row, rowtmp, row_3_4; + void __iomem *test_addr, *test_addr1; + u32 dbw; + u32 cs; + u32 bw = 1; + u64 cap = 0; + u32 dram_type = sdram_params->dramtype; + u32 pwrctl; + + if (dram_type != DDR4) { + /* detect col and bk for ddr3/lpddr3 */ + coltmp = 12; + bktmp = 3; + rowtmp = 16; + + for (col = coltmp; col >= 9; col -= 1) { + writel(0, SDRAM_ADDR); + test_addr = (void __iomem *)(SDRAM_ADDR + + (1ul << (col + bw - 1ul))); + writel(PATTERN, test_addr); + if ((readl(test_addr) == PATTERN) && + (readl(SDRAM_ADDR) == 0)) + break; + } + if (col == 8) { + printf("col error\n"); + goto cap_err; + } + + test_addr = (void __iomem *)(SDRAM_ADDR + + (1ul << (coltmp + bktmp + bw - 1ul))); + writel(0, SDRAM_ADDR); + writel(PATTERN, test_addr); + if ((readl(test_addr) == PATTERN) && + (readl(SDRAM_ADDR) == 0)) + bk = 3; + else + bk = 2; + if (dram_type == LPDDR3) + dbw = 2; + else + dbw = 1; + } else { + /* detect bg for ddr4 */ + coltmp = 10; + bktmp = 4; + rowtmp = 17; + + col = 10; + bk = 2; + test_addr = (void __iomem *)(SDRAM_ADDR + + (1ul << (coltmp + bw + 1ul))); + writel(0, SDRAM_ADDR); + writel(PATTERN, test_addr); + if ((readl(test_addr) == PATTERN) && + (readl(SDRAM_ADDR) == 0)) + dbw = 0; + else + dbw = 1; + } + /* detect row */ + for (row = rowtmp; row > 12; row--) { + writel(0, SDRAM_ADDR); + test_addr = (void __iomem *)(SDRAM_ADDR + + (1ul << (row + bktmp + coltmp + bw - 1ul))); + writel(PATTERN, test_addr); + if ((readl(test_addr) == PATTERN) && + (readl(SDRAM_ADDR) == 0)) + break; + } + if (row == 12) { + printf("row error"); + goto cap_err; + } + /* detect row_3_4 */ + test_addr = SDRAM_ADDR; + test_addr1 = (void __iomem *)(SDRAM_ADDR + + (0x3ul << (row + bktmp + coltmp + bw - 1ul - 1ul))); + + writel(0, test_addr); + writel(PATTERN, test_addr1); + if ((readl(test_addr) == 0) && + (readl(test_addr1) == PATTERN)) + row_3_4 = 0; + else + row_3_4 = 1; + + /* disable auto low-power */ + pwrctl = readl(pctl_base + DDR_PCTL2_PWRCTL); + writel(0, pctl_base + DDR_PCTL2_PWRCTL); + + /* bw and cs detect using phy read gate training */ + if (data_training(dram, 1, dram_type) == 0) + cs = 1; + else + cs = 0; + + bw = 2; + + /* restore auto low-power */ + writel(pwrctl, pctl_base + DDR_PCTL2_PWRCTL); + + sdram_ch.rank = cs + 1; + sdram_ch.col = col; + sdram_ch.bk = bk; + sdram_ch.dbw = dbw; + sdram_ch.bw = bw; + 
sdram_ch.cs0_row = row; + if (cs) + sdram_ch.cs1_row = row; + else + sdram_ch.cs1_row = 0; + sdram_ch.row_3_4 = row_3_4; + + if (dram_type == DDR4) + cap = 1llu << (cs + row + bk + col + ((dbw == 0) ? 2 : 1) + bw); + else + cap = 1llu << (cs + row + bk + col + bw); + + return cap; + +cap_err: + return 0; +} + +static u32 remodify_sdram_params(struct rk3328_sdram_params *sdram_params) +{ + u32 tmp = 0, tmp_adr = 0, i; + + for (i = 0; sdram_params->pctl_regs.pctl[i][0] != 0xFFFFFFFF; i++) { + if (sdram_params->pctl_regs.pctl[i][0] == 0) { + tmp = sdram_params->pctl_regs.pctl[i][1];/* MSTR */ + tmp_adr = i; + } + } + + tmp &= ~((3ul << 30) | (3ul << 24) | (3ul << 12)); + + switch (sdram_ch.dbw) { + case 2: + tmp |= (3ul << 30); + break; + case 1: + tmp |= (2ul << 30); + break; + case 0: + default: + tmp |= (1ul << 30); + break; + } + + if (sdram_ch.rank == 2) + tmp |= 3 << 24; + else + tmp |= 1 << 24; + + tmp |= (2 - sdram_ch.bw) << 12; + + sdram_params->pctl_regs.pctl[tmp_adr][1] = tmp; + + if (sdram_ch.bw == 2) + sdram_ch.noc_timings.ddrtiming.b.bwratio = 0; + else + sdram_ch.noc_timings.ddrtiming.b.bwratio = 1; + + return 0; +} + +static int dram_detect_cs1_row(struct rk3328_sdram_params *sdram_params, + unsigned char channel) +{ + u32 ret = 0; + u32 cs1_bit; + void __iomem *test_addr, *cs1_addr; + u32 row, bktmp, coltmp, bw; + u32 ddrconf = sdram_ch.ddrconfig; + + if (sdram_ch.rank == 2) { + cs1_bit = addrmap[ddrconf][0] + 8; + + if (cs1_bit > 31) + goto out; + + cs1_addr = (void __iomem *)(1ul << cs1_bit); + if (cs1_bit < 20) + cs1_bit = 1; + else + cs1_bit = 0; + + if (sdram_params->dramtype == DDR4) { + if (sdram_ch.dbw == 0) + bktmp = sdram_ch.bk + 2; + else + bktmp = sdram_ch.bk + 1; + } else { + bktmp = sdram_ch.bk; + } + bw = sdram_ch.bw; + coltmp = sdram_ch.col; + + /* detect cs1 row */ + for (row = sdram_ch.cs0_row; row > 12; row--) { + test_addr = (void __iomem *)(SDRAM_ADDR + cs1_addr + + (1ul << (row + cs1_bit + bktmp + + coltmp + bw - 1ul))); + writel(0, SDRAM_ADDR + cs1_addr); + writel(PATTERN, test_addr); + if ((readl(test_addr) == PATTERN) && + (readl(SDRAM_ADDR + cs1_addr) == 0)) { + ret = row; + break; + } + } + } + +out: + return ret; +} + +static int sdram_init_detect(struct dram_info *dram, + struct rk3328_sdram_params *sdram_params) +{ + debug("Starting SDRAM initialization...\n"); + + memcpy(&sdram_ch, &sdram_params->ch, + sizeof(struct rk3328_sdram_channel)); + + sdram_init(dram, sdram_params, 1); + dram_detect_cap(dram, sdram_params, 0); + + /* modify bw, cs related timing */ + remodify_sdram_params(sdram_params); + /* reinit sdram by real dram cap */ + sdram_init(dram, sdram_params, 0); + + /* redetect cs1 row */ + sdram_ch.cs1_row = + dram_detect_cs1_row(sdram_params, 0); + + return 0; +} + +static int rk3328_dmc_init(struct udevice *dev) +{ + struct dram_info *priv = dev_get_priv(dev); + struct rockchip_dmc_plat *plat = dev_get_platdata(dev); + int ret; + +#if !CONFIG_IS_ENABLED(OF_PLATDATA) + struct rk3328_sdram_params *params = &plat->sdram_params; +#else + struct dtd_rockchip_rk3328_dmc *dtplat = &plat->dtplat; + struct rk3328_sdram_params *params = + (void *)dtplat->rockchip_sdram_params; + + ret = conv_of_platdata(dev); + if (ret) + return ret; +#endif + priv->phy = regmap_get_range(plat->map, 0); + priv->pctl = regmap_get_range(plat->map, 1); + priv->grf = regmap_get_range(plat->map, 2); + priv->cru = regmap_get_range(plat->map, 3); + priv->msch = regmap_get_range(plat->map, 4); + priv->ddr_grf = regmap_get_range(plat->map, 5); + + debug("%s phy %p pctrl 
%p grf %p cru %p msch %p ddr_grf %p\n", + __func__, priv->phy, priv->pctl, priv->grf, priv->cru, + priv->msch, priv->ddr_grf); + ret = sdram_init_detect(priv, params); + if (ret < 0) { + printf("%s DRAM init failed%d\n", __func__, ret); + return ret; + } + + return 0; +} + +static int rk3328_dmc_ofdata_to_platdata(struct udevice *dev) +{ +#if !CONFIG_IS_ENABLED(OF_PLATDATA) + struct rockchip_dmc_plat *plat = dev_get_platdata(dev); + int ret; + + ret = dev_read_u32_array(dev, "rockchip,sdram-params", + (u32 *)&plat->sdram_params, + sizeof(plat->sdram_params) / sizeof(u32)); + if (ret) { + printf("%s: Cannot read rockchip,sdram-params %d\n", + __func__, ret); + return ret; + } + ret = regmap_init_mem(dev, &plat->map); + if (ret) + printf("%s: regmap failed %d\n", __func__, ret); +#endif + return 0; +} + +#endif + static int rk3328_dmc_probe(struct udevice *dev) { +#ifdef CONFIG_TPL_BUILD + if (rk3328_dmc_init(dev)) + return 0; +#else struct dram_info *priv = dev_get_priv(dev); priv->grf = syscon_get_first_range(ROCKCHIP_SYSCON_GRF); @@ -25,7 +1032,7 @@ static int rk3328_dmc_probe(struct udevice *dev) priv->info.base = CONFIG_SYS_SDRAM_BASE; priv->info.size = rockchip_sdram_size( (phys_addr_t)&priv->grf->os_reg[2]); - +#endif return 0; } @@ -42,7 +1049,6 @@ static struct ram_ops rk3328_dmc_ops = { .get_info = rk3328_dmc_get_info, }; - static const struct udevice_id rk3328_dmc_ids[] = { { .compatible = "rockchip,rk3328-dmc" }, { } @@ -53,6 +1059,12 @@ U_BOOT_DRIVER(dmc_rk3328) = { .id = UCLASS_RAM, .of_match = rk3328_dmc_ids, .ops = &rk3328_dmc_ops, +#ifdef CONFIG_TPL_BUILD + .ofdata_to_platdata = rk3328_dmc_ofdata_to_platdata, +#endif .probe = rk3328_dmc_probe, .priv_auto_alloc_size = sizeof(struct dram_info), +#ifdef CONFIG_TPL_BUILD + .platdata_auto_alloc_size = sizeof(struct rockchip_dmc_plat), +#endif }; diff --git a/drivers/spi/ich.c b/drivers/spi/ich.c index 03531a8c0c..fbb58c783e 100644 --- a/drivers/spi/ich.c +++ b/drivers/spi/ich.c @@ -14,6 +14,8 @@ #include <pci_ids.h> #include <spi.h> #include <asm/io.h> +#include <spi-mem.h> +#include <div64.h> #include "ich.h" @@ -171,18 +173,6 @@ static int ich_init_controller(struct udevice *dev, return 0; } -static inline void spi_use_out(struct spi_trans *trans, unsigned bytes) -{ - trans->out += bytes; - trans->bytesout -= bytes; -} - -static inline void spi_use_in(struct spi_trans *trans, unsigned bytes) -{ - trans->in += bytes; - trans->bytesin -= bytes; -} - static void spi_lock_down(struct ich_spi_platdata *plat, void *sbase) { if (plat->ich_version == ICHV_7) { @@ -213,47 +203,12 @@ static bool spi_lock_status(struct ich_spi_platdata *plat, void *sbase) return lock != 0; } -static void spi_setup_type(struct spi_trans *trans, int data_bytes) -{ - trans->type = 0xFF; - - /* Try to guess spi type from read/write sizes */ - if (trans->bytesin == 0) { - if (trans->bytesout + data_bytes > 4) - /* - * If bytesin = 0 and bytesout > 4, we presume this is - * a write data operation, which is accompanied by an - * address. 
- */ - trans->type = SPI_OPCODE_TYPE_WRITE_WITH_ADDRESS; - else - trans->type = SPI_OPCODE_TYPE_WRITE_NO_ADDRESS; - return; - } - - if (trans->bytesout == 1) { /* and bytesin is > 0 */ - trans->type = SPI_OPCODE_TYPE_READ_NO_ADDRESS; - return; - } - - if (trans->bytesout == 4) /* and bytesin is > 0 */ - trans->type = SPI_OPCODE_TYPE_READ_WITH_ADDRESS; - - /* Fast read command is called with 5 bytes instead of 4 */ - if (trans->out[0] == SPI_OPCODE_FAST_READ && trans->bytesout == 5) { - trans->type = SPI_OPCODE_TYPE_READ_WITH_ADDRESS; - --trans->bytesout; - } -} - static int spi_setup_opcode(struct ich_spi_priv *ctlr, struct spi_trans *trans, bool lock) { uint16_t optypes; uint8_t opmenu[ctlr->menubytes]; - trans->opcode = trans->out[0]; - spi_use_out(trans, 1); if (!lock) { /* The lock is off, so just use index 0. */ ich_writeb(ctlr, trans->opcode, ctlr->opmenu); @@ -285,12 +240,7 @@ static int spi_setup_opcode(struct ich_spi_priv *ctlr, struct spi_trans *trans, optypes = ich_readw(ctlr, ctlr->optype); optype = (optypes >> (opcode_index * 2)) & 0x3; - if (trans->type == SPI_OPCODE_TYPE_WRITE_NO_ADDRESS && - optype == SPI_OPCODE_TYPE_WRITE_WITH_ADDRESS && - trans->bytesout >= 3) { - /* We guessed wrong earlier. Fix it up. */ - trans->type = optype; - } + if (optype != trans->type) { printf("ICH SPI: Transaction doesn't fit type %d\n", optype); @@ -300,26 +250,6 @@ static int spi_setup_opcode(struct ich_spi_priv *ctlr, struct spi_trans *trans, } } -static int spi_setup_offset(struct spi_trans *trans) -{ - /* Separate the SPI address and data */ - switch (trans->type) { - case SPI_OPCODE_TYPE_READ_NO_ADDRESS: - case SPI_OPCODE_TYPE_WRITE_NO_ADDRESS: - return 0; - case SPI_OPCODE_TYPE_READ_WITH_ADDRESS: - case SPI_OPCODE_TYPE_WRITE_WITH_ADDRESS: - trans->offset = ((uint32_t)trans->out[0] << 16) | - ((uint32_t)trans->out[1] << 8) | - ((uint32_t)trans->out[2] << 0); - spi_use_out(trans, 3); - return 1; - default: - printf("Unrecognized SPI transaction type %#x\n", trans->type); - return -EPROTO; - } -} - /* * Wait for up to 6s til status register bit(s) turn 1 (in case wait_til_set * below is true) or 0. 
In case the wait was for the bit(s) to set - write @@ -350,7 +280,7 @@ static int ich_status_poll(struct ich_spi_priv *ctlr, u16 bitmask, return -ETIMEDOUT; } -void ich_spi_config_opcode(struct udevice *dev) +static void ich_spi_config_opcode(struct udevice *dev) { struct ich_spi_priv *ctlr = dev_get_priv(dev); @@ -365,72 +295,48 @@ void ich_spi_config_opcode(struct udevice *dev) ich_writel(ctlr, SPI_OPMENU_UPPER, ctlr->opmenu + sizeof(u32)); } -static int ich_spi_xfer(struct udevice *dev, unsigned int bitlen, - const void *dout, void *din, unsigned long flags) +static int ich_spi_exec_op(struct spi_slave *slave, const struct spi_mem_op *op) { - struct udevice *bus = dev_get_parent(dev); + struct udevice *bus = dev_get_parent(slave->dev); struct ich_spi_platdata *plat = dev_get_platdata(bus); struct ich_spi_priv *ctlr = dev_get_priv(bus); uint16_t control; int16_t opcode_index; int with_address; int status; - int bytes = bitlen / 8; struct spi_trans *trans = &ctlr->trans; - unsigned type = flags & (SPI_XFER_BEGIN | SPI_XFER_END); - int using_cmd = 0; bool lock = spi_lock_status(plat, ctlr->base); - int ret; - - /* We don't support writing partial bytes */ - if (bitlen % 8) { - debug("ICH SPI: Accessing partial bytes not supported\n"); - return -EPROTONOSUPPORT; - } + int ret = 0; - /* An empty end transaction can be ignored */ - if (type == SPI_XFER_END && !dout && !din) - return 0; - - if (type & SPI_XFER_BEGIN) - memset(trans, '\0', sizeof(*trans)); + trans->in = NULL; + trans->out = NULL; + trans->type = 0xFF; - /* Dp we need to come back later to finish it? */ - if (dout && type == SPI_XFER_BEGIN) { - if (bytes > ICH_MAX_CMD_LEN) { - debug("ICH SPI: Command length limit exceeded\n"); - return -ENOSPC; + if (op->data.nbytes) { + if (op->data.dir == SPI_MEM_DATA_IN) { + trans->in = op->data.buf.in; + trans->bytesin = op->data.nbytes; + } else { + trans->out = op->data.buf.out; + trans->bytesout = op->data.nbytes; } - memcpy(trans->cmd, dout, bytes); - trans->cmd_len = bytes; - debug_trace("ICH SPI: Saved %d bytes\n", bytes); - return 0; } - /* - * We process a 'middle' spi_xfer() call, which has no - * SPI_XFER_BEGIN/END, as an independent transaction as if it had - * an end. We therefore repeat the command. This is because ICH - * seems to have no support for this, or because interest (in digging - * out the details and creating a special case in the code) is low. - */ - if (trans->cmd_len) { - trans->out = trans->cmd; - trans->bytesout = trans->cmd_len; - using_cmd = 1; - debug_trace("ICH SPI: Using %d bytes\n", trans->cmd_len); - } else { - trans->out = dout; - trans->bytesout = dout ? bytes : 0; - } + if (trans->opcode != op->cmd.opcode) + trans->opcode = op->cmd.opcode; - trans->in = din; - trans->bytesin = din ? bytes : 0; + if (lock && trans->opcode == SPI_OPCODE_WRDIS) + return 0; - /* There has to always at least be an opcode */ - if (!trans->bytesout) { - debug("ICH SPI: No opcode for transfer\n"); - return -EPROTO; + if (trans->opcode == SPI_OPCODE_WREN) { + /* + * Treat Write Enable as Atomic Pre-Op if possible + * in order to prevent the Management Engine from + * issuing a transaction between WREN and DATA. + */ + if (!lock) + ich_writew(ctlr, trans->opcode, ctlr->preop); + return 0; } ret = ich_status_poll(ctlr, SPIS_SCIP, 0); @@ -442,23 +348,29 @@ static int ich_spi_xfer(struct udevice *dev, unsigned int bitlen, else ich_writeb(ctlr, SPIS_CDS | SPIS_FCERR, ctlr->status); - spi_setup_type(trans, using_cmd ? 
bytes : 0); + /* Try to guess spi transaction type */ + if (op->data.dir == SPI_MEM_DATA_OUT) { + if (op->addr.nbytes) + trans->type = SPI_OPCODE_TYPE_WRITE_WITH_ADDRESS; + else + trans->type = SPI_OPCODE_TYPE_WRITE_NO_ADDRESS; + } else { + if (op->addr.nbytes) + trans->type = SPI_OPCODE_TYPE_READ_WITH_ADDRESS; + else + trans->type = SPI_OPCODE_TYPE_READ_NO_ADDRESS; + } + /* Special erase case handling */ + if (op->addr.nbytes && !op->data.buswidth) + trans->type = SPI_OPCODE_TYPE_WRITE_WITH_ADDRESS; + opcode_index = spi_setup_opcode(ctlr, trans, lock); if (opcode_index < 0) return -EINVAL; - with_address = spi_setup_offset(trans); - if (with_address < 0) - return -EINVAL; - if (trans->opcode == SPI_OPCODE_WREN) { - /* - * Treat Write Enable as Atomic Pre-Op if possible - * in order to prevent the Management Engine from - * issuing a transaction between WREN and DATA. - */ - if (!lock) - ich_writew(ctlr, trans->opcode, ctlr->preop); - return 0; + if (op->addr.nbytes) { + trans->offset = op->addr.val; + with_address = 1; } if (ctlr->speed && ctlr->max_speed >= 33000000) { @@ -472,13 +384,6 @@ static int ich_spi_xfer(struct udevice *dev, unsigned int bitlen, ich_writeb(ctlr, byte, ctlr->speed); } - /* See if we have used up the command data */ - if (using_cmd && dout && bytes) { - trans->out = dout; - trans->bytesout = bytes; - debug_trace("ICH SPI: Moving to data, %d bytes\n", bytes); - } - /* Preset control fields */ control = SPIC_SCGO | ((opcode_index & 0x07) << 4); @@ -513,22 +418,6 @@ static int ich_spi_xfer(struct udevice *dev, unsigned int bitlen, return 0; } - /* - * Check if this is a write command atempting to transfer more bytes - * than the controller can handle. Iterations for writes are not - * supported here because each SPI write command needs to be preceded - * and followed by other SPI commands, and this sequence is controlled - * by the SPI chip driver. - */ - if (trans->bytesout > ctlr->databytes) { - debug("ICH SPI: Too much to write. This should be prevented by the driver's max_write_size?\n"); - return -EPROTO; - } - - /* - * Read or write up to databytes bytes at a time until everything has - * been sent. 
- */ while (trans->bytesout || trans->bytesin) { uint32_t data_length; @@ -543,9 +432,7 @@ static int ich_spi_xfer(struct udevice *dev, unsigned int bitlen, /* Program data into FDATA0 to N */ if (trans->bytesout) { write_reg(ctlr, trans->out, ctlr->data, data_length); - spi_use_out(trans, data_length); - if (with_address) - trans->offset += data_length; + trans->bytesout -= data_length; } /* Add proper control fields' values */ @@ -568,9 +455,7 @@ static int ich_spi_xfer(struct udevice *dev, unsigned int bitlen, if (trans->bytesin) { read_reg(ctlr, ctlr->data, trans->in, data_length); - spi_use_in(trans, data_length); - if (with_address) - trans->offset += data_length; + trans->bytesin -= data_length; } } @@ -581,6 +466,40 @@ static int ich_spi_xfer(struct udevice *dev, unsigned int bitlen, return 0; } +static int ich_spi_adjust_size(struct spi_slave *slave, struct spi_mem_op *op) +{ + unsigned int page_offset; + int addr = op->addr.val; + unsigned int byte_count = op->data.nbytes; + + if (hweight32(ICH_BOUNDARY) == 1) { + page_offset = addr & (ICH_BOUNDARY - 1); + } else { + u64 aux = addr; + + page_offset = do_div(aux, ICH_BOUNDARY); + } + + if (op->data.dir == SPI_MEM_DATA_IN && slave->max_read_size) { + op->data.nbytes = min(ICH_BOUNDARY - page_offset, + slave->max_read_size); + } else if (slave->max_write_size) { + op->data.nbytes = min(ICH_BOUNDARY - page_offset, + slave->max_write_size); + } + + op->data.nbytes = min(op->data.nbytes, byte_count); + + return 0; +} + +static int ich_spi_xfer(struct udevice *dev, unsigned int bitlen, + const void *dout, void *din, unsigned long flags) +{ + printf("ICH SPI: Only supports memory operations\n"); + return -1; +} + static int ich_spi_probe(struct udevice *dev) { struct ich_spi_platdata *plat = dev_get_platdata(dev); @@ -686,10 +605,17 @@ static int ich_spi_ofdata_to_platdata(struct udevice *dev) return ret; } +static const struct spi_controller_mem_ops ich_controller_mem_ops = { + .adjust_op_size = ich_spi_adjust_size, + .supports_op = NULL, + .exec_op = ich_spi_exec_op, +}; + static const struct dm_spi_ops ich_spi_ops = { .xfer = ich_spi_xfer, .set_speed = ich_spi_set_speed, .set_mode = ich_spi_set_mode, + .mem_ops = &ich_controller_mem_ops, /* * cs_info is not needed, since we require all chip selects to be * in the device tree explicitly diff --git a/drivers/spi/ich.h b/drivers/spi/ich.h index a974241f98..3dfb2aaff1 100644 --- a/drivers/spi/ich.h +++ b/drivers/spi/ich.h @@ -100,13 +100,8 @@ enum { HSFC_FSMIE = 0x8000 }; -enum { - ICH_MAX_CMD_LEN = 5, -}; - struct spi_trans { - uint8_t cmd[ICH_MAX_CMD_LEN]; - int cmd_len; + uint8_t cmd; const uint8_t *out; uint32_t bytesout; uint8_t *in; @@ -166,6 +161,8 @@ struct spi_trans { #define SPI_OPMENU_LOWER ((SPI_OPMENU_3 << 24) | (SPI_OPMENU_2 << 16) | \ (SPI_OPMENU_1 << 8) | (SPI_OPMENU_0 << 0)) +#define ICH_BOUNDARY 0x1000 + enum ich_version { ICHV_7, ICHV_9, diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig index ccda432f49..1e5d14c4ff 100644 --- a/drivers/watchdog/Kconfig +++ b/drivers/watchdog/Kconfig @@ -17,15 +17,6 @@ config WATCHDOG_RESET_DISABLE Disable reset watchdog, which can let WATCHDOG_RESET invalid, so that the watchdog will not be fed in u-boot. -config BCM2835_WDT - bool "Enable BCM2835/2836 watchdog driver" - select HW_WATCHDOG - help - Say Y here to enable the BCM2835/2836 watchdog - - This provides basic infrastructure to support BCM2835/2836 watchdog - hardware, with a max timeout of ~15secs. 
- config IMX_WATCHDOG bool "Enable Watchdog Timer support for IMX and LSCH2 of NXP" select HW_WATCHDOG if !WDT diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile index 97aa6a836c..414ba2430a 100644 --- a/drivers/watchdog/Makefile +++ b/drivers/watchdog/Makefile @@ -20,7 +20,6 @@ obj-$(CONFIG_WDT_SANDBOX) += sandbox_wdt.o obj-$(CONFIG_WDT_ARMADA_37XX) += armada-37xx-wdt.o obj-$(CONFIG_WDT_ASPEED) += ast_wdt.o obj-$(CONFIG_WDT_BCM6345) += bcm6345_wdt.o -obj-$(CONFIG_BCM2835_WDT) += bcm2835_wdt.o obj-$(CONFIG_WDT_ORION) += orion_wdt.o obj-$(CONFIG_WDT_CDNS) += cdns_wdt.o obj-$(CONFIG_WDT_MPC8xx) += mpc8xx_wdt.o diff --git a/drivers/watchdog/bcm2835_wdt.c b/drivers/watchdog/bcm2835_wdt.c deleted file mode 100644 index 6cffcb15e0..0000000000 --- a/drivers/watchdog/bcm2835_wdt.c +++ /dev/null @@ -1,34 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Watchdog driver for Broadcom BCM2835 - * - * Copyright (C) 2017 Paolo Pisati <p.pisati@gmail.com> - */ - -#include <common.h> -#include <efi_loader.h> -#include <asm/io.h> -#include <asm/arch/wdog.h> - -#define SECS_TO_WDOG_TICKS(x) ((x) << 16) -#define MAX_TIMEOUT 0xf /* ~15s */ - -static __efi_runtime_data bool enabled = true; - -extern void reset_cpu(ulong ticks); - -void hw_watchdog_reset(void) -{ - if (enabled) - reset_cpu(SECS_TO_WDOG_TICKS(MAX_TIMEOUT)); -} - -void hw_watchdog_init(void) -{ - hw_watchdog_reset(); -} - -void __efi_runtime hw_watchdog_disable(void) -{ - enabled = false; -} diff --git a/drivers/watchdog/mtk_wdt.c b/drivers/watchdog/mtk_wdt.c index a7d4c7a3b8..dafd2b56a8 100644 --- a/drivers/watchdog/mtk_wdt.c +++ b/drivers/watchdog/mtk_wdt.c @@ -70,9 +70,12 @@ static int mtk_wdt_expire_now(struct udevice *dev, ulong flags) return 0; } -static void mtk_wdt_set_timeout(struct udevice *dev, unsigned int timeout_ms) +static void mtk_wdt_set_timeout(struct udevice *dev, u64 timeout_ms) { struct mtk_wdt_priv *priv = dev_get_priv(dev); + u64 timeout_us; + u32 timeout_cc; + u32 length; /* * One WDT_LENGTH count is 512 ticks of the wdt clock @@ -88,21 +91,25 @@ static void mtk_wdt_set_timeout(struct udevice *dev, unsigned int timeout_ms) * The MediaTek docs lack details to know if this is the case here. * So we enforce a minimum of 1 to guarantee operation. */ - if(timeout_ms > 15984) timeout_ms = 15984; - u64 timeout_us = timeout_ms * 1000; - u32 timeout_cc = (u32) ( (15624 + timeout_us) / 15625 ); - if(timeout_cc == 0) timeout_cc = 1; - u32 length = WDT_LENGTH_TIMEOUT(timeout_cc) | WDT_LENGTH_KEY; + if (timeout_ms > 15984) + timeout_ms = 15984; + + timeout_us = timeout_ms * 1000; + timeout_cc = (15624 + timeout_us) / 15625; + if (timeout_cc == 0) + timeout_cc = 1; + + length = WDT_LENGTH_TIMEOUT(timeout_cc) | WDT_LENGTH_KEY; writel(length, priv->base + MTK_WDT_LENGTH); } -static int mtk_wdt_start(struct udevice *dev, u64 timeout, ulong flags) +static int mtk_wdt_start(struct udevice *dev, u64 timeout_ms, ulong flags) { struct mtk_wdt_priv *priv = dev_get_priv(dev); - mtk_wdt_set_timeout(dev, timeout); + mtk_wdt_set_timeout(dev, timeout_ms); - mtk_wdt_reset(dev); + mtk_wdt_reset(dev); /* Enable watchdog reset signal */ setbits_le32(priv->base + MTK_WDT_MODE, |