ddr: marvell: a38x: Add support for DDR4 from Marvell mv-ddr-marvell repository

This syncs drivers/ddr/marvell/a38x/ with the master branch of the repository
https://github.com/MarvellEmbeddedProcessors/mv-ddr-marvell.git

up to the commit "mv_ddr: a3700: Use the right size for memset to not overflow"
d5acc10c287e40cc2feeb28710b92e45c93c702c

This patch was created by the following steps:

	1. Replace all a38x files in the U-Boot tree with the files from the
	upstream github Marvell mv-ddr-marvell repository.

	2. Run the following command to omit portions not relevant for a38x,
	ddr3, and ddr4 (see the sketch after this list):

	files=drivers/ddr/marvell/a38x/*
	unifdef -m -UMV_DDR -UMV_DDR_ATF -UCONFIG_APN806 \
		-UCONFIG_MC_STATIC -UCONFIG_MC_STATIC_PRINT -UCONFIG_PHY_STATIC \
		-UCONFIG_PHY_STATIC_PRINT -UCONFIG_CUSTOMER_BOARD_SUPPORT \
		-UCONFIG_A3700 -UA3900 -UA80X0 -UA70X0 -DCONFIG_ARMADA_38X -UCONFIG_ARMADA_39X \
		-UCONFIG_64BIT $files

	3. Manually change the license to SPDX-License-Identifier
	(the upstream github repository contains the long license
	texts, while U-Boot uses just the SPDX-License-Identifier).
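
To illustrate step 2 (a minimal sketch; example.c is a made-up input
file): unifdef -U drops the branches guarded by those symbols, -D keeps
the branches guarded by the defined symbol, and -m rewrites each file in
place. Symbols that are neither -D'd nor -U'd, such as CONFIG_DDR4 here,
are left untouched, which is why the resulting files still carry their
#if defined(CONFIG_DDR4) blocks:

	$ cat example.c
	#if defined(CONFIG_ARMADA_38X)
	int a38x_code;
	#else
	int other_code;
	#endif
	#if defined(CONFIG_DDR4)
	int ddr4_code;
	#endif
	$ unifdef -m -DCONFIG_ARMADA_38X -UCONFIG_ARMADA_39X example.c
	$ cat example.c
	int a38x_code;
	#if defined(CONFIG_DDR4)
	int ddr4_code;
	#endif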

After applying this patch, the a38x, ddr3, and ddr4 code in the upstream
Marvell github repository and in U-Boot are fully identical, so in the
future the above steps can be used to sync the code again.
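
A rough sketch of such a future sync (hypothetical paths: it assumes
mv-ddr-marvell is cloned next to the U-Boot tree, and any U-Boot-only
files in the a38x directory would have to be excluded from the loop):

	git clone https://github.com/MarvellEmbeddedProcessors/mv-ddr-marvell.git
	cd u-boot/drivers/ddr/marvell/a38x
	# pull in the upstream version of each file present locally
	for f in *.c *.h; do
		cp "../../../../../mv-ddr-marvell/$f" "$f"
	done
	# then re-run the unifdef command from step 2
	# and re-apply the SPDX-License-Identifier change from step 3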

The only changes in this patch are:
	1. Some fixes to include files.
	2. Some changes to function return types and basic type defines
	in mv_ddr_plat.c (to correct a Marvell bug).
	3. Removal of dead code in the newly copied files (a result of
	the filter script stripping out everything other than a38x,
	ddr3, and ddr4).

Reference:
    "ddr: marvell: a38x: Sync code with Marvell mv-ddr-marvell repository"
    107c3391b9

Signed-off-by: Tony Dinh <mibodhi@gmail.com>
Reviewed-by: Pali Rohár <pali@kernel.org>
Reviewed-by: Stefan Roese <sr@denx.de>

@@ -25,7 +25,11 @@ u32 *dq_map_table = NULL;
/* in case of ddr4 do not run ddr3_tip_write_additional_odt_setting function - mc odt always 'on'
* in ddr4 case the terminations are rttWR and rttPARK and the odt must be always 'on' 0x1498 = 0xf
*/
#if defined(CONFIG_DDR4)
u32 odt_config = 0;
#else
u32 odt_config = 1;
#endif
u32 nominal_avs;
u32 extension_avs;
@@ -85,7 +89,11 @@ u32 mask_tune_func = (SET_MEDIUM_FREQ_MASK_BIT |
READ_LEVELING_MASK_BIT |
SET_TARGET_FREQ_MASK_BIT |
WRITE_LEVELING_TF_MASK_BIT |
#if defined(CONFIG_DDR4)
SW_READ_LEVELING_MASK_BIT |
#else /* CONFIG_DDR4 */
READ_LEVELING_TF_MASK_BIT |
#endif /* CONFIG_DDR4 */
CENTRALIZATION_RX_MASK_BIT |
CENTRALIZATION_TX_MASK_BIT);
@@ -102,6 +110,10 @@ int adll_calibration(u32 dev_num, enum hws_access_type access_type,
u32 if_id, enum mv_ddr_freq frequency);
static int ddr3_tip_set_timing(u32 dev_num, enum hws_access_type access_type,
u32 if_id, enum mv_ddr_freq frequency);
#if defined(CONFIG_DDR4)
static int ddr4_tip_set_timing(u32 dev_num, enum hws_access_type access_type,
u32 if_id, enum mv_ddr_freq frequency);
#endif /* CONFIG_DDR4 */
static u8 mem_size_config[MV_DDR_DIE_CAP_LAST] = {
0x2, /* 512Mbit */
@@ -173,12 +185,24 @@ static struct reg_data odpg_default_value[] = {
};
/* MR cmd and addr definitions */
#if defined(CONFIG_DDR4)
struct mv_ddr_mr_data mr_data[] = {
{MRS0_CMD, DDR4_MR0_REG},
{MRS1_CMD, DDR4_MR1_REG},
{MRS2_CMD, DDR4_MR2_REG},
{MRS3_CMD, DDR4_MR3_REG},
{MRS4_CMD, DDR4_MR4_REG},
{MRS5_CMD, DDR4_MR5_REG},
{MRS6_CMD, DDR4_MR6_REG}
};
#else
struct mv_ddr_mr_data mr_data[] = {
{MRS0_CMD, MR0_REG},
{MRS1_CMD, MR1_REG},
{MRS2_CMD, MR2_REG},
{MRS3_CMD, MR3_REG}
};
#endif
/* inverse pads */
static int ddr3_tip_pad_inv(void)
@@ -664,6 +688,11 @@ int hws_ddr3_tip_init_controller(u32 dev_num, struct init_cntr_param *init_cntr_
calibration_update_control << 3, 0x3 << 3));
}
#if defined(CONFIG_DDR4)
/* dev_num, vref_en, pod_only */
CHECK_STATUS(mv_ddr4_mode_regs_init(dev_num));
CHECK_STATUS(mv_ddr4_sdram_config(dev_num));
#endif /* CONFIG_DDR4 */
if (delay_enable != 0) {
adll_tap = MEGA / (mv_ddr_freq_get(freq) * 64);
@@ -1325,6 +1354,20 @@ int ddr3_tip_freq_set(u32 dev_num, enum hws_access_type access_type,
/* disable ODT in case of dll off */
if (is_dll_off == 1) {
#if defined(CONFIG_DDR4)
CHECK_STATUS(ddr3_tip_if_read
(dev_num, access_type, PARAM_NOT_CARE,
0x1974, &g_rtt_nom_cs0, MASK_ALL_BITS));
CHECK_STATUS(ddr3_tip_if_write
(dev_num, access_type, if_id,
0x1974, 0, (0x7 << 8)));
CHECK_STATUS(ddr3_tip_if_read
(dev_num, access_type, PARAM_NOT_CARE,
0x1A74, &g_rtt_nom_cs1, MASK_ALL_BITS));
CHECK_STATUS(ddr3_tip_if_write
(dev_num, access_type, if_id,
0x1A74, 0, (0x7 << 8)));
#else /* CONFIG_DDR4 */
CHECK_STATUS(ddr3_tip_if_write
(dev_num, access_type, if_id,
0x1874, 0, 0x244));
@@ -1337,6 +1380,7 @@ int ddr3_tip_freq_set(u32 dev_num, enum hws_access_type access_type,
CHECK_STATUS(ddr3_tip_if_write
(dev_num, access_type, if_id,
0x18a4, 0, 0x244));
#endif /* CONFIG_DDR4 */
}
/* DFS - Enter Self-Refresh */
@@ -1404,6 +1448,16 @@ int ddr3_tip_freq_set(u32 dev_num, enum hws_access_type access_type,
/* Restore original RTT values if returning from DLL OFF mode */
if (is_dll_off == 1) {
#if defined(CONFIG_DDR4)
CHECK_STATUS(ddr3_tip_if_write
(dev_num, access_type, if_id,
0x1974, g_rtt_nom_cs0, (0x7 << 8)));
CHECK_STATUS(ddr3_tip_if_write
(dev_num, access_type, if_id,
0x1A74, g_rtt_nom_cs1, (0x7 << 8)));
mv_ddr4_mode_regs_init(dev_num);
#else /* CONFIG_DDR4 */
CHECK_STATUS(ddr3_tip_if_write
(dev_num, access_type, if_id, 0x1874,
g_dic | g_rtt_nom, 0x266));
@@ -1416,6 +1470,7 @@ int ddr3_tip_freq_set(u32 dev_num, enum hws_access_type access_type,
CHECK_STATUS(ddr3_tip_if_write
(dev_num, access_type, if_id, 0x18a4,
g_dic | g_rtt_nom, 0x266));
#endif /* CONFIG_DDR4 */
}
/* Reset divider_b assert -> de-assert */
@@ -1669,8 +1724,13 @@ static int ddr3_tip_set_timing(u32 dev_num, enum hws_access_type access_type,
t_rtp = GET_MAX_VALUE(t_ckclk * 4, mv_ddr_speed_bin_timing_get(speed_bin_index,
SPEED_BIN_TRTP));
t_mod = GET_MAX_VALUE(t_ckclk * 12, 15000);
#if defined(CONFIG_DDR4)
t_wtr = GET_MAX_VALUE(t_ckclk * 2, mv_ddr_speed_bin_timing_get(speed_bin_index,
SPEED_BIN_TWTR));
#else /* CONFIG_DDR4 */
t_wtr = GET_MAX_VALUE(t_ckclk * 4, mv_ddr_speed_bin_timing_get(speed_bin_index,
SPEED_BIN_TWTR));
#endif /* CONFIG_DDR4 */
t_ras = time_to_nclk(mv_ddr_speed_bin_timing_get(speed_bin_index,
SPEED_BIN_TRAS),
t_ckclk);
@@ -1758,10 +1818,70 @@ static int ddr3_tip_set_timing(u32 dev_num, enum hws_access_type access_type,
DDR_TIMING_TPD_MASK << DDR_TIMING_TPD_OFFS |
DDR_TIMING_TXPDLL_MASK << DDR_TIMING_TXPDLL_OFFS));
#if defined(CONFIG_DDR4)
ddr4_tip_set_timing(dev_num, access_type, if_id, frequency);
#endif /* CONFIG_DDR4 */
return MV_OK;
}
#if defined(CONFIG_DDR4)
static int ddr4_tip_set_timing(u32 dev_num, enum hws_access_type access_type,
u32 if_id, enum mv_ddr_freq frequency)
{
u32 t_rrd_l = 0, t_wtr_l = 0, t_ckclk = 0, t_mod = 0, t_ccd = 0;
u32 page_size = 0, val = 0, mask = 0;
enum mv_ddr_speed_bin speed_bin_index;
enum mv_ddr_die_capacity memory_size;
struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
u32 freq = mv_ddr_freq_get(frequency);
speed_bin_index = tm->interface_params[if_id].speed_bin_index;
memory_size = tm->interface_params[if_id].memory_size;
page_size = mv_ddr_page_size_get(tm->interface_params[if_id].bus_width, memory_size);
t_ckclk = (MEGA / freq);
t_rrd_l = (page_size == 1) ? mv_ddr_speed_bin_timing_get(speed_bin_index, SPEED_BIN_TRRDL1K) :
mv_ddr_speed_bin_timing_get(speed_bin_index, SPEED_BIN_TRRDL2K);
t_rrd_l = GET_MAX_VALUE(t_ckclk * 4, t_rrd_l);
t_wtr_l = mv_ddr_speed_bin_timing_get(speed_bin_index, SPEED_BIN_TWTRL);
t_wtr_l = GET_MAX_VALUE(t_ckclk * 4, t_wtr_l);
t_rrd_l = time_to_nclk(t_rrd_l, t_ckclk);
t_wtr_l = time_to_nclk(t_wtr_l, t_ckclk);
val = (((t_rrd_l - 1) & DDR4_TRRD_L_MASK) << DDR4_TRRD_L_OFFS) |
(((t_wtr_l - 1) & DDR4_TWTR_L_MASK) << DDR4_TWTR_L_OFFS);
mask = (DDR4_TRRD_L_MASK << DDR4_TRRD_L_OFFS) |
(DDR4_TWTR_L_MASK << DDR4_TWTR_L_OFFS);
CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, if_id,
DRAM_LONG_TIMING_REG, val, mask));
val = 0;
mask = 0;
t_mod = mv_ddr_speed_bin_timing_get(speed_bin_index, SPEED_BIN_TMOD);
t_mod = GET_MAX_VALUE(t_ckclk * 24, t_mod);
t_mod = time_to_nclk(t_mod, t_ckclk);
val = (((t_mod - 1) & SDRAM_TIMING_HIGH_TMOD_MASK) << SDRAM_TIMING_HIGH_TMOD_OFFS) |
((((t_mod - 1) >> 4) & SDRAM_TIMING_HIGH_TMOD_HIGH_MASK) << SDRAM_TIMING_HIGH_TMOD_HIGH_OFFS);
mask = (SDRAM_TIMING_HIGH_TMOD_MASK << SDRAM_TIMING_HIGH_TMOD_OFFS) |
(SDRAM_TIMING_HIGH_TMOD_HIGH_MASK << SDRAM_TIMING_HIGH_TMOD_HIGH_OFFS);
CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, if_id,
SDRAM_TIMING_HIGH_REG, val, mask));
t_ccd = 6;
CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, if_id,
DDR_TIMING_REG,
((t_ccd - 1) & DDR_TIMING_TCCD_MASK) << DDR_TIMING_TCCD_OFFS,
DDR_TIMING_TCCD_MASK << DDR_TIMING_TCCD_OFFS));
return MV_OK;
}
#endif /* CONFIG_DDR4 */
/*
* Write CS Result
@@ -2245,6 +2365,7 @@ static int ddr3_tip_ddr3_training_main_flow(u32 dev_num)
}
}
#if !defined(CONFIG_DDR4)
for (effective_cs = 0; effective_cs < max_cs; effective_cs++) {
if (mask_tune_func & PBS_RX_MASK_BIT) {
training_stage = PBS_RX;
@@ -2284,6 +2405,7 @@ static int ddr3_tip_ddr3_training_main_flow(u32 dev_num)
}
/* Set to 0 after each loop to avoid illegal value may be used */
effective_cs = 0;
#endif /* CONFIG_DDR4 */
if (mask_tune_func & SET_TARGET_FREQ_MASK_BIT) {
training_stage = SET_TARGET_FREQ;
@@ -2367,6 +2489,7 @@ static int ddr3_tip_ddr3_training_main_flow(u32 dev_num)
}
}
#if !defined(CONFIG_DDR4)
if (mask_tune_func & DM_PBS_TX_MASK_BIT) {
DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO, ("DM_PBS_TX_MASK_BIT\n"));
}
@@ -2412,6 +2535,7 @@ static int ddr3_tip_ddr3_training_main_flow(u32 dev_num)
}
/* Set to 0 after each loop to avoid illegal value may be used */
effective_cs = 0;
#endif /* CONFIG_DDR4 */
for (effective_cs = 0; effective_cs < max_cs; effective_cs++) {
if (mask_tune_func & WRITE_LEVELING_SUPP_TF_MASK_BIT) {
@@ -2434,7 +2558,12 @@ static int ddr3_tip_ddr3_training_main_flow(u32 dev_num)
/* Set to 0 after each loop to avoid illegal value may be used */
effective_cs = 0;
#if defined(CONFIG_DDR4)
for (effective_cs = 0; effective_cs < max_cs; effective_cs++)
CHECK_STATUS(mv_ddr4_training_main_flow(dev_num));
#endif /* CONFIG_DDR4 */
#if !defined(CONFIG_DDR4)
for (effective_cs = 0; effective_cs < max_cs; effective_cs++) {
if (mask_tune_func & CENTRALIZATION_TX_MASK_BIT) {
training_stage = CENTRALIZATION_TX;
@@ -2455,6 +2584,7 @@ static int ddr3_tip_ddr3_training_main_flow(u32 dev_num)
}
/* Set to 0 after each loop to avoid illegal value may be used */
effective_cs = 0;
#endif /* CONFIG_DDR4 */
DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO, ("restore registers to default\n"));
/* restore register values */
@@ -2895,3 +3025,4 @@ unsigned int mv_ddr_misl_phy_odt_n_get(void)
return odt_n;
}