From 4a5d8898bca3e442b61e34b811aec8332752efd3 Mon Sep 17 00:00:00 2001
From: Paul Burton <paul.burton@imgtec.com>
Date: Thu, 29 Jan 2015 01:27:58 +0000
Subject: MIPS: unify cache initialization code

The mips32 & mips64 cache initialization code differs only in that the
mips32 code supports reading the cache size from coprocessor 0
registers at runtime. Move the more developed mips32 version to a
common arch/mips/lib/cache_init.S & remove the now-redundant mips64
version in order to reduce duplication. The temporary registers used
are shuffled slightly in order to work for both mips32 & mips64 builds.
The RA register is defined differently to suit mips32 & mips64, but
will be removed by a later commit in the series after further cleanup.

Signed-off-by: Paul Burton <paul.burton@imgtec.com>
Cc: Daniel Schwierzeck <daniel.schwierzeck@gmail.com>
---
 arch/mips/lib/cache_init.S | 277 +++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 277 insertions(+)
 create mode 100644 arch/mips/lib/cache_init.S

diff --git a/arch/mips/lib/cache_init.S b/arch/mips/lib/cache_init.S
new file mode 100644
index 0000000000..6c02bf9b53
--- /dev/null
+++ b/arch/mips/lib/cache_init.S
@@ -0,0 +1,277 @@
+/*
+ * Cache-handling routines for MIPS CPUs
+ *
+ * Copyright (c) 2003 Wolfgang Denk <wd@denx.de>
+ *
+ * SPDX-License-Identifier: GPL-2.0+
+ */
+
+#include <asm-offsets.h>
+#include <config.h>
+#include <asm/asm.h>
+#include <asm/regdef.h>
+#include <asm/mipsregs.h>
+#include <asm/addrspace.h>
+#include <asm/cacheops.h>
+
+#ifndef CONFIG_SYS_MIPS_CACHE_MODE
+#define CONFIG_SYS_MIPS_CACHE_MODE CONF_CM_CACHABLE_NONCOHERENT
+#endif
+
+#ifdef CONFIG_64BIT
+# define RA ta3
+#else
+# define RA t7
+#endif
+
+#define INDEX_BASE CKSEG0
+
+	.macro f_fill64 dst, offset, val
+	LONG_S \val, (\offset + 0 * LONGSIZE)(\dst)
+	LONG_S \val, (\offset + 1 * LONGSIZE)(\dst)
+	LONG_S \val, (\offset + 2 * LONGSIZE)(\dst)
+	LONG_S \val, (\offset + 3 * LONGSIZE)(\dst)
+	LONG_S \val, (\offset + 4 * LONGSIZE)(\dst)
+	LONG_S \val, (\offset + 5 * LONGSIZE)(\dst)
+	LONG_S \val, (\offset + 6 * LONGSIZE)(\dst)
+	LONG_S \val, (\offset + 7 * LONGSIZE)(\dst)
+#if LONGSIZE == 4
+	LONG_S \val, (\offset + 8 * LONGSIZE)(\dst)
+	LONG_S \val, (\offset + 9 * LONGSIZE)(\dst)
+	LONG_S \val, (\offset + 10 * LONGSIZE)(\dst)
+	LONG_S \val, (\offset + 11 * LONGSIZE)(\dst)
+	LONG_S \val, (\offset + 12 * LONGSIZE)(\dst)
+	LONG_S \val, (\offset + 13 * LONGSIZE)(\dst)
+	LONG_S \val, (\offset + 14 * LONGSIZE)(\dst)
+	LONG_S \val, (\offset + 15 * LONGSIZE)(\dst)
+#endif
+	.endm
+
+/*
+ * mips_init_icache(uint PRId, ulong icache_size, unchar icache_linesz)
+ */
+LEAF(mips_init_icache)
+	blez a1, 9f
+	mtc0 zero, CP0_TAGLO
+	/* clear tag to invalidate */
+	PTR_LI t0, INDEX_BASE
+	PTR_ADDU t1, t0, a1
+1:	cache INDEX_STORE_TAG_I, 0(t0)
+	PTR_ADDU t0, a2
+	bne t0, t1, 1b
+	/* fill once, so data field parity is correct */
+	PTR_LI t0, INDEX_BASE
+2:	cache FILL, 0(t0)
+	PTR_ADDU t0, a2
+	bne t0, t1, 2b
+	/* invalidate again - prudent but not strictly necessary */
+	PTR_LI t0, INDEX_BASE
+1:	cache INDEX_STORE_TAG_I, 0(t0)
+	PTR_ADDU t0, a2
+	bne t0, t1, 1b
+9:	jr ra
+	END(mips_init_icache)
+
+/*
+ * mips_init_dcache(uint PRId, ulong dcache_size, unchar dcache_linesz)
+ */
+LEAF(mips_init_dcache)
+	blez a1, 9f
+	mtc0 zero, CP0_TAGLO
+	/* clear all tags */
+	PTR_LI t0, INDEX_BASE
+	PTR_ADDU t1, t0, a1
+1:	cache INDEX_STORE_TAG_D, 0(t0)
+	PTR_ADDU t0, a2
+	bne t0, t1, 1b
+	/* load from each line (in cached space) */
+	PTR_LI t0, INDEX_BASE
+2:	LONG_L zero, 0(t0)
+	PTR_ADDU t0, a2
+	bne t0, t1, 2b
+	/* clear all tags */
+	PTR_LI t0, INDEX_BASE
+1:	cache INDEX_STORE_TAG_D, 0(t0)
+	PTR_ADDU t0, a2
+	bne t0, t1, 1b
+9:	jr ra
+	END(mips_init_dcache)
+
+/*
+ * mips_cache_reset - low level initialisation of the primary caches
+ *
+ * This routine initialises the primary caches to ensure that they have good
+ * parity. It must be called by the ROM before any cached locations are used
+ * to prevent the possibility of data with bad parity being written to
+ * memory.
+ *
+ * To initialise the instruction cache it is essential that a source of data
+ * with good parity is available. This routine will initialise an area of
+ * memory starting at location zero to be used as a source of parity.
+ *
+ * RETURNS: N/A
+ *
+ */
+NESTED(mips_cache_reset, 0, ra)
+	move RA, ra
+
+#if !defined(CONFIG_SYS_ICACHE_SIZE) || !defined(CONFIG_SYS_DCACHE_SIZE) || \
+    !defined(CONFIG_SYS_CACHELINE_SIZE)
+	/* read Config1 for use below */
+	mfc0 t5, CP0_CONFIG, 1
+#endif
+
+#ifdef CONFIG_SYS_CACHELINE_SIZE
+	li t9, CONFIG_SYS_CACHELINE_SIZE
+	li t8, CONFIG_SYS_CACHELINE_SIZE
+#else
+	/* Detect I-cache line size. */
+	srl t8, t5, MIPS_CONF1_IL_SHIFT
+	andi t8, t8, (MIPS_CONF1_IL >> MIPS_CONF1_IL_SHIFT)
+	beqz t8, 1f
+	li t6, 2
+	sllv t8, t6, t8
+
+1:	/* Detect D-cache line size. */
+	srl t9, t5, MIPS_CONF1_DL_SHIFT
+	andi t9, t9, (MIPS_CONF1_DL >> MIPS_CONF1_DL_SHIFT)
+	beqz t9, 1f
+	li t6, 2
+	sllv t9, t6, t9
+1:
+#endif
+
+#ifdef CONFIG_SYS_ICACHE_SIZE
+	li t2, CONFIG_SYS_ICACHE_SIZE
+#else
+	/* Detect I-cache size. */
+	srl t6, t5, MIPS_CONF1_IS_SHIFT
+	andi t6, t6, (MIPS_CONF1_IS >> MIPS_CONF1_IS_SHIFT)
+	li t4, 32
+	xori t2, t6, 0x7
+	beqz t2, 1f
+	addi t6, t6, 1
+	sllv t4, t4, t6
+1:	/* At this point t4 == I-cache sets. */
+	mul t2, t4, t8
+	srl t6, t5, MIPS_CONF1_IA_SHIFT
+	andi t6, t6, (MIPS_CONF1_IA >> MIPS_CONF1_IA_SHIFT)
+	addi t6, t6, 1
+	/* At this point t6 == I-cache ways. */
+	mul t2, t2, t6
+#endif
+
+#ifdef CONFIG_SYS_DCACHE_SIZE
+	li t3, CONFIG_SYS_DCACHE_SIZE
+#else
+	/* Detect D-cache size. */
+	srl t6, t5, MIPS_CONF1_DS_SHIFT
+	andi t6, t6, (MIPS_CONF1_DS >> MIPS_CONF1_DS_SHIFT)
+	li t4, 32
+	xori t3, t6, 0x7
+	beqz t3, 1f
+	addi t6, t6, 1
+	sllv t4, t4, t6
+1:	/* At this point t4 == D-cache sets. */
+	mul t3, t4, t9
+	srl t6, t5, MIPS_CONF1_DA_SHIFT
+	andi t6, t6, (MIPS_CONF1_DA >> MIPS_CONF1_DA_SHIFT)
+	addi t6, t6, 1
+	/* At this point t6 == D-cache ways. */
+	mul t3, t3, t6
+#endif
+
+	/* Determine the largest L1 cache size */
+#if defined(CONFIG_SYS_ICACHE_SIZE) && defined(CONFIG_SYS_DCACHE_SIZE)
+#if CONFIG_SYS_ICACHE_SIZE > CONFIG_SYS_DCACHE_SIZE
+	li v0, CONFIG_SYS_ICACHE_SIZE
+#else
+	li v0, CONFIG_SYS_DCACHE_SIZE
+#endif
+#else
+	move v0, t2
+	sltu t1, t2, t3
+	movn v0, t3, t1
+#endif
+
+	/*
+	 * Now clear that much memory starting from zero.
+	 */
+	PTR_LI a0, CKSEG1
+	PTR_ADDU a1, a0, v0
+2:	PTR_ADDIU a0, 64
+	f_fill64 a0, -64, zero
+	bne a0, a1, 2b
+
+	/*
+	 * The caches are probably in an indeterminate state,
+	 * so we force good parity into them by doing an
+	 * invalidate, load/fill, invalidate for each line.
+	 */
+
+	/*
+	 * Assume bottom of RAM will generate good parity for the cache.
+	 */
+
+	/*
+	 * Initialize the I-cache first,
+	 */
+	move a1, t2
+	move a2, t8
+	PTR_LA v1, mips_init_icache
+	jalr v1
+
+	/*
+	 * then initialize D-cache.
+	 */
+	move a1, t3
+	move a2, t9
+	PTR_LA v1, mips_init_dcache
+	jalr v1
+
+	jr RA
+	END(mips_cache_reset)
+
+/*
+ * dcache_status - get cache status
+ *
+ * RETURNS: 0 - cache disabled; 1 - cache enabled
+ *
+ */
+LEAF(dcache_status)
+	mfc0 t0, CP0_CONFIG
+	li t1, CONF_CM_UNCACHED
+	andi t0, t0, CONF_CM_CMASK
+	move v0, zero
+	beq t0, t1, 2f
+	li v0, 1
+2:	jr ra
+	END(dcache_status)
+
+/*
+ * dcache_disable - disable cache
+ *
+ * RETURNS: N/A
+ *
+ */
+LEAF(dcache_disable)
+	mfc0 t0, CP0_CONFIG
+	li t1, -8
+	and t0, t0, t1
+	ori t0, t0, CONF_CM_UNCACHED
+	mtc0 t0, CP0_CONFIG
+	jr ra
+	END(dcache_disable)
+
+/*
+ * dcache_enable - enable cache
+ *
+ * RETURNS: N/A
+ *
+ */
+LEAF(dcache_enable)
+	mfc0 t0, CP0_CONFIG
+	ori t0, CONF_CM_CMASK
+	xori t0, CONF_CM_CMASK
+	ori t0, CONFIG_SYS_MIPS_CACHE_MODE
+	mtc0 t0, CP0_CONFIG
+	jr ra
+	END(dcache_enable)
--
cgit v1.2.3
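Note for readers following the series: the RAM-clearing loop above exists
because the FILL cache op & the D-cache loads pull data into the cache
lines, so whatever they fetch must already have good parity. Roughly the
same step in C, as an illustrative sketch only (clear_parity_source() is a
name invented here, not a U-Boot function):

#include <stddef.h>
#include <stdint.h>

/*
 * Illustrative model of the f_fill64 loop in mips_cache_reset: zero as
 * many bytes as the larger of the two caches so that later FILL/load
 * cache operations fetch data with known-good parity. The assembly
 * works through the uncached CKSEG1 window, 64 bytes per iteration.
 */
static void clear_parity_source(uint64_t *ram, size_t icache_size,
				size_t dcache_size)
{
	size_t n = (icache_size > dcache_size ? icache_size : dcache_size)
		   / sizeof(*ram);
	size_t i;

	for (i = 0; i < n; i++)
		ram[i] = 0;	/* f_fill64 a0, -64, zero */
}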
From 536cb7ce1aca10c326ac864b3e1d05ab57b3ec7e Mon Sep 17 00:00:00 2001
From: Paul Burton <paul.burton@imgtec.com>
Date: Thu, 29 Jan 2015 01:27:59 +0000
Subject: MIPS: refactor L1 cache config reads to a macro

Reduce duplication between reading the configuration of the L1 dcache &
icache by performing both using a macro which calculates the appropriate
line & cache sizes from the coprocessor 0 Config1 register.

Signed-off-by: Paul Burton <paul.burton@imgtec.com>
Cc: Daniel Schwierzeck <daniel.schwierzeck@gmail.com>
---
 arch/mips/lib/cache_init.S | 97 ++++++++++++++++++++--------------------------
 1 file changed, 41 insertions(+), 56 deletions(-)

diff --git a/arch/mips/lib/cache_init.S b/arch/mips/lib/cache_init.S
index 6c02bf9b53..2e036d9b97 100644
--- a/arch/mips/lib/cache_init.S
+++ b/arch/mips/lib/cache_init.S
@@ -97,6 +97,43 @@ LEAF(mips_init_dcache)
 9:	jr ra
 	END(mips_init_dcache)
 
+	.macro l1_info sz, line_sz, off
+	.set push
+	.set noat
+
+	mfc0 $1, CP0_CONFIG, 1
+
+	/* detect line size */
+	srl \line_sz, $1, \off + MIPS_CONF1_DL_SHIFT - MIPS_CONF1_DA_SHIFT
+	andi \line_sz, \line_sz, (MIPS_CONF1_DL >> MIPS_CONF1_DL_SHIFT)
+	move \sz, zero
+	beqz \line_sz, 10f
+	li \sz, 2
+	sllv \line_sz, \sz, \line_sz
+
+	/* detect associativity */
+	srl \sz, $1, \off + MIPS_CONF1_DA_SHIFT - MIPS_CONF1_DA_SHIFT
+	andi \sz, \sz, (MIPS_CONF1_DA >> MIPS_CONF1_DA_SHIFT)
+	addi \sz, \sz, 1
+
+	/* sz *= line_sz */
+	mul \sz, \sz, \line_sz
+
+	/* detect log32(sets) */
+	srl $1, $1, \off + MIPS_CONF1_DS_SHIFT - MIPS_CONF1_DA_SHIFT
+	andi $1, $1, (MIPS_CONF1_DS >> MIPS_CONF1_DS_SHIFT)
+	addiu $1, $1, 1
+	andi $1, $1, 0x7
+
+	/* sz <<= log32(sets) */
+	sllv \sz, \sz, $1
+
+	/* sz *= 32 */
+	li $1, 32
+	mul \sz, \sz, $1
+10:
+	.set pop
+	.endm
+
 /*
  * mips_cache_reset - low level initialisation of the primary caches
  *
@@ -114,70 +151,18 @@ LEAF(mips_init_dcache)
 NESTED(mips_cache_reset, 0, ra)
 	move RA, ra
 
-#if !defined(CONFIG_SYS_ICACHE_SIZE) || !defined(CONFIG_SYS_DCACHE_SIZE) || \
-    !defined(CONFIG_SYS_CACHELINE_SIZE)
-	/* read Config1 for use below */
-	mfc0 t5, CP0_CONFIG, 1
-#endif
-
-#ifdef CONFIG_SYS_CACHELINE_SIZE
-	li t9, CONFIG_SYS_CACHELINE_SIZE
-	li t8, CONFIG_SYS_CACHELINE_SIZE
-#else
-	/* Detect I-cache line size. */
-	srl t8, t5, MIPS_CONF1_IL_SHIFT
-	andi t8, t8, (MIPS_CONF1_IL >> MIPS_CONF1_IL_SHIFT)
-	beqz t8, 1f
-	li t6, 2
-	sllv t8, t6, t8
-
-1:	/* Detect D-cache line size. */
-	srl t9, t5, MIPS_CONF1_DL_SHIFT
-	andi t9, t9, (MIPS_CONF1_DL >> MIPS_CONF1_DL_SHIFT)
-	beqz t9, 1f
-	li t6, 2
-	sllv t9, t6, t9
-1:
-#endif
-
 #ifdef CONFIG_SYS_ICACHE_SIZE
 	li t2, CONFIG_SYS_ICACHE_SIZE
+	li t8, CONFIG_SYS_CACHELINE_SIZE
 #else
-	/* Detect I-cache size. */
-	srl t6, t5, MIPS_CONF1_IS_SHIFT
-	andi t6, t6, (MIPS_CONF1_IS >> MIPS_CONF1_IS_SHIFT)
-	li t4, 32
-	xori t2, t6, 0x7
-	beqz t2, 1f
-	addi t6, t6, 1
-	sllv t4, t4, t6
-1:	/* At this point t4 == I-cache sets. */
-	mul t2, t4, t8
-	srl t6, t5, MIPS_CONF1_IA_SHIFT
-	andi t6, t6, (MIPS_CONF1_IA >> MIPS_CONF1_IA_SHIFT)
-	addi t6, t6, 1
-	/* At this point t6 == I-cache ways. */
-	mul t2, t2, t6
+	l1_info t2, t8, MIPS_CONF1_IA_SHIFT
 #endif
 
 #ifdef CONFIG_SYS_DCACHE_SIZE
 	li t3, CONFIG_SYS_DCACHE_SIZE
+	li t9, CONFIG_SYS_CACHELINE_SIZE
 #else
-	/* Detect D-cache size. */
-	srl t6, t5, MIPS_CONF1_DS_SHIFT
-	andi t6, t6, (MIPS_CONF1_DS >> MIPS_CONF1_DS_SHIFT)
-	li t4, 32
-	xori t3, t6, 0x7
-	beqz t3, 1f
-	addi t6, t6, 1
-	sllv t4, t4, t6
-1:	/* At this point t4 == D-cache sets. */
-	mul t3, t4, t9
-	srl t6, t5, MIPS_CONF1_DA_SHIFT
-	andi t6, t6, (MIPS_CONF1_DA >> MIPS_CONF1_DA_SHIFT)
-	addi t6, t6, 1
-	/* At this point t6 == D-cache ways. */
-	mul t3, t3, t6
+	l1_info t3, t9, MIPS_CONF1_DA_SHIFT
 #endif
 
 	/* Determine the largest L1 cache size */
--
cgit v1.2.3
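The arithmetic hidden in l1_info is easier to check in C. The decode
follows the MIPS32 Config1 field encoding that the macro relies on: a line
size field L gives 2 << L bytes (L == 0 means the cache is absent), an
associativity field A gives A + 1 ways, & a sets-per-way field S gives
64 << S sets, except that S == 7 encodes 32 sets. A compilable sketch
(l1_size() is a name invented here, not a U-Boot function):

#include <stdio.h>

/* Decode one L1 cache's size from its Config1 S/L/A fields, the same
 * computation l1_info performs with shifts & a multiply. */
static unsigned long l1_size(unsigned int s, unsigned int l, unsigned int a)
{
	unsigned long sets, line_sz, ways;

	if (!l)
		return 0;			/* L == 0: no cache */

	line_sz = 2ul << l;			/* 2 << L bytes per line */
	sets = (s == 7) ? 32 : (64ul << s);	/* 64 << S sets, 7 -> 32 */
	ways = a + 1ul;				/* A + 1 ways */

	return sets * line_sz * ways;
}

int main(void)
{
	/* e.g. S=2 (256 sets), L=4 (32-byte lines), A=3 (4 ways): 32 KiB */
	printf("%lu\n", l1_size(2, 4, 3));
	return 0;
}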
From ac22feca1135b81ecc4d38995e98b59943d1bbf5 Mon Sep 17 00:00:00 2001
From: Paul Burton <paul.burton@imgtec.com>
Date: Thu, 29 Jan 2015 01:28:00 +0000
Subject: MIPS: refactor cache loops to a macro

Reduce duplication by performing loops through cache tags using an
assembler macro.

Signed-off-by: Paul Burton <paul.burton@imgtec.com>
Cc: Daniel Schwierzeck <daniel.schwierzeck@gmail.com>
---
 arch/mips/lib/cache_init.S | 30 +++++++++++++-----------------
 1 file changed, 13 insertions(+), 17 deletions(-)

diff --git a/arch/mips/lib/cache_init.S b/arch/mips/lib/cache_init.S
index 2e036d9b97..dc207a6a1c 100644
--- a/arch/mips/lib/cache_init.S
+++ b/arch/mips/lib/cache_init.S
@@ -47,28 +47,28 @@
 #endif
 	.endm
 
+	.macro cache_loop curr, end, line_sz, op
+10:	cache \op, 0(\curr)
+	PTR_ADDU \curr, \curr, \line_sz
+	bne \curr, \end, 10b
+	.endm
+
 /*
  * mips_init_icache(uint PRId, ulong icache_size, unchar icache_linesz)
  */
 LEAF(mips_init_icache)
 	blez a1, 9f
 	mtc0 zero, CP0_TAGLO
-	/* clear tag to invalidate */
 	PTR_LI t0, INDEX_BASE
 	PTR_ADDU t1, t0, a1
-1:	cache INDEX_STORE_TAG_I, 0(t0)
-	PTR_ADDU t0, a2
-	bne t0, t1, 1b
+	/* clear tag to invalidate */
+	cache_loop t0, t1, a2, INDEX_STORE_TAG_I
 	/* fill once, so data field parity is correct */
 	PTR_LI t0, INDEX_BASE
-2:	cache FILL, 0(t0)
-	PTR_ADDU t0, a2
-	bne t0, t1, 2b
+	cache_loop t0, t1, a2, FILL
 	/* invalidate again - prudent but not strictly necessary */
 	PTR_LI t0, INDEX_BASE
-1:	cache INDEX_STORE_TAG_I, 0(t0)
-	PTR_ADDU t0, a2
-	bne t0, t1, 1b
+	cache_loop t0, t1, a2, INDEX_STORE_TAG_I
 9:	jr ra
 	END(mips_init_icache)
 
@@ -78,12 +78,10 @@ LEAF(mips_init_icache)
 LEAF(mips_init_dcache)
 	blez a1, 9f
 	mtc0 zero, CP0_TAGLO
-	/* clear all tags */
 	PTR_LI t0, INDEX_BASE
 	PTR_ADDU t1, t0, a1
-1:	cache INDEX_STORE_TAG_D, 0(t0)
-	PTR_ADDU t0, a2
-	bne t0, t1, 1b
+	/* clear all tags */
+	cache_loop t0, t1, a2, INDEX_STORE_TAG_D
 	/* load from each line (in cached space) */
 	PTR_LI t0, INDEX_BASE
 2:	LONG_L zero, 0(t0)
 	PTR_ADDU t0, a2
 	bne t0, t1, 2b
 	/* clear all tags */
 	PTR_LI t0, INDEX_BASE
-1:	cache INDEX_STORE_TAG_D, 0(t0)
-	PTR_ADDU t0, a2
-	bne t0, t1, 1b
+	cache_loop t0, t1, a2, INDEX_STORE_TAG_D
 9:	jr ra
 	END(mips_init_dcache)
--
cgit v1.2.3
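In C the new macro amounts to a do/while over line-indexed addresses. Note
that, like the assembly (which only tests with bne after the first cache
op), the body always executes at least once; callers guard the zero-size
case with blez beforehand. A sketch, with do_cache_op() invented here as a
stand-in for the privileged MIPS cache instruction:

/* Stub so the sketch compiles; in reality each call is one `cache`
 * instruction whose operation code must be an immediate. */
static void do_cache_op(unsigned int op, unsigned long line)
{
	(void)op;
	(void)line;
}

/* C rendering of the cache_loop macro: apply one cache operation to
 * every line in [base, end). */
static void cache_loop(unsigned long base, unsigned long end,
		       unsigned long line_sz, unsigned int op)
{
	do {
		do_cache_op(op, base);	/* 10: cache \op, 0(\curr) */
		base += line_sz;	/* PTR_ADDU \curr, \curr, \line_sz */
	} while (base != end);		/* bne \curr, \end, 10b */
}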
From ca4e833cd6409c72e5b13ee803a4f08381e6d160 Mon Sep 17 00:00:00 2001
From: Paul Burton <paul.burton@imgtec.com>
Date: Thu, 29 Jan 2015 01:28:01 +0000
Subject: MIPS: inline mips_init_[id]cache functions

The mips_init_[id]cache functions are small & only called from a single
callsite. Inlining them allows mips_cache_reset to avoid having to
bother moving arguments around & leaves it a leaf function which is thus
able to simply keep the return address live in the ra register
throughout, simplifying the code.

Signed-off-by: Paul Burton <paul.burton@imgtec.com>
Cc: Daniel Schwierzeck <daniel.schwierzeck@gmail.com>
---
 arch/mips/lib/cache_init.S | 86 +++++++++++++++------------------------------
 1 file changed, 28 insertions(+), 58 deletions(-)

diff --git a/arch/mips/lib/cache_init.S b/arch/mips/lib/cache_init.S
index dc207a6a1c..cbd04bd2f3 100644
--- a/arch/mips/lib/cache_init.S
+++ b/arch/mips/lib/cache_init.S
@@ -18,12 +18,6 @@
 #define CONFIG_SYS_MIPS_CACHE_MODE CONF_CM_CACHABLE_NONCOHERENT
 #endif
 
-#ifdef CONFIG_64BIT
-# define RA ta3
-#else
-# define RA t7
-#endif
-
 #define INDEX_BASE CKSEG0
 
 	.macro f_fill64 dst, offset, val
@@ -53,46 +47,6 @@
 	bne \curr, \end, 10b
 	.endm
 
-/*
- * mips_init_icache(uint PRId, ulong icache_size, unchar icache_linesz)
- */
-LEAF(mips_init_icache)
-	blez a1, 9f
-	mtc0 zero, CP0_TAGLO
-	PTR_LI t0, INDEX_BASE
-	PTR_ADDU t1, t0, a1
-	/* clear tag to invalidate */
-	cache_loop t0, t1, a2, INDEX_STORE_TAG_I
-	/* fill once, so data field parity is correct */
-	PTR_LI t0, INDEX_BASE
-	cache_loop t0, t1, a2, FILL
-	/* invalidate again - prudent but not strictly necessary */
-	PTR_LI t0, INDEX_BASE
-	cache_loop t0, t1, a2, INDEX_STORE_TAG_I
-9:	jr ra
-	END(mips_init_icache)
-
-/*
- * mips_init_dcache(uint PRId, ulong dcache_size, unchar dcache_linesz)
- */
-LEAF(mips_init_dcache)
-	blez a1, 9f
-	mtc0 zero, CP0_TAGLO
-	PTR_LI t0, INDEX_BASE
-	PTR_ADDU t1, t0, a1
-	/* clear all tags */
-	cache_loop t0, t1, a2, INDEX_STORE_TAG_D
-	/* load from each line (in cached space) */
-	PTR_LI t0, INDEX_BASE
-2:	LONG_L zero, 0(t0)
-	PTR_ADDU t0, a2
-	bne t0, t1, 2b
-	/* clear all tags */
-	PTR_LI t0, INDEX_BASE
-	cache_loop t0, t1, a2, INDEX_STORE_TAG_D
-9:	jr ra
-	END(mips_init_dcache)
-
 	.macro l1_info sz, line_sz, off
 	.set push
 	.set noat
@@ -144,9 +98,7 @@ LEAF(mips_init_dcache)
  * RETURNS: N/A
  *
  */
-NESTED(mips_cache_reset, 0, ra)
-	move RA, ra
-
+LEAF(mips_cache_reset)
 #ifdef CONFIG_SYS_ICACHE_SIZE
 	li t2, CONFIG_SYS_ICACHE_SIZE
 	li t8, CONFIG_SYS_CACHELINE_SIZE
@@ -195,20 +147,38 @@ NESTED(mips_cache_reset, 0, ra)
 	/*
 	 * Initialize the I-cache first,
 	 */
-	move a1, t2
-	move a2, t8
-	PTR_LA v1, mips_init_icache
-	jalr v1
+	blez t2, 1f
+	mtc0 zero, CP0_TAGLO
+	PTR_LI t0, INDEX_BASE
+	PTR_ADDU t1, t0, t2
+	/* clear tag to invalidate */
+	cache_loop t0, t1, t8, INDEX_STORE_TAG_I
+	/* fill once, so data field parity is correct */
+	PTR_LI t0, INDEX_BASE
+	cache_loop t0, t1, t8, FILL
+	/* invalidate again - prudent but not strictly necessary */
+	PTR_LI t0, INDEX_BASE
+	cache_loop t0, t1, t8, INDEX_STORE_TAG_I
 
 	/*
 	 * then initialize D-cache.
 	 */
-	move a1, t3
-	move a2, t9
-	PTR_LA v1, mips_init_dcache
-	jalr v1
+1:	blez t3, 3f
+	mtc0 zero, CP0_TAGLO
+	PTR_LI t0, INDEX_BASE
+	PTR_ADDU t1, t0, t3
+	/* clear all tags */
+	cache_loop t0, t1, t9, INDEX_STORE_TAG_D
+	/* load from each line (in cached space) */
+	PTR_LI t0, INDEX_BASE
+2:	LONG_L zero, 0(t0)
+	PTR_ADDU t0, t9
+	bne t0, t1, 2b
+	/* clear all tags */
+	PTR_LI t0, INDEX_BASE
+	cache_loop t0, t1, t9, INDEX_STORE_TAG_D
 
-	jr RA
+3:	jr ra
 	END(mips_cache_reset)
 
 /*
--
cgit v1.2.3
From dd7c72006e51f0d27e5cb1dcf60d5b9bf307565e Mon Sep 17 00:00:00 2001
From: Paul Burton <paul.burton@imgtec.com>
Date: Thu, 29 Jan 2015 01:28:02 +0000
Subject: MIPS: allow systems to skip loads during cache init

Current MIPS systems do not require that loads be performed to force
the parity of cache lines; a simple invalidate by clearing the tag for
each line will suffice. Thus this patch makes the loads & subsequent
second invalidation conditional upon the
CONFIG_SYS_MIPS_CACHE_INIT_RAM_LOAD option, and defines that option for
existing mips32 targets. Exceptions are malta, where this is known to
be unnecessary, and qemu-mips, where caches are not implemented.

Signed-off-by: Paul Burton <paul.burton@imgtec.com>
Cc: Daniel Schwierzeck <daniel.schwierzeck@gmail.com>
---
 arch/mips/Kconfig          |  6 ++++++
 arch/mips/lib/cache_init.S | 19 +++++++++++++------
 2 files changed, 19 insertions(+), 6 deletions(-)

diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index ef7892975a..bc4283d2f1 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -36,6 +36,7 @@ config TARGET_VCT
 	select SUPPORTS_BIG_ENDIAN
 	select SUPPORTS_CPU_MIPS32_R1
 	select SUPPORTS_CPU_MIPS32_R2
+	select SYS_MIPS_CACHE_INIT_RAM_LOAD
 
 config TARGET_DBAU1X00
 	bool "Support dbau1x00"
@@ -43,12 +44,14 @@ config TARGET_DBAU1X00
 	select SUPPORTS_LITTLE_ENDIAN
 	select SUPPORTS_CPU_MIPS32_R1
 	select SUPPORTS_CPU_MIPS32_R2
+	select SYS_MIPS_CACHE_INIT_RAM_LOAD
 
 config TARGET_PB1X00
 	bool "Support pb1x00"
 	select SUPPORTS_LITTLE_ENDIAN
 	select SUPPORTS_CPU_MIPS32_R1
 	select SUPPORTS_CPU_MIPS32_R2
+	select SYS_MIPS_CACHE_INIT_RAM_LOAD
 
 endchoice
 
@@ -185,6 +188,9 @@ config 64BIT
 config SWAP_IO_SPACE
 	bool
 
+config SYS_MIPS_CACHE_INIT_RAM_LOAD
+	bool
+
 endif
 
 endmenu
diff --git a/arch/mips/lib/cache_init.S b/arch/mips/lib/cache_init.S
index cbd04bd2f3..04a36b2528 100644
--- a/arch/mips/lib/cache_init.S
+++ b/arch/mips/lib/cache_init.S
@@ -113,6 +113,8 @@ LEAF(mips_cache_reset)
 	l1_info t3, t9, MIPS_CONF1_DA_SHIFT
 #endif
 
+#ifdef CONFIG_SYS_MIPS_CACHE_INIT_RAM_LOAD
+
 	/* Determine the largest L1 cache size */
#if defined(CONFIG_SYS_ICACHE_SIZE) && defined(CONFIG_SYS_DCACHE_SIZE)
#if CONFIG_SYS_ICACHE_SIZE > CONFIG_SYS_DCACHE_SIZE
 	li v0, CONFIG_SYS_ICACHE_SIZE
#else
 	li v0, CONFIG_SYS_DCACHE_SIZE
#endif
#else
 	move v0, t2
 	sltu t1, t2, t3
 	movn v0, t3, t1
#endif
 	/*
 	 * Now clear that much memory starting from zero.
 	 */
 	PTR_LI a0, CKSEG1
 	PTR_ADDU a1, a0, v0
 2:	PTR_ADDIU a0, 64
@@ -134,14 +136,15 @@ LEAF(mips_cache_reset)
 	f_fill64 a0, -64, zero
 	bne a0, a1, 2b
 
-	/*
-	 * The caches are probably in an indeterminate state,
-	 * so we force good parity into them by doing an
-	 * invalidate, load/fill, invalidate for each line.
-	 */
+#endif /* CONFIG_SYS_MIPS_CACHE_INIT_RAM_LOAD */
 
 	/*
-	 * Assume bottom of RAM will generate good parity for the cache.
+	 * The caches are probably in an indeterminate state, so we force good
+	 * parity into them by doing an invalidate for each line. If
+	 * CONFIG_SYS_MIPS_CACHE_INIT_RAM_LOAD is set then we'll proceed to
+	 * perform a load/fill & a further invalidate for each line, assuming
+	 * that the bottom of RAM (having just been cleared) will generate good
+	 * parity for the cache.
 	 */
 
 	/*
 	 * Initialize the I-cache first,
 	 */
 	blez t2, 1f
 	PTR_LI t0, INDEX_BASE
 	PTR_ADDU t1, t0, t2
 	/* clear tag to invalidate */
 	cache_loop t0, t1, t8, INDEX_STORE_TAG_I
+#ifdef CONFIG_SYS_MIPS_CACHE_INIT_RAM_LOAD
 	/* fill once, so data field parity is correct */
 	PTR_LI t0, INDEX_BASE
 	cache_loop t0, t1, t8, FILL
 	/* invalidate again - prudent but not strictly necessary */
 	PTR_LI t0, INDEX_BASE
 	cache_loop t0, t1, t8, INDEX_STORE_TAG_I
+#endif
 
 	/*
 	 * then initialize D-cache.
 	 */
@@ -169,6 +174,7 @@ LEAF(mips_cache_reset)
 	PTR_ADDU t1, t0, t3
 	/* clear all tags */
 	cache_loop t0, t1, t9, INDEX_STORE_TAG_D
+#ifdef CONFIG_SYS_MIPS_CACHE_INIT_RAM_LOAD
 	/* load from each line (in cached space) */
 	PTR_LI t0, INDEX_BASE
 2:	LONG_L zero, 0(t0)
 	PTR_ADDU t0, t9
 	bne t0, t1, 2b
 	/* clear all tags */
 	PTR_LI t0, INDEX_BASE
 	cache_loop t0, t1, t9, INDEX_STORE_TAG_D
+#endif
 
 3:	jr ra
 	END(mips_cache_reset)
--
cgit v1.2.3
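The shape of one cache's init after this patch, sketched in C.
invalidate_lines() & fill_lines() are hypothetical stand-ins for the
INDEX_STORE_TAG & FILL/load loops above, not U-Boot functions:

/* Stubs so the sketch compiles */
static void invalidate_lines(unsigned long base, unsigned long end,
			     unsigned long line_sz)
{
	(void)base; (void)end; (void)line_sz;	/* INDEX_STORE_TAG loop */
}

static void fill_lines(unsigned long base, unsigned long end,
		       unsigned long line_sz)
{
	(void)base; (void)end; (void)line_sz;	/* FILL or load loop */
}

static void model_init_one_cache(unsigned long base, unsigned long end,
				 unsigned long line_sz)
{
	invalidate_lines(base, end, line_sz);	/* always: clear the tags */
#ifdef CONFIG_SYS_MIPS_CACHE_INIT_RAM_LOAD
	/* only boards selecting the option pay for the extra passes */
	fill_lines(base, end, line_sz);
	invalidate_lines(base, end, line_sz);
#endif
}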
*/ /* @@ -153,12 +156,14 @@ LEAF(mips_cache_reset) PTR_ADDU t1, t0, t2 /* clear tag to invalidate */ cache_loop t0, t1, t8, INDEX_STORE_TAG_I +#ifdef CONFIG_SYS_MIPS_CACHE_INIT_RAM_LOAD /* fill once, so data field parity is correct */ PTR_LI t0, INDEX_BASE cache_loop t0, t1, t8, FILL /* invalidate again - prudent but not strictly neccessary */ PTR_LI t0, INDEX_BASE cache_loop t0, t1, t8, INDEX_STORE_TAG_I +#endif /* * then initialize D-cache. @@ -169,6 +174,7 @@ LEAF(mips_cache_reset) PTR_ADDU t1, t0, t3 /* clear all tags */ cache_loop t0, t1, t9, INDEX_STORE_TAG_D +#ifdef CONFIG_SYS_MIPS_CACHE_INIT_RAM_LOAD /* load from each line (in cached space) */ PTR_LI t0, INDEX_BASE 2: LONG_L zero, 0(t0) @@ -177,6 +183,7 @@ LEAF(mips_cache_reset) /* clear all tags */ PTR_LI t0, INDEX_BASE cache_loop t0, t1, t9, INDEX_STORE_TAG_D +#endif 3: jr ra END(mips_cache_reset) -- cgit v1.2.3 From 8755d50706742e4d302a335f4e69dd6430ec12a2 Mon Sep 17 00:00:00 2001 From: Paul Burton Date: Thu, 29 Jan 2015 01:28:03 +0000 Subject: MIPS: clear TagLo select 2 during cache init Current MIPS cores from Imagination Technologies use TagLo select 2 for the data cache. The architecture requires that it is safe for software to write to this register even if it isn't present, so take the trivial option of clearing both selects 0 & 2. Signed-off-by: Paul Burton Cc: Daniel Schwierzeck --- arch/mips/lib/cache_init.S | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) (limited to 'arch/mips/lib/cache_init.S') diff --git a/arch/mips/lib/cache_init.S b/arch/mips/lib/cache_init.S index 04a36b2528..137d7283ff 100644 --- a/arch/mips/lib/cache_init.S +++ b/arch/mips/lib/cache_init.S @@ -138,6 +138,14 @@ LEAF(mips_cache_reset) #endif /* CONFIG_SYS_MIPS_CACHE_INIT_RAM_LOAD */ + /* + * The TagLo registers used depend upon the CPU implementation, but the + * architecture requires that it is safe for software to write to both + * TagLo selects 0 & 2 covering supported cases. + */ + mtc0 zero, CP0_TAGLO + mtc0 zero, CP0_TAGLO, 2 + /* * The caches are probably in an indeterminate state, so we force good * parity into them by doing an invalidate for each line. If @@ -151,7 +159,6 @@ LEAF(mips_cache_reset) * Initialize the I-cache first, */ blez t2, 1f - mtc0 zero, CP0_TAGLO PTR_LI t0, INDEX_BASE PTR_ADDU t1, t0, t2 /* clear tag to invalidate */ @@ -169,7 +176,6 @@ LEAF(mips_cache_reset) * then initialize D-cache. */ 1: blez t3, 3f - mtc0 zero, CP0_TAGLO PTR_LI t0, INDEX_BASE PTR_ADDU t1, t0, t3 /* clear all tags */ -- cgit v1.2.3