Diffstat (limited to 'arch/mips/lib/cache_init.S')
-rw-r--r-- | arch/mips/lib/cache_init.S | 271
1 file changed, 251 insertions, 20 deletions
diff --git a/arch/mips/lib/cache_init.S b/arch/mips/lib/cache_init.S
index bc8ab27b58..698a5afdee 100644
--- a/arch/mips/lib/cache_init.S
+++ b/arch/mips/lib/cache_init.S
@@ -13,6 +13,7 @@
 #include <asm/mipsregs.h>
 #include <asm/addrspace.h>
 #include <asm/cacheops.h>
+#include <asm/cm.h>
 
 #ifndef CONFIG_SYS_MIPS_CACHE_MODE
 #define CONFIG_SYS_MIPS_CACHE_MODE CONF_CM_CACHABLE_NONCOHERENT
@@ -95,22 +96,147 @@
  * with good parity is available. This routine will initialise an area of
  * memory starting at location zero to be used as a source of parity.
  *
+ * Note that this function does not follow the standard calling convention &
+ * may clobber typically callee-saved registers.
+ *
  * RETURNS: N/A
  *
  */
+#define R_RETURN	s0
+#define R_IC_SIZE	s1
+#define R_IC_LINE	s2
+#define R_DC_SIZE	s3
+#define R_DC_LINE	s4
+#define R_L2_SIZE	s5
+#define R_L2_LINE	s6
+#define R_L2_BYPASSED	s7
+#define R_L2_L2C	t8
+
 LEAF(mips_cache_reset)
+	move	R_RETURN, ra
+
+#ifdef CONFIG_MIPS_L2_CACHE
+	/*
+	 * For there to be an L2 present, Config2 must be present. If it isn't
+	 * then we proceed knowing there's no L2 cache.
+	 */
+	move	R_L2_SIZE, zero
+	move	R_L2_LINE, zero
+	move	R_L2_BYPASSED, zero
+	move	R_L2_L2C, zero
+	mfc0	t0, CP0_CONFIG, 1
+	bgez	t0, l2_probe_done
+
+	/*
+	 * From MIPSr6 onwards the L2 cache configuration might not be reported
+	 * by Config2. The Config5.L2C bit indicates whether this is the case,
+	 * and if it is then we need knowledge of where else to look. For cores
+	 * from Imagination Technologies this is a CM GCR.
+	 */
+# if __mips_isa_rev >= 6
+	/* Check that Config5 exists */
+	mfc0	t0, CP0_CONFIG, 2
+	bgez	t0, l2_probe_cop0
+	mfc0	t0, CP0_CONFIG, 3
+	bgez	t0, l2_probe_cop0
+	mfc0	t0, CP0_CONFIG, 4
+	bgez	t0, l2_probe_cop0
+
+	/* Check Config5.L2C is set */
+	mfc0	t0, CP0_CONFIG, 5
+	and	R_L2_L2C, t0, MIPS_CONF5_L2C
+	beqz	R_L2_L2C, l2_probe_cop0
+
+	/* Config5.L2C is set */
+# ifdef CONFIG_MIPS_CM
+	/* The CM will provide L2 configuration */
+	PTR_LI	t0, CKSEG1ADDR(CONFIG_MIPS_CM_BASE)
+	lw	t1, GCR_L2_CONFIG(t0)
+	bgez	t1, l2_probe_done
+
+	ext	R_L2_LINE, t1, \
+		GCR_L2_CONFIG_LINESZ_SHIFT, GCR_L2_CONFIG_LINESZ_BITS
+	beqz	R_L2_LINE, l2_probe_done
+	li	t2, 2
+	sllv	R_L2_LINE, t2, R_L2_LINE
+
+	ext	t2, t1, GCR_L2_CONFIG_ASSOC_SHIFT, GCR_L2_CONFIG_ASSOC_BITS
+	addiu	t2, t2, 1
+	mul	R_L2_SIZE, R_L2_LINE, t2
+
+	ext	t2, t1, GCR_L2_CONFIG_SETSZ_SHIFT, GCR_L2_CONFIG_SETSZ_BITS
+	sllv	R_L2_SIZE, R_L2_SIZE, t2
+	li	t2, 64
+	mul	R_L2_SIZE, R_L2_SIZE, t2
+
+	/* Bypass the L2 cache so that we can init the L1s early */
+	or	t1, t1, GCR_L2_CONFIG_BYPASS
+	sw	t1, GCR_L2_CONFIG(t0)
+	sync
+	li	R_L2_BYPASSED, 1
+
+	/* Zero the L2 tag registers */
+	sw	zero, GCR_L2_TAG_ADDR(t0)
+	sw	zero, GCR_L2_TAG_ADDR_UPPER(t0)
+	sw	zero, GCR_L2_TAG_STATE(t0)
+	sw	zero, GCR_L2_TAG_STATE_UPPER(t0)
+	sw	zero, GCR_L2_DATA(t0)
+	sw	zero, GCR_L2_DATA_UPPER(t0)
+	sync
+# else
+	/* We don't know how to retrieve L2 configuration on this system */
+# endif
+	b	l2_probe_done
+# endif
+
+	/*
+	 * For pre-r6 systems, or r6 systems with Config5.L2C==0, probe the L2
+	 * cache configuration from the cop0 Config2 register.
+	 */
+l2_probe_cop0:
+	mfc0	t0, CP0_CONFIG, 2
+
+	srl	R_L2_LINE, t0, MIPS_CONF2_SL_SHF
+	andi	R_L2_LINE, R_L2_LINE, MIPS_CONF2_SL >> MIPS_CONF2_SL_SHF
+	beqz	R_L2_LINE, l2_probe_done
+	li	t1, 2
+	sllv	R_L2_LINE, t1, R_L2_LINE
+
+	srl	t1, t0, MIPS_CONF2_SA_SHF
+	andi	t1, t1, MIPS_CONF2_SA >> MIPS_CONF2_SA_SHF
+	addiu	t1, t1, 1
+	mul	R_L2_SIZE, R_L2_LINE, t1
+
+	srl	t1, t0, MIPS_CONF2_SS_SHF
+	andi	t1, t1, MIPS_CONF2_SS >> MIPS_CONF2_SS_SHF
+	sllv	R_L2_SIZE, R_L2_SIZE, t1
+	li	t1, 64
+	mul	R_L2_SIZE, R_L2_SIZE, t1
+
+	/* Attempt to bypass the L2 so that we can init the L1s early */
+	or	t0, t0, MIPS_CONF2_L2B
+	mtc0	t0, CP0_CONFIG, 2
+	ehb
+	mfc0	t0, CP0_CONFIG, 2
+	and	R_L2_BYPASSED, t0, MIPS_CONF2_L2B
+
+	/* Zero the L2 tag registers */
+	mtc0	zero, CP0_TAGLO, 4
+	ehb
+l2_probe_done:
+#endif
+
 #ifndef CONFIG_SYS_CACHE_SIZE_AUTO
-	li	t2, CONFIG_SYS_ICACHE_SIZE
-	li	t8, CONFIG_SYS_ICACHE_LINE_SIZE
+	li	R_IC_SIZE, CONFIG_SYS_ICACHE_SIZE
+	li	R_IC_LINE, CONFIG_SYS_ICACHE_LINE_SIZE
 #else
-	l1_info	t2, t8, MIPS_CONF1_IA_SHF
+	l1_info	R_IC_SIZE, R_IC_LINE, MIPS_CONF1_IA_SHF
 #endif
 
 #ifndef CONFIG_SYS_CACHE_SIZE_AUTO
-	li	t3, CONFIG_SYS_DCACHE_SIZE
-	li	t9, CONFIG_SYS_DCACHE_LINE_SIZE
+	li	R_DC_SIZE, CONFIG_SYS_DCACHE_SIZE
+	li	R_DC_LINE, CONFIG_SYS_DCACHE_LINE_SIZE
 #else
-	l1_info	t3, t9, MIPS_CONF1_DA_SHF
+	l1_info	R_DC_SIZE, R_DC_LINE, MIPS_CONF1_DA_SHF
 #endif
 
 #ifdef CONFIG_SYS_MIPS_CACHE_INIT_RAM_LOAD
@@ -123,9 +249,9 @@ LEAF(mips_cache_reset)
 	li	v0, CONFIG_SYS_DCACHE_SIZE
 #endif
 #else
-	move	v0, t2
-	sltu	t1, t2, t3
-	movn	v0, t3, t1
+	move	v0, R_IC_SIZE
+	sltu	t1, R_IC_SIZE, R_DC_SIZE
+	movn	v0, R_DC_SIZE, t1
 #endif
 
 	/*
 	 * Now clear that much memory starting from zero.
@@ -138,13 +264,36 @@ LEAF(mips_cache_reset)
 
 #endif /* CONFIG_SYS_MIPS_CACHE_INIT_RAM_LOAD */
 
+#ifdef CONFIG_MIPS_L2_CACHE
+	/*
+	 * If the L2 is bypassed, init the L1 first so that we can execute the
+	 * rest of the cache initialisation using the L1 instruction cache.
+	 */
+	bnez	R_L2_BYPASSED, l1_init
+
+l2_init:
+	PTR_LI	t0, INDEX_BASE
+	PTR_ADDU	t1, t0, R_L2_SIZE
+1:	cache	INDEX_STORE_TAG_SD, 0(t0)
+	PTR_ADDU	t0, t0, R_L2_LINE
+	bne	t0, t1, 1b
+
+	/*
+	 * If the L2 was bypassed then we already initialised the L1s before
+	 * the L2, so we are now done.
+	 */
+	bnez	R_L2_BYPASSED, l2_unbypass
+#endif
+
 	/*
 	 * The TagLo registers used depend upon the CPU implementation, but the
 	 * architecture requires that it is safe for software to write to both
 	 * TagLo selects 0 & 2 covering supported cases.
 	 */
+l1_init:
 	mtc0	zero, CP0_TAGLO
 	mtc0	zero, CP0_TAGLO, 2
+	ehb
 
 	/*
 	 * The caches are probably in an indeterminate state, so we force good
@@ -158,40 +307,122 @@ LEAF(mips_cache_reset)
 	/*
 	 * Initialize the I-cache first,
 	 */
-	blez	t2, 1f
+	blez	R_IC_SIZE, 1f
 	PTR_LI	t0, INDEX_BASE
-	PTR_ADDU	t1, t0, t2
+	PTR_ADDU	t1, t0, R_IC_SIZE
 	/* clear tag to invalidate */
-	cache_loop	t0, t1, t8, INDEX_STORE_TAG_I
+	cache_loop	t0, t1, R_IC_LINE, INDEX_STORE_TAG_I
 #ifdef CONFIG_SYS_MIPS_CACHE_INIT_RAM_LOAD
 	/* fill once, so data field parity is correct */
 	PTR_LI	t0, INDEX_BASE
-	cache_loop	t0, t1, t8, FILL
+	cache_loop	t0, t1, R_IC_LINE, FILL
 	/* invalidate again - prudent but not strictly necessary */
 	PTR_LI	t0, INDEX_BASE
-	cache_loop	t0, t1, t8, INDEX_STORE_TAG_I
+	cache_loop	t0, t1, R_IC_LINE, INDEX_STORE_TAG_I
+#endif
+
+	/* Enable use of the I-cache by setting Config.K0 */
+	sync
+	mfc0	t0, CP0_CONFIG
+	li	t1, CONFIG_SYS_MIPS_CACHE_MODE
+#if __mips_isa_rev >= 2
+	ins	t0, t1, 0, 3
+#else
+	ori	t0, t0, CONF_CM_CMASK
+	xori	t0, t0, CONF_CM_CMASK
+	or	t0, t0, t1
 #endif
+	mtc0	t0, CP0_CONFIG
 
 	/*
 	 * then initialize D-cache.
 	 */
-1:	blez	t3, 3f
+1:	blez	R_DC_SIZE, 3f
 	PTR_LI	t0, INDEX_BASE
-	PTR_ADDU	t1, t0, t3
+	PTR_ADDU	t1, t0, R_DC_SIZE
 	/* clear all tags */
-	cache_loop	t0, t1, t9, INDEX_STORE_TAG_D
+	cache_loop	t0, t1, R_DC_LINE, INDEX_STORE_TAG_D
 #ifdef CONFIG_SYS_MIPS_CACHE_INIT_RAM_LOAD
 	/* load from each line (in cached space) */
 	PTR_LI	t0, INDEX_BASE
 2:	LONG_L	zero, 0(t0)
-	PTR_ADDU	t0, t9
+	PTR_ADDU	t0, R_DC_LINE
 	bne	t0, t1, 2b
 	/* clear all tags */
 	PTR_LI	t0, INDEX_BASE
-	cache_loop	t0, t1, t9, INDEX_STORE_TAG_D
+	cache_loop	t0, t1, R_DC_LINE, INDEX_STORE_TAG_D
 #endif
+3:
+
+#ifdef CONFIG_MIPS_L2_CACHE
+	/* If the L2 isn't bypassed then we're done */
+	beqz	R_L2_BYPASSED, return
+
+	/* The L2 is bypassed - go initialise it */
+	b	l2_init
 
-3:	jr	ra
+l2_unbypass:
+# if __mips_isa_rev >= 6
+	beqz	R_L2_L2C, 1f
+
+	li	t0, CKSEG1ADDR(CONFIG_MIPS_CM_BASE)
+	lw	t1, GCR_L2_CONFIG(t0)
+	xor	t1, t1, GCR_L2_CONFIG_BYPASS
+	sw	t1, GCR_L2_CONFIG(t0)
+	sync
+	ehb
+	b	2f
+# endif
+1:	mfc0	t0, CP0_CONFIG, 2
+	xor	t0, t0, MIPS_CONF2_L2B
+	mtc0	t0, CP0_CONFIG, 2
+	ehb
+
+2:
+# ifdef CONFIG_MIPS_CM
+	/* Config3 must exist for a CM to be present */
+	mfc0	t0, CP0_CONFIG, 1
+	bgez	t0, 2f
+	mfc0	t0, CP0_CONFIG, 2
+	bgez	t0, 2f
+
+	/* Check Config3.CMGCR to determine CM presence */
+	mfc0	t0, CP0_CONFIG, 3
+	and	t0, t0, MIPS_CONF3_CMGCR
+	beqz	t0, 2f
+
+	/* Change Config.K0 to a coherent CCA */
+	mfc0	t0, CP0_CONFIG
+	li	t1, CONF_CM_CACHABLE_COW
+#if __mips_isa_rev >= 2
+	ins	t0, t1, 0, 3
+#else
+	ori	t0, t0, CONF_CM_CMASK
+	xori	t0, t0, CONF_CM_CMASK
+	or	t0, t0, t1
+#endif
+	mtc0	t0, CP0_CONFIG
+
+	/*
+	 * Join the coherent domain such that the caches of this core are kept
+	 * coherent with those of other cores.
+	 */
+	PTR_LI	t0, CKSEG1ADDR(CONFIG_MIPS_CM_BASE)
+	lw	t1, GCR_REV(t0)
+	li	t2, GCR_REV_CM3
+	li	t3, GCR_Cx_COHERENCE_EN
+	bge	t1, t2, 1f
+	li	t3, GCR_Cx_COHERENCE_DOM_EN
+1:	sw	t3, GCR_Cx_COHERENCE(t0)
+	ehb
+2:
+# endif
+#endif
+
+return:
+	/* Ensure all cache operations complete before returning */
+	sync
+	jr	ra
 	END(mips_cache_reset)
 
 /*
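
A note for readers tracing the new l2_probe_cop0 path: the shift-and-mask sequence derives the L2 geometry from three Config2 fields. Below is a minimal C sketch of the same arithmetic. It is illustrative only, not code from this commit, and it assumes the architectural Config2 field positions that the MIPS_CONF2_* macros encode (SA at bits 3:0, SL at bits 7:4, SS at bits 11:8).

#include <stdint.h>

/*
 * Illustrative sketch of l2_probe_cop0's arithmetic:
 *   SA (bits  3:0): associativity = SA + 1
 *   SL (bits  7:4): line size = 2 << SL, 0 meaning no L2 present
 *   SS (bits 11:8): sets per way = 64 << SS
 */
static uint32_t l2_size_from_config2(uint32_t config2, uint32_t *line_size)
{
	uint32_t sa = (config2 >> 0) & 0xf;
	uint32_t sl = (config2 >> 4) & 0xf;
	uint32_t ss = (config2 >> 8) & 0xf;

	if (!sl) {
		*line_size = 0;	/* SL == 0: no L2 cache present */
		return 0;
	}

	*line_size = 2u << sl;
	/* total size = line size * ways * sets per way */
	return *line_size * (sa + 1) * (64u << ss);
}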
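
The diff also updates the Config.K0 cache coherency attribute twice (once to enable the I-cache, once to select a coherent CCA), using a single ins instruction on MIPSr2 and later but an ori/xori/or sequence on older ISAs. A hedged C equivalent of the fallback, assuming CONF_CM_CMASK is the 3-bit field mask 0x7 as defined in asm/mipsregs.h:

#include <stdint.h>

#define CONF_CM_CMASK 0x7	/* assumed 3-bit CCA field mask */

/*
 * Sketch of the pre-r2 Config.K0 update: ori sets the CCA field to all
 * ones, xori with the same mask clears it, and or inserts the new cache
 * mode - the same effect as the MIPSr2 'ins t0, t1, 0, 3'.
 */
static uint32_t set_config_k0(uint32_t config, uint32_t cca)
{
	config |= CONF_CM_CMASK;	/* ori  t0, t0, CONF_CM_CMASK */
	config ^= CONF_CM_CMASK;	/* xori t0, t0, CONF_CM_CMASK */
	return config | cca;		/* or   t0, t0, t1 */
}

The ori/xori pair is the classic trick for clearing a bitfield without a dedicated insert instruction: it leaves every Config bit outside the CCA field untouched.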
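
Finally, the coherence join at the end of l2_unbypass writes this core's GCR_Cx_COHERENCE register, using a plain enable value on CM 3.0 and later but a coherence-domain mask on older CMs. A rough C sketch of that decision, with the register offsets and values assumed to match asm/cm.h rather than taken from this commit:

#include <stdint.h>

/* Assumed to mirror asm/cm.h; treat these as illustrative values */
#define GCR_REV			0x030	/* CM revision register */
#define GCR_REV_CM3		0x0800	/* major revision 8 == CM 3.0 */
#define GCR_CX_COHERENCE	0x2008	/* core-local coherence control */
#define GCR_CX_COHERENCE_EN	0x1	/* CM >= 3.0: single enable bit */
#define GCR_CX_COHERENCE_DOM_EN	0xff	/* pre-CM3: domain enable mask */

static void join_coherent_domain(volatile uint8_t *gcr_base)
{
	volatile uint32_t *rev = (volatile uint32_t *)(gcr_base + GCR_REV);
	volatile uint32_t *coh =
		(volatile uint32_t *)(gcr_base + GCR_CX_COHERENCE);

	/* CM 3.0 replaced the per-domain enable mask with one enable bit */
	*coh = (*rev >= GCR_REV_CM3) ? GCR_CX_COHERENCE_EN
				     : GCR_CX_COHERENCE_DOM_EN;
}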