/*
 * CPU-specific routines originally from U-Boot.
 * See: uboot-xburst/files/arch/mips/cpu/xburst/cpu.c
 * See: u-boot/arch/mips/include/asm/cacheops.h
 *
 * Copyright (C) 2000-2009 Wolfgang Denk, DENX Software Engineering, <wd@denx.de>
 * Copyright (C) 2015, 2016 Paul Boddie <paul@boddie.org.uk>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of
 * the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor,
 * Boston, MA 02110-1301, USA
 */

#include "cpu.h"
#include "sdram.h"

void flush_icache_all(void)
{
	u32 addr, t;

	asm volatile ("mtc0 $0, $28"); /* clear TagLo */
	asm volatile ("mtc0 $0, $29"); /* clear TagHi */

	for (addr = KSEG0; addr < KSEG0 + CONFIG_SYS_ICACHE_SIZE;
	     addr += CONFIG_SYS_CACHELINE_SIZE) {
		asm volatile (
			".set mips3\n\t"
			" cache %0, 0(%1)\n\t"
			".set mips2\n\t"
			:
			: "I" (Index_Store_Tag_I), "r" (addr));
	}

	/* Invalidate the BTB (branch target buffer). The register operand
	   receives the config register value, hence the output constraint. */

	asm volatile (
		".set mips32\n\t"
		"mfc0 %0, $16, 7\n\t"
		"nop\n\t"
		"ori %0, 2\n\t"
		"mtc0 %0, $16, 7\n\t"
		".set mips2\n\t"
		: "=r" (t));
}

void flush_dcache_all(void)
{
	u32 addr;

	for (addr = KSEG0; addr < KSEG0 + CONFIG_SYS_DCACHE_SIZE;
	     addr += CONFIG_SYS_CACHELINE_SIZE) {
		asm volatile (
			".set mips3\n\t"
			" cache %0, 0(%1)\n\t"
			".set mips2\n\t"
			:
			: "I" (Index_Writeback_Inv_D), "r" (addr));
	}

	asm volatile ("sync");
}

void flush_cache_all(void)
{
	flush_dcache_all();
	flush_icache_all();
}

void handle_error_level(void)
{
	asm volatile(
		"mfc0 $t3, $12\n"	/* CP0_STATUS */
		"li $t4, 0xfffffffb\n"	/* mask clearing ERL (bit 2) */
		"and $t3, $t3, $t4\n"
		"mtc0 $t3, $12\n"
		"nop\n"
		: : : "$11", "$12");	/* clobbers t3 and t4 */
}

void enable_interrupts(void)
{
	asm volatile(
		"mfc0 $t3, $12\n"	/* CP0_STATUS */
		"li $t4, 0x0000fc01\n"	/* IM7..IM2 = 1 (unmask interrupts), IE = 1 (enable) */
		"or $t3, $t3, $t4\n"
		"mtc0 $t3, $12\n"
		"nop\n"
		: : : "$11", "$12");	/* clobbers t3 and t4 */
}

void init_interrupts(void)
{
	/* Set exception registers. */

	asm volatile(
		"mtc0 $zero, $18\n"	/* CP0_WATCHLO */
		"li $t3, 0x00800000\n"	/* IV = 1 (use 0x80000200 for interrupts) */
		"mtc0 $t3, $13\n"	/* CP0_CAUSE */
		"mfc0 $t4, $12\n"	/* CP0_STATUS */
		"li $t3, 0xffbfffff\n"	/* mask clearing BEV (bit 22) */
		"and $t3, $t3, $t4\n"
		"mtc0 $t3, $12\n"
		"nop\n"
		: : : "$11", "$12");	/* clobbers t3 and t4 */
}

void set_task(u8 asid)
{
	/* Set the ASID. */

	asm volatile(
		"mtc0 %0, $10\n"	/* CP0_ENTRYHI */
		"nop"
		:
		: "r" (asid)
	);
}

void init_stack(u32 top, u32 got, void (*function)(), u32 args[], u8 nargs)
{
	u32 *param = (u32 *) (top - 16);	/* the first parameter in the stack frame */
	u32 ugot = got - 0x80000000;		/* user mode address of the global pointer */
	u32 ufunction = (u32) function - 0x80000000; /* user mode address of the function */
	u8 i;

	/* Provide arguments to the function, filling the parameter slots
	   downwards from the first one. Writing through a C pointer keeps the
	   compiler aware of the stores instead of relying on a scratch register
	   surviving between asm statements. */

	for (i = 0; i < nargs; i++)
		*param-- = args[i];

	/* Store essential data for the function environment. */

	asm volatile(
		"sw %2, -100(%0)\n"	/* store the function address as t9 */
		"sw %1, -104(%0)\n"	/* store the global pointer */
		"sw %2, -112(%0)\n"	/* store the function address as ra (for invoke_task) */
		"sw %2, -116(%0)\n"	/* store the function address as EPC (for the handler) */
		:
		: "r" (top), "r" (ugot), "r" (ufunction)
		: "memory"
	);
}
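/* For reference, the frame laid out by init_stack, with offsets from the
   stack top, as consumed by invoke_task and the exception handler:

	top - 16	first argument (later arguments at descending addresses)
	top - 100	t9: user mode function address
	top - 104	gp: user mode GOT address
	top - 112	ra: function address (for invoke_task)
	top - 116	EPC: function address (for the handler)

   Below is a minimal usage sketch, not part of the original file: the ASID,
   the argument values and the example_start_task name itself are
   hypothetical, and the stack top and GOT addresses must come from the
   caller. */

void example_start_task(u32 stack_top, u32 got, void (*task_entry)())
{
	u32 args[2] = {0, 0};	/* hypothetical argument values */

	set_task(1);		/* select ASID 1 for subsequent mappings */
	init_stack(stack_top, got, task_entry, args, 2);
}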
void enter_user_mode(void)
{
	asm volatile(
		"mfc0 $t3, $12\n"	/* CP0_STATUS */
		"li $t4, 0x00000010\n"	/* KSU = 2 (user mode; UM = 1 on cores with the UM bit) */
		"or $t3, $t3, $t4\n"
		"mtc0 $t3, $12\n"
		"nop\n"
		: : : "$11", "$12");	/* clobbers t3 and t4 */
}

void init_tlb(void)
{
	/* Wire in the kseg0 mapping and the page tables. */

	asm volatile(
		"li $t1, 1\n"		/* index of first randomly-replaced entry */
		"mtc0 $t1, $6\n"	/* CP0_WIRED */
		"mtc0 $zero, $4\n"	/* CP0_CONTEXT */
		"mtc0 $zero, $10\n"	/* CP0_ENTRYHI */
		"nop\n"
		: : : "$9");		/* clobbers t1 */

	/* Map the code, making it globally available. */

	map_page_index(0x80000000, 0x00000000, 16 * 1024 * 1024, 0x1f, 0, 0);
}

void map_page_index(u32 virtual, u32 physical, u32 pagesize, u8 flags, u8 asid, u32 index)
{
	u32 start = (virtual & 0xffffe000) | asid;	/* VPN2 | ASID */
	u32 lower = ((physical & 0xfffff000) >> 6) | flags;
	u32 upper = (((physical + pagesize) & 0xfffff000) >> 6) | flags;
	u32 pagemask = ((pagesize - 1) & 0xfffff000) << 1;

	asm volatile(
		"mtc0 %3, $5\n"		/* CP0_PAGEMASK */

		/* Set the index. */

		"mtc0 %4, $0\n"		/* CP0_INDEX */

		/* Set physical address. */

		"mtc0 %0, $2\n"		/* CP0_ENTRYLO0 */
		"mtc0 %1, $3\n"		/* CP0_ENTRYLO1 */

		/* Set virtual address. */

		"mtc0 %2, $10\n"	/* CP0_ENTRYHI */
		"nop\n"

		"tlbwi\n"
		"nop"
		:
		: "r" (lower), "r" (upper), "r" (start), "r" (pagemask), "r" (index)
	);
}

void init_page_table(u32 page_table, u32 virtual, u32 physical, u32 pagesize, u8 flags, u8 asid)
{
	u32 lower = ((physical & 0xfffff000) >> 6) | flags;
	u32 upper = (((physical + pagesize) & 0xfffff000) >> 6) | flags;

	/*
	With a complete address space mapping involving pairs of 4KB pages
	described by two values for each entry, an address space of
	0x100000000 bytes would require...

	0x100000000 / (8 * 1024) == 0x100000000 >> 13
				 == 524288 entries
				 == 0x80000 entries

	Thus, each task's entries would require...

	0x80000 * 8 == 0x400000 bytes

	The kseg2 region thus permits 256 tasks occupying 0x40000000 bytes.

	However, a more modest address space occupying as much as 32MB, that
	is 0x02000000 bytes, would require...

	0x02000000 / (8 * 1024) == 0x02000000 >> 13
				== 4096 entries
				== 0x1000 entries

	Thus, each task's entries would only require...

	0x1000 * 8 == 0x8000 bytes
	*/

	u32 base = page_table + page_table_task_size * asid;

	/* Each page table entry corresponds to a pair of 4KB pages and holds
	   two values. */

	u32 entry = ((virtual & 0xffffe000) >> 13) * 8;
	u32 address = base + entry;

	/* The page tables should be permanently mapped to avoid hierarchical
	   TLB miss handling. */

	asm volatile(
		"sw %1, 0(%0)\n"
		"sw %2, 4(%0)\n"
		:
		: "r" (address), "r" (lower), "r" (upper)
		: "memory"
	);
}
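/*
Worked example, illustrative and not from the original code: the values these
mapping routines compute for a pair of 4KB pages at virtual address
0x00400000, backed by physical memory at 0x02000000, for ASID 1 with flags
0x1e (C = 3 cacheable, dirty, valid, not global) would be...

start    == (0x00400000 & 0xffffe000) | 1    == 0x00400001	(CP0_ENTRYHI)
lower    == (0x02000000 >> 6) | 0x1e         == 0x0008001e	(CP0_ENTRYLO0)
upper    == (0x02001000 >> 6) | 0x1e         == 0x0008005e	(CP0_ENTRYLO1)
pagemask == ((0x1000 - 1) & 0xfffff000) << 1 == 0		(4KB pages)
*/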
void map_page(u32 virtual, u32 physical, u32 pagesize, u8 flags, u8 asid)
{
	u32 start = (virtual & 0xffffe000) | asid;	/* VPN2 | ASID */
	u32 lower = ((physical & 0xfffff000) >> 6) | flags;
	u32 upper = (((physical + pagesize) & 0xfffff000) >> 6) | flags;
	u32 pagemask = ((pagesize - 1) & 0xfffff000) << 1;

	asm volatile(
		"mtc0 %3, $5\n"		/* CP0_PAGEMASK */

		/* Set physical address. */

		"mtc0 %0, $2\n"		/* CP0_ENTRYLO0 */
		"mtc0 %1, $3\n"		/* CP0_ENTRYLO1 */

		/* Set virtual address. */

		"mtc0 %2, $10\n"	/* CP0_ENTRYHI */
		"nop\n"

		"tlbwr\n"
		"nop"
		:
		: "r" (lower), "r" (upper), "r" (start), "r" (pagemask)
	);
}

void map_page_miss(u32 physical, u32 pagesize, u8 flags)
{
	u32 lower = ((physical & 0xfffff000) >> 6) | flags;
	u32 upper = (((physical + pagesize) & 0xfffff000) >> 6) | flags;
	u32 pagemask = ((pagesize - 1) & 0xfffff000) << 1;

	asm volatile(
		"mtc0 %2, $5\n"		/* CP0_PAGEMASK */

		/* Set physical address. When called from the TLB miss handler,
		   CP0_ENTRYHI already holds the failing virtual address. */

		"mtc0 %0, $2\n"		/* CP0_ENTRYLO0 */
		"mtc0 %1, $3\n"		/* CP0_ENTRYLO1 */
		"nop\n"

		"tlbwr\n"
		"nop"
		:
		: "r" (lower), "r" (upper), "r" (pagemask)
	);
}

void unmap_page(u32 virtual, u32 physical, u32 pagesize, u8 flags, u8 asid)
{
	u32 start = (virtual & 0xffffe000) | asid;	/* VPN2 | ASID */
	u32 lower = ((physical & 0xfffff000) >> 6) | flags;
	u32 upper = (((physical + pagesize) & 0xfffff000) >> 6) | flags;
	u32 pagemask = ((pagesize - 1) & 0xfffff000) << 1;
	u32 index = 0;

	asm volatile(
		"mtc0 %4, $5\n"		/* CP0_PAGEMASK */

		/* Set physical address. */

		"mtc0 %1, $2\n"		/* CP0_ENTRYLO0 */
		"mtc0 %2, $3\n"		/* CP0_ENTRYLO1 */

		/* Set virtual address. */

		"mtc0 %3, $10\n"	/* CP0_ENTRYHI */
		"nop\n"

		/* Find an existing mapping. */

		"tlbp\n"
		"nop\n"

		/* Read the index register to see if a match was found. */

		"mfc0 %0, $0\n"		/* CP0_INDEX */
		"nop"
		: "=r" (index)
		: "r" (lower), "r" (upper), "r" (start), "r" (pagemask)
	);

	/* Return if the page is not mapped: a failed probe sets the P bit
	   (bit 31) of CP0_INDEX. */

	if (index & 0x80000000)
		return;

	/* Otherwise, invalidate the mapping by clearing the valid bit (V, bit 1
	   of the EntryLo values). */

	map_page_index(virtual, physical, pagesize, flags & 0xfd, asid, index);
}
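/* Illustrative sketch, not part of the original file: populating a task's
   page table using init_page_table, following the sizing arithmetic in that
   function's comment. The 32MB region size, the physical base address
   (0x02000000), the ASID (1) and the flags (0x1e) are assumptions for the
   example; each entry covers a pair of 4KB pages, hence the 8KB stride and
   the resulting 0x1000 entries. */

void example_populate_task_pages(u32 page_table)
{
	u32 virtual = 0x00000000;
	u32 physical = 0x02000000;
	u32 limit = 32 * 1024 * 1024;

	for (; virtual < limit; virtual += 8 * 1024, physical += 8 * 1024)
		init_page_table(page_table, virtual, physical, 4 * 1024, 0x1e, 1);
}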