/*
 * This file contains miscellaneous low-level functions.
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Largely rewritten by Cort Dougan (cort@cs.nmt.edu)
 * and Paul Mackerras.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <linux/config.h>
#include <linux/sys.h>
#include <asm/unistd.h>
#include <asm/errno.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/cache.h>
#include <asm/cputable.h>
#include <asm/mmu.h>
#include <asm/ppc_asm.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>

	.text

	.align	5
_GLOBAL(__delay)
	cmpwi	0,r3,0
	mtctr	r3
	beqlr
1:	bdnz	1b
	blr

/*
 * This returns the high 64 bits of the product of two 64-bit numbers.
 */
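/*
 * A rough C equivalent, for reference only (assumes the 32-bit ABI:
 * first 64-bit argument in r3:r4, second in r5:r6, high word first):
 *
 *	unsigned long long mulhdu(unsigned long long a, unsigned long long b)
 *	{
 *		unsigned int ahi = a >> 32, alo = a, bhi = b >> 32, blo = b;
 *		unsigned long long m1 = (unsigned long long)alo * bhi;
 *		unsigned long long m2 = (unsigned long long)ahi * blo;
 *		unsigned long long carry = (unsigned int)m1 + (unsigned int)m2
 *			+ (((unsigned long long)alo * blo) >> 32);
 *		return (unsigned long long)ahi * bhi
 *			+ (m1 >> 32) + (m2 >> 32) + (carry >> 32);
 *	}
 */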
_GLOBAL(mulhdu)
	cmpwi	r6,0
	cmpwi	cr1,r3,0
	mr	r10,r4
	mulhwu	r4,r4,r5
	beq	1f
	mulhwu	r0,r10,r6
	mullw	r7,r10,r5
	addc	r7,r0,r7
	addze	r4,r4
1:	beqlr	cr1		/* all done if high part of A is 0 */
	mr	r10,r3
	mullw	r9,r3,r5
	mulhwu	r3,r3,r5
	beq	2f
	mullw	r0,r10,r6
	mulhwu	r8,r10,r6
	addc	r7,r0,r7
	adde	r4,r4,r8
	addze	r3,r3
2:	addc	r4,r4,r9
	addze	r3,r3
	blr

/*
 * Returns (address we're running at) - (address we were linked at)
 * for use before the text and data are mapped to KERNELBASE.
 */
_GLOBAL(reloc_offset)
	mflr	r0
	bl	1f
1:	mflr	r3
	LOADADDR(r4,1b)
	subf	r3,r4,r3
	mtlr	r0
	blr

/*
 * add_reloc_offset(x) returns x + reloc_offset().
 */
_GLOBAL(add_reloc_offset)
	mflr	r0
	bl	1f
1:	mflr	r5
	LOADADDR(r4,1b)
	subf	r5,r4,r5
	add	r3,r3,r5
	mtlr	r0
	blr

/*
 * sub_reloc_offset(x) returns x - reloc_offset().
 */
_GLOBAL(sub_reloc_offset)
	mflr	r0
	bl	1f
1:	mflr	r5
	lis	r4,1b@ha
	addi	r4,r4,1b@l
	subf	r5,r4,r5
	subf	r3,r5,r3
	mtlr	r0
	blr

/*
 * reloc_got2 runs through the .got2 section adding an offset
 * to each entry.
 */
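/*
 * Conceptually (an illustrative sketch only), with __got2_start and
 * __got2_end provided by the linker script:
 *
 *	void reloc_got2(unsigned long offset)
 *	{
 *		unsigned long *p;
 *		for (p = __got2_start; p < __got2_end; p++)
 *			*p += offset;	// relocate each GOT2 entry
 *	}
 *
 * The assembly also has to find the table itself before relocation,
 * which is what the bl/mflr pair below is for.
 */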
_GLOBAL(reloc_got2)
	mflr	r11
	lis	r7,__got2_start@ha
	addi	r7,r7,__got2_start@l
	lis	r8,__got2_end@ha
	addi	r8,r8,__got2_end@l
	subf	r8,r7,r8
	srwi.	r8,r8,2
	beqlr
	mtctr	r8
	bl	1f
1:	mflr	r0
	lis	r4,1b@ha
	addi	r4,r4,1b@l
	subf	r0,r4,r0
	add	r7,r0,r7
2:	lwz	r0,0(r7)
	add	r0,r0,r3
	stw	r0,0(r7)
	addi	r7,r7,4
	bdnz	2b
	mtlr	r11
	blr

/*
 * identify_cpu,
 * called with r3 = data offset and r4 = CPU number
 * doesn't change r3
 */
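/*
 * Roughly equivalent C (a sketch, not the kernel's actual code): walk
 * the cpu_specs table until (PVR & pvr_mask) == pvr_value, then record
 * the match in cur_cpu_spec.  The data offset in r3 is applied because
 * this can run before the kernel is relocated.
 *
 *	void identify_cpu(void)
 *	{
 *		struct cpu_spec *s = cpu_specs;
 *		unsigned int pvr = mfspr(SPRN_PVR);
 *		while ((pvr & s->pvr_mask) != s->pvr_value)
 *			s++;
 *		cur_cpu_spec = s;
 *	}
 */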
_GLOBAL(identify_cpu)
	addis	r8,r3,cpu_specs@ha
	addi	r8,r8,cpu_specs@l
	mfpvr	r7
1:
	lwz	r5,CPU_SPEC_PVR_MASK(r8)
	and	r5,r5,r7
	lwz	r6,CPU_SPEC_PVR_VALUE(r8)
	cmplw	0,r6,r5
	beq	1f
	addi	r8,r8,CPU_SPEC_ENTRY_SIZE
	b	1b
1:
	addis	r6,r3,cur_cpu_spec@ha
	addi	r6,r6,cur_cpu_spec@l
	sub	r8,r8,r3
	stw	r8,0(r6)
	blr

/*
 * do_cpu_ftr_fixups - goes through the list of CPU feature fixups
 * and writes nops over sections of code that don't apply for this cpu.
 * r3 = data offset (not changed)
 */
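/*
 * Each fixup record is 16 bytes; in C terms the loop below behaves
 * like this sketch (struct and field names are illustrative):
 *
 *	struct ftr_fixup { u32 mask, value; u32 *start, *end; };
 *
 *	for (f = __start___ftr_fixup; f < __stop___ftr_fixup; f++) {
 *		if ((cur_cpu_spec->cpu_features & f->mask) == f->value)
 *			continue;		// feature present: keep code
 *		for (p = f->start; p < f->end; p++)
 *			*p = 0x60000000;	// overwrite with "nop"
 *	}
 */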
_GLOBAL(do_cpu_ftr_fixups)
	/* Get CPU 0 features */
	addis	r6,r3,cur_cpu_spec@ha
	addi	r6,r6,cur_cpu_spec@l
	lwz	r4,0(r6)
	add	r4,r4,r3
	lwz	r4,CPU_SPEC_FEATURES(r4)

	/* Get the fixup table */
	addis	r6,r3,__start___ftr_fixup@ha
	addi	r6,r6,__start___ftr_fixup@l
	addis	r7,r3,__stop___ftr_fixup@ha
	addi	r7,r7,__stop___ftr_fixup@l

	/* Do the fixup */
1:	cmplw	0,r6,r7
	bgelr
	addi	r6,r6,16
	lwz	r8,-16(r6)	/* mask */
	and	r8,r8,r4
	lwz	r9,-12(r6)	/* value */
	cmplw	0,r8,r9
	beq	1b
	lwz	r8,-8(r6)	/* section begin */
	lwz	r9,-4(r6)	/* section end */
	subf.	r9,r8,r9
	beq	1b
	/* write nops over the section of code */
	/* todo: if large section, add a branch at the start of it */
	srwi	r9,r9,2
	mtctr	r9
	add	r8,r8,r3
	lis	r0,0x60000000@h	/* nop */
3:	stw	r0,0(r8)
	andi.	r10,r4,CPU_FTR_SPLIT_ID_CACHE@l
	beq	2f
	dcbst	0,r8		/* suboptimal, but simpler */
	sync
	icbi	0,r8
2:	addi	r8,r8,4
	bdnz	3b
	sync			/* additional sync needed on g4 */
	isync
	b	1b

/*
 * call_setup_cpu - call the setup_cpu function for this cpu
 * r3 = data offset, r24 = cpu number
 *
 * Setup function is called with:
 * r3 = data offset
 * r4 = ptr to CPU spec (relocated)
 */
_GLOBAL(call_setup_cpu)
	addis	r4,r3,cur_cpu_spec@ha
	addi	r4,r4,cur_cpu_spec@l
	lwz	r4,0(r4)
	add	r4,r4,r3
	lwz	r5,CPU_SPEC_SETUP(r4)
	cmpi	0,r5,0
	add	r5,r5,r3
	beqlr
	mtctr	r5
	bctr

#if defined(CONFIG_CPU_FREQ_PMAC) && defined(CONFIG_6xx)

/* This gets called by via-pmu.c to switch the PLL selection
 * on the 750fx CPU. This function should really be moved to some
 * other place (as should most of the cpufreq code in via-pmu).
 */
_GLOBAL(low_choose_750fx_pll)
	/* Clear MSR:EE */
	mfmsr	r7
	rlwinm	r0,r7,0,17,15
	mtmsr	r0

	/* If switching to PLL1, disable HID0:BTIC */
	cmplwi	cr0,r3,0
	beq	1f
	mfspr	r5,SPRN_HID0
	rlwinm	r5,r5,0,27,25
	sync
	mtspr	SPRN_HID0,r5
	isync
	sync

1:
	/* Calc new HID1 value */
	mfspr	r4,SPRN_HID1
	rlwinm	r5,r3,16,15,15	/* Build a HID1:PS bit from parameter */
	rlwinm	r4,r4,0,16,14	/* Clear out HID1:PS from value read */
	or	r4,r4,r5	/* Could I have used rlwimi here? */
	mtspr	SPRN_HID1,r4

	/* Store new HID1 image */
	rlwinm	r6,r1,0,0,18
	lwz	r6,TI_CPU(r6)
	slwi	r6,r6,2
	addis	r6,r6,nap_save_hid1@ha
	stw	r4,nap_save_hid1@l(r6)

	/* If switching to PLL0, enable HID0:BTIC */
	cmplwi	cr0,r3,0
	bne	1f
	mfspr	r5,SPRN_HID0
	ori	r5,r5,HID0_BTIC
	sync
	mtspr	SPRN_HID0,r5
	isync
	sync

1:
	/* Return */
	mtmsr	r7
	blr

_GLOBAL(low_choose_7447a_dfs)
	/* Clear MSR:EE */
	mfmsr	r7
	rlwinm	r0,r7,0,17,15
	mtmsr	r0

	/* Calc new HID1 value */
	mfspr	r4,SPRN_HID1
	insrwi	r4,r3,1,9	/* insert parameter into bit 9 */
	sync
	mtspr	SPRN_HID1,r4
	sync
	isync

	/* Return */
	mtmsr	r7
	blr

#endif /* CONFIG_CPU_FREQ_PMAC && CONFIG_6xx */

/*
 * Complement the mask, AND it onto the MSR, then OR some values on.
 * _nmask_and_or_msr(nmask, value_to_or)
 */
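/*
 * In C terms (sketch only):
 *
 *	void _nmask_and_or_msr(unsigned long nmask, unsigned long value_to_or)
 *	{
 *		unsigned long msr = mfmsr();
 *		mtmsr((msr & ~nmask) | value_to_or);
 *	}
 */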
_GLOBAL(_nmask_and_or_msr)
	mfmsr	r0		/* Get current msr */
	andc	r0,r0,r3	/* And off the bits set in r3 (first parm) */
	or	r0,r0,r4	/* Or on the bits in r4 (second parm) */
	SYNC			/* Some chip revs have problems here... */
	mtmsr	r0		/* Update machine state */
	isync
	blr			/* Done */


/*
 * Flush MMU TLB
 */
_GLOBAL(_tlbia)
#if defined(CONFIG_40x)
	sync			/* Flush to memory before changing mapping */
	tlbia
	isync			/* Flush shadow TLB */
#elif defined(CONFIG_44x)
	li	r3,0
	sync

	/* Load high watermark */
	lis	r4,tlb_44x_hwater@ha
	lwz	r5,tlb_44x_hwater@l(r4)

1:	tlbwe	r3,r3,PPC44x_TLB_PAGEID
	addi	r3,r3,1
	cmpw	0,r3,r5
	ble	1b

	isync
#elif defined(CONFIG_FSL_BOOKE)
	/* Invalidate all entries in TLB0 */
	li	r3, 0x04
	tlbivax	0,3
	/* Invalidate all entries in TLB1 */
	li	r3, 0x0c
	tlbivax	0,3
	/* Invalidate all entries in TLB2 */
	li	r3, 0x14
	tlbivax	0,3
	/* Invalidate all entries in TLB3 */
	li	r3, 0x1c
	tlbivax	0,3
	msync
#ifdef CONFIG_SMP
	tlbsync
#endif /* CONFIG_SMP */
#else /* !(CONFIG_40x || CONFIG_44x || CONFIG_FSL_BOOKE) */
#if defined(CONFIG_SMP)
	rlwinm	r8,r1,0,0,18
	lwz	r8,TI_CPU(r8)
	oris	r8,r8,10
	mfmsr	r10
	SYNC
	rlwinm	r0,r10,0,17,15	/* clear bit 16 (MSR_EE) */
	rlwinm	r0,r0,0,28,26	/* clear DR */
	mtmsr	r0
	SYNC_601
	isync
	lis	r9,mmu_hash_lock@h
	ori	r9,r9,mmu_hash_lock@l
	tophys(r9,r9)
10:	lwarx	r7,0,r9
	cmpwi	0,r7,0
	bne-	10b
	stwcx.	r8,0,r9
	bne-	10b
	sync
	tlbia
	sync
	TLBSYNC
	li	r0,0
	stw	r0,0(r9)	/* clear mmu_hash_lock */
	mtmsr	r10
	SYNC_601
	isync
#else /* CONFIG_SMP */
	sync
	tlbia
	sync
#endif /* CONFIG_SMP */
#endif /* ! defined(CONFIG_40x) */
	blr

/*
 * Flush MMU TLB for a particular address
 */
_GLOBAL(_tlbie)
#if defined(CONFIG_40x)
	tlbsx.	r3, 0, r3
	bne	10f
	sync
	/* There are only 64 TLB entries, so r3 < 64, which means bit 25 is clear.
	 * Since 25 is the V bit in the TLB_TAG, loading this value will invalidate
	 * the TLB entry. */
	tlbwe	r3, r3, TLB_TAG
	isync
10:
#elif defined(CONFIG_44x)
	mfspr	r4,SPRN_MMUCR
	mfspr	r5,SPRN_PID	/* Get PID */
	rlwimi	r4,r5,0,24,31	/* Set TID */
	mtspr	SPRN_MMUCR,r4

	tlbsx.	r3, 0, r3
	bne	10f
	sync
	/* There are only 64 TLB entries, so r3 < 64,
	 * which means bit 22 is clear.  Since 22 is
	 * the V bit in the TLB_PAGEID, loading this
	 * value will invalidate the TLB entry.
	 */
	tlbwe	r3, r3, PPC44x_TLB_PAGEID
	isync
10:
#elif defined(CONFIG_FSL_BOOKE)
	rlwinm	r4, r3, 0, 0, 19
	ori	r5, r4, 0x08	/* TLBSEL = 1 */
	ori	r6, r4, 0x10	/* TLBSEL = 2 */
	ori	r7, r4, 0x18	/* TLBSEL = 3 */
	tlbivax	0, r4
	tlbivax	0, r5
	tlbivax	0, r6
	tlbivax	0, r7
	msync
#if defined(CONFIG_SMP)
	tlbsync
#endif /* CONFIG_SMP */
#else /* !(CONFIG_40x || CONFIG_44x || CONFIG_FSL_BOOKE) */
#if defined(CONFIG_SMP)
	rlwinm	r8,r1,0,0,18
	lwz	r8,TI_CPU(r8)
	oris	r8,r8,11
	mfmsr	r10
	SYNC
	rlwinm	r0,r10,0,17,15	/* clear bit 16 (MSR_EE) */
	rlwinm	r0,r0,0,28,26	/* clear DR */
	mtmsr	r0
	SYNC_601
	isync
	lis	r9,mmu_hash_lock@h
	ori	r9,r9,mmu_hash_lock@l
	tophys(r9,r9)
10:	lwarx	r7,0,r9
	cmpwi	0,r7,0
	bne-	10b
	stwcx.	r8,0,r9
	bne-	10b
	eieio
	tlbie	r3
	sync
	TLBSYNC
	li	r0,0
	stw	r0,0(r9)	/* clear mmu_hash_lock */
	mtmsr	r10
	SYNC_601
	isync
#else /* CONFIG_SMP */
	tlbie	r3
	sync
#endif /* CONFIG_SMP */
#endif /* ! CONFIG_40x */
	blr

/*
 * Flush instruction cache.
 * This is a no-op on the 601.
 */
_GLOBAL(flush_instruction_cache)
#if defined(CONFIG_8xx)
	isync
	lis	r5, IDC_INVALL@h
	mtspr	SPRN_IC_CST, r5
#elif defined(CONFIG_4xx)
#ifdef CONFIG_403GCX
	li	r3, 512
	mtctr	r3
	lis	r4, KERNELBASE@h
1:	iccci	0, r4
	addi	r4, r4, 16
	bdnz	1b
#else
	lis	r3, KERNELBASE@h
	iccci	0,r3
#endif
#elif defined(CONFIG_FSL_BOOKE)
BEGIN_FTR_SECTION
	mfspr	r3,SPRN_L1CSR0
	ori	r3,r3,L1CSR0_CFI|L1CSR0_CLFC
	/* msync; isync recommended here */
	mtspr	SPRN_L1CSR0,r3
	isync
	blr
END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE)
	mfspr	r3,SPRN_L1CSR1
	ori	r3,r3,L1CSR1_ICFI|L1CSR1_ICLFR
	mtspr	SPRN_L1CSR1,r3
#else
	mfspr	r3,SPRN_PVR
	rlwinm	r3,r3,16,16,31
	cmpwi	0,r3,1
	beqlr			/* for 601, do nothing */
	/* 603/604 processor - use invalidate-all bit in HID0 */
	mfspr	r3,SPRN_HID0
	ori	r3,r3,HID0_ICFI
	mtspr	SPRN_HID0,r3
#endif /* CONFIG_8xx/4xx */
	isync
	blr

/*
 * Write any modified data cache blocks out to memory
 * and invalidate the corresponding instruction cache blocks.
 * This is a no-op on the 601.
 *
 * flush_icache_range(unsigned long start, unsigned long stop)
 */
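/*
 * The cache-line walk below is, in C terms (an illustrative sketch):
 *
 *	void flush_icache_range(unsigned long start, unsigned long stop)
 *	{
 *		unsigned long p, base = start & ~(L1_CACHE_BYTES - 1);
 *		for (p = base; p < stop; p += L1_CACHE_BYTES)
 *			dcbst(p);	// push dirty data to memory
 *		sync();			// wait for the stores to reach RAM
 *		for (p = base; p < stop; p += L1_CACHE_BYTES)
 *			icbi(p);	// toss stale instruction lines
 *		sync(); isync();	// discard prefetched instructions
 *	}
 */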
_GLOBAL(flush_icache_range)
BEGIN_FTR_SECTION
	blr				/* for 601, do nothing */
END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE)
	li	r5,L1_CACHE_BYTES-1
	andc	r3,r3,r5
	subf	r4,r3,r4
	add	r4,r4,r5
	srwi.	r4,r4,L1_CACHE_SHIFT
	beqlr
	mtctr	r4
	mr	r6,r3
1:	dcbst	0,r3
	addi	r3,r3,L1_CACHE_BYTES
	bdnz	1b
	sync				/* wait for dcbst's to get to ram */
	mtctr	r4
2:	icbi	0,r6
	addi	r6,r6,L1_CACHE_BYTES
	bdnz	2b
	sync				/* additional sync needed on g4 */
	isync
	blr
/*
 * Write any modified data cache blocks out to memory.
 * Does not invalidate the corresponding cache lines (especially for
 * any corresponding instruction cache).
 *
 * clean_dcache_range(unsigned long start, unsigned long stop)
 */
_GLOBAL(clean_dcache_range)
	li	r5,L1_CACHE_BYTES-1
	andc	r3,r3,r5
	subf	r4,r3,r4
	add	r4,r4,r5
	srwi.	r4,r4,L1_CACHE_SHIFT
	beqlr
	mtctr	r4

1:	dcbst	0,r3
	addi	r3,r3,L1_CACHE_BYTES
	bdnz	1b
	sync				/* wait for dcbst's to get to ram */
	blr

/*
 * Write any modified data cache blocks out to memory and invalidate them.
 * Does not invalidate the corresponding instruction cache blocks.
 *
 * flush_dcache_range(unsigned long start, unsigned long stop)
 */
_GLOBAL(flush_dcache_range)
	li	r5,L1_CACHE_BYTES-1
	andc	r3,r3,r5
	subf	r4,r3,r4
	add	r4,r4,r5
	srwi.	r4,r4,L1_CACHE_SHIFT
	beqlr
	mtctr	r4

1:	dcbf	0,r3
	addi	r3,r3,L1_CACHE_BYTES
	bdnz	1b
	sync				/* wait for dcbf's to get to ram */
	blr

/*
 * Like above, but invalidate the D-cache.  This is used by the 8xx
 * to invalidate the cache so the PPC core doesn't get stale data
 * from the CPM (no cache snooping here :-).
 *
 * invalidate_dcache_range(unsigned long start, unsigned long stop)
 */
_GLOBAL(invalidate_dcache_range)
	li	r5,L1_CACHE_BYTES-1
	andc	r3,r3,r5
	subf	r4,r3,r4
	add	r4,r4,r5
	srwi.	r4,r4,L1_CACHE_SHIFT
	beqlr
	mtctr	r4

1:	dcbi	0,r3
	addi	r3,r3,L1_CACHE_BYTES
	bdnz	1b
	sync				/* wait for dcbi's to get to ram */
	blr

#ifdef CONFIG_NOT_COHERENT_CACHE
/*
 * 40x cores have 8K or 16K dcache and 32 byte line size.
 * 44x has a 32K dcache and 32 byte line size.
 * 8xx has 1, 2, 4, 8K variants.
 * For now, cover the worst case of the 44x.
 * Must be called with external interrupts disabled.
 */
#define CACHE_NWAYS	64
#define CACHE_NLINES	16

_GLOBAL(flush_dcache_all)
	li	r4, (2 * CACHE_NWAYS * CACHE_NLINES)
	mtctr	r4
	lis	r5, KERNELBASE@h
1:	lwz	r3, 0(r5)		/* Load one word from every line */
	addi	r5, r5, L1_CACHE_BYTES
	bdnz	1b
	blr
#endif /* CONFIG_NOT_COHERENT_CACHE */

/*
 * Flush a particular page from the data cache to RAM.
 * Note: this is necessary because the instruction cache does *not*
 * snoop from the data cache.
 * This is a no-op on the 601 which has a unified cache.
 *
 *	void __flush_dcache_icache(void *page)
 */
_GLOBAL(__flush_dcache_icache)
BEGIN_FTR_SECTION
	blr					/* for 601, do nothing */
END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE)
	rlwinm	r3,r3,0,0,19			/* Get page base address */
	li	r4,4096/L1_CACHE_BYTES		/* Number of lines in a page */
	mtctr	r4
	mr	r6,r3
0:	dcbst	0,r3				/* Write line to ram */
	addi	r3,r3,L1_CACHE_BYTES
	bdnz	0b
	sync
	mtctr	r4
1:	icbi	0,r6
	addi	r6,r6,L1_CACHE_BYTES
	bdnz	1b
	sync
	isync
	blr

/*
 * Flush a particular page from the data cache to RAM, identified
 * by its physical address.  We turn off the MMU so we can just use
 * the physical address (this may be a highmem page without a kernel
 * mapping).
 *
 *	void __flush_dcache_icache_phys(unsigned long physaddr)
 */
_GLOBAL(__flush_dcache_icache_phys)
BEGIN_FTR_SECTION
	blr					/* for 601, do nothing */
END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE)
	mfmsr	r10
	rlwinm	r0,r10,0,28,26			/* clear DR */
	mtmsr	r0
	isync
	rlwinm	r3,r3,0,0,19			/* Get page base address */
	li	r4,4096/L1_CACHE_BYTES		/* Number of lines in a page */
	mtctr	r4
	mr	r6,r3
0:	dcbst	0,r3				/* Write line to ram */
	addi	r3,r3,L1_CACHE_BYTES
	bdnz	0b
	sync
	mtctr	r4
1:	icbi	0,r6
	addi	r6,r6,L1_CACHE_BYTES
	bdnz	1b
	sync
	mtmsr	r10				/* restore DR */
	isync
	blr

/*
 * Clear pages using the dcbz instruction, which doesn't cause any
 * memory traffic (except to write out any cache lines which get
 * displaced).  This only works on cacheable memory.
 *
 * void clear_pages(void *page, int order)
 */
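/*
 * Loop structure in C (a sketch only): 2^order pages, one dcbz per
 * cache line (the 8xx variant below uses plain stores instead):
 *
 *	void clear_pages(void *page, int order)
 *	{
 *		unsigned long nlines = (4096 / L1_CACHE_BYTES) << order;
 *		char *p = page;
 *		while (nlines--) {
 *			dcbz(p);	// zero a whole line in-cache
 *			p += L1_CACHE_BYTES;
 *		}
 *	}
 */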
_GLOBAL(clear_pages)
	li	r0,4096/L1_CACHE_BYTES
	slw	r0,r0,r4
	mtctr	r0
#ifdef CONFIG_8xx
	li	r4, 0
1:	stw	r4, 0(r3)
	stw	r4, 4(r3)
	stw	r4, 8(r3)
	stw	r4, 12(r3)
#else
1:	dcbz	0,r3
#endif
	addi	r3,r3,L1_CACHE_BYTES
	bdnz	1b
	blr

/*
 * Copy a whole page.  We use the dcbz instruction on the destination
 * to reduce memory traffic (it eliminates the unnecessary reads of
 * the destination into cache).  This requires that the destination
 * is cacheable.
 */
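/*
 * Outline of the non-8xx loop below (pseudocode sketch, one iteration
 * per destination cache line):
 *
 *	for (each L1_CACHE_BYTES-sized line of the page) {
 *		dcbt(src + prefetch_distance);	// prefetch upcoming source
 *		dcbz(dst);		// allocate dst line without a read
 *		copy 16 bytes, L1_CACHE_BYTES/16 times (COPY_16_BYTES);
 *	}
 *
 * The last MAX_COPY_PREFETCH lines are copied in a second pass with no
 * further prefetching, so dcbt never touches past the source page.
 */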
#define COPY_16_BYTES		\
	lwz	r6,4(r4);	\
	lwz	r7,8(r4);	\
	lwz	r8,12(r4);	\
	lwzu	r9,16(r4);	\
	stw	r6,4(r3);	\
	stw	r7,8(r3);	\
	stw	r8,12(r3);	\
	stwu	r9,16(r3)

_GLOBAL(copy_page)
	addi	r3,r3,-4
	addi	r4,r4,-4

#ifdef CONFIG_8xx
	/* don't use prefetch on 8xx */
	li	r0,4096/L1_CACHE_BYTES
	mtctr	r0
1:	COPY_16_BYTES
	bdnz	1b
	blr

#else	/* not 8xx, we can prefetch */
	li	r5,4

#if MAX_COPY_PREFETCH > 1
	li	r0,MAX_COPY_PREFETCH
	li	r11,4
	mtctr	r0
11:	dcbt	r11,r4
	addi	r11,r11,L1_CACHE_BYTES
	bdnz	11b
#else /* MAX_COPY_PREFETCH == 1 */
	dcbt	r5,r4
	li	r11,L1_CACHE_BYTES+4
#endif /* MAX_COPY_PREFETCH */
	li	r0,4096/L1_CACHE_BYTES - MAX_COPY_PREFETCH
	crclr	4*cr0+eq
2:
	mtctr	r0
1:
	dcbt	r11,r4
	dcbz	r5,r3
	COPY_16_BYTES
#if L1_CACHE_BYTES >= 32
	COPY_16_BYTES
#if L1_CACHE_BYTES >= 64
	COPY_16_BYTES
	COPY_16_BYTES
#if L1_CACHE_BYTES >= 128
	COPY_16_BYTES
	COPY_16_BYTES
	COPY_16_BYTES
	COPY_16_BYTES
#endif
#endif
#endif
	bdnz	1b
	beqlr
	crnot	4*cr0+eq,4*cr0+eq
	li	r0,MAX_COPY_PREFETCH
	li	r11,4
	b	2b
#endif	/* CONFIG_8xx */

/*
 * void atomic_clear_mask(atomic_t mask, atomic_t *addr);
 * void atomic_set_mask(atomic_t mask, atomic_t *addr);
 */
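/*
 * Both routines are standard lwarx/stwcx. load-reserve loops; in
 * pseudocode (a sketch):
 *
 *	do {
 *		old = load_reserve(addr);		// lwarx
 *		new = old & ~mask;  // or: old | mask
 *	} while (!store_conditional(addr, new));	// stwcx., retry if
 *							// reservation lost
 */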
_GLOBAL(atomic_clear_mask)
10:	lwarx	r5,0,r4
	andc	r5,r5,r3
	PPC405_ERR77(0,r4)
	stwcx.	r5,0,r4
	bne-	10b
	blr
_GLOBAL(atomic_set_mask)
10:	lwarx	r5,0,r4
	or	r5,r5,r3
	PPC405_ERR77(0,r4)
	stwcx.	r5,0,r4
	bne-	10b
	blr

/*
 * I/O string operations
 *
 * insb(port, buf, len)
 * outsb(port, buf, len)
 * insw(port, buf, len)
 * outsw(port, buf, len)
 * insl(port, buf, len)
 * outsl(port, buf, len)
 * insw_ns(port, buf, len)
 * outsw_ns(port, buf, len)
 * insl_ns(port, buf, len)
 * outsl_ns(port, buf, len)
 *
 * The *_ns versions don't do byte-swapping.
 */
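/*
 * All of these follow the same pattern; _insb, for example, behaves
 * like this C sketch (eieio keeps the device accesses in order):
 *
 *	void _insb(volatile unsigned char *port, void *buf, long count)
 *	{
 *		unsigned char *p = buf;
 *		while (count-- > 0) {
 *			*p++ = *port;	// read the device register
 *			eieio();	// order the I/O accesses
 *		}
 *	}
 */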
_GLOBAL(_insb)
	cmpwi	0,r5,0
	mtctr	r5
	subi	r4,r4,1
	blelr-
00:	lbz	r5,0(r3)
	eieio
	stbu	r5,1(r4)
	bdnz	00b
	blr

_GLOBAL(_outsb)
	cmpwi	0,r5,0
	mtctr	r5
	subi	r4,r4,1
	blelr-
00:	lbzu	r5,1(r4)
	stb	r5,0(r3)
	eieio
	bdnz	00b
	blr

_GLOBAL(_insw)
	cmpwi	0,r5,0
	mtctr	r5
	subi	r4,r4,2
	blelr-
00:	lhbrx	r5,0,r3
	eieio
	sthu	r5,2(r4)
	bdnz	00b
	blr

_GLOBAL(_outsw)
	cmpwi	0,r5,0
	mtctr	r5
	subi	r4,r4,2
	blelr-
00:	lhzu	r5,2(r4)
	eieio
	sthbrx	r5,0,r3
	bdnz	00b
	blr

_GLOBAL(_insl)
	cmpwi	0,r5,0
	mtctr	r5
	subi	r4,r4,4
	blelr-
00:	lwbrx	r5,0,r3
	eieio
	stwu	r5,4(r4)
	bdnz	00b
	blr

_GLOBAL(_outsl)
	cmpwi	0,r5,0
	mtctr	r5
	subi	r4,r4,4
	blelr-
00:	lwzu	r5,4(r4)
	stwbrx	r5,0,r3
	eieio
	bdnz	00b
	blr

_GLOBAL(__ide_mm_insw)
_GLOBAL(_insw_ns)
	cmpwi	0,r5,0
	mtctr	r5
	subi	r4,r4,2
	blelr-
00:	lhz	r5,0(r3)
	eieio
	sthu	r5,2(r4)
	bdnz	00b
	blr

_GLOBAL(__ide_mm_outsw)
_GLOBAL(_outsw_ns)
	cmpwi	0,r5,0
	mtctr	r5
	subi	r4,r4,2
	blelr-
00:	lhzu	r5,2(r4)
	sth	r5,0(r3)
	eieio
	bdnz	00b
	blr

_GLOBAL(__ide_mm_insl)
_GLOBAL(_insl_ns)
	cmpwi	0,r5,0
	mtctr	r5
	subi	r4,r4,4
	blelr-
00:	lwz	r5,0(r3)
	eieio
	stwu	r5,4(r4)
	bdnz	00b
	blr

_GLOBAL(__ide_mm_outsl)
_GLOBAL(_outsl_ns)
	cmpwi	0,r5,0
	mtctr	r5
	subi	r4,r4,4
	blelr-
00:	lwzu	r5,4(r4)
	stw	r5,0(r3)
	eieio
	bdnz	00b
	blr

/*
 * Extended precision shifts.
 *
 * Updated to be valid for shift counts from 0 to 63 inclusive.
 * -- Gabriel
 *
 * R3/R4 has 64 bit value
 * R5 has shift count
 * result in R3/R4
 *
 *  ashrdi3: arithmetic right shift (sign propagation)
 *  lshrdi3: logical right shift
 *  ashldi3: left shift
 */
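/*
 * Sketch of the trick used below: PPC slw/srw take a 6-bit shift
 * amount and yield 0 for shifts of 32..63, unlike C's << and >>.
 * So a 64-bit logical right shift of the pair MSW:LSW by count
 * (0..63) decomposes into word shifts, with no branches:
 *
 *	LSW = (LSW >> count)		// 0 if count > 31
 *	    | (MSW << (32 - count))	// bits crossing the boundary
 *	    | (MSW >> (count - 32));	// 0 if count < 32
 *	MSW = MSW >> count;
 *
 * __ashrdi3 uses sraw for the MSW terms and masks the (count - 32)
 * term separately, since sraw propagates the sign instead of
 * saturating to zero.
 */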
_GLOBAL(__ashrdi3)
	subfic	r6,r5,32
	srw	r4,r4,r5	# LSW = count > 31 ? 0 : LSW >> count
	addi	r7,r5,32	# could be xori, or addi with -32
	slw	r6,r3,r6	# t1 = count > 31 ? 0 : MSW << (32-count)
	rlwinm	r8,r7,0,32	# t3 = (count < 32) ? 32 : 0
	sraw	r7,r3,r7	# t2 = MSW >> (count-32)
	or	r4,r4,r6	# LSW |= t1
	slw	r7,r7,r8	# t2 = (count < 32) ? 0 : t2
	sraw	r3,r3,r5	# MSW = MSW >> count
	or	r4,r4,r7	# LSW |= t2
	blr

_GLOBAL(__ashldi3)
	subfic	r6,r5,32
	slw	r3,r3,r5	# MSW = count > 31 ? 0 : MSW << count
	addi	r7,r5,32	# could be xori, or addi with -32
	srw	r6,r4,r6	# t1 = count > 31 ? 0 : LSW >> (32-count)
	slw	r7,r4,r7	# t2 = count < 32 ? 0 : LSW << (count-32)
	or	r3,r3,r6	# MSW |= t1
	slw	r4,r4,r5	# LSW = LSW << count
	or	r3,r3,r7	# MSW |= t2
	blr

_GLOBAL(__lshrdi3)
	subfic	r6,r5,32
	srw	r4,r4,r5	# LSW = count > 31 ? 0 : LSW >> count
	addi	r7,r5,32	# could be xori, or addi with -32
	slw	r6,r3,r6	# t1 = count > 31 ? 0 : MSW << (32-count)
	srw	r7,r3,r7	# t2 = count < 32 ? 0 : MSW >> (count-32)
	or	r4,r4,r6	# LSW |= t1
	srw	r3,r3,r5	# MSW = MSW >> count
	or	r4,r4,r7	# LSW |= t2
	blr

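/*
 * Branch-free absolute value: r4 = r3 >> 31 is 0 for non-negative
 * inputs and all-ones for negative ones, so (r3 ^ r4) - r4 negates
 * exactly when r3 is negative.
 */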
_GLOBAL(abs)
	srawi	r4,r3,31
	xor	r3,r3,r4
	sub	r3,r3,r4
	blr

_GLOBAL(_get_SP)
	mr	r3,r1		/* Close enough */
	blr

/*
 * Create a kernel thread
 *   kernel_thread(fn, arg, flags)
 */
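/*
 * In outline (a sketch): clone() with CLONE_VM | CLONE_UNTRACED ORed
 * into the caller's flags; the child calls fn(arg) and exits via
 * sys_exit(0) if fn ever returns:
 *
 *	pid = sys_clone(flags | CLONE_VM | CLONE_UNTRACED, 0);
 *	if (pid == 0) {		// child
 *		fn(arg);
 *		sys_exit(0);
 *	}
 *	return pid;		// parent
 */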
_GLOBAL(kernel_thread)
	stwu	r1,-16(r1)
	stw	r30,8(r1)
	stw	r31,12(r1)
	mr	r30,r3		/* function */
	mr	r31,r4		/* argument */
	ori	r3,r5,CLONE_VM	/* flags */
	oris	r3,r3,CLONE_UNTRACED>>16
	li	r4,0		/* new sp (unused) */
	li	r0,__NR_clone
	sc
	cmpwi	0,r3,0		/* parent or child? */
	bne	1f		/* return if parent */
	li	r0,0		/* make top-level stack frame */
	stwu	r0,-16(r1)
	mtlr	r30		/* fn addr in lr */
	mr	r3,r31		/* load arg and call fn */
	PPC440EP_ERR42
	blrl
	li	r0,__NR_exit	/* exit if function returns */
	li	r3,0
	sc
1:	lwz	r30,8(r1)
	lwz	r31,12(r1)
	addi	r1,r1,16
	blr

_GLOBAL(execve)
	li	r0,__NR_execve
	sc
	bnslr
	neg	r3,r3
	blr

/*
 * This routine is just here to keep GCC happy - sigh...
 */
_GLOBAL(__main)
	blr