/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2003, 04, 05 Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 2007  Maciej W. Rozycki
 * Copyright (C) 2008  Thiemo Seufer
 * Copyright (C) 2012  MIPS Technologies, Inc.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/proc_fs.h>

#include <asm/bugs.h>
#include <asm/cacheops.h>
#include <asm/inst.h>
#include <asm/io.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/prefetch.h>
#include <asm/bootinfo.h>
#include <asm/mipsregs.h>
#include <asm/mmu_context.h>
#include <asm/cpu.h>
#include <asm/war.h>

#ifdef CONFIG_SIBYTE_DMA_PAGEOPS
#include <asm/sibyte/sb1250.h>
#include <asm/sibyte/sb1250_regs.h>
#include <asm/sibyte/sb1250_dma.h>
#endif

#include <asm/uasm.h>

/* Registers used in the assembled routines. */
#define ZERO 0
#define AT 2
#define A0 4
#define A1 5
#define A2 6
#define T0 8
#define T1 9
#define T2 10
#define T3 11
#define T9 25
#define RA 31
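
/*
 * These are raw MIPS register numbers as encoded in instructions:
 * $zero = 0, $a0-$a2 = 4-6, $t0-$t3 = 8-11 (o32 names), $t9 = 25,
 * $ra = 31.  Note that AT is defined as register 2 ($v0 in the usual
 * naming), not the assembler's $1; the name only reflects its scratch
 * role in the generated code.
 */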

/* Handle labels (which must be positive integers). */
enum label_id {
	label_clear_nopref = 1,
	label_clear_pref,
	label_copy_nopref,
	label_copy_pref_both,
	label_copy_pref_store,
};

UASM_L_LA(_clear_nopref)
UASM_L_LA(_clear_pref)
UASM_L_LA(_copy_nopref)
UASM_L_LA(_copy_pref_both)
UASM_L_LA(_copy_pref_store)

/* We need one branch and therefore one relocation per target label. */
static struct uasm_label __cpuinitdata labels[5];
static struct uasm_reloc __cpuinitdata relocs[5];

#define cpu_is_r4600_v1_x()	((read_c0_prid() & 0xfffffff0) == 0x00002010)
#define cpu_is_r4600_v2_x()	((read_c0_prid() & 0xfffffff0) == 0x00002020)
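
/*
 * c0_prid encodes the implementation in bits 15:8 (0x20 for the R4600)
 * and the revision in bits 7:0; masking off the low revision nibble
 * matches any 1.x or 2.x stepping respectively.
 */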

static int pref_bias_clear_store __cpuinitdata;
static int pref_bias_copy_load __cpuinitdata;
static int pref_bias_copy_store __cpuinitdata;

static u32 pref_src_mode __cpuinitdata;
static u32 pref_dst_mode __cpuinitdata;

static int clear_word_size __cpuinitdata;
static int copy_word_size __cpuinitdata;

static int half_clear_loop_size __cpuinitdata;
static int half_copy_loop_size __cpuinitdata;

static int cache_line_size __cpuinitdata;
#define cache_line_mask() (cache_line_size - 1)

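/*
 * pg_addiu emits "reg1 = reg2 + off" while working around the R4000/R4400
 * DADDI/DADDIU erratum: on affected CPUs the immediate is first built in
 * T9 and then added with DADDU instead of using DADDIU directly.  For
 * example, with off = 0x9000 (> 0x7fff) and the workaround active this
 * expands to:
 *
 *	lui	t9, 1			# uasm_rel_hi(0x9000)
 *	addiu	t9, t9, -0x7000		# uasm_rel_lo(0x9000)
 *	daddu	reg1, reg2, t9
 */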
static inline void __cpuinit
pg_addiu(u32 **buf, unsigned int reg1, unsigned int reg2, unsigned int off)
{
	if (cpu_has_64bit_gp_regs && DADDI_WAR && r4k_daddiu_bug()) {
		if (off > 0x7fff) {
			uasm_i_lui(buf, T9, uasm_rel_hi(off));
			uasm_i_addiu(buf, T9, T9, uasm_rel_lo(off));
		} else
			uasm_i_addiu(buf, T9, ZERO, off);
		uasm_i_daddu(buf, reg1, reg2, T9);
	} else {
		if (off > 0x7fff) {
			uasm_i_lui(buf, T9, uasm_rel_hi(off));
			uasm_i_addiu(buf, T9, T9, uasm_rel_lo(off));
			UASM_i_ADDU(buf, reg1, reg2, T9);
		} else
			UASM_i_ADDIU(buf, reg1, reg2, off);
	}
}

static void __cpuinit set_prefetch_parameters(void)
{
	if (cpu_has_64bit_gp_regs || cpu_has_64bit_zero_reg)
		clear_word_size = 8;
	else
		clear_word_size = 4;

	if (cpu_has_64bit_gp_regs)
		copy_word_size = 8;
	else
		copy_word_size = 4;

	/*
	 * The prefetches used here employ "streaming" hints, which cause
	 * the copied data to be kicked out of the cache sooner.  A page copy
	 * often ends up copying a lot more data than is commonly used, so
	 * this seems to make sense in terms of reducing cache pollution, but
	 * I've no real performance data to back this up.
	 */
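	/*
	 * For reference: a "streamed" hint marks the data as unlikely to be
	 * reused, making the line a preferred eviction victim; a "retained"
	 * hint asks for it to be kept; Pref_PrepareForStore allocates the
	 * destination line without fetching its old contents, which is why
	 * it is the preferred store hint where it works.
	 */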
	if (cpu_has_prefetch) {
		/*
		 * XXX: Most prefetch bias values in here are based on
		 * guesswork.
		 */
		cache_line_size = cpu_dcache_line_size();
		switch (current_cpu_type()) {
		case CPU_R5500:
		case CPU_TX49XX:
			/* These processors only support the Pref_Load hint. */
			pref_bias_copy_load = 256;
			break;

		case CPU_RM9000:
			/*
			 * As a workaround for erratum G105, which makes the
			 * PrepareForStore hint unusable, we fall back to
			 * StoreRetained on the RM9000.  Once it is known
			 * which versions of the RM9000 are affected, we'll
			 * be able to conditionalize this.
			 */

		case CPU_R10000:
		case CPU_R12000:
		case CPU_R14000:
			/*
			 * Those values have been experimentally tuned for an
			 * Origin 200.
			 */
			pref_bias_clear_store = 512;
			pref_bias_copy_load = 256;
			pref_bias_copy_store = 256;
			pref_src_mode = Pref_LoadStreamed;
			pref_dst_mode = Pref_StoreStreamed;
			break;

		case CPU_SB1:
		case CPU_SB1A:
			pref_bias_clear_store = 128;
			pref_bias_copy_load = 128;
			pref_bias_copy_store = 128;
			/*
			 * SB1 pass1 Pref_LoadStreamed/Pref_StoreStreamed
			 * hints are broken.
			 */
			if (current_cpu_type() == CPU_SB1 &&
			    (current_cpu_data.processor_id & 0xff) < 0x02) {
				pref_src_mode = Pref_Load;
				pref_dst_mode = Pref_Store;
			} else {
				pref_src_mode = Pref_LoadStreamed;
				pref_dst_mode = Pref_StoreStreamed;
			}
			break;

		default:
			pref_bias_clear_store = 128;
			pref_bias_copy_load = 256;
			pref_bias_copy_store = 128;
			pref_src_mode = Pref_LoadStreamed;
			pref_dst_mode = Pref_PrepareForStore;
			break;
		}
	} else {
		if (cpu_has_cache_cdex_s)
			cache_line_size = cpu_scache_line_size();
		else if (cpu_has_cache_cdex_p)
			cache_line_size = cpu_dcache_line_size();
	}
	/*
	 * Too much unrolling will overflow the space reserved between
	 * __clear_page_start/__clear_page_end and
	 * __copy_page_start/__copy_page_end.
	 */
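	/*
	 * Worked example: with 64-bit registers (word size 8) and a 32-byte
	 * D-cache line, half_clear_loop_size = min(128, max(16, 32)) = 32,
	 * so each half of the unrolled clear loop covers one cache line and
	 * the full loop body clears 64 bytes per iteration.
	 */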
	half_clear_loop_size = min(16 * clear_word_size,
				   max(cache_line_size >> 1,
				       4 * clear_word_size));
	half_copy_loop_size = min(16 * copy_word_size,
				  max(cache_line_size >> 1,
				      4 * copy_word_size));
}

static void __cpuinit build_clear_store(u32 **buf, int off)
{
	if (cpu_has_64bit_gp_regs || cpu_has_64bit_zero_reg) {
		uasm_i_sd(buf, ZERO, off, A0);
	} else {
		uasm_i_sw(buf, ZERO, off, A0);
	}
}

static inline void __cpuinit build_clear_pref(u32 **buf, int off)
{
	if (off & cache_line_mask())
		return;

	if (pref_bias_clear_store) {
		uasm_i_pref(buf, pref_dst_mode, pref_bias_clear_store + off,
			    A0);
	} else if (cache_line_size == (half_clear_loop_size << 1)) {
		if (cpu_has_cache_cdex_s) {
			uasm_i_cache(buf, Create_Dirty_Excl_SD, off, A0);
		} else if (cpu_has_cache_cdex_p) {
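			/*
			 * R4600 workarounds: V1.x needs four nops between a
			 * store and a following hit cacheop; V2.0 needs a
			 * dummy uncached load first (the caller loaded AT
			 * with 0xa0000000, a KSEG1 address).
			 */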
			if (R4600_V1_HIT_CACHEOP_WAR && cpu_is_r4600_v1_x()) {
				uasm_i_nop(buf);
				uasm_i_nop(buf);
				uasm_i_nop(buf);
				uasm_i_nop(buf);
			}

			if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())
				uasm_i_lw(buf, ZERO, ZERO, AT);

			uasm_i_cache(buf, Create_Dirty_Excl_D, off, A0);
		}
	}
}

extern u32 __clear_page_start;
extern u32 __clear_page_end;
extern u32 __copy_page_start;
extern u32 __copy_page_end;

void __cpuinit build_clear_page(void)
{
	int off;
	u32 *buf = &__clear_page_start;
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;
	int i;

	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	set_prefetch_parameters();

	/*
	 * This algorithm makes the following assumptions:
	 *   - The prefetch bias is a multiple of 2 words.
	 *   - The prefetch bias is less than one page.
	 */
	BUG_ON(pref_bias_clear_store % (2 * clear_word_size));
	BUG_ON(PAGE_SIZE < pref_bias_clear_store);

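	/*
	 * A0 (the page address) is page-aligned, so its low bits are zero
	 * and an ORI can merge in any offset that fits in 16 bits.  For
	 * larger offsets (including off == PAGE_SIZE when the bias is 0)
	 * the addition is synthesized via pg_addiu() instead.
	 */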
	off = PAGE_SIZE - pref_bias_clear_store;
	if (off > 0xffff || !pref_bias_clear_store)
		pg_addiu(&buf, A2, A0, off);
	else
		uasm_i_ori(&buf, A2, A0, off);

	if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())
		uasm_i_lui(&buf, AT, 0xa000);

	off = cache_line_size ? min(8, pref_bias_clear_store / cache_line_size)
				* cache_line_size : 0;
	while (off) {
		build_clear_pref(&buf, -off);
		off -= cache_line_size;
	}
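	/*
	 * Main loop: each iteration clears 2 * half_clear_loop_size bytes.
	 * A0 is advanced in the middle of the body, so the second half
	 * stores through negative offsets; the closing bne is emitted one
	 * store early so that the final store sits in its delay slot.
	 */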
	uasm_l_clear_pref(&l, buf);
	do {
		build_clear_pref(&buf, off);
		build_clear_store(&buf, off);
		off += clear_word_size;
	} while (off < half_clear_loop_size);
	pg_addiu(&buf, A0, A0, 2 * off);
	off = -off;
	do {
		build_clear_pref(&buf, off);
		if (off == -clear_word_size)
			uasm_il_bne(&buf, &r, A0, A2, label_clear_pref);
		build_clear_store(&buf, off);
		off += clear_word_size;
	} while (off < 0);

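	/*
	 * Tail: the last pref_bias_clear_store bytes are cleared without
	 * prefetches, since prefetching there would reach past the end of
	 * the page.
	 */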
	if (pref_bias_clear_store) {
		pg_addiu(&buf, A2, A0, pref_bias_clear_store);
		uasm_l_clear_nopref(&l, buf);
		off = 0;
		do {
			build_clear_store(&buf, off);
			off += clear_word_size;
		} while (off < half_clear_loop_size);
		pg_addiu(&buf, A0, A0, 2 * off);
		off = -off;
		do {
			if (off == -clear_word_size)
				uasm_il_bne(&buf, &r, A0, A2,
					    label_clear_nopref);
			build_clear_store(&buf, off);
			off += clear_word_size;
		} while (off < 0);
	}

	uasm_i_jr(&buf, RA);
	uasm_i_nop(&buf);

	BUG_ON(buf > &__clear_page_end);

	uasm_resolve_relocs(relocs, labels);

	pr_debug("Synthesized clear page handler (%u instructions).\n",
		 (u32)(buf - &__clear_page_start));

	pr_debug("\t.set push\n");
	pr_debug("\t.set noreorder\n");
	for (i = 0; i < (buf - &__clear_page_start); i++)
		pr_debug("\t.word 0x%08x\n", (&__clear_page_start)[i]);
	pr_debug("\t.set pop\n");
}

static void __cpuinit build_copy_load(u32 **buf, int reg, int off)
{
	if (cpu_has_64bit_gp_regs) {
		uasm_i_ld(buf, reg, off, A1);
	} else {
		uasm_i_lw(buf, reg, off, A1);
	}
}

static void __cpuinit build_copy_store(u32 **buf, int reg, int off)
{
	if (cpu_has_64bit_gp_regs) {
		uasm_i_sd(buf, reg, off, A0);
	} else {
		uasm_i_sw(buf, reg, off, A0);
	}
}

static inline void build_copy_load_pref(u32 **buf, int off)
{
	if (off & cache_line_mask())
		return;

	if (pref_bias_copy_load)
		uasm_i_pref(buf, pref_src_mode, pref_bias_copy_load + off, A1);
}

static inline void build_copy_store_pref(u32 **buf, int off)
{
	if (off & cache_line_mask())
		return;

	if (pref_bias_copy_store) {
		uasm_i_pref(buf, pref_dst_mode, pref_bias_copy_store + off,
			    A0);
	} else if (cache_line_size == (half_copy_loop_size << 1)) {
		if (cpu_has_cache_cdex_s) {
			uasm_i_cache(buf, Create_Dirty_Excl_SD, off, A0);
		} else if (cpu_has_cache_cdex_p) {
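			/*
			 * Same R4600 V1.x/V2.0 hit-cacheop workarounds as
			 * in build_clear_pref() above.
			 */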
			if (R4600_V1_HIT_CACHEOP_WAR && cpu_is_r4600_v1_x()) {
				uasm_i_nop(buf);
				uasm_i_nop(buf);
				uasm_i_nop(buf);
				uasm_i_nop(buf);
			}

			if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())
				uasm_i_lw(buf, ZERO, ZERO, AT);

			uasm_i_cache(buf, Create_Dirty_Excl_D, off, A0);
		}
	}
}

void __cpuinit build_copy_page(void)
{
	int off;
	u32 *buf = &__copy_page_start;
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;
	int i;

	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	set_prefetch_parameters();

	/*
	 * This algorithm makes the following assumptions:
	 *   - All prefetch biases are multiples of 8 words.
	 *   - The prefetch biases are less than one page.
	 *   - The store prefetch bias isn't greater than the load
	 *     prefetch bias.
	 */
	BUG_ON(pref_bias_copy_load % (8 * copy_word_size));
	BUG_ON(pref_bias_copy_store % (8 * copy_word_size));
	BUG_ON(PAGE_SIZE < pref_bias_copy_load);
	BUG_ON(pref_bias_copy_store > pref_bias_copy_load);

	off = PAGE_SIZE - pref_bias_copy_load;
	if (off > 0xffff || !pref_bias_copy_load)
		pg_addiu(&buf, A2, A0, off);
	else
		uasm_i_ori(&buf, A2, A0, off);

	if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())
		uasm_i_lui(&buf, AT, 0xa000);

	off = cache_line_size ? min(8, pref_bias_copy_load / cache_line_size) *
				cache_line_size : 0;
	while (off) {
		build_copy_load_pref(&buf, -off);
		off -= cache_line_size;
	}
	off = cache_line_size ? min(8, pref_bias_copy_store / cache_line_size) *
				cache_line_size : 0;
	while (off) {
		build_copy_store_pref(&buf, -off);
		off -= cache_line_size;
	}
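	/*
	 * Main loop: loads and stores are interleaved four words at a time
	 * through T0-T3 to hide load latency.  As in build_clear_page(),
	 * A0/A1 are advanced mid-body, the second half uses negative
	 * offsets, and the bne is emitted so the last store lands in its
	 * delay slot.
	 */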
	uasm_l_copy_pref_both(&l, buf);
	do {
		build_copy_load_pref(&buf, off);
		build_copy_load(&buf, T0, off);
		build_copy_load_pref(&buf, off + copy_word_size);
		build_copy_load(&buf, T1, off + copy_word_size);
		build_copy_load_pref(&buf, off + 2 * copy_word_size);
		build_copy_load(&buf, T2, off + 2 * copy_word_size);
		build_copy_load_pref(&buf, off + 3 * copy_word_size);
		build_copy_load(&buf, T3, off + 3 * copy_word_size);
		build_copy_store_pref(&buf, off);
		build_copy_store(&buf, T0, off);
		build_copy_store_pref(&buf, off + copy_word_size);
		build_copy_store(&buf, T1, off + copy_word_size);
		build_copy_store_pref(&buf, off + 2 * copy_word_size);
		build_copy_store(&buf, T2, off + 2 * copy_word_size);
		build_copy_store_pref(&buf, off + 3 * copy_word_size);
		build_copy_store(&buf, T3, off + 3 * copy_word_size);
		off += 4 * copy_word_size;
	} while (off < half_copy_loop_size);
	pg_addiu(&buf, A1, A1, 2 * off);
	pg_addiu(&buf, A0, A0, 2 * off);
	off = -off;
	do {
		build_copy_load_pref(&buf, off);
		build_copy_load(&buf, T0, off);
		build_copy_load_pref(&buf, off + copy_word_size);
		build_copy_load(&buf, T1, off + copy_word_size);
		build_copy_load_pref(&buf, off + 2 * copy_word_size);
		build_copy_load(&buf, T2, off + 2 * copy_word_size);
		build_copy_load_pref(&buf, off + 3 * copy_word_size);
		build_copy_load(&buf, T3, off + 3 * copy_word_size);
		build_copy_store_pref(&buf, off);
		build_copy_store(&buf, T0, off);
		build_copy_store_pref(&buf, off + copy_word_size);
		build_copy_store(&buf, T1, off + copy_word_size);
		build_copy_store_pref(&buf, off + 2 * copy_word_size);
		build_copy_store(&buf, T2, off + 2 * copy_word_size);
		build_copy_store_pref(&buf, off + 3 * copy_word_size);
		if (off == -(4 * copy_word_size))
			uasm_il_bne(&buf, &r, A2, A0, label_copy_pref_both);
		build_copy_store(&buf, T3, off + 3 * copy_word_size);
		off += 4 * copy_word_size;
	} while (off < 0);

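	/*
	 * If the load bias exceeds the store bias, a middle loop keeps
	 * issuing store prefetches (but no load prefetches) for the next
	 * pref_bias_copy_load - pref_bias_copy_store bytes.
	 */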
	if (pref_bias_copy_load - pref_bias_copy_store) {
		pg_addiu(&buf, A2, A0,
			 pref_bias_copy_load - pref_bias_copy_store);
		uasm_l_copy_pref_store(&l, buf);
		off = 0;
		do {
			build_copy_load(&buf, T0, off);
			build_copy_load(&buf, T1, off + copy_word_size);
			build_copy_load(&buf, T2, off + 2 * copy_word_size);
			build_copy_load(&buf, T3, off + 3 * copy_word_size);
			build_copy_store_pref(&buf, off);
			build_copy_store(&buf, T0, off);
			build_copy_store_pref(&buf, off + copy_word_size);
			build_copy_store(&buf, T1, off + copy_word_size);
			build_copy_store_pref(&buf, off + 2 * copy_word_size);
			build_copy_store(&buf, T2, off + 2 * copy_word_size);
			build_copy_store_pref(&buf, off + 3 * copy_word_size);
			build_copy_store(&buf, T3, off + 3 * copy_word_size);
			off += 4 * copy_word_size;
		} while (off < half_copy_loop_size);
		pg_addiu(&buf, A1, A1, 2 * off);
		pg_addiu(&buf, A0, A0, 2 * off);
		off = -off;
		do {
			build_copy_load(&buf, T0, off);
			build_copy_load(&buf, T1, off + copy_word_size);
			build_copy_load(&buf, T2, off + 2 * copy_word_size);
			build_copy_load(&buf, T3, off + 3 * copy_word_size);
			build_copy_store_pref(&buf, off);
			build_copy_store(&buf, T0, off);
			build_copy_store_pref(&buf, off + copy_word_size);
			build_copy_store(&buf, T1, off + copy_word_size);
			build_copy_store_pref(&buf, off + 2 * copy_word_size);
			build_copy_store(&buf, T2, off + 2 * copy_word_size);
			build_copy_store_pref(&buf, off + 3 * copy_word_size);
			if (off == -(4 * copy_word_size))
				uasm_il_bne(&buf, &r, A2, A0,
					    label_copy_pref_store);
			build_copy_store(&buf, T3, off + 3 * copy_word_size);
			off += 4 * copy_word_size;
		} while (off < 0);
	}

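	/*
	 * Final tail: the last pref_bias_copy_store bytes are copied with
	 * no prefetching at all.
	 */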
	if (pref_bias_copy_store) {
		pg_addiu(&buf, A2, A0, pref_bias_copy_store);
		uasm_l_copy_nopref(&l, buf);
		off = 0;
		do {
			build_copy_load(&buf, T0, off);
			build_copy_load(&buf, T1, off + copy_word_size);
			build_copy_load(&buf, T2, off + 2 * copy_word_size);
			build_copy_load(&buf, T3, off + 3 * copy_word_size);
			build_copy_store(&buf, T0, off);
			build_copy_store(&buf, T1, off + copy_word_size);
			build_copy_store(&buf, T2, off + 2 * copy_word_size);
			build_copy_store(&buf, T3, off + 3 * copy_word_size);
			off += 4 * copy_word_size;
		} while (off < half_copy_loop_size);
		pg_addiu(&buf, A1, A1, 2 * off);
		pg_addiu(&buf, A0, A0, 2 * off);
		off = -off;
		do {
			build_copy_load(&buf, T0, off);
			build_copy_load(&buf, T1, off + copy_word_size);
			build_copy_load(&buf, T2, off + 2 * copy_word_size);
			build_copy_load(&buf, T3, off + 3 * copy_word_size);
			build_copy_store(&buf, T0, off);
			build_copy_store(&buf, T1, off + copy_word_size);
			build_copy_store(&buf, T2, off + 2 * copy_word_size);
			if (off == -(4 * copy_word_size))
				uasm_il_bne(&buf, &r, A2, A0,
					    label_copy_nopref);
			build_copy_store(&buf, T3, off + 3 * copy_word_size);
			off += 4 * copy_word_size;
		} while (off < 0);
	}

	uasm_i_jr(&buf, RA);
	uasm_i_nop(&buf);

	BUG_ON(buf > &__copy_page_end);

	uasm_resolve_relocs(relocs, labels);

	pr_debug("Synthesized copy page handler (%u instructions).\n",
		 (u32)(buf - &__copy_page_start));

	pr_debug("\t.set push\n");
	pr_debug("\t.set noreorder\n");
	for (i = 0; i < (buf - &__copy_page_start); i++)
		pr_debug("\t.word 0x%08x\n", (&__copy_page_start)[i]);
	pr_debug("\t.set pop\n");
}

#ifdef CONFIG_SIBYTE_DMA_PAGEOPS
extern void clear_page_cpu(void *page);
extern void copy_page_cpu(void *to, void *from);

/*
 * Pad the descriptors out to a cache line, since each is exclusively
 * owned by a particular CPU.
 */
struct dmadscr {
	u64 dscr_a;
	u64 dscr_b;
	u64 pad_a;
	u64 pad_b;
} ____cacheline_aligned_in_smp page_descr[DM_NUM_CHANNELS];

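/*
 * Program every data mover channel with a one-entry descriptor ring
 * (its slot in page_descr[]), then pulse RESET and set ENABL.
 * clear_page()/copy_page() below use the channel whose number matches
 * the executing CPU.
 */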
void sb1_dma_init(void)
{
	int i;

	for (i = 0; i < DM_NUM_CHANNELS; i++) {
		const u64 base_val = CPHYSADDR((unsigned long)&page_descr[i]) |
				     V_DM_DSCR_BASE_RINGSZ(1);
		void *base_reg = IOADDR(A_DM_REGISTER(i, R_DM_DSCR_BASE));

		__raw_writeq(base_val, base_reg);
		__raw_writeq(base_val | M_DM_DSCR_BASE_RESET, base_reg);
		__raw_writeq(base_val | M_DM_DSCR_BASE_ENABL, base_reg);
	}
}

void clear_page(void *page)
{
	u64 to_phys = CPHYSADDR((unsigned long)page);
	unsigned int cpu = smp_processor_id();

	/* If the page is not in KSEG0, fall back to the CPU version. */
	if ((long)KSEGX((unsigned long)page) != (long)CKSEG0)
		return clear_page_cpu(page);

	page_descr[cpu].dscr_a = to_phys | M_DM_DSCRA_ZERO_MEM |
				 M_DM_DSCRA_L2C_DEST | M_DM_DSCRA_INTERRUPT;
	page_descr[cpu].dscr_b = V_DM_DSCRB_SRC_LENGTH(PAGE_SIZE);
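
	/*
	 * Posting a count of 1 hands our descriptor to the data mover and
	 * starts the operation.
	 */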
	__raw_writeq(1, IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_COUNT)));

	/*
	 * Don't really want to do it this way, but there's no
	 * reliable way to delay completion detection.
	 */
	while (!(__raw_readq(IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_BASE_DEBUG)))
		 & M_DM_DSCR_BASE_INTERRUPT))
		;
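	/*
	 * The read of DSCR_BASE is assumed to acknowledge completion and
	 * clear the status for the next use.
	 */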
	__raw_readq(IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_BASE)));
}

void copy_page(void *to, void *from)
{
	u64 from_phys = CPHYSADDR((unsigned long)from);
	u64 to_phys = CPHYSADDR((unsigned long)to);
	unsigned int cpu = smp_processor_id();

	/* If either page is not in KSEG0, fall back to the CPU version. */
	if ((long)KSEGX((unsigned long)to) != (long)CKSEG0
	    || (long)KSEGX((unsigned long)from) != (long)CKSEG0)
		return copy_page_cpu(to, from);

	page_descr[cpu].dscr_a = to_phys | M_DM_DSCRA_L2C_DEST |
				 M_DM_DSCRA_INTERRUPT;
	page_descr[cpu].dscr_b = from_phys | V_DM_DSCRB_SRC_LENGTH(PAGE_SIZE);
	__raw_writeq(1, IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_COUNT)));

	/*
	 * Don't really want to do it this way, but there's no
	 * reliable way to delay completion detection.
	 */
	while (!(__raw_readq(IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_BASE_DEBUG)))
		 & M_DM_DSCR_BASE_INTERRUPT))
		;
	__raw_readq(IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_BASE)));
}

#endif /* CONFIG_SIBYTE_DMA_PAGEOPS */