xref: /openbmc/linux/arch/powerpc/platforms/ps3/mm.c (revision 03638e62)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *  PS3 address space management.
4  *
5  *  Copyright (C) 2006 Sony Computer Entertainment Inc.
6  *  Copyright 2006 Sony Corp.
7  */
8 
9 #include <linux/kernel.h>
10 #include <linux/export.h>
11 #include <linux/memblock.h>
12 #include <linux/slab.h>
13 
14 #include <asm/cell-regs.h>
15 #include <asm/firmware.h>
16 #include <asm/prom.h>
17 #include <asm/udbg.h>
18 #include <asm/lv1call.h>
19 #include <asm/setup.h>
20 
21 #include "platform.h"
22 
23 #if defined(DEBUG)
24 #define DBG udbg_printf
25 #else
26 #define DBG pr_devel
27 #endif
28 
29 enum {
30 #if defined(CONFIG_PS3_DYNAMIC_DMA)
31 	USE_DYNAMIC_DMA = 1,
32 #else
33 	USE_DYNAMIC_DMA = 0,
34 #endif
35 };
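/*
 * With CONFIG_PS3_DYNAMIC_DMA unset, ps3_dma_region_init() selects the
 * 'linear' sb region ops below, which map all of RAM into the ioc address
 * space up front; with it set, dma chunks are mapped and unmapped on demand.
 */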
36 
37 enum {
38 	PAGE_SHIFT_4K = 12U,
39 	PAGE_SHIFT_64K = 16U,
40 	PAGE_SHIFT_16M = 24U,
41 };
42 
43 static unsigned long make_page_sizes(unsigned long a, unsigned long b)
44 {
45 	return (a << 56) | (b << 48);
46 }
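/*
 * The value built here is passed to lv1_construct_virtual_address_space()
 * below; the two page-size shifts are packed into the most significant
 * bytes, e.g. make_page_sizes(PAGE_SHIFT_16M, PAGE_SHIFT_64K) gives
 * 0x1810000000000000 (16M and 64K pages).
 */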
47 
48 enum {
49 	ALLOCATE_MEMORY_TRY_ALT_UNIT = 0x04,
50 	ALLOCATE_MEMORY_ADDR_ZERO = 0x08,
51 };
52 
53 /* valid htab sizes are {18,19,20} = 256K, 512K, 1M */
54 
55 enum {
56 	HTAB_SIZE_MAX = 20U, /* HV limit of 1MB */
57 	HTAB_SIZE_MIN = 18U, /* CPU limit of 256KB */
58 };
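/*
 * CONFIG_PS3_HTAB_SIZE is the log2 of the hash table size in bytes; it is
 * range checked against these limits at build time in ps3_mm_vas_create().
 */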
59 
60 /*============================================================================*/
61 /* virtual address space routines                                             */
62 /*============================================================================*/
63 
64 /**
65  * struct mem_region - memory region structure
66  * @base: base address
67  * @size: size in bytes
68  * @offset: difference between base and rm.size
69  * @destroy: flag if region should be destroyed upon shutdown
70  */
71 
72 struct mem_region {
73 	u64 base;
74 	u64 size;
75 	unsigned long offset;
76 	int destroy;
77 };
78 
79 /**
80  * struct map - address space state variables holder
81  * @total: total memory available as reported by HV
82  * @vas_id: HV virtual address space id
83  * @htab_size: htab size in bytes
84  * @rm: real mode (bootmem) region
85  * @r1: highmem region(s)
86  *
87  * The HV virtual address space (vas) allows for hotplug memory regions.
88  * Memory regions can be created and destroyed in the vas at runtime.
89  *
90  * ps3 addresses
91  * virt_addr: a cpu 'translated' effective address
92  * phys_addr: an address in what Linux thinks is the physical address space
93  * lpar_addr: an address in the HV virtual address space
94  * bus_addr: an io controller 'translated' address on a device bus
95  */
96 
97 struct map {
98 	u64 total;
99 	u64 vas_id;
100 	u64 htab_size;
101 	struct mem_region rm;
102 	struct mem_region r1;
103 };
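/*
 * Illustrative layout (hypothetical numbers): with map.rm.size = 0x8000000
 * and a high region created at lpar address map.r1.base = 0x700020000000,
 * map.r1.offset = map.r1.base - map.rm.size.  Linux physical addresses at or
 * above map.rm.size are shifted up by that offset to land in the high
 * region, while addresses below map.rm.size translate 1:1 to lpar addresses.
 */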
104 
105 #define debug_dump_map(x) _debug_dump_map(x, __func__, __LINE__)
106 static void __maybe_unused _debug_dump_map(const struct map *m,
107 	const char *func, int line)
108 {
109 	DBG("%s:%d: map.total     = %llxh\n", func, line, m->total);
110 	DBG("%s:%d: map.rm.size   = %llxh\n", func, line, m->rm.size);
111 	DBG("%s:%d: map.vas_id    = %llu\n", func, line, m->vas_id);
112 	DBG("%s:%d: map.htab_size = %llxh\n", func, line, m->htab_size);
113 	DBG("%s:%d: map.r1.base   = %llxh\n", func, line, m->r1.base);
114 	DBG("%s:%d: map.r1.offset = %lxh\n", func, line, m->r1.offset);
115 	DBG("%s:%d: map.r1.size   = %llxh\n", func, line, m->r1.size);
116 }
117 
118 static struct map map;
119 
120 /**
121  * ps3_mm_phys_to_lpar - translate a linux physical address to lpar address
122  * @phys_addr: linux physical address
123  */
124 
125 unsigned long ps3_mm_phys_to_lpar(unsigned long phys_addr)
126 {
127 	BUG_ON(is_kernel_addr(phys_addr));
128 	return (phys_addr < map.rm.size || phys_addr >= map.total)
129 		? phys_addr : phys_addr + map.r1.offset;
130 }
131 
132 EXPORT_SYMBOL(ps3_mm_phys_to_lpar);
133 
134 /**
135  * ps3_mm_vas_create - create the virtual address space
136  */
137 
138 void __init ps3_mm_vas_create(unsigned long *htab_size)
139 {
140 	int result;
141 	u64 start_address;
142 	u64 size;
143 	u64 access_right;
144 	u64 max_page_size;
145 	u64 flags;
146 
147 	result = lv1_query_logical_partition_address_region_info(0,
148 		&start_address, &size, &access_right, &max_page_size,
149 		&flags);
150 
151 	if (result) {
152 		DBG("%s:%d: lv1_query_logical_partition_address_region_info "
153 			"failed: %s\n", __func__, __LINE__,
154 			ps3_result(result));
155 		goto fail;
156 	}
157 
158 	if (max_page_size < PAGE_SHIFT_16M) {
159 		DBG("%s:%d: bad max_page_size %llxh\n", __func__, __LINE__,
160 			max_page_size);
161 		goto fail;
162 	}
163 
164 	BUILD_BUG_ON(CONFIG_PS3_HTAB_SIZE > HTAB_SIZE_MAX);
165 	BUILD_BUG_ON(CONFIG_PS3_HTAB_SIZE < HTAB_SIZE_MIN);
166 
167 	result = lv1_construct_virtual_address_space(CONFIG_PS3_HTAB_SIZE,
168 			2, make_page_sizes(PAGE_SHIFT_16M, PAGE_SHIFT_64K),
169 			&map.vas_id, &map.htab_size);
170 
171 	if (result) {
172 		DBG("%s:%d: lv1_construct_virtual_address_space failed: %s\n",
173 			__func__, __LINE__, ps3_result(result));
174 		goto fail;
175 	}
176 
177 	result = lv1_select_virtual_address_space(map.vas_id);
178 
179 	if (result) {
180 		DBG("%s:%d: lv1_select_virtual_address_space failed: %s\n",
181 			__func__, __LINE__, ps3_result(result));
182 		goto fail;
183 	}
184 
185 	*htab_size = map.htab_size;
186 
187 	debug_dump_map(&map);
188 
189 	return;
190 
191 fail:
192 	panic("ps3_mm_vas_create failed");
193 }
194 
195 /**
196  * ps3_mm_vas_destroy - destroy the virtual address space
197  */
198 
199 void ps3_mm_vas_destroy(void)
200 {
201 	int result;
202 
203 	DBG("%s:%d: map.vas_id    = %llu\n", __func__, __LINE__, map.vas_id);
204 
205 	if (map.vas_id) {
206 		result = lv1_select_virtual_address_space(0);
207 		BUG_ON(result);
208 		result = lv1_destruct_virtual_address_space(map.vas_id);
209 		BUG_ON(result);
210 		map.vas_id = 0;
211 	}
212 }
213 
214 static int ps3_mm_get_repository_highmem(struct mem_region *r)
215 {
216 	int result;
217 
218 	/* Assume a single highmem region. */
219 
220 	result = ps3_repository_read_highmem_info(0, &r->base, &r->size);
221 
222 	if (result)
223 		goto zero_region;
224 
225 	if (!r->base || !r->size) {
226 		result = -1;
227 		goto zero_region;
228 	}
229 
230 	r->offset = r->base - map.rm.size;
231 
232 	DBG("%s:%d: Found high region in repository: %llxh %llxh\n",
233 	    __func__, __LINE__, r->base, r->size);
234 
235 	return 0;
236 
237 zero_region:
238 	DBG("%s:%d: No high region in repository.\n", __func__, __LINE__);
239 
240 	r->size = r->base = r->offset = 0;
241 	return result;
242 }
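/*
 * Note: a region obtained from the repository keeps @destroy == 0 (the
 * static map is zero initialized), so ps3_mm_region_destroy() leaves it in
 * place at shutdown; only regions allocated by ps3_mm_region_create() are
 * released back to the HV.
 */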
243 
244 static int ps3_mm_set_repository_highmem(const struct mem_region *r)
245 {
246 	/* Assume a single highmem region. */
247 
248 	return r ? ps3_repository_write_highmem_info(0, r->base, r->size) :
249 		ps3_repository_write_highmem_info(0, 0, 0);
250 }
251 
252 /**
253  * ps3_mm_region_create - create a memory region in the vas
254  * @r: pointer to a struct mem_region to accept initialized values
255  * @size: requested region size
256  *
257  * This implementation creates the region with the vas large page size.
258  * @size is rounded down to a multiple of the vas large page size.
259  */
260 
261 static int ps3_mm_region_create(struct mem_region *r, unsigned long size)
262 {
263 	int result;
264 	u64 muid;
265 
266 	r->size = _ALIGN_DOWN(size, 1 << PAGE_SHIFT_16M);
267 
268 	DBG("%s:%d requested  %lxh\n", __func__, __LINE__, size);
269 	DBG("%s:%d actual     %llxh\n", __func__, __LINE__, r->size);
270 	DBG("%s:%d difference %llxh (%lluMB)\n", __func__, __LINE__,
271 		size - r->size, (size - r->size) / 1024 / 1024);
272 
273 	if (r->size == 0) {
274 		DBG("%s:%d: size == 0\n", __func__, __LINE__);
275 		result = -1;
276 		goto zero_region;
277 	}
278 
279 	result = lv1_allocate_memory(r->size, PAGE_SHIFT_16M, 0,
280 		ALLOCATE_MEMORY_TRY_ALT_UNIT, &r->base, &muid);
281 
282 	if (result || r->base < map.rm.size) {
283 		DBG("%s:%d: lv1_allocate_memory failed: %s\n",
284 			__func__, __LINE__, ps3_result(result));
285 		goto zero_region;
286 	}
287 
288 	r->destroy = 1;
289 	r->offset = r->base - map.rm.size;
290 	return result;
291 
292 zero_region:
293 	r->size = r->base = r->offset = 0;
294 	return result;
295 }
296 
297 /**
298  * ps3_mm_region_destroy - destroy a memory region
299  * @r: pointer to struct mem_region
300  */
301 
302 static void ps3_mm_region_destroy(struct mem_region *r)
303 {
304 	int result;
305 
306 	if (!r->destroy) {
307 		pr_info("%s:%d: Not destroying high region: %llxh %llxh\n",
308 			__func__, __LINE__, r->base, r->size);
309 		return;
310 	}
311 
312 	DBG("%s:%d: r->base = %llxh\n", __func__, __LINE__, r->base);
313 
314 	if (r->base) {
315 		result = lv1_release_memory(r->base);
316 		BUG_ON(result);
317 		r->size = r->base = r->offset = 0;
318 		map.total = map.rm.size;
319 	}
320 	ps3_mm_set_repository_highmem(NULL);
321 }
322 
323 /*============================================================================*/
324 /* dma routines                                                               */
325 /*============================================================================*/
326 
327 /**
328  * dma_sb_lpar_to_bus - Translate an lpar address to ioc mapped bus address.
329  * @r: pointer to dma region structure
330  * @lpar_addr: HV lpar address
331  */
332 
333 static unsigned long dma_sb_lpar_to_bus(struct ps3_dma_region *r,
334 	unsigned long lpar_addr)
335 {
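	/*
	 * A high-region lpar address is first folded back to its Linux
	 * physical address (effectively the inverse of ps3_mm_phys_to_lpar())
	 * before the region's bus offset is applied.
	 */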
336 	if (lpar_addr >= map.rm.size)
337 		lpar_addr -= map.r1.offset;
338 	BUG_ON(lpar_addr < r->offset);
339 	BUG_ON(lpar_addr >= r->offset + r->len);
340 	return r->bus_addr + lpar_addr - r->offset;
341 }
342 
343 #define dma_dump_region(_a) _dma_dump_region(_a, __func__, __LINE__)
344 static void  __maybe_unused _dma_dump_region(const struct ps3_dma_region *r,
345 	const char *func, int line)
346 {
347 	DBG("%s:%d: dev        %llu:%llu\n", func, line, r->dev->bus_id,
348 		r->dev->dev_id);
349 	DBG("%s:%d: page_size  %u\n", func, line, r->page_size);
350 	DBG("%s:%d: bus_addr   %lxh\n", func, line, r->bus_addr);
351 	DBG("%s:%d: len        %lxh\n", func, line, r->len);
352 	DBG("%s:%d: offset     %lxh\n", func, line, r->offset);
353 }
354 
355 /**
356  * struct dma_chunk - A chunk of dma pages mapped by the io controller.
357  * @region: The dma region that owns this chunk.
358  * @lpar_addr: Starting lpar address of the area to map.
359  * @bus_addr: Starting ioc bus address of the area to map.
360  * @len: Length in bytes of the area to map.
361  * @link: A struct list_head used with struct ps3_dma_region.chunk_list, the
362  * list of all chunks owned by the region.
363  *
364  * This implementation uses a very simple dma page manager
365  * based on the dma_chunk structure.  This scheme assumes
366  * that all drivers use very well behaved dma ops.
367  */
368 
369 struct dma_chunk {
370 	struct ps3_dma_region *region;
371 	unsigned long lpar_addr;
372 	unsigned long bus_addr;
373 	unsigned long len;
374 	struct list_head link;
375 	unsigned int usage_count;
376 };
377 
378 #define dma_dump_chunk(_a) _dma_dump_chunk(_a, __func__, __LINE__)
379 static void _dma_dump_chunk(const struct dma_chunk *c, const char *func,
380 	int line)
381 {
382 	DBG("%s:%d: r.dev        %llu:%llu\n", func, line,
383 		c->region->dev->bus_id, c->region->dev->dev_id);
384 	DBG("%s:%d: r.bus_addr   %lxh\n", func, line, c->region->bus_addr);
385 	DBG("%s:%d: r.page_size  %u\n", func, line, c->region->page_size);
386 	DBG("%s:%d: r.len        %lxh\n", func, line, c->region->len);
387 	DBG("%s:%d: r.offset     %lxh\n", func, line, c->region->offset);
388 	DBG("%s:%d: c.lpar_addr  %lxh\n", func, line, c->lpar_addr);
389 	DBG("%s:%d: c.bus_addr   %lxh\n", func, line, c->bus_addr);
390 	DBG("%s:%d: c.len        %lxh\n", func, line, c->len);
391 }
392 
393 static struct dma_chunk *dma_find_chunk(struct ps3_dma_region *r,
394 	unsigned long bus_addr, unsigned long len)
395 {
396 	struct dma_chunk *c;
397 	unsigned long aligned_bus = _ALIGN_DOWN(bus_addr, 1 << r->page_size);
398 	unsigned long aligned_len = _ALIGN_UP(len+bus_addr-aligned_bus,
399 					      1 << r->page_size);
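	/*
	 * Example (hypothetical values): with r->page_size = 12 (4K pages),
	 * bus_addr = 0x10234 and len = 0x100 give aligned_bus = 0x10000 and
	 * aligned_len = 0x1000, i.e. the enclosing page-aligned window.
	 */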
400 
401 	list_for_each_entry(c, &r->chunk_list.head, link) {
402 		/* intersection */
403 		if (aligned_bus >= c->bus_addr &&
404 		    aligned_bus + aligned_len <= c->bus_addr + c->len)
405 			return c;
406 
407 		/* below */
408 		if (aligned_bus + aligned_len <= c->bus_addr)
409 			continue;
410 
411 		/* above */
412 		if (aligned_bus >= c->bus_addr + c->len)
413 			continue;
414 
415 		/* we don't handle the multi-chunk case for now */
416 		dma_dump_chunk(c);
417 		BUG();
418 	}
419 	return NULL;
420 }
421 
422 static struct dma_chunk *dma_find_chunk_lpar(struct ps3_dma_region *r,
423 	unsigned long lpar_addr, unsigned long len)
424 {
425 	struct dma_chunk *c;
426 	unsigned long aligned_lpar = _ALIGN_DOWN(lpar_addr, 1 << r->page_size);
427 	unsigned long aligned_len = _ALIGN_UP(len + lpar_addr - aligned_lpar,
428 					      1 << r->page_size);
429 
430 	list_for_each_entry(c, &r->chunk_list.head, link) {
431 		/* intersection */
432 		if (c->lpar_addr <= aligned_lpar &&
433 		    aligned_lpar < c->lpar_addr + c->len) {
434 			if (aligned_lpar + aligned_len <= c->lpar_addr + c->len)
435 				return c;
436 			else {
437 				dma_dump_chunk(c);
438 				BUG();
439 			}
440 		}
441 		/* below */
442 		if (aligned_lpar + aligned_len <= c->lpar_addr) {
443 			continue;
444 		}
445 		/* above */
446 		if (c->lpar_addr + c->len <= aligned_lpar) {
447 			continue;
448 		}
449 	}
450 	return NULL;
451 }
452 
453 static int dma_sb_free_chunk(struct dma_chunk *c)
454 {
455 	int result = 0;
456 
457 	if (c->bus_addr) {
458 		result = lv1_unmap_device_dma_region(c->region->dev->bus_id,
459 			c->region->dev->dev_id, c->bus_addr, c->len);
460 		BUG_ON(result);
461 	}
462 
463 	kfree(c);
464 	return result;
465 }
466 
467 static int dma_ioc0_free_chunk(struct dma_chunk *c)
468 {
469 	int result = 0;
470 	int iopage;
471 	unsigned long offset;
472 	struct ps3_dma_region *r = c->region;
473 
474 	DBG("%s:start\n", __func__);
475 	for (iopage = 0; iopage < (c->len >> r->page_size); iopage++) {
476 		offset = (1 << r->page_size) * iopage;
477 		/* put INVALID entry */
478 		result = lv1_put_iopte(0,
479 				       c->bus_addr + offset,
480 				       c->lpar_addr + offset,
481 				       r->ioid,
482 				       0);
483 		DBG("%s: bus=%#lx, lpar=%#lx, ioid=%d\n", __func__,
484 		    c->bus_addr + offset,
485 		    c->lpar_addr + offset,
486 		    r->ioid);
487 
488 		if (result) {
489 			DBG("%s:%d: lv1_put_iopte failed: %s\n", __func__,
490 			    __LINE__, ps3_result(result));
491 		}
492 	}
493 	kfree(c);
494 	DBG("%s:end\n", __func__);
495 	return result;
496 }
497 
498 /**
499  * dma_sb_map_pages - Maps dma pages into the io controller bus address space.
500  * @r: Pointer to a struct ps3_dma_region.
501  * @phys_addr: Starting physical address of the area to map.
502  * @len: Length in bytes of the area to map.
503  * @c_out: A pointer to receive an allocated struct dma_chunk for this area.
504  *
505  * This is the lowest level dma mapping routine, and is the one that will
506  * make the HV call to add the pages into the io controller address space.
507  */
508 
509 static int dma_sb_map_pages(struct ps3_dma_region *r, unsigned long phys_addr,
510 	    unsigned long len, struct dma_chunk **c_out, u64 iopte_flag)
511 {
512 	int result;
513 	struct dma_chunk *c;
514 
515 	c = kzalloc(sizeof(*c), GFP_ATOMIC);
516 	if (!c) {
517 		result = -ENOMEM;
518 		goto fail_alloc;
519 	}
520 
521 	c->region = r;
522 	c->lpar_addr = ps3_mm_phys_to_lpar(phys_addr);
523 	c->bus_addr = dma_sb_lpar_to_bus(r, c->lpar_addr);
524 	c->len = len;
525 
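	/*
	 * 0xf800000000000000 is CBE_IOPTE_PP_W | CBE_IOPTE_PP_R |
	 * CBE_IOPTE_SO_RW | CBE_IOPTE_M, the combination passed by
	 * dma_sb_region_create_linear() and expected from all callers.
	 */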
526 	BUG_ON(iopte_flag != 0xf800000000000000UL);
527 	result = lv1_map_device_dma_region(c->region->dev->bus_id,
528 					   c->region->dev->dev_id, c->lpar_addr,
529 					   c->bus_addr, c->len, iopte_flag);
530 	if (result) {
531 		DBG("%s:%d: lv1_map_device_dma_region failed: %s\n",
532 			__func__, __LINE__, ps3_result(result));
533 		goto fail_map;
534 	}
535 
536 	list_add(&c->link, &r->chunk_list.head);
537 
538 	*c_out = c;
539 	return 0;
540 
541 fail_map:
542 	kfree(c);
543 fail_alloc:
544 	*c_out = NULL;
545 	DBG(" <- %s:%d\n", __func__, __LINE__);
546 	return result;
547 }
548 
549 static int dma_ioc0_map_pages(struct ps3_dma_region *r, unsigned long phys_addr,
550 			      unsigned long len, struct dma_chunk **c_out,
551 			      u64 iopte_flag)
552 {
553 	int result;
554 	struct dma_chunk *c, *last;
555 	int iopage, pages;
556 	unsigned long offset;
557 
558 	DBG("%s: phy=%#lx, lpar=%#lx, len=%#lx\n", __func__,
559 	    phys_addr, ps3_mm_phys_to_lpar(phys_addr), len);
560 	c = kzalloc(sizeof(*c), GFP_ATOMIC);
561 	if (!c) {
562 		result = -ENOMEM;
563 		goto fail_alloc;
564 	}
565 
566 	c->region = r;
567 	c->len = len;
568 	c->lpar_addr = ps3_mm_phys_to_lpar(phys_addr);
569 	/* allocate IO address */
570 	if (list_empty(&r->chunk_list.head)) {
571 		/* first one */
572 		c->bus_addr = r->bus_addr;
573 	} else {
574 		/* derive from last bus addr*/
575 		last  = list_entry(r->chunk_list.head.next,
576 				   struct dma_chunk, link);
577 		c->bus_addr = last->bus_addr + last->len;
578 		DBG("%s: last bus=%#lx, len=%#lx\n", __func__,
579 		    last->bus_addr, last->len);
580 	}
581 
582 	/* FIXME: check whether length exceeds region size */
583 
584 	/* build ioptes for the area */
585 	pages = len >> r->page_size;
586 	DBG("%s: pgsize=%#x len=%#lx pages=%#x iopteflag=%#llx\n", __func__,
587 	    r->page_size, r->len, pages, iopte_flag);
588 	for (iopage = 0; iopage < pages; iopage++) {
589 		offset = (1 << r->page_size) * iopage;
590 		result = lv1_put_iopte(0,
591 				       c->bus_addr + offset,
592 				       c->lpar_addr + offset,
593 				       r->ioid,
594 				       iopte_flag);
595 		if (result) {
596 			pr_warn("%s:%d: lv1_put_iopte failed: %s\n",
597 				__func__, __LINE__, ps3_result(result));
598 			goto fail_map;
599 		}
600 		DBG("%s: pg=%d bus=%#lx, lpar=%#lx, ioid=%#x\n", __func__,
601 		    iopage, c->bus_addr + offset, c->lpar_addr + offset,
602 		    r->ioid);
603 	}
604 
605 	/* be sure that last allocated one is inserted at head */
606 	list_add(&c->link, &r->chunk_list.head);
607 
608 	*c_out = c;
609 	DBG("%s: end\n", __func__);
610 	return 0;
611 
612 fail_map:
613 	for (iopage--; 0 <= iopage; iopage--) {
614 		offset = (1 << r->page_size) * iopage;
615 		lv1_put_iopte(0, c->bus_addr + offset,
616 			      c->lpar_addr + offset,
617 			      r->ioid,
618 			      0);
619 	}
620 	kfree(c);
621 fail_alloc:
622 	*c_out = NULL;
623 	return result;
624 }
625 
626 /**
627  * dma_sb_region_create - Create a device dma region.
628  * @r: Pointer to a struct ps3_dma_region.
629  *
630  * This is the lowest level dma region create routine, and is the one that
631  * will make the HV call to create the region.
632  */
633 
634 static int dma_sb_region_create(struct ps3_dma_region *r)
635 {
636 	int result;
637 	u64 bus_addr;
638 
639 	DBG(" -> %s:%d:\n", __func__, __LINE__);
640 
641 	BUG_ON(!r);
642 
643 	if (!r->dev->bus_id) {
644 		pr_info("%s:%d: %llu:%llu no dma\n", __func__, __LINE__,
645 			r->dev->bus_id, r->dev->dev_id);
646 		return 0;
647 	}
648 
649 	DBG("%s:%u: len = 0x%lx, page_size = %u, offset = 0x%lx\n", __func__,
650 	    __LINE__, r->len, r->page_size, r->offset);
651 
652 	BUG_ON(!r->len);
653 	BUG_ON(!r->page_size);
654 	BUG_ON(!r->region_ops);
655 
656 	INIT_LIST_HEAD(&r->chunk_list.head);
657 	spin_lock_init(&r->chunk_list.lock);
658 
659 	result = lv1_allocate_device_dma_region(r->dev->bus_id, r->dev->dev_id,
660 		roundup_pow_of_two(r->len), r->page_size, r->region_type,
661 		&bus_addr);
662 	r->bus_addr = bus_addr;
663 
664 	if (result) {
665 		DBG("%s:%d: lv1_allocate_device_dma_region failed: %s\n",
666 			__func__, __LINE__, ps3_result(result));
667 		r->len = r->bus_addr = 0;
668 	}
669 
670 	return result;
671 }
672 
673 static int dma_ioc0_region_create(struct ps3_dma_region *r)
674 {
675 	int result;
676 	u64 bus_addr;
677 
678 	INIT_LIST_HEAD(&r->chunk_list.head);
679 	spin_lock_init(&r->chunk_list.lock);
680 
681 	result = lv1_allocate_io_segment(0,
682 					 r->len,
683 					 r->page_size,
684 					 &bus_addr);
685 	r->bus_addr = bus_addr;
686 	if (result) {
687 		DBG("%s:%d: lv1_allocate_io_segment failed: %s\n",
688 			__func__, __LINE__, ps3_result(result));
689 		r->len = r->bus_addr = 0;
690 	}
691 	DBG("%s: len=%#lx, pg=%d, bus=%#lx\n", __func__,
692 	    r->len, r->page_size, r->bus_addr);
693 	return result;
694 }
695 
696 /**
697  * dma_sb_region_free - Free a device dma region.
698  * @r: Pointer to a struct ps3_dma_region.
699  *
700  * This is the lowest level dma region free routine, and is the one that
701  * will make the HV call to free the region.
702  */
703 
704 static int dma_sb_region_free(struct ps3_dma_region *r)
705 {
706 	int result;
707 	struct dma_chunk *c;
708 	struct dma_chunk *tmp;
709 
710 	BUG_ON(!r);
711 
712 	if (!r->dev->bus_id) {
713 		pr_info("%s:%d: %llu:%llu no dma\n", __func__, __LINE__,
714 			r->dev->bus_id, r->dev->dev_id);
715 		return 0;
716 	}
717 
718 	list_for_each_entry_safe(c, tmp, &r->chunk_list.head, link) {
719 		list_del(&c->link);
720 		dma_sb_free_chunk(c);
721 	}
722 
723 	result = lv1_free_device_dma_region(r->dev->bus_id, r->dev->dev_id,
724 		r->bus_addr);
725 
726 	if (result)
727 		DBG("%s:%d: lv1_free_device_dma_region failed: %s\n",
728 			__func__, __LINE__, ps3_result(result));
729 
730 	r->bus_addr = 0;
731 
732 	return result;
733 }
734 
735 static int dma_ioc0_region_free(struct ps3_dma_region *r)
736 {
737 	int result;
738 	struct dma_chunk *c, *n;
739 
740 	DBG("%s: start\n", __func__);
741 	list_for_each_entry_safe(c, n, &r->chunk_list.head, link) {
742 		list_del(&c->link);
743 		dma_ioc0_free_chunk(c);
744 	}
745 
746 	result = lv1_release_io_segment(0, r->bus_addr);
747 
748 	if (result)
749 		DBG("%s:%d: lv1_release_io_segment failed: %s\n",
750 			__func__, __LINE__, ps3_result(result));
751 
752 	r->bus_addr = 0;
753 	DBG("%s: end\n", __func__);
754 
755 	return result;
756 }
757 
758 /**
759  * dma_sb_map_area - Map an area of memory into a device dma region.
760  * @r: Pointer to a struct ps3_dma_region.
761  * @virt_addr: Starting virtual address of the area to map.
762  * @len: Length in bytes of the area to map.
763  * @bus_addr: A pointer to return the starting ioc bus address of the area to
764  * map.
765  *
766  * This is the common dma mapping routine.
767  */
768 
769 static int dma_sb_map_area(struct ps3_dma_region *r, unsigned long virt_addr,
770 	   unsigned long len, dma_addr_t *bus_addr,
771 	   u64 iopte_flag)
772 {
773 	int result;
774 	unsigned long flags;
775 	struct dma_chunk *c;
776 	unsigned long phys_addr = is_kernel_addr(virt_addr) ? __pa(virt_addr)
777 		: virt_addr;
778 	unsigned long aligned_phys = _ALIGN_DOWN(phys_addr, 1 << r->page_size);
779 	unsigned long aligned_len = _ALIGN_UP(len + phys_addr - aligned_phys,
780 					      1 << r->page_size);
781 	*bus_addr = dma_sb_lpar_to_bus(r, ps3_mm_phys_to_lpar(phys_addr));
782 
783 	if (!USE_DYNAMIC_DMA) {
784 		unsigned long lpar_addr = ps3_mm_phys_to_lpar(phys_addr);
785 		DBG(" -> %s:%d\n", __func__, __LINE__);
786 		DBG("%s:%d virt_addr %lxh\n", __func__, __LINE__,
787 			virt_addr);
788 		DBG("%s:%d phys_addr %lxh\n", __func__, __LINE__,
789 			phys_addr);
790 		DBG("%s:%d lpar_addr %lxh\n", __func__, __LINE__,
791 			lpar_addr);
792 		DBG("%s:%d len       %lxh\n", __func__, __LINE__, len);
793 		DBG("%s:%d bus_addr  %llxh (%lxh)\n", __func__, __LINE__,
794 		*bus_addr, len);
795 	}
796 
797 	spin_lock_irqsave(&r->chunk_list.lock, flags);
798 	c = dma_find_chunk(r, *bus_addr, len);
799 
800 	if (c) {
801 		DBG("%s:%d: reusing mapped chunk\n", __func__, __LINE__);
802 		dma_dump_chunk(c);
803 		c->usage_count++;
804 		spin_unlock_irqrestore(&r->chunk_list.lock, flags);
805 		return 0;
806 	}
807 
808 	result = dma_sb_map_pages(r, aligned_phys, aligned_len, &c, iopte_flag);
809 
810 	if (result) {
811 		*bus_addr = 0;
812 		DBG("%s:%d: dma_sb_map_pages failed (%d)\n",
813 			__func__, __LINE__, result);
814 		spin_unlock_irqrestore(&r->chunk_list.lock, flags);
815 		return result;
816 	}
817 
818 	c->usage_count = 1;
819 
820 	spin_unlock_irqrestore(&r->chunk_list.lock, flags);
821 	return result;
822 }
823 
824 static int dma_ioc0_map_area(struct ps3_dma_region *r, unsigned long virt_addr,
825 	     unsigned long len, dma_addr_t *bus_addr,
826 	     u64 iopte_flag)
827 {
828 	int result;
829 	unsigned long flags;
830 	struct dma_chunk *c;
831 	unsigned long phys_addr = is_kernel_addr(virt_addr) ? __pa(virt_addr)
832 		: virt_addr;
833 	unsigned long aligned_phys = _ALIGN_DOWN(phys_addr, 1 << r->page_size);
834 	unsigned long aligned_len = _ALIGN_UP(len + phys_addr - aligned_phys,
835 					      1 << r->page_size);
836 
837 	DBG("%s: vaddr=%#lx, len=%#lx\n", __func__,
838 	    virt_addr, len);
839 	DBG("%s: ph=%#lx a_ph=%#lx a_l=%#lx\n", __func__,
840 	    phys_addr, aligned_phys, aligned_len);
841 
842 	spin_lock_irqsave(&r->chunk_list.lock, flags);
843 	c = dma_find_chunk_lpar(r, ps3_mm_phys_to_lpar(phys_addr), len);
844 
845 	if (c) {
846 		/* FIXME */
847 		BUG();
848 		*bus_addr = c->bus_addr + phys_addr - aligned_phys;
849 		c->usage_count++;
850 		spin_unlock_irqrestore(&r->chunk_list.lock, flags);
851 		return 0;
852 	}
853 
854 	result = dma_ioc0_map_pages(r, aligned_phys, aligned_len, &c,
855 				    iopte_flag);
856 
857 	if (result) {
858 		*bus_addr = 0;
859 		DBG("%s:%d: dma_ioc0_map_pages failed (%d)\n",
860 			__func__, __LINE__, result);
861 		spin_unlock_irqrestore(&r->chunk_list.lock, flags);
862 		return result;
863 	}
864 	*bus_addr = c->bus_addr + phys_addr - aligned_phys;
865 	DBG("%s: va=%#lx pa=%#lx a_pa=%#lx bus=%#llx\n", __func__,
866 	    virt_addr, phys_addr, aligned_phys, *bus_addr);
867 	c->usage_count = 1;
868 
869 	spin_unlock_irqrestore(&r->chunk_list.lock, flags);
870 	return result;
871 }
872 
873 /**
874  * dma_sb_unmap_area - Unmap an area of memory from a device dma region.
875  * @r: Pointer to a struct ps3_dma_region.
876  * @bus_addr: The starting ioc bus address of the area to unmap.
877  * @len: Length in bytes of the area to unmap.
878  *
879  * This is the common dma unmap routine.
880  */
881 
882 static int dma_sb_unmap_area(struct ps3_dma_region *r, dma_addr_t bus_addr,
883 	unsigned long len)
884 {
885 	unsigned long flags;
886 	struct dma_chunk *c;
887 
888 	spin_lock_irqsave(&r->chunk_list.lock, flags);
889 	c = dma_find_chunk(r, bus_addr, len);
890 
891 	if (!c) {
892 		unsigned long aligned_bus = _ALIGN_DOWN(bus_addr,
893 			1 << r->page_size);
894 		unsigned long aligned_len = _ALIGN_UP(len + bus_addr
895 			- aligned_bus, 1 << r->page_size);
896 		DBG("%s:%d: not found: bus_addr %llxh\n",
897 			__func__, __LINE__, bus_addr);
898 		DBG("%s:%d: not found: len %lxh\n",
899 			__func__, __LINE__, len);
900 		DBG("%s:%d: not found: aligned_bus %lxh\n",
901 			__func__, __LINE__, aligned_bus);
902 		DBG("%s:%d: not found: aligned_len %lxh\n",
903 			__func__, __LINE__, aligned_len);
904 		BUG();
905 	}
906 
907 	c->usage_count--;
908 
909 	if (!c->usage_count) {
910 		list_del(&c->link);
911 		dma_sb_free_chunk(c);
912 	}
913 
914 	spin_unlock_irqrestore(&r->chunk_list.lock, flags);
915 	return 0;
916 }
917 
918 static int dma_ioc0_unmap_area(struct ps3_dma_region *r,
919 			dma_addr_t bus_addr, unsigned long len)
920 {
921 	unsigned long flags;
922 	struct dma_chunk *c;
923 
924 	DBG("%s: start a=%#llx l=%#lx\n", __func__, bus_addr, len);
925 	spin_lock_irqsave(&r->chunk_list.lock, flags);
926 	c = dma_find_chunk(r, bus_addr, len);
927 
928 	if (!c) {
929 		unsigned long aligned_bus = _ALIGN_DOWN(bus_addr,
930 							1 << r->page_size);
931 		unsigned long aligned_len = _ALIGN_UP(len + bus_addr
932 						      - aligned_bus,
933 						      1 << r->page_size);
934 		DBG("%s:%d: not found: bus_addr %llxh\n",
935 		    __func__, __LINE__, bus_addr);
936 		DBG("%s:%d: not found: len %lxh\n",
937 		    __func__, __LINE__, len);
938 		DBG("%s:%d: not found: aligned_bus %lxh\n",
939 		    __func__, __LINE__, aligned_bus);
940 		DBG("%s:%d: not found: aligned_len %lxh\n",
941 		    __func__, __LINE__, aligned_len);
942 		BUG();
943 	}
944 
945 	c->usage_count--;
946 
947 	if (!c->usage_count) {
948 		list_del(&c->link);
949 		dma_ioc0_free_chunk(c);
950 	}
951 
952 	spin_unlock_irqrestore(&r->chunk_list.lock, flags);
953 	DBG("%s: end\n", __func__);
954 	return 0;
955 }
956 
957 /**
958  * dma_sb_region_create_linear - Setup a linear dma mapping for a device.
959  * @r: Pointer to a struct ps3_dma_region.
960  *
961  * This routine creates an HV dma region for the device and maps all available
962  * ram into the io controller bus address space.
963  */
964 
965 static int dma_sb_region_create_linear(struct ps3_dma_region *r)
966 {
967 	int result;
968 	unsigned long virt_addr, len;
969 	dma_addr_t tmp;
970 
971 	if (r->len > 16*1024*1024) {	/* FIXME: need proper fix */
972 		/* force 16M dma pages for linear mapping */
973 		if (r->page_size != PS3_DMA_16M) {
974 			pr_info("%s:%d: forcing 16M pages for linear map\n",
975 				__func__, __LINE__);
976 			r->page_size = PS3_DMA_16M;
977 			r->len = _ALIGN_UP(r->len, 1 << r->page_size);
978 		}
979 	}
980 
981 	result = dma_sb_region_create(r);
982 	BUG_ON(result);
983 
984 	if (r->offset < map.rm.size) {
985 		/* Map (part of) 1st RAM chunk */
986 		virt_addr = map.rm.base + r->offset;
987 		len = map.rm.size - r->offset;
988 		if (len > r->len)
989 			len = r->len;
990 		result = dma_sb_map_area(r, virt_addr, len, &tmp,
991 			CBE_IOPTE_PP_W | CBE_IOPTE_PP_R | CBE_IOPTE_SO_RW |
992 			CBE_IOPTE_M);
993 		BUG_ON(result);
994 	}
995 
996 	if (r->offset + r->len > map.rm.size) {
997 		/* Map (part of) 2nd RAM chunk */
998 		virt_addr = map.rm.size;
999 		len = r->len;
1000 		if (r->offset >= map.rm.size)
1001 			virt_addr += r->offset - map.rm.size;
1002 		else
1003 			len -= map.rm.size - r->offset;
1004 		result = dma_sb_map_area(r, virt_addr, len, &tmp,
1005 			CBE_IOPTE_PP_W | CBE_IOPTE_PP_R | CBE_IOPTE_SO_RW |
1006 			CBE_IOPTE_M);
1007 		BUG_ON(result);
1008 	}
1009 
1010 	return result;
1011 }
1012 
1013 /**
1014  * dma_sb_region_free_linear - Free a linear dma mapping for a device.
1015  * @r: Pointer to a struct ps3_dma_region.
1016  *
1017  * This routine will unmap all mapped areas and free the HV dma region.
1018  */
1019 
1020 static int dma_sb_region_free_linear(struct ps3_dma_region *r)
1021 {
1022 	int result;
1023 	dma_addr_t bus_addr;
1024 	unsigned long len, lpar_addr;
1025 
1026 	if (r->offset < map.rm.size) {
1027 		/* Unmap (part of) 1st RAM chunk */
1028 		lpar_addr = map.rm.base + r->offset;
1029 		len = map.rm.size - r->offset;
1030 		if (len > r->len)
1031 			len = r->len;
1032 		bus_addr = dma_sb_lpar_to_bus(r, lpar_addr);
1033 		result = dma_sb_unmap_area(r, bus_addr, len);
1034 		BUG_ON(result);
1035 	}
1036 
1037 	if (r->offset + r->len > map.rm.size) {
1038 		/* Unmap (part of) 2nd RAM chunk */
1039 		lpar_addr = map.r1.base;
1040 		len = r->len;
1041 		if (r->offset >= map.rm.size)
1042 			lpar_addr += r->offset - map.rm.size;
1043 		else
1044 			len -= map.rm.size - r->offset;
1045 		bus_addr = dma_sb_lpar_to_bus(r, lpar_addr);
1046 		result = dma_sb_unmap_area(r, bus_addr, len);
1047 		BUG_ON(result);
1048 	}
1049 
1050 	result = dma_sb_region_free(r);
1051 	BUG_ON(result);
1052 
1053 	return result;
1054 }
1055 
1056 /**
1057  * dma_sb_map_area_linear - Map an area of memory into a device dma region.
1058  * @r: Pointer to a struct ps3_dma_region.
1059  * @virt_addr: Starting virtual address of the area to map.
1060  * @len: Length in bytes of the area to map.
1061  * @bus_addr: A pointer to return the starting ioc bus address of the area to
1062  * map.
1063  *
1064  * This routine just returns the corresponding bus address.  Actual mapping
1065  * occurs in dma_sb_region_create_linear().
1066  */
1067 
1068 static int dma_sb_map_area_linear(struct ps3_dma_region *r,
1069 	unsigned long virt_addr, unsigned long len, dma_addr_t *bus_addr,
1070 	u64 iopte_flag)
1071 {
1072 	unsigned long phys_addr = is_kernel_addr(virt_addr) ? __pa(virt_addr)
1073 		: virt_addr;
1074 	*bus_addr = dma_sb_lpar_to_bus(r, ps3_mm_phys_to_lpar(phys_addr));
1075 	return 0;
1076 }
1077 
1078 /**
1079  * dma_sb_unmap_area_linear - Unmap an area of memory from a device dma region.
1080  * @r: Pointer to a struct ps3_dma_region.
1081  * @bus_addr: The starting ioc bus address of the area to unmap.
1082  * @len: Length in bytes of the area to unmap.
1083  *
1084  * This routine does nothing.  Unmapping occurs in dma_sb_region_free_linear().
1085  */
1086 
1087 static int dma_sb_unmap_area_linear(struct ps3_dma_region *r,
1088 	dma_addr_t bus_addr, unsigned long len)
1089 {
1090 	return 0;
1091 }
1092 
1093 static const struct ps3_dma_region_ops ps3_dma_sb_region_ops =  {
1094 	.create = dma_sb_region_create,
1095 	.free = dma_sb_region_free,
1096 	.map = dma_sb_map_area,
1097 	.unmap = dma_sb_unmap_area
1098 };
1099 
1100 static const struct ps3_dma_region_ops ps3_dma_sb_region_linear_ops = {
1101 	.create = dma_sb_region_create_linear,
1102 	.free = dma_sb_region_free_linear,
1103 	.map = dma_sb_map_area_linear,
1104 	.unmap = dma_sb_unmap_area_linear
1105 };
1106 
1107 static const struct ps3_dma_region_ops ps3_dma_ioc0_region_ops = {
1108 	.create = dma_ioc0_region_create,
1109 	.free = dma_ioc0_region_free,
1110 	.map = dma_ioc0_map_area,
1111 	.unmap = dma_ioc0_unmap_area
1112 };
1113 
1114 int ps3_dma_region_init(struct ps3_system_bus_device *dev,
1115 	struct ps3_dma_region *r, enum ps3_dma_page_size page_size,
1116 	enum ps3_dma_region_type region_type, void *addr, unsigned long len)
1117 {
1118 	unsigned long lpar_addr;
1119 
1120 	lpar_addr = addr ? ps3_mm_phys_to_lpar(__pa(addr)) : 0;
1121 
1122 	r->dev = dev;
1123 	r->page_size = page_size;
1124 	r->region_type = region_type;
1125 	r->offset = lpar_addr;
1126 	if (r->offset >= map.rm.size)
1127 		r->offset -= map.r1.offset;
1128 	r->len = len ? len : _ALIGN_UP(map.total, 1 << r->page_size);
1129 
1130 	switch (dev->dev_type) {
1131 	case PS3_DEVICE_TYPE_SB:
1132 		r->region_ops =  (USE_DYNAMIC_DMA)
1133 			? &ps3_dma_sb_region_ops
1134 			: &ps3_dma_sb_region_linear_ops;
1135 		break;
1136 	case PS3_DEVICE_TYPE_IOC0:
1137 		r->region_ops = &ps3_dma_ioc0_region_ops;
1138 		break;
1139 	default:
1140 		BUG();
1141 		return -EINVAL;
1142 	}
1143 	return 0;
1144 }
1145 EXPORT_SYMBOL(ps3_dma_region_init);
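/*
 * Typical driver usage (sketch only, with a hypothetical struct
 * ps3_system_bus_device *dev and a caller-owned struct ps3_dma_region r):
 *
 *	ps3_dma_region_init(dev, &r, PS3_DMA_64K, PS3_DMA_OTHER, NULL, 0);
 *	ps3_dma_region_create(&r);
 *	...
 *	ps3_dma_region_free(&r);
 */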
1146 
1147 int ps3_dma_region_create(struct ps3_dma_region *r)
1148 {
1149 	BUG_ON(!r);
1150 	BUG_ON(!r->region_ops);
1151 	BUG_ON(!r->region_ops->create);
1152 	return r->region_ops->create(r);
1153 }
1154 EXPORT_SYMBOL(ps3_dma_region_create);
1155 
1156 int ps3_dma_region_free(struct ps3_dma_region *r)
1157 {
1158 	BUG_ON(!r);
1159 	BUG_ON(!r->region_ops);
1160 	BUG_ON(!r->region_ops->free);
1161 	return r->region_ops->free(r);
1162 }
1163 EXPORT_SYMBOL(ps3_dma_region_free);
1164 
1165 int ps3_dma_map(struct ps3_dma_region *r, unsigned long virt_addr,
1166 	unsigned long len, dma_addr_t *bus_addr,
1167 	u64 iopte_flag)
1168 {
1169 	return r->region_ops->map(r, virt_addr, len, bus_addr, iopte_flag);
1170 }
1171 
1172 int ps3_dma_unmap(struct ps3_dma_region *r, dma_addr_t bus_addr,
1173 	unsigned long len)
1174 {
1175 	return r->region_ops->unmap(r, bus_addr, len);
1176 }
1177 
1178 /*============================================================================*/
1179 /* system startup routines                                                    */
1180 /*============================================================================*/
1181 
1182 /**
1183  * ps3_mm_init - initialize the address space state variables
1184  */
1185 
1186 void __init ps3_mm_init(void)
1187 {
1188 	int result;
1189 
1190 	DBG(" -> %s:%d\n", __func__, __LINE__);
1191 
1192 	result = ps3_repository_read_mm_info(&map.rm.base, &map.rm.size,
1193 		&map.total);
1194 
1195 	if (result)
1196 		panic("ps3_repository_read_mm_info() failed");
1197 
1198 	map.rm.offset = map.rm.base;
1199 	map.vas_id = map.htab_size = 0;
1200 
1201 	/* this implementation assumes map.rm.base is zero */
1202 
1203 	BUG_ON(map.rm.base);
1204 	BUG_ON(!map.rm.size);
1205 
1206 	/* Check if we got the highmem region from an earlier boot step */
1207 
1208 	if (ps3_mm_get_repository_highmem(&map.r1)) {
1209 		result = ps3_mm_region_create(&map.r1, map.total - map.rm.size);
1210 
1211 		if (!result)
1212 			ps3_mm_set_repository_highmem(&map.r1);
1213 	}
1214 
1215 	/* correct map.total for the real total amount of memory we use */
1216 	map.total = map.rm.size + map.r1.size;
1217 
1218 	if (!map.r1.size) {
1219 		DBG("%s:%d: No highmem region found\n", __func__, __LINE__);
1220 	} else {
1221 		DBG("%s:%d: Adding highmem region: %llxh %llxh\n",
1222 			__func__, __LINE__, map.rm.size,
1223 			map.total - map.rm.size);
1224 		memblock_add(map.rm.size, map.total - map.rm.size);
1225 	}
1226 
1227 	DBG(" <- %s:%d\n", __func__, __LINE__);
1228 }
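/*
 * Note: the high region is added to memblock at physical address
 * map.rm.size, immediately after the real mode region, so Linux sees a
 * single contiguous physical address space; ps3_mm_phys_to_lpar() re-applies
 * map.r1.offset whenever the HV needs the real lpar address.
 */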
1229 
1230 /**
1231  * ps3_mm_shutdown - final cleanup of address space
1232  */
1233 
1234 void ps3_mm_shutdown(void)
1235 {
1236 	ps3_mm_region_destroy(&map.r1);
1237 }
1238