/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2008-2012 Intel Corporation
 */

#include <linux/errno.h>
#include <linux/mutex.h>

#include <drm/drm_mm.h>
#include <drm/i915_drm.h>

#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_region.h"
#include "i915_drv.h"
#include "i915_gem_stolen.h"
#include "i915_reg.h"
#include "i915_vgpu.h"
#include "intel_mchbar_regs.h"

/*
 * The BIOS typically reserves some of the system's memory for the exclusive
 * use of the integrated graphics. This memory is no longer available to
 * the OS, so users find that their system has less memory available than
 * they installed. We refer to this memory as stolen.
 *
 * The BIOS allocates its framebuffer from the stolen memory. Our goal is
 * to try to reuse that object for our own fbcon, which must always be
 * available for panics. Anything else we can reuse the stolen memory for
 * is a boon.
 */

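/*
 * All allocations from the stolen drm_mm range allocator go through these
 * helpers, which serialise access to the allocator with mm.stolen_lock.
 */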
int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *i915,
					 struct drm_mm_node *node, u64 size,
					 unsigned alignment, u64 start, u64 end)
{
	int ret;

	if (!drm_mm_initialized(&i915->mm.stolen))
		return -ENODEV;

	/* WaSkipStolenMemoryFirstPage:bdw+ */
	if (GRAPHICS_VER(i915) >= 8 && start < 4096)
		start = 4096;

	mutex_lock(&i915->mm.stolen_lock);
	ret = drm_mm_insert_node_in_range(&i915->mm.stolen, node,
					  size, alignment, 0,
					  start, end, DRM_MM_INSERT_BEST);
	mutex_unlock(&i915->mm.stolen_lock);

	return ret;
}

int i915_gem_stolen_insert_node(struct drm_i915_private *i915,
				struct drm_mm_node *node, u64 size,
				unsigned alignment)
{
	return i915_gem_stolen_insert_node_in_range(i915, node,
						    size, alignment,
						    I915_GEM_STOLEN_BIAS,
						    U64_MAX);
}

void i915_gem_stolen_remove_node(struct drm_i915_private *i915,
				 struct drm_mm_node *node)
{
	mutex_lock(&i915->mm.stolen_lock);
	drm_mm_remove_node(node);
	mutex_unlock(&i915->mm.stolen_lock);
}

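/*
 * Trim the stolen region so that it does not overlap the GTT on old
 * platforms where the GTT itself lives inside stolen, and verify that no
 * other resource already claims the same physical range.
 */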
static int i915_adjust_stolen(struct drm_i915_private *i915,
			      struct resource *dsm)
{
	struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
	struct intel_uncore *uncore = ggtt->vm.gt->uncore;
	struct resource *r;

	if (dsm->start == 0 || dsm->end <= dsm->start)
		return -EINVAL;

	/*
	 * TODO: We have yet to encounter a case where the GTT wasn't at the
	 * end of stolen. With that assumption we could simplify this.
	 */

	/* Make sure we don't clobber the GTT if it's within stolen memory */
	if (GRAPHICS_VER(i915) <= 4 &&
	    !IS_G33(i915) && !IS_PINEVIEW(i915) && !IS_G4X(i915)) {
		struct resource stolen[2] = {*dsm, *dsm};
		struct resource ggtt_res;
		resource_size_t ggtt_start;

		ggtt_start = intel_uncore_read(uncore, PGTBL_CTL);
		if (GRAPHICS_VER(i915) == 4)
			ggtt_start = (ggtt_start & PGTBL_ADDRESS_LO_MASK) |
				     (ggtt_start & PGTBL_ADDRESS_HI_MASK) << 28;
		else
			ggtt_start &= PGTBL_ADDRESS_LO_MASK;

		ggtt_res =
			(struct resource) DEFINE_RES_MEM(ggtt_start,
							 ggtt_total_entries(ggtt) * 4);

		if (ggtt_res.start >= stolen[0].start && ggtt_res.start < stolen[0].end)
			stolen[0].end = ggtt_res.start;
		if (ggtt_res.end > stolen[1].start && ggtt_res.end <= stolen[1].end)
			stolen[1].start = ggtt_res.end;

		/* Pick the larger of the two chunks */
		if (resource_size(&stolen[0]) > resource_size(&stolen[1]))
			*dsm = stolen[0];
		else
			*dsm = stolen[1];

		if (stolen[0].start != stolen[1].start ||
		    stolen[0].end != stolen[1].end) {
			drm_dbg(&i915->drm,
				"GTT within stolen memory at %pR\n",
				&ggtt_res);
			drm_dbg(&i915->drm, "Stolen memory adjusted to %pR\n",
				dsm);
		}
	}

	/*
	 * With stolen lmem, we don't need to check if the address range
	 * overlaps with the non-stolen system memory range, since lmem is local
	 * to the gpu.
	 */
	if (HAS_LMEM(i915))
		return 0;

	/*
	 * Verify that nothing else uses this physical address. Stolen
	 * memory should be reserved by the BIOS and hidden from the
	 * kernel. So if the region is already marked as busy, something
	 * is seriously wrong.
	 */
	r = devm_request_mem_region(i915->drm.dev, dsm->start,
				    resource_size(dsm),
				    "Graphics Stolen Memory");
	if (r == NULL) {
		/*
		 * One more attempt, but this time requesting the region from
		 * start + 1, as we have seen that this resolves the region
		 * conflict with the PCI bus.
		 * This is a BIOS w/a: some BIOSes wrap stolen in the root
		 * PCI bus, but have an off-by-one error. Hence retry the
		 * reservation starting from 1 instead of 0.
		 * There are also BIOSes with an off-by-one error on the other
		 * end.
		 */
		r = devm_request_mem_region(i915->drm.dev, dsm->start + 1,
					    resource_size(dsm) - 2,
					    "Graphics Stolen Memory");
		/*
		 * GEN3 firmware likes to smash pci bridges into the stolen
		 * range. Apparently this works.
		 */
		if (!r && GRAPHICS_VER(i915) != 3) {
			drm_err(&i915->drm,
				"conflict detected with stolen region: %pR\n",
				dsm);

			return -EBUSY;
		}
	}

	return 0;
}

static void i915_gem_cleanup_stolen(struct drm_i915_private *i915)
{
	if (!drm_mm_initialized(&i915->mm.stolen))
		return;

	drm_mm_takedown(&i915->mm.stolen);
}

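/*
 * The *_get_stolen_reserved() helpers decode the platform-specific
 * STOLEN_RESERVED register into the base and size of the portion of stolen
 * memory that is reserved and must be excluded from the allocator.
 */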
static void g4x_get_stolen_reserved(struct drm_i915_private *i915,
				    struct intel_uncore *uncore,
				    resource_size_t *base,
				    resource_size_t *size)
{
	u32 reg_val = intel_uncore_read(uncore,
					IS_GM45(i915) ?
					CTG_STOLEN_RESERVED :
					ELK_STOLEN_RESERVED);
	resource_size_t stolen_top = i915->dsm.end + 1;

	drm_dbg(&i915->drm, "%s_STOLEN_RESERVED = %08x\n",
		IS_GM45(i915) ? "CTG" : "ELK", reg_val);

	if ((reg_val & G4X_STOLEN_RESERVED_ENABLE) == 0)
		return;

	/*
	 * Whether ILK really reuses the ELK register for this is unclear.
	 * Let's see if we catch anyone with this supposedly enabled on ILK.
	 */
	drm_WARN(&i915->drm, GRAPHICS_VER(i915) == 5,
		 "ILK stolen reserved found? 0x%08x\n",
		 reg_val);

	if (!(reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK))
		return;

	*base = (reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK) << 16;
	drm_WARN_ON(&i915->drm,
		    (reg_val & G4X_STOLEN_RESERVED_ADDR1_MASK) < *base);

	*size = stolen_top - *base;
}

static void gen6_get_stolen_reserved(struct drm_i915_private *i915,
				     struct intel_uncore *uncore,
				     resource_size_t *base,
				     resource_size_t *size)
{
	u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);

	drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val);

	if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
		return;

	*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;

	switch (reg_val & GEN6_STOLEN_RESERVED_SIZE_MASK) {
	case GEN6_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN6_STOLEN_RESERVED_512K:
		*size = 512 * 1024;
		break;
	case GEN6_STOLEN_RESERVED_256K:
		*size = 256 * 1024;
		break;
	case GEN6_STOLEN_RESERVED_128K:
		*size = 128 * 1024;
		break;
	default:
		*size = 1024 * 1024;
		MISSING_CASE(reg_val & GEN6_STOLEN_RESERVED_SIZE_MASK);
	}
}

static void vlv_get_stolen_reserved(struct drm_i915_private *i915,
				    struct intel_uncore *uncore,
				    resource_size_t *base,
				    resource_size_t *size)
{
	u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);
	resource_size_t stolen_top = i915->dsm.end + 1;

	drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val);

	if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
		return;

	switch (reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK) {
	default:
		MISSING_CASE(reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK);
		fallthrough;
	case GEN7_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	}

	/*
	 * On vlv, the ADDR_MASK portion is left as 0 and HW deduces the
	 * reserved location as (top - size).
	 */
	*base = stolen_top - *size;
}

static void gen7_get_stolen_reserved(struct drm_i915_private *i915,
				     struct intel_uncore *uncore,
				     resource_size_t *base,
				     resource_size_t *size)
{
	u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);

	drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val);

	if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
		return;

	*base = reg_val & GEN7_STOLEN_RESERVED_ADDR_MASK;

	switch (reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK) {
	case GEN7_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN7_STOLEN_RESERVED_256K:
		*size = 256 * 1024;
		break;
	default:
		*size = 1024 * 1024;
		MISSING_CASE(reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK);
	}
}

static void chv_get_stolen_reserved(struct drm_i915_private *i915,
				    struct intel_uncore *uncore,
				    resource_size_t *base,
				    resource_size_t *size)
{
	u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);

	drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val);

	if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
		return;

	*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;

	switch (reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK) {
	case GEN8_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_2M:
		*size = 2 * 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_4M:
		*size = 4 * 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_8M:
		*size = 8 * 1024 * 1024;
		break;
	default:
		*size = 8 * 1024 * 1024;
		MISSING_CASE(reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK);
	}
}

static void bdw_get_stolen_reserved(struct drm_i915_private *i915,
				    struct intel_uncore *uncore,
				    resource_size_t *base,
				    resource_size_t *size)
{
	u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);
	resource_size_t stolen_top = i915->dsm.end + 1;

	drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val);

	if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
		return;

	if (!(reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK))
		return;

	*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;
	*size = stolen_top - *base;
}

static void icl_get_stolen_reserved(struct drm_i915_private *i915,
				    struct intel_uncore *uncore,
				    resource_size_t *base,
				    resource_size_t *size)
{
	u64 reg_val = intel_uncore_read64(uncore, GEN6_STOLEN_RESERVED);

	drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = 0x%016llx\n", reg_val);

	*base = reg_val & GEN11_STOLEN_RESERVED_ADDR_MASK;

	switch (reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK) {
	case GEN8_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_2M:
		*size = 2 * 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_4M:
		*size = 4 * 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_8M:
		*size = 8 * 1024 * 1024;
		break;
	default:
		*size = 8 * 1024 * 1024;
		MISSING_CASE(reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK);
	}
}

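/*
 * Probe the stolen region: apply any platform adjustments, carve the
 * reserved portion off the top, and set up the drm_mm allocator over the
 * remaining space for driver use.
 */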
static int i915_gem_init_stolen(struct intel_memory_region *mem)
{
	struct drm_i915_private *i915 = mem->i915;
	struct intel_uncore *uncore = &i915->uncore;
	resource_size_t reserved_base, stolen_top;
	resource_size_t reserved_total, reserved_size;

	mutex_init(&i915->mm.stolen_lock);

	if (intel_vgpu_active(i915)) {
		drm_notice(&i915->drm,
			   "%s, disabling use of stolen memory\n",
			   "iGVT-g active");
		return 0;
	}

	if (intel_vtd_active(i915) && GRAPHICS_VER(i915) < 8) {
		drm_notice(&i915->drm,
			   "%s, disabling use of stolen memory\n",
			   "DMAR active");
		return 0;
	}

	if (resource_size(&mem->region) == 0)
		return 0;

	i915->dsm = mem->region;

	if (i915_adjust_stolen(i915, &i915->dsm))
		return 0;

	GEM_BUG_ON(i915->dsm.start == 0);
	GEM_BUG_ON(i915->dsm.end <= i915->dsm.start);

	stolen_top = i915->dsm.end + 1;
	reserved_base = stolen_top;
	reserved_size = 0;

	switch (GRAPHICS_VER(i915)) {
	case 2:
	case 3:
		break;
	case 4:
		if (!IS_G4X(i915))
			break;
		fallthrough;
	case 5:
		g4x_get_stolen_reserved(i915, uncore,
					&reserved_base, &reserved_size);
		break;
	case 6:
		gen6_get_stolen_reserved(i915, uncore,
					 &reserved_base, &reserved_size);
		break;
	case 7:
		if (IS_VALLEYVIEW(i915))
			vlv_get_stolen_reserved(i915, uncore,
						&reserved_base, &reserved_size);
		else
			gen7_get_stolen_reserved(i915, uncore,
						 &reserved_base, &reserved_size);
		break;
	case 8:
	case 9:
		if (IS_LP(i915))
			chv_get_stolen_reserved(i915, uncore,
						&reserved_base, &reserved_size);
		else
			bdw_get_stolen_reserved(i915, uncore,
						&reserved_base, &reserved_size);
		break;
	default:
		MISSING_CASE(GRAPHICS_VER(i915));
		fallthrough;
	case 11:
	case 12:
		icl_get_stolen_reserved(i915, uncore,
					&reserved_base,
					&reserved_size);
		break;
	}

	/*
	 * Our expectation is that the reserved space is at the top of the
	 * stolen region and *never* at the bottom. If we see !reserved_base,
	 * it likely means we failed to read the registers correctly.
	 */
	if (!reserved_base) {
		drm_err(&i915->drm,
			"inconsistent reservation %pa + %pa; ignoring\n",
			&reserved_base, &reserved_size);
		reserved_base = stolen_top;
		reserved_size = 0;
	}

	i915->dsm_reserved =
		(struct resource)DEFINE_RES_MEM(reserved_base, reserved_size);

	if (!resource_contains(&i915->dsm, &i915->dsm_reserved)) {
		drm_err(&i915->drm,
			"Stolen reserved area %pR outside stolen memory %pR\n",
			&i915->dsm_reserved, &i915->dsm);
		return 0;
	}

	/* Exclude the reserved region from driver use */
	mem->region.end = reserved_base - 1;
	mem->io_size = resource_size(&mem->region);

	/*
	 * It is possible for the reserved area to end before the end of stolen
	 * memory, so just consider the start.
	 */
	reserved_total = stolen_top - reserved_base;

	i915->stolen_usable_size =
		resource_size(&i915->dsm) - reserved_total;

	drm_dbg(&i915->drm,
		"Memory reserved for graphics device: %lluK, usable: %lluK\n",
		(u64)resource_size(&i915->dsm) >> 10,
		(u64)i915->stolen_usable_size >> 10);

	if (i915->stolen_usable_size == 0)
		return 0;

	/* Basic memrange allocator for stolen space. */
	drm_mm_init(&i915->mm.stolen, 0, i915->stolen_usable_size);

	return 0;
}

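/*
 * Scribble a poison pattern over a stolen range by mapping it one page at
 * a time through the GGTT error_capture slot. Only active on
 * CONFIG_DRM_I915_DEBUG_GEM builds.
 */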
static void dbg_poison(struct i915_ggtt *ggtt,
		       dma_addr_t addr, resource_size_t size,
		       u8 x)
{
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
	if (!drm_mm_node_allocated(&ggtt->error_capture))
		return;

	if (ggtt->vm.bind_async_flags & I915_VMA_GLOBAL_BIND)
		return; /* beware stop_machine() inversion */

	GEM_BUG_ON(!IS_ALIGNED(size, PAGE_SIZE));

	mutex_lock(&ggtt->error_mutex);
	while (size) {
		void __iomem *s;

		ggtt->vm.insert_page(&ggtt->vm, addr,
				     ggtt->error_capture.start,
				     I915_CACHE_NONE, 0);
		mb();

		s = io_mapping_map_wc(&ggtt->iomap,
				      ggtt->error_capture.start,
				      PAGE_SIZE);
		memset_io(s, x, PAGE_SIZE);
		io_mapping_unmap(s);

		addr += PAGE_SIZE;
		size -= PAGE_SIZE;
	}
	mb();
	ggtt->vm.clear_range(&ggtt->vm, ggtt->error_capture.start, PAGE_SIZE);
	mutex_unlock(&ggtt->error_mutex);
#endif
}

static struct sg_table *
i915_pages_create_for_stolen(struct drm_device *dev,
			     resource_size_t offset, resource_size_t size)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct sg_table *st;
	struct scatterlist *sg;

	GEM_BUG_ON(range_overflows(offset, size, resource_size(&i915->dsm)));

	/*
	 * We hide that we have no struct page backing our stolen object
	 * by wrapping the contiguous physical allocation with a fake
	 * dma mapping in a single scatterlist.
	 */

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (st == NULL)
		return ERR_PTR(-ENOMEM);

	if (sg_alloc_table(st, 1, GFP_KERNEL)) {
		kfree(st);
		return ERR_PTR(-ENOMEM);
	}

	sg = st->sgl;
	sg->offset = 0;
	sg->length = size;

	sg_dma_address(sg) = (dma_addr_t)i915->dsm.start + offset;
	sg_dma_len(sg) = size;

	return st;
}

static int i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct sg_table *pages =
		i915_pages_create_for_stolen(obj->base.dev,
					     obj->stolen->start,
					     obj->stolen->size);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	dbg_poison(to_gt(i915)->ggtt,
		   sg_dma_address(pages->sgl),
		   sg_dma_len(pages->sgl),
		   POISON_INUSE);

	__i915_gem_object_set_pages(obj, pages, obj->stolen->size);

	return 0;
}

static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj,
					     struct sg_table *pages)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	/* Should only be called from i915_gem_object_release_stolen() */

	dbg_poison(to_gt(i915)->ggtt,
		   sg_dma_address(pages->sgl),
		   sg_dma_len(pages->sgl),
		   POISON_FREE);

	sg_free_table(pages);
	kfree(pages);
}

static void
i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct drm_mm_node *stolen = fetch_and_zero(&obj->stolen);

	GEM_BUG_ON(!stolen);
	i915_gem_stolen_remove_node(i915, stolen);
	kfree(stolen);

	i915_gem_object_release_memory_region(obj);
}

static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = {
	.name = "i915_gem_object_stolen",
	.get_pages = i915_gem_object_get_pages_stolen,
	.put_pages = i915_gem_object_put_pages_stolen,
	.release = i915_gem_object_release_stolen,
};

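/*
 * Initialise a GEM object on top of a drm_mm node already reserved from
 * stolen, and pin its backing pages (a single-entry sg_table wrapping the
 * contiguous physical range).
 */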
static int __i915_gem_object_create_stolen(struct intel_memory_region *mem,
					   struct drm_i915_gem_object *obj,
					   struct drm_mm_node *stolen)
{
	static struct lock_class_key lock_class;
	unsigned int cache_level;
	unsigned int flags;
	int err;

	/*
	 * Stolen objects are always physically contiguous since we just
	 * allocate one big block underneath using the drm_mm range allocator.
	 */
	flags = I915_BO_ALLOC_CONTIGUOUS;

	drm_gem_private_object_init(&mem->i915->drm, &obj->base, stolen->size);
	i915_gem_object_init(obj, &i915_gem_object_stolen_ops, &lock_class, flags);

	obj->stolen = stolen;
	obj->read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
	cache_level = HAS_LLC(mem->i915) ? I915_CACHE_LLC : I915_CACHE_NONE;
	i915_gem_object_set_cache_coherency(obj, cache_level);

	if (WARN_ON(!i915_gem_object_trylock(obj, NULL)))
		return -EBUSY;

	i915_gem_object_init_memory_region(obj, mem);

	err = i915_gem_object_pin_pages(obj);
	if (err)
		i915_gem_object_release_memory_region(obj);
	i915_gem_object_unlock(obj);

	return err;
}

static int _i915_gem_object_stolen_init(struct intel_memory_region *mem,
					struct drm_i915_gem_object *obj,
					resource_size_t size,
					resource_size_t page_size,
					unsigned int flags)
{
	struct drm_i915_private *i915 = mem->i915;
	struct drm_mm_node *stolen;
	int ret;

	if (!drm_mm_initialized(&i915->mm.stolen))
		return -ENODEV;

	if (size == 0)
		return -EINVAL;

	stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
	if (!stolen)
		return -ENOMEM;

	ret = i915_gem_stolen_insert_node(i915, stolen, size,
					  mem->min_page_size);
	if (ret)
		goto err_free;

	ret = __i915_gem_object_create_stolen(mem, obj, stolen);
	if (ret)
		goto err_remove;

	return 0;

err_remove:
	i915_gem_stolen_remove_node(i915, stolen);
err_free:
	kfree(stolen);
	return ret;
}

struct drm_i915_gem_object *
i915_gem_object_create_stolen(struct drm_i915_private *i915,
			      resource_size_t size)
{
	return i915_gem_object_create_region(i915->mm.stolen_region, size, 0, 0);
}

static int init_stolen_smem(struct intel_memory_region *mem)
{
	/*
	 * Initialise stolen early so that we may reserve preallocated
	 * objects for the BIOS to KMS transition.
	 */
	return i915_gem_init_stolen(mem);
}

static int release_stolen_smem(struct intel_memory_region *mem)
{
	i915_gem_cleanup_stolen(mem->i915);
	return 0;
}

static const struct intel_memory_region_ops i915_region_stolen_smem_ops = {
	.init = init_stolen_smem,
	.release = release_stolen_smem,
	.init_object = _i915_gem_object_stolen_init,
};

static int init_stolen_lmem(struct intel_memory_region *mem)
{
	int err;

	if (GEM_WARN_ON(resource_size(&mem->region) == 0))
		return -ENODEV;

	if (!io_mapping_init_wc(&mem->iomap,
				mem->io_start,
				mem->io_size))
		return -EIO;

	/*
	 * TODO: For stolen lmem we mostly just care about populating the dsm
	 * related bits and setting up the drm_mm allocator for the range.
	 * Perhaps split up i915_gem_init_stolen() for this.
	 */
	err = i915_gem_init_stolen(mem);
	if (err)
		goto err_fini;

	return 0;

err_fini:
	io_mapping_fini(&mem->iomap);
	return err;
}

static int release_stolen_lmem(struct intel_memory_region *mem)
{
	io_mapping_fini(&mem->iomap);
	i915_gem_cleanup_stolen(mem->i915);
	return 0;
}

static const struct intel_memory_region_ops i915_region_stolen_lmem_ops = {
	.init = init_stolen_lmem,
	.release = release_stolen_lmem,
	.init_object = _i915_gem_object_stolen_init,
};

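/*
 * On platforms with local memory, the stolen region is carved out of lmem:
 * GEN12_DSMBASE gives its offset within the LMEM BAR, and everything from
 * there to the end of the BAR is exposed as the stolen-local region.
 */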
struct intel_memory_region *
i915_gem_stolen_lmem_setup(struct drm_i915_private *i915, u16 type,
			   u16 instance)
{
	struct intel_uncore *uncore = &i915->uncore;
	struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
	struct intel_memory_region *mem;
	resource_size_t min_page_size;
	resource_size_t io_start;
	resource_size_t lmem_size;
	u64 lmem_base;

	lmem_base = intel_uncore_read64(uncore, GEN12_DSMBASE);
	if (GEM_WARN_ON(lmem_base >= pci_resource_len(pdev, 2)))
		return ERR_PTR(-ENODEV);

	lmem_size = pci_resource_len(pdev, 2) - lmem_base;
	io_start = pci_resource_start(pdev, 2) + lmem_base;

	min_page_size = HAS_64K_PAGES(i915) ? I915_GTT_PAGE_SIZE_64K :
						I915_GTT_PAGE_SIZE_4K;

	mem = intel_memory_region_create(i915, lmem_base, lmem_size,
					 min_page_size,
					 io_start, lmem_size,
					 type, instance,
					 &i915_region_stolen_lmem_ops);
	if (IS_ERR(mem))
		return mem;

	/*
	 * TODO: consider creating common helper to just print all the
	 * interesting stuff from intel_memory_region, which we can use for all
	 * our probed regions.
	 */

	drm_dbg(&i915->drm, "Stolen Local memory IO start: %pa\n",
		&mem->io_start);

	intel_memory_region_set_name(mem, "stolen-local");

	mem->private = true;

	return mem;
}

struct intel_memory_region *
i915_gem_stolen_smem_setup(struct drm_i915_private *i915, u16 type,
			   u16 instance)
{
	struct intel_memory_region *mem;

	mem = intel_memory_region_create(i915,
					 intel_graphics_stolen_res.start,
					 resource_size(&intel_graphics_stolen_res),
					 PAGE_SIZE, 0, 0, type, instance,
					 &i915_region_stolen_smem_ops);
	if (IS_ERR(mem))
		return mem;

	intel_memory_region_set_name(mem, "stolen-system");

	mem->private = true;
	return mem;
}

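/*
 * Wrap an object around a range of stolen memory that firmware has already
 * populated (e.g. the pre-OS framebuffer), reserving the exact offset
 * requested instead of letting the allocator pick one.
 */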
struct drm_i915_gem_object *
i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *i915,
					       resource_size_t stolen_offset,
					       resource_size_t size)
{
	struct intel_memory_region *mem = i915->mm.stolen_region;
	struct drm_i915_gem_object *obj;
	struct drm_mm_node *stolen;
	int ret;

	if (!drm_mm_initialized(&i915->mm.stolen))
		return ERR_PTR(-ENODEV);

	drm_dbg(&i915->drm,
		"creating preallocated stolen object: stolen_offset=%pa, size=%pa\n",
		&stolen_offset, &size);

	/* KISS and expect everything to be page-aligned */
	if (GEM_WARN_ON(size == 0) ||
	    GEM_WARN_ON(!IS_ALIGNED(size, mem->min_page_size)) ||
	    GEM_WARN_ON(!IS_ALIGNED(stolen_offset, mem->min_page_size)))
		return ERR_PTR(-EINVAL);

	stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
	if (!stolen)
		return ERR_PTR(-ENOMEM);

	stolen->start = stolen_offset;
	stolen->size = size;
	mutex_lock(&i915->mm.stolen_lock);
	ret = drm_mm_reserve_node(&i915->mm.stolen, stolen);
	mutex_unlock(&i915->mm.stolen_lock);
	if (ret)
		goto err_free;

	obj = i915_gem_object_alloc();
	if (!obj) {
		ret = -ENOMEM;
		goto err_stolen;
	}

	ret = __i915_gem_object_create_stolen(mem, obj, stolen);
	if (ret)
		goto err_object_free;

	i915_gem_object_set_cache_coherency(obj, I915_CACHE_NONE);
	return obj;

err_object_free:
	i915_gem_object_free(obj);
err_stolen:
	i915_gem_stolen_remove_node(i915, stolen);
err_free:
	kfree(stolen);
	return ERR_PTR(ret);
}

bool i915_gem_object_is_stolen(const struct drm_i915_gem_object *obj)
{
	return obj->ops == &i915_gem_object_stolen_ops;
}