/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2008-2012 Intel Corporation
 */

#include <linux/errno.h>
#include <linux/mutex.h>

#include <drm/drm_mm.h>
#include <drm/i915_drm.h>

#include "gem/i915_gem_region.h"
#include "i915_drv.h"
#include "i915_gem_stolen.h"
#include "i915_vgpu.h"

/*
 * The BIOS typically reserves some of the system's memory for the exclusive
 * use of the integrated graphics. This memory is no longer available for
 * use by the OS, so the user finds that their system has less memory
 * available than they installed. We refer to this memory as stolen.
 *
 * The BIOS will allocate its framebuffer from the stolen memory. Our
 * goal is to try to reuse that object for our own fbcon, which must always
 * be available for panics. Anything else we can reuse the stolen memory
 * for is a boon.
 */

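/*
 * Carve a range out of the stolen address space. The caller supplies the
 * drm_mm_node storage; the allocation is confined to [start, end) and, on
 * gen8+, the first page of stolen is always skipped
 * (WaSkipStolenMemoryFirstPage).
 */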
int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *i915,
					 struct drm_mm_node *node, u64 size,
					 unsigned alignment, u64 start, u64 end)
{
	int ret;

	if (!drm_mm_initialized(&i915->mm.stolen))
		return -ENODEV;

	/* WaSkipStolenMemoryFirstPage:bdw+ */
	if (INTEL_GEN(i915) >= 8 && start < 4096)
		start = 4096;

	mutex_lock(&i915->mm.stolen_lock);
	ret = drm_mm_insert_node_in_range(&i915->mm.stolen, node,
					  size, alignment, 0,
					  start, end, DRM_MM_INSERT_BEST);
	mutex_unlock(&i915->mm.stolen_lock);

	return ret;
}

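/*
 * As above, but with no explicit range: allocate anywhere in stolen above
 * I915_GEM_STOLEN_BIAS, leaving the bottom of the region untouched.
 */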
int i915_gem_stolen_insert_node(struct drm_i915_private *i915,
				struct drm_mm_node *node, u64 size,
				unsigned alignment)
{
	return i915_gem_stolen_insert_node_in_range(i915, node,
						    size, alignment,
						    I915_GEM_STOLEN_BIAS,
						    U64_MAX);
}

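/* Return a node previously claimed with i915_gem_stolen_insert_node*(). */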
void i915_gem_stolen_remove_node(struct drm_i915_private *i915,
				 struct drm_mm_node *node)
{
	mutex_lock(&i915->mm.stolen_lock);
	drm_mm_remove_node(node);
	mutex_unlock(&i915->mm.stolen_lock);
}

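/*
 * Trim the stolen region so that it does not overlap the GTT (which older
 * hardware places inside stolen) and then reserve the physical range so
 * that nothing else in the system can claim it behind our back.
 */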
static int i915_adjust_stolen(struct drm_i915_private *i915,
			      struct resource *dsm)
{
	struct i915_ggtt *ggtt = &i915->ggtt;
	struct intel_uncore *uncore = ggtt->vm.gt->uncore;
	struct resource *r;

	if (dsm->start == 0 || dsm->end <= dsm->start)
		return -EINVAL;

	/*
	 * TODO: We have yet to encounter the case where the GTT wasn't at the
	 * end of stolen. With that assumption we could simplify this.
	 */

	/* Make sure we don't clobber the GTT if it's within stolen memory */
	if (INTEL_GEN(i915) <= 4 &&
	    !IS_G33(i915) && !IS_PINEVIEW(i915) && !IS_G4X(i915)) {
		struct resource stolen[2] = {*dsm, *dsm};
		struct resource ggtt_res;
		resource_size_t ggtt_start;

		ggtt_start = intel_uncore_read(uncore, PGTBL_CTL);
		if (IS_GEN(i915, 4))
			ggtt_start = (ggtt_start & PGTBL_ADDRESS_LO_MASK) |
				     (ggtt_start & PGTBL_ADDRESS_HI_MASK) << 28;
		else
			ggtt_start &= PGTBL_ADDRESS_LO_MASK;

		ggtt_res =
			(struct resource) DEFINE_RES_MEM(ggtt_start,
							 ggtt_total_entries(ggtt) * 4);

		if (ggtt_res.start >= stolen[0].start && ggtt_res.start < stolen[0].end)
			stolen[0].end = ggtt_res.start;
		if (ggtt_res.end > stolen[1].start && ggtt_res.end <= stolen[1].end)
			stolen[1].start = ggtt_res.end;

		/* Pick the larger of the two chunks */
		if (resource_size(&stolen[0]) > resource_size(&stolen[1]))
			*dsm = stolen[0];
		else
			*dsm = stolen[1];

		if (stolen[0].start != stolen[1].start ||
		    stolen[0].end != stolen[1].end) {
			drm_dbg(&i915->drm,
				"GTT within stolen memory at %pR\n",
				&ggtt_res);
			drm_dbg(&i915->drm, "Stolen memory adjusted to %pR\n",
				dsm);
		}
	}

	/*
	 * Verify that nothing else uses this physical address. Stolen
	 * memory should be reserved by the BIOS and hidden from the
	 * kernel. So if the region is already marked as busy, something
	 * is seriously wrong.
	 */
	r = devm_request_mem_region(i915->drm.dev, dsm->start,
				    resource_size(dsm),
				    "Graphics Stolen Memory");
	if (r == NULL) {
		/*
		 * One more attempt, but this time requesting the region from
		 * start + 1, as we have seen that this resolves the region
		 * conflict with the PCI Bus.
		 * This is a BIOS w/a: some BIOSes wrap stolen in the root
		 * PCI bus, but have an off-by-one error. Hence retry the
		 * reservation starting from 1 instead of 0.
		 * There are also BIOSes with an off-by-one on the other end.
		 */
		r = devm_request_mem_region(i915->drm.dev, dsm->start + 1,
					    resource_size(dsm) - 2,
					    "Graphics Stolen Memory");
		/*
		 * GEN3 firmware likes to smash pci bridges into the stolen
		 * range. Apparently this works.
		 */
		if (!r && !IS_GEN(i915, 3)) {
			drm_err(&i915->drm,
				"conflict detected with stolen region: %pR\n",
				dsm);

			return -EBUSY;
		}
	}

	return 0;
}

static void i915_gem_cleanup_stolen(struct drm_i915_private *i915)
{
	if (!drm_mm_initialized(&i915->mm.stolen))
		return;

	drm_mm_takedown(&i915->mm.stolen);
}

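/*
 * The *_get_stolen_reserved() helpers below decode the per-generation
 * STOLEN_RESERVED registers to find the portion of stolen that the
 * hardware/firmware keeps for itself; that slice is excluded from the
 * space our allocator may hand out.
 */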
static void g4x_get_stolen_reserved(struct drm_i915_private *i915,
				    struct intel_uncore *uncore,
				    resource_size_t *base,
				    resource_size_t *size)
{
	u32 reg_val = intel_uncore_read(uncore,
					IS_GM45(i915) ?
					CTG_STOLEN_RESERVED :
					ELK_STOLEN_RESERVED);
	resource_size_t stolen_top = i915->dsm.end + 1;

	drm_dbg(&i915->drm, "%s_STOLEN_RESERVED = %08x\n",
		IS_GM45(i915) ? "CTG" : "ELK", reg_val);

	if ((reg_val & G4X_STOLEN_RESERVED_ENABLE) == 0)
		return;

	/*
	 * Whether ILK really reuses the ELK register for this is unclear.
	 * Let's see if we catch anyone with this supposedly enabled on ILK.
	 */
	drm_WARN(&i915->drm, IS_GEN(i915, 5),
		 "ILK stolen reserved found? 0x%08x\n",
		 reg_val);

	if (!(reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK))
		return;

	*base = (reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK) << 16;
	drm_WARN_ON(&i915->drm,
		    (reg_val & G4X_STOLEN_RESERVED_ADDR1_MASK) < *base);

	*size = stolen_top - *base;
}

static void gen6_get_stolen_reserved(struct drm_i915_private *i915,
				     struct intel_uncore *uncore,
				     resource_size_t *base,
				     resource_size_t *size)
{
	u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);

	drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val);

	if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
		return;

	*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;

	switch (reg_val & GEN6_STOLEN_RESERVED_SIZE_MASK) {
	case GEN6_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN6_STOLEN_RESERVED_512K:
		*size = 512 * 1024;
		break;
	case GEN6_STOLEN_RESERVED_256K:
		*size = 256 * 1024;
		break;
	case GEN6_STOLEN_RESERVED_128K:
		*size = 128 * 1024;
		break;
	default:
		*size = 1024 * 1024;
		MISSING_CASE(reg_val & GEN6_STOLEN_RESERVED_SIZE_MASK);
	}
}

static void vlv_get_stolen_reserved(struct drm_i915_private *i915,
				    struct intel_uncore *uncore,
				    resource_size_t *base,
				    resource_size_t *size)
{
	u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);
	resource_size_t stolen_top = i915->dsm.end + 1;

	drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val);

	if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
		return;

	switch (reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK) {
	default:
		MISSING_CASE(reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK);
		fallthrough;
	case GEN7_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	}

	/*
	 * On vlv, the ADDR_MASK portion is left as 0 and HW deduces the
	 * reserved location as (top - size).
	 */
	*base = stolen_top - *size;
}

static void gen7_get_stolen_reserved(struct drm_i915_private *i915,
				     struct intel_uncore *uncore,
				     resource_size_t *base,
				     resource_size_t *size)
{
	u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);

	drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val);

	if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
		return;

	*base = reg_val & GEN7_STOLEN_RESERVED_ADDR_MASK;

	switch (reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK) {
	case GEN7_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN7_STOLEN_RESERVED_256K:
		*size = 256 * 1024;
		break;
	default:
		*size = 1024 * 1024;
		MISSING_CASE(reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK);
	}
}

static void chv_get_stolen_reserved(struct drm_i915_private *i915,
				    struct intel_uncore *uncore,
				    resource_size_t *base,
				    resource_size_t *size)
{
	u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);

	drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val);

	if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
		return;

	*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;

	switch (reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK) {
	case GEN8_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_2M:
		*size = 2 * 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_4M:
		*size = 4 * 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_8M:
		*size = 8 * 1024 * 1024;
		break;
	default:
		*size = 8 * 1024 * 1024;
		MISSING_CASE(reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK);
	}
}

static void bdw_get_stolen_reserved(struct drm_i915_private *i915,
				    struct intel_uncore *uncore,
				    resource_size_t *base,
				    resource_size_t *size)
{
	u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);
	resource_size_t stolen_top = i915->dsm.end + 1;

	drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val);

	if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
		return;

	if (!(reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK))
		return;

	*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;
	*size = stolen_top - *base;
}

static void icl_get_stolen_reserved(struct drm_i915_private *i915,
				    struct intel_uncore *uncore,
				    resource_size_t *base,
				    resource_size_t *size)
{
	u64 reg_val = intel_uncore_read64(uncore, GEN6_STOLEN_RESERVED);

	drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = 0x%016llx\n", reg_val);

	*base = reg_val & GEN11_STOLEN_RESERVED_ADDR_MASK;

	switch (reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK) {
	case GEN8_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_2M:
		*size = 2 * 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_4M:
		*size = 4 * 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_8M:
		*size = 8 * 1024 * 1024;
		break;
	default:
		*size = 8 * 1024 * 1024;
		MISSING_CASE(reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK);
	}
}

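/*
 * Probe the stolen region reported by the firmware, carve out the
 * hardware-reserved tail, and initialise a simple drm_mm allocator over
 * whatever remains for our own use.
 */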
static int i915_gem_init_stolen(struct drm_i915_private *i915)
{
	struct intel_uncore *uncore = &i915->uncore;
	resource_size_t reserved_base, stolen_top;
	resource_size_t reserved_total, reserved_size;

	mutex_init(&i915->mm.stolen_lock);

	if (intel_vgpu_active(i915)) {
		drm_notice(&i915->drm,
			   "%s, disabling use of stolen memory\n",
			   "iGVT-g active");
		return 0;
	}

	if (intel_vtd_active() && INTEL_GEN(i915) < 8) {
		drm_notice(&i915->drm,
			   "%s, disabling use of stolen memory\n",
			   "DMAR active");
		return 0;
	}

	if (resource_size(&intel_graphics_stolen_res) == 0)
		return 0;

	i915->dsm = intel_graphics_stolen_res;

	if (i915_adjust_stolen(i915, &i915->dsm))
		return 0;

	GEM_BUG_ON(i915->dsm.start == 0);
	GEM_BUG_ON(i915->dsm.end <= i915->dsm.start);

	stolen_top = i915->dsm.end + 1;
	reserved_base = stolen_top;
	reserved_size = 0;

	switch (INTEL_GEN(i915)) {
	case 2:
	case 3:
		break;
	case 4:
		if (!IS_G4X(i915))
			break;
		fallthrough;
	case 5:
		g4x_get_stolen_reserved(i915, uncore,
					&reserved_base, &reserved_size);
		break;
	case 6:
		gen6_get_stolen_reserved(i915, uncore,
					 &reserved_base, &reserved_size);
		break;
	case 7:
		if (IS_VALLEYVIEW(i915))
			vlv_get_stolen_reserved(i915, uncore,
						&reserved_base, &reserved_size);
		else
			gen7_get_stolen_reserved(i915, uncore,
						 &reserved_base, &reserved_size);
		break;
	case 8:
	case 9:
	case 10:
		if (IS_LP(i915))
			chv_get_stolen_reserved(i915, uncore,
						&reserved_base, &reserved_size);
		else
			bdw_get_stolen_reserved(i915, uncore,
						&reserved_base, &reserved_size);
		break;
	default:
		MISSING_CASE(INTEL_GEN(i915));
		fallthrough;
	case 11:
	case 12:
		icl_get_stolen_reserved(i915, uncore,
					&reserved_base,
					&reserved_size);
		break;
	}

	/*
	 * Our expectation is that the reserved space is at the top of the
	 * stolen region and *never* at the bottom. If we see !reserved_base,
	 * it likely means we failed to read the registers correctly.
	 */
	if (!reserved_base) {
		drm_err(&i915->drm,
			"inconsistent reservation %pa + %pa; ignoring\n",
			&reserved_base, &reserved_size);
		reserved_base = stolen_top;
		reserved_size = 0;
	}

	i915->dsm_reserved =
		(struct resource)DEFINE_RES_MEM(reserved_base, reserved_size);

	if (!resource_contains(&i915->dsm, &i915->dsm_reserved)) {
		drm_err(&i915->drm,
			"Stolen reserved area %pR outside stolen memory %pR\n",
			&i915->dsm_reserved, &i915->dsm);
		return 0;
	}

	/*
	 * It is possible for the reserved area to end before the end of
	 * stolen memory, so just consider the start.
	 */
	reserved_total = stolen_top - reserved_base;

	drm_dbg(&i915->drm,
		"Memory reserved for graphics device: %lluK, usable: %lluK\n",
		(u64)resource_size(&i915->dsm) >> 10,
		((u64)resource_size(&i915->dsm) - reserved_total) >> 10);

	i915->stolen_usable_size =
		resource_size(&i915->dsm) - reserved_total;

	/* Basic memrange allocator for stolen space. */
	drm_mm_init(&i915->mm.stolen, 0, i915->stolen_usable_size);

	return 0;
}

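/*
 * Fill a range of stolen with a poison byte by mapping it one page at a
 * time through the GGTT error-capture slot. Compiled out unless
 * CONFIG_DRM_I915_DEBUG_GEM is enabled.
 */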
static void dbg_poison(struct i915_ggtt *ggtt,
		       dma_addr_t addr, resource_size_t size,
		       u8 x)
{
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
	if (!drm_mm_node_allocated(&ggtt->error_capture))
		return;

	if (ggtt->vm.bind_async_flags & I915_VMA_GLOBAL_BIND)
		return; /* beware stop_machine() inversion */

	GEM_BUG_ON(!IS_ALIGNED(size, PAGE_SIZE));

	mutex_lock(&ggtt->error_mutex);
	while (size) {
		void __iomem *s;

		ggtt->vm.insert_page(&ggtt->vm, addr,
				     ggtt->error_capture.start,
				     I915_CACHE_NONE, 0);
		mb();

		s = io_mapping_map_wc(&ggtt->iomap,
				      ggtt->error_capture.start,
				      PAGE_SIZE);
		memset_io(s, x, PAGE_SIZE);
		io_mapping_unmap(s);

		addr += PAGE_SIZE;
		size -= PAGE_SIZE;
	}
	mb();
	ggtt->vm.clear_range(&ggtt->vm, ggtt->error_capture.start, PAGE_SIZE);
	mutex_unlock(&ggtt->error_mutex);
#endif
}

static struct sg_table *
i915_pages_create_for_stolen(struct drm_device *dev,
			     resource_size_t offset, resource_size_t size)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct sg_table *st;
	struct scatterlist *sg;

	GEM_BUG_ON(range_overflows(offset, size, resource_size(&i915->dsm)));

	/*
	 * We hide that we have no struct page backing our stolen object
	 * by wrapping the contiguous physical allocation with a fake
	 * dma mapping in a single scatterlist.
	 */

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (st == NULL)
		return ERR_PTR(-ENOMEM);

	if (sg_alloc_table(st, 1, GFP_KERNEL)) {
		kfree(st);
		return ERR_PTR(-ENOMEM);
	}

	sg = st->sgl;
	sg->offset = 0;
	sg->length = size;

	sg_dma_address(sg) = (dma_addr_t)i915->dsm.start + offset;
	sg_dma_len(sg) = size;

	return st;
}

static int i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj)
{
	struct sg_table *pages =
		i915_pages_create_for_stolen(obj->base.dev,
					     obj->stolen->start,
					     obj->stolen->size);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	dbg_poison(&to_i915(obj->base.dev)->ggtt,
		   sg_dma_address(pages->sgl),
		   sg_dma_len(pages->sgl),
		   POISON_INUSE);

	__i915_gem_object_set_pages(obj, pages, obj->stolen->size);

	return 0;
}

static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj,
					     struct sg_table *pages)
{
	/* Should only be called from i915_gem_object_release_stolen() */

	dbg_poison(&to_i915(obj->base.dev)->ggtt,
		   sg_dma_address(pages->sgl),
		   sg_dma_len(pages->sgl),
		   POISON_FREE);

	sg_free_table(pages);
	kfree(pages);
}

static void
i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct drm_mm_node *stolen = fetch_and_zero(&obj->stolen);

	GEM_BUG_ON(!stolen);

	i915_gem_object_release_memory_region(obj);

	i915_gem_stolen_remove_node(i915, stolen);
	kfree(stolen);
}

static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = {
	.name = "i915_gem_object_stolen",
	.get_pages = i915_gem_object_get_pages_stolen,
	.put_pages = i915_gem_object_put_pages_stolen,
	.release = i915_gem_object_release_stolen,
};

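/*
 * Wrap an already-reserved drm_mm node in a GEM object: the object has no
 * struct pages of its own, it simply points at the stolen range and pins
 * its backing store immediately.
 */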
static struct drm_i915_gem_object *
__i915_gem_object_create_stolen(struct intel_memory_region *mem,
				struct drm_mm_node *stolen)
{
	static struct lock_class_key lock_class;
	struct drm_i915_gem_object *obj;
	unsigned int cache_level;
	int err = -ENOMEM;

	obj = i915_gem_object_alloc();
	if (!obj)
		goto err;

	drm_gem_private_object_init(&mem->i915->drm, &obj->base, stolen->size);
	i915_gem_object_init(obj, &i915_gem_object_stolen_ops, &lock_class);

	obj->stolen = stolen;
	obj->read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
	cache_level = HAS_LLC(mem->i915) ? I915_CACHE_LLC : I915_CACHE_NONE;
	i915_gem_object_set_cache_coherency(obj, cache_level);

	err = i915_gem_object_pin_pages(obj);
	if (err)
		goto cleanup;

	i915_gem_object_init_memory_region(obj, mem, 0);

	return obj;

cleanup:
	i915_gem_object_free(obj);
err:
	return ERR_PTR(err);
}

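/*
 * Back-end for the region's create_object hook: grab a fresh range from the
 * stolen allocator and wrap it in an object.
 */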
static struct drm_i915_gem_object *
_i915_gem_object_create_stolen(struct intel_memory_region *mem,
			       resource_size_t size,
			       unsigned int flags)
{
	struct drm_i915_private *i915 = mem->i915;
	struct drm_i915_gem_object *obj;
	struct drm_mm_node *stolen;
	int ret;

	if (!drm_mm_initialized(&i915->mm.stolen))
		return ERR_PTR(-ENODEV);

	if (size == 0)
		return ERR_PTR(-EINVAL);

	stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
	if (!stolen)
		return ERR_PTR(-ENOMEM);

	ret = i915_gem_stolen_insert_node(i915, stolen, size, 4096);
	if (ret) {
		obj = ERR_PTR(ret);
		goto err_free;
	}

	obj = __i915_gem_object_create_stolen(mem, stolen);
	if (IS_ERR(obj))
		goto err_remove;

	return obj;

err_remove:
	i915_gem_stolen_remove_node(i915, stolen);
err_free:
	kfree(stolen);
	return obj;
}

struct drm_i915_gem_object *
i915_gem_object_create_stolen(struct drm_i915_private *i915,
			      resource_size_t size)
{
	return i915_gem_object_create_region(i915->mm.regions[INTEL_REGION_STOLEN],
					     size, I915_BO_ALLOC_CONTIGUOUS);
}

static int init_stolen(struct intel_memory_region *mem)
{
	intel_memory_region_set_name(mem, "stolen");

	/*
	 * Initialise stolen early so that we may reserve preallocated
	 * objects for the BIOS to KMS transition.
	 */
	return i915_gem_init_stolen(mem->i915);
}

static void release_stolen(struct intel_memory_region *mem)
{
	i915_gem_cleanup_stolen(mem->i915);
}

static const struct intel_memory_region_ops i915_region_stolen_ops = {
	.init = init_stolen,
	.release = release_stolen,
	.create_object = _i915_gem_object_create_stolen,
};

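/* Register stolen memory as an intel_memory_region using the hooks above. */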
struct intel_memory_region *i915_gem_stolen_setup(struct drm_i915_private *i915)
{
	return intel_memory_region_create(i915,
					  intel_graphics_stolen_res.start,
					  resource_size(&intel_graphics_stolen_res),
					  PAGE_SIZE, 0,
					  &i915_region_stolen_ops);
}

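/*
 * Create an object for a range that the BIOS/GOP has already populated
 * (e.g. the boot framebuffer): reserve exactly [stolen_offset,
 * stolen_offset + size) in the stolen allocator rather than letting it
 * pick a slot.
 */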
struct drm_i915_gem_object *
i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *i915,
					       resource_size_t stolen_offset,
					       resource_size_t size)
{
	struct intel_memory_region *mem = i915->mm.regions[INTEL_REGION_STOLEN];
	struct drm_i915_gem_object *obj;
	struct drm_mm_node *stolen;
	int ret;

	if (!drm_mm_initialized(&i915->mm.stolen))
		return ERR_PTR(-ENODEV);

	drm_dbg(&i915->drm,
		"creating preallocated stolen object: stolen_offset=%pa, size=%pa\n",
		&stolen_offset, &size);

	/* KISS and expect everything to be page-aligned */
	if (GEM_WARN_ON(size == 0) ||
	    GEM_WARN_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE)) ||
	    GEM_WARN_ON(!IS_ALIGNED(stolen_offset, I915_GTT_MIN_ALIGNMENT)))
		return ERR_PTR(-EINVAL);

	stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
	if (!stolen)
		return ERR_PTR(-ENOMEM);

	stolen->start = stolen_offset;
	stolen->size = size;
	mutex_lock(&i915->mm.stolen_lock);
	ret = drm_mm_reserve_node(&i915->mm.stolen, stolen);
	mutex_unlock(&i915->mm.stolen_lock);
	if (ret) {
		obj = ERR_PTR(ret);
		goto err_free;
	}

	obj = __i915_gem_object_create_stolen(mem, stolen);
	if (IS_ERR(obj))
		goto err_stolen;

	i915_gem_object_set_cache_coherency(obj, I915_CACHE_NONE);
	return obj;

err_stolen:
	i915_gem_stolen_remove_node(i915, stolen);
err_free:
	kfree(stolen);
	return obj;
}