/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2008-2012 Intel Corporation
 */

#include <linux/errno.h>
#include <linux/mutex.h>

#include <drm/drm_mm.h>
#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "i915_gem_stolen.h"

/*
 * The BIOS typically reserves some of the system's memory for the exclusive
 * use of the integrated graphics. This memory is no longer available for
 * use by the OS, so the user finds that their system has less memory
 * available than they installed. We refer to this memory as stolen.
 *
 * The BIOS will allocate its framebuffer from the stolen memory. Our
 * goal is to try to reuse that object for our own fbcon, which must always
 * be available for panics. Anything else we can reuse the stolen memory
 * for is a boon.
 */

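/*
 * Reserve a range inside the stolen-memory allocator. The node is inserted
 * into dev_priv->mm.stolen under the stolen_lock, constrained to
 * [start, end). Returns -ENODEV if stolen memory was never set up.
 */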
int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv,
					 struct drm_mm_node *node, u64 size,
					 unsigned alignment, u64 start, u64 end)
{
	int ret;

	if (!drm_mm_initialized(&dev_priv->mm.stolen))
		return -ENODEV;

	/* WaSkipStolenMemoryFirstPage:bdw+ */
	if (INTEL_GEN(dev_priv) >= 8 && start < 4096)
		start = 4096;

	mutex_lock(&dev_priv->mm.stolen_lock);
	ret = drm_mm_insert_node_in_range(&dev_priv->mm.stolen, node,
					  size, alignment, 0,
					  start, end, DRM_MM_INSERT_BEST);
	mutex_unlock(&dev_priv->mm.stolen_lock);

	return ret;
}

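/* Convenience wrapper: allow the allocation to land anywhere within stolen. */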
int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv,
				struct drm_mm_node *node, u64 size,
				unsigned alignment)
{
	return i915_gem_stolen_insert_node_in_range(dev_priv, node, size,
						    alignment, 0, U64_MAX);
}

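/* Remove a previously inserted node from the stolen allocator. */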
void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
				 struct drm_mm_node *node)
{
	mutex_lock(&dev_priv->mm.stolen_lock);
	drm_mm_remove_node(node);
	mutex_unlock(&dev_priv->mm.stolen_lock);
}

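/*
 * Sanitise the stolen region reported by the firmware: trim away any part
 * that overlaps the GTT on old platforms where the GTT lives inside stolen,
 * and claim the remaining range in the resource tree so nothing else can
 * grab the same physical addresses.
 */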
static int i915_adjust_stolen(struct drm_i915_private *dev_priv,
			      struct resource *dsm)
{
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct resource *r;

	if (dsm->start == 0 || dsm->end <= dsm->start)
		return -EINVAL;

	/*
	 * TODO: We have yet to encounter the case where the GTT wasn't at the
	 * end of stolen. With that assumption we could simplify this.
	 */

	/* Make sure we don't clobber the GTT if it's within stolen memory */
	if (INTEL_GEN(dev_priv) <= 4 &&
	    !IS_G33(dev_priv) && !IS_PINEVIEW(dev_priv) && !IS_G4X(dev_priv)) {
		struct resource stolen[2] = {*dsm, *dsm};
		struct resource ggtt_res;
		resource_size_t ggtt_start;

		ggtt_start = I915_READ(PGTBL_CTL);
		if (IS_GEN(dev_priv, 4))
			ggtt_start = (ggtt_start & PGTBL_ADDRESS_LO_MASK) |
				     (ggtt_start & PGTBL_ADDRESS_HI_MASK) << 28;
		else
			ggtt_start &= PGTBL_ADDRESS_LO_MASK;

		ggtt_res =
			(struct resource) DEFINE_RES_MEM(ggtt_start,
							 ggtt_total_entries(ggtt) * 4);

		if (ggtt_res.start >= stolen[0].start && ggtt_res.start < stolen[0].end)
			stolen[0].end = ggtt_res.start;
		if (ggtt_res.end > stolen[1].start && ggtt_res.end <= stolen[1].end)
			stolen[1].start = ggtt_res.end;

		/* Pick the larger of the two chunks */
		if (resource_size(&stolen[0]) > resource_size(&stolen[1]))
			*dsm = stolen[0];
		else
			*dsm = stolen[1];

		if (stolen[0].start != stolen[1].start ||
		    stolen[0].end != stolen[1].end) {
			DRM_DEBUG_DRIVER("GTT within stolen memory at %pR\n", &ggtt_res);
			DRM_DEBUG_DRIVER("Stolen memory adjusted to %pR\n", dsm);
		}
	}

	/*
	 * Verify that nothing else uses this physical address. Stolen
	 * memory should be reserved by the BIOS and hidden from the
	 * kernel. So if the region is already marked as busy, something
	 * is seriously wrong.
	 */
	r = devm_request_mem_region(dev_priv->drm.dev, dsm->start,
				    resource_size(dsm),
				    "Graphics Stolen Memory");
	if (r == NULL) {
		/*
		 * Make one more attempt, this time requesting the region from
		 * start + 1, as we have seen that this resolves the region
		 * conflict with the PCI bus.
		 * This is a BIOS w/a: some BIOSes wrap stolen in the root
		 * PCI bus, but have an off-by-one error. Hence retry the
		 * reservation starting from 1 instead of 0.
		 * There are also BIOSes with an off-by-one on the other end.
		 */
		r = devm_request_mem_region(dev_priv->drm.dev, dsm->start + 1,
					    resource_size(dsm) - 2,
					    "Graphics Stolen Memory");
		/*
		 * GEN3 firmware likes to smash pci bridges into the stolen
		 * range. Apparently this works.
		 */
		if (r == NULL && !IS_GEN(dev_priv, 3)) {
			DRM_ERROR("conflict detected with stolen region: %pR\n",
				  dsm);

			return -EBUSY;
		}
	}

	return 0;
}

void i915_gem_cleanup_stolen(struct drm_i915_private *dev_priv)
{
	if (!drm_mm_initialized(&dev_priv->mm.stolen))
		return;

	drm_mm_takedown(&dev_priv->mm.stolen);
}

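/*
 * The *_get_stolen_reserved() helpers decode the per-generation
 * STOLEN_RESERVED register into the base and size of the portion of stolen
 * memory that is reserved, and therefore excluded from the usable range
 * handed to the stolen allocator in i915_gem_init_stolen().
 */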
static void g4x_get_stolen_reserved(struct drm_i915_private *dev_priv,
				    resource_size_t *base,
				    resource_size_t *size)
{
	u32 reg_val = I915_READ(IS_GM45(dev_priv) ?
				CTG_STOLEN_RESERVED :
				ELK_STOLEN_RESERVED);
	resource_size_t stolen_top = dev_priv->dsm.end + 1;

	DRM_DEBUG_DRIVER("%s_STOLEN_RESERVED = %08x\n",
			 IS_GM45(dev_priv) ? "CTG" : "ELK", reg_val);

	if ((reg_val & G4X_STOLEN_RESERVED_ENABLE) == 0)
		return;

	/*
	 * Whether ILK really reuses the ELK register for this is unclear.
	 * Let's see if we catch anyone with this supposedly enabled on ILK.
	 */
	WARN(IS_GEN(dev_priv, 5), "ILK stolen reserved found? 0x%08x\n",
	     reg_val);

	if (!(reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK))
		return;

	*base = (reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK) << 16;
	WARN_ON((reg_val & G4X_STOLEN_RESERVED_ADDR1_MASK) < *base);

	*size = stolen_top - *base;
}

static void gen6_get_stolen_reserved(struct drm_i915_private *dev_priv,
				     resource_size_t *base,
				     resource_size_t *size)
{
	u32 reg_val = I915_READ(GEN6_STOLEN_RESERVED);

	DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = %08x\n", reg_val);

	if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
		return;

	*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;

	switch (reg_val & GEN6_STOLEN_RESERVED_SIZE_MASK) {
	case GEN6_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN6_STOLEN_RESERVED_512K:
		*size = 512 * 1024;
		break;
	case GEN6_STOLEN_RESERVED_256K:
		*size = 256 * 1024;
		break;
	case GEN6_STOLEN_RESERVED_128K:
		*size = 128 * 1024;
		break;
	default:
		*size = 1024 * 1024;
		MISSING_CASE(reg_val & GEN6_STOLEN_RESERVED_SIZE_MASK);
	}
}

static void vlv_get_stolen_reserved(struct drm_i915_private *dev_priv,
				    resource_size_t *base,
				    resource_size_t *size)
{
	u32 reg_val = I915_READ(GEN6_STOLEN_RESERVED);
	resource_size_t stolen_top = dev_priv->dsm.end + 1;

	DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = %08x\n", reg_val);

	if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
		return;

	switch (reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK) {
	default:
		MISSING_CASE(reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK);
		/* fall through */
	case GEN7_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	}

	/*
	 * On vlv, the ADDR_MASK portion is left as 0 and HW deduces the
	 * reserved location as (top - size).
	 */
	*base = stolen_top - *size;
}

static void gen7_get_stolen_reserved(struct drm_i915_private *dev_priv,
				     resource_size_t *base,
				     resource_size_t *size)
{
	u32 reg_val = I915_READ(GEN6_STOLEN_RESERVED);

	DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = %08x\n", reg_val);

	if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
		return;

	*base = reg_val & GEN7_STOLEN_RESERVED_ADDR_MASK;

	switch (reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK) {
	case GEN7_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN7_STOLEN_RESERVED_256K:
		*size = 256 * 1024;
		break;
	default:
		*size = 1024 * 1024;
		MISSING_CASE(reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK);
	}
}

static void chv_get_stolen_reserved(struct drm_i915_private *dev_priv,
				    resource_size_t *base,
				    resource_size_t *size)
{
	u32 reg_val = I915_READ(GEN6_STOLEN_RESERVED);

	DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = %08x\n", reg_val);

	if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
		return;

	*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;

	switch (reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK) {
	case GEN8_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_2M:
		*size = 2 * 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_4M:
		*size = 4 * 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_8M:
		*size = 8 * 1024 * 1024;
		break;
	default:
		*size = 8 * 1024 * 1024;
		MISSING_CASE(reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK);
	}
}

static void bdw_get_stolen_reserved(struct drm_i915_private *dev_priv,
				    resource_size_t *base,
				    resource_size_t *size)
{
	u32 reg_val = I915_READ(GEN6_STOLEN_RESERVED);
	resource_size_t stolen_top = dev_priv->dsm.end + 1;

	DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = %08x\n", reg_val);

	if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
		return;

	if (!(reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK))
		return;

	*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;
	*size = stolen_top - *base;
}

static void icl_get_stolen_reserved(struct drm_i915_private *i915,
				    resource_size_t *base,
				    resource_size_t *size)
{
	u64 reg_val = intel_uncore_read64(&i915->uncore, GEN6_STOLEN_RESERVED);

	DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = 0x%016llx\n", reg_val);

	*base = reg_val & GEN11_STOLEN_RESERVED_ADDR_MASK;

	switch (reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK) {
	case GEN8_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_2M:
		*size = 2 * 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_4M:
		*size = 4 * 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_8M:
		*size = 8 * 1024 * 1024;
		break;
	default:
		*size = 8 * 1024 * 1024;
		MISSING_CASE(reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK);
	}
}

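/*
 * Set up the stolen-memory allocator: take the firmware-reported stolen
 * resource, trim it via i915_adjust_stolen(), subtract the per-generation
 * reserved area at the top, and initialise a drm_mm spanning the remaining
 * usable space. Stolen is left disabled under a virtualised GPU (iGVT-g)
 * or with DMAR/VT-d active on pre-gen8 hardware.
 */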
int i915_gem_init_stolen(struct drm_i915_private *dev_priv)
{
	resource_size_t reserved_base, stolen_top;
	resource_size_t reserved_total, reserved_size;

	mutex_init(&dev_priv->mm.stolen_lock);

	if (intel_vgpu_active(dev_priv)) {
		dev_notice(dev_priv->drm.dev,
			   "%s, disabling use of stolen memory\n",
			   "iGVT-g active");
		return 0;
	}

	if (intel_vtd_active() && INTEL_GEN(dev_priv) < 8) {
		dev_notice(dev_priv->drm.dev,
			   "%s, disabling use of stolen memory\n",
			   "DMAR active");
		return 0;
	}

	if (resource_size(&intel_graphics_stolen_res) == 0)
		return 0;

	dev_priv->dsm = intel_graphics_stolen_res;

	if (i915_adjust_stolen(dev_priv, &dev_priv->dsm))
		return 0;

	GEM_BUG_ON(dev_priv->dsm.start == 0);
	GEM_BUG_ON(dev_priv->dsm.end <= dev_priv->dsm.start);

	stolen_top = dev_priv->dsm.end + 1;
	reserved_base = stolen_top;
	reserved_size = 0;

	switch (INTEL_GEN(dev_priv)) {
	case 2:
	case 3:
		break;
	case 4:
		if (!IS_G4X(dev_priv))
			break;
		/* fall through */
	case 5:
		g4x_get_stolen_reserved(dev_priv,
					&reserved_base, &reserved_size);
		break;
	case 6:
		gen6_get_stolen_reserved(dev_priv,
					 &reserved_base, &reserved_size);
		break;
	case 7:
		if (IS_VALLEYVIEW(dev_priv))
			vlv_get_stolen_reserved(dev_priv,
						&reserved_base, &reserved_size);
		else
			gen7_get_stolen_reserved(dev_priv,
						 &reserved_base, &reserved_size);
		break;
	case 8:
	case 9:
	case 10:
		if (IS_LP(dev_priv))
			chv_get_stolen_reserved(dev_priv,
						&reserved_base, &reserved_size);
		else
			bdw_get_stolen_reserved(dev_priv,
						&reserved_base, &reserved_size);
		break;
	default:
		MISSING_CASE(INTEL_GEN(dev_priv));
		/* fall through */
	case 11:
	case 12:
		icl_get_stolen_reserved(dev_priv, &reserved_base,
					&reserved_size);
		break;
	}

	/*
	 * Our expectation is that the reserved space is at the top of the
	 * stolen region and *never* at the bottom. If we see !reserved_base,
	 * it likely means we failed to read the registers correctly.
	 */
	if (!reserved_base) {
		DRM_ERROR("inconsistent reservation %pa + %pa; ignoring\n",
			  &reserved_base, &reserved_size);
		reserved_base = stolen_top;
		reserved_size = 0;
	}

	dev_priv->dsm_reserved =
		(struct resource) DEFINE_RES_MEM(reserved_base, reserved_size);

	if (!resource_contains(&dev_priv->dsm, &dev_priv->dsm_reserved)) {
		DRM_ERROR("Stolen reserved area %pR outside stolen memory %pR\n",
			  &dev_priv->dsm_reserved, &dev_priv->dsm);
		return 0;
	}

	/*
	 * It is possible for the reserved area to end before the end of
	 * stolen memory, so just consider the start.
	 */
	reserved_total = stolen_top - reserved_base;

	DRM_DEBUG_DRIVER("Memory reserved for graphics device: %lluK, usable: %lluK\n",
			 (u64)resource_size(&dev_priv->dsm) >> 10,
			 ((u64)resource_size(&dev_priv->dsm) - reserved_total) >> 10);

	dev_priv->stolen_usable_size =
		resource_size(&dev_priv->dsm) - reserved_total;

	/* Basic memrange allocator for stolen space. */
	drm_mm_init(&dev_priv->mm.stolen, 0, dev_priv->stolen_usable_size);

	return 0;
}

static struct sg_table *
i915_pages_create_for_stolen(struct drm_device *dev,
			     resource_size_t offset, resource_size_t size)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct sg_table *st;
	struct scatterlist *sg;

	GEM_BUG_ON(range_overflows(offset, size, resource_size(&dev_priv->dsm)));

	/*
	 * We hide that we have no struct page backing our stolen object
	 * by wrapping the contiguous physical allocation with a fake
	 * dma mapping in a single scatterlist.
	 */

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (st == NULL)
		return ERR_PTR(-ENOMEM);

	if (sg_alloc_table(st, 1, GFP_KERNEL)) {
		kfree(st);
		return ERR_PTR(-ENOMEM);
	}

	sg = st->sgl;
	sg->offset = 0;
	sg->length = size;

	sg_dma_address(sg) = (dma_addr_t)dev_priv->dsm.start + offset;
	sg_dma_len(sg) = size;

	return st;
}

static int i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj)
{
	struct sg_table *pages =
		i915_pages_create_for_stolen(obj->base.dev,
					     obj->stolen->start,
					     obj->stolen->size);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	__i915_gem_object_set_pages(obj, pages, obj->stolen->size);

	return 0;
}

static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj,
					     struct sg_table *pages)
{
	/* Should only be called from i915_gem_object_release_stolen() */
	sg_free_table(pages);
	kfree(pages);
}

static void
i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct drm_mm_node *stolen = fetch_and_zero(&obj->stolen);

	GEM_BUG_ON(!stolen);

	i915_gem_stolen_remove_node(dev_priv, stolen);
	kfree(stolen);
}

static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = {
	.get_pages = i915_gem_object_get_pages_stolen,
	.put_pages = i915_gem_object_put_pages_stolen,
	.release = i915_gem_object_release_stolen,
};

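/*
 * Wrap an already-reserved stolen drm_mm_node in a GEM object. The object
 * has no struct page backing, so its pages are pinned immediately via the
 * fake scatterlist; on failure the caller still owns the node.
 */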
static struct drm_i915_gem_object *
_i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
			       struct drm_mm_node *stolen)
{
	struct drm_i915_gem_object *obj;
	unsigned int cache_level;
	int err = -ENOMEM;

	obj = i915_gem_object_alloc();
	if (!obj)
		goto err;

	drm_gem_private_object_init(&dev_priv->drm, &obj->base, stolen->size);
	i915_gem_object_init(obj, &i915_gem_object_stolen_ops);

	obj->stolen = stolen;
	obj->read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
	cache_level = HAS_LLC(dev_priv) ? I915_CACHE_LLC : I915_CACHE_NONE;
	i915_gem_object_set_cache_coherency(obj, cache_level);

	err = i915_gem_object_pin_pages(obj);
	if (err)
		goto cleanup;

	return obj;

cleanup:
	i915_gem_object_free(obj);
err:
	return ERR_PTR(err);
}

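/*
 * Allocate a fresh, page-aligned range from the stolen allocator and wrap
 * it in a GEM object. Returns -ENODEV if stolen is unavailable and -EINVAL
 * for a zero-sized request.
 */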
struct drm_i915_gem_object *
i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
			      resource_size_t size)
{
	struct drm_i915_gem_object *obj;
	struct drm_mm_node *stolen;
	int ret;

	if (!drm_mm_initialized(&dev_priv->mm.stolen))
		return ERR_PTR(-ENODEV);

	if (size == 0)
		return ERR_PTR(-EINVAL);

	stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
	if (!stolen)
		return ERR_PTR(-ENOMEM);

	ret = i915_gem_stolen_insert_node(dev_priv, stolen, size, 4096);
	if (ret) {
		obj = ERR_PTR(ret);
		goto err_free;
	}

	obj = _i915_gem_object_create_stolen(dev_priv, stolen);
	if (IS_ERR(obj))
		goto err_remove;

	return obj;

err_remove:
	i915_gem_stolen_remove_node(dev_priv, stolen);
err_free:
	kfree(stolen);
	return obj;
}

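/*
 * Create a GEM object on top of a fixed, pre-existing range of stolen memory
 * (e.g. the firmware framebuffer) at stolen_offset, and optionally bind it
 * at a fixed gtt_offset in the global GTT. Pass I915_GTT_OFFSET_NONE as
 * gtt_offset when only the physical backing is needed.
 */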
struct drm_i915_gem_object *
i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv,
					       resource_size_t stolen_offset,
					       resource_size_t gtt_offset,
					       resource_size_t size)
{
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct drm_i915_gem_object *obj;
	struct drm_mm_node *stolen;
	struct i915_vma *vma;
	int ret;

	if (!drm_mm_initialized(&dev_priv->mm.stolen))
		return ERR_PTR(-ENODEV);

	DRM_DEBUG_DRIVER("creating preallocated stolen object: stolen_offset=%pa, gtt_offset=%pa, size=%pa\n",
			 &stolen_offset, &gtt_offset, &size);

	/* KISS and expect everything to be page-aligned */
	if (WARN_ON(size == 0) ||
	    WARN_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE)) ||
	    WARN_ON(!IS_ALIGNED(stolen_offset, I915_GTT_MIN_ALIGNMENT)))
		return ERR_PTR(-EINVAL);

	stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
	if (!stolen)
		return ERR_PTR(-ENOMEM);

	stolen->start = stolen_offset;
	stolen->size = size;
	mutex_lock(&dev_priv->mm.stolen_lock);
	ret = drm_mm_reserve_node(&dev_priv->mm.stolen, stolen);
	mutex_unlock(&dev_priv->mm.stolen_lock);
	if (ret) {
		DRM_DEBUG_DRIVER("failed to allocate stolen space\n");
		kfree(stolen);
		return ERR_PTR(ret);
	}

	obj = _i915_gem_object_create_stolen(dev_priv, stolen);
	if (IS_ERR(obj)) {
		DRM_DEBUG_DRIVER("failed to allocate stolen object\n");
		i915_gem_stolen_remove_node(dev_priv, stolen);
		kfree(stolen);
		return obj;
	}

	/* Some objects just need physical mem from stolen space */
	if (gtt_offset == I915_GTT_OFFSET_NONE)
		return obj;

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		goto err;

	vma = i915_vma_instance(obj, &ggtt->vm, NULL);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err_pages;
	}

	/*
	 * To simplify the initialisation sequence between KMS and GTT,
	 * we allow construction of the stolen object prior to
	 * setting up the GTT space. The actual reservation will occur
	 * later.
	 */
	mutex_lock(&ggtt->vm.mutex);
	ret = i915_gem_gtt_reserve(&ggtt->vm, &vma->node,
				   size, gtt_offset, obj->cache_level,
				   0);
	if (ret) {
		DRM_DEBUG_DRIVER("failed to allocate stolen GTT space\n");
		mutex_unlock(&ggtt->vm.mutex);
		goto err_pages;
	}

	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));

	GEM_BUG_ON(vma->pages);
	vma->pages = obj->mm.pages;
	atomic_set(&vma->pages_count, I915_VMA_PAGES_ACTIVE);

	set_bit(I915_VMA_GLOBAL_BIND_BIT, __i915_vma_flags(vma));
	__i915_vma_set_map_and_fenceable(vma);

	list_add_tail(&vma->vm_link, &ggtt->vm.bound_list);
	mutex_unlock(&ggtt->vm.mutex);

	GEM_BUG_ON(i915_gem_object_is_shrinkable(obj));
	atomic_inc(&obj->bind_count);

	return obj;

err_pages:
	i915_gem_object_unpin_pages(obj);
err:
	i915_gem_object_put(obj);
	return ERR_PTR(ret);
}