/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Kevin Tian <kevin.tian@intel.com>
 *    Dexuan Cui
 *
 * Contributors:
 *    Pei Zhang <pei.zhang@intel.com>
 *    Min He <min.he@intel.com>
 *    Niu Bing <bing.niu@intel.com>
 *    Yulei Zhang <yulei.zhang@intel.com>
 *    Zhenyu Wang <zhenyuw@linux.intel.com>
 *    Zhi Wang <zhi.a.wang@intel.com>
 *
 */

#include "i915_drv.h"
#include "i915_reg.h"
#include "gt/intel_ggtt_fencing.h"
#include "gvt.h"

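/*
 * Reserve one chunk of the host GGTT for a vGPU. The "low" GM comes from
 * the CPU-mappable aperture (PIN_MAPPABLE), the "high" (hidden) GM from the
 * GPU-only range above it (PIN_HIGH). Both are ordinary drm_mm nodes in the
 * host's GGTT address space, so a range handed to one vGPU can never be
 * given out twice.
 */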
static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm)
{
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gt *gt = gvt->gt;
	unsigned int flags;
	u64 start, end, size;
	struct drm_mm_node *node;
	int ret;

	if (high_gm) {
		node = &vgpu->gm.high_gm_node;
		size = vgpu_hidden_sz(vgpu);
		start = ALIGN(gvt_hidden_gmadr_base(gvt), I915_GTT_PAGE_SIZE);
		end = ALIGN(gvt_hidden_gmadr_end(gvt), I915_GTT_PAGE_SIZE);
		flags = PIN_HIGH;
	} else {
		node = &vgpu->gm.low_gm_node;
		size = vgpu_aperture_sz(vgpu);
		start = ALIGN(gvt_aperture_gmadr_base(gvt), I915_GTT_PAGE_SIZE);
		end = ALIGN(gvt_aperture_gmadr_end(gvt), I915_GTT_PAGE_SIZE);
		flags = PIN_MAPPABLE;
	}

	mutex_lock(&gt->ggtt->vm.mutex);
	mmio_hw_access_pre(gt);
	ret = i915_gem_gtt_insert(&gt->ggtt->vm, NULL, node,
				  size, I915_GTT_PAGE_SIZE,
				  I915_COLOR_UNEVICTABLE,
				  start, end, flags);
	mmio_hw_access_post(gt);
	mutex_unlock(&gt->ggtt->vm.mutex);
	if (ret)
		gvt_err("failed to allocate %s GM space from host\n",
			high_gm ? "high" : "low");

	return ret;
}
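
/*
 * Resulting GGTT partitioning (illustrative; actual sizes and host
 * reservations are configuration dependent):
 *
 *	|<------ aperture (CPU mappable) ------>|<------ hidden (GPU only) ------>|
 *	| host low GM | vGPU low GM nodes ...   | host high GM | vGPU high GM ... |
 */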

static int alloc_vgpu_gm(struct intel_vgpu *vgpu)
{
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gt *gt = gvt->gt;
	int ret;

	ret = alloc_gm(vgpu, false);
	if (ret)
		return ret;

	ret = alloc_gm(vgpu, true);
	if (ret)
		goto out_free_aperture;

	gvt_dbg_core("vgpu%d: alloc low GM start %llx size %llx\n", vgpu->id,
		     vgpu_aperture_offset(vgpu), vgpu_aperture_sz(vgpu));

	gvt_dbg_core("vgpu%d: alloc high GM start %llx size %llx\n", vgpu->id,
		     vgpu_hidden_offset(vgpu), vgpu_hidden_sz(vgpu));

	return 0;
out_free_aperture:
	mutex_lock(&gt->ggtt->vm.mutex);
	drm_mm_remove_node(&vgpu->gm.low_gm_node);
	mutex_unlock(&gt->ggtt->vm.mutex);
	return ret;
}

static void free_vgpu_gm(struct intel_vgpu *vgpu)
{
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gt *gt = gvt->gt;

	mutex_lock(&gt->ggtt->vm.mutex);
	drm_mm_remove_node(&vgpu->gm.low_gm_node);
	drm_mm_remove_node(&vgpu->gm.high_gm_node);
	mutex_unlock(&gt->ggtt->vm.mutex);
}

/**
 * intel_vgpu_write_fence - write fence registers owned by a vGPU
 * @vgpu: vGPU instance
 * @fence: vGPU fence register number
 * @value: fence register value to be written
 *
 * This function is used to write fence registers owned by a vGPU. The vGPU
 * fence register number is translated into the corresponding HW fence
 * register number before the value reaches the hardware.
 */
void intel_vgpu_write_fence(struct intel_vgpu *vgpu,
		u32 fence, u64 value)
{
	struct intel_gvt *gvt = vgpu->gvt;
	struct drm_i915_private *i915 = gvt->gt->i915;
	struct intel_uncore *uncore = gvt->gt->uncore;
	struct i915_fence_reg *reg;
	i915_reg_t fence_reg_lo, fence_reg_hi;

	assert_rpm_wakelock_held(uncore->rpm);

	if (drm_WARN_ON(&i915->drm, fence >= vgpu_fence_sz(vgpu)))
		return;

	reg = vgpu->fence.regs[fence];
	if (drm_WARN_ON(&i915->drm, !reg))
		return;

	fence_reg_lo = FENCE_REG_GEN6_LO(reg->id);
	fence_reg_hi = FENCE_REG_GEN6_HI(reg->id);

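	/*
	 * The fence-valid bit lives in the low dword, so clear it first:
	 * the hi/lo update below can then never be observed by the GPU as
	 * a half-written 64-bit value.
	 */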
	intel_uncore_write(uncore, fence_reg_lo, 0);
	intel_uncore_posting_read(uncore, fence_reg_lo);

	intel_uncore_write(uncore, fence_reg_hi, upper_32_bits(value));
	intel_uncore_write(uncore, fence_reg_lo, lower_32_bits(value));
	intel_uncore_posting_read(uncore, fence_reg_lo);
}

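/*
 * Illustrative use (hypothetical caller sketch, not part of this file): a
 * trapped guest write to a fence register would be forwarded with the
 * vGPU-relative index while an RPM wakeref is held, e.g.
 *
 *	intel_vgpu_write_fence(vgpu, fence_num, guest_fence_value);
 *
 * where fence_num and guest_fence_value come from the emulated access.
 */

/* Writing 0 disables a fence, dropping all of this vGPU's GTT fencing. */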
static void _clear_vgpu_fence(struct intel_vgpu *vgpu)
{
	int i;

	for (i = 0; i < vgpu_fence_sz(vgpu); i++)
		intel_vgpu_write_fence(vgpu, i, 0);
}

static void free_vgpu_fence(struct intel_vgpu *vgpu)
{
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_uncore *uncore = gvt->gt->uncore;
	struct i915_fence_reg *reg;
	intel_wakeref_t wakeref;
	u32 i;

	if (drm_WARN_ON(&gvt->gt->i915->drm, !vgpu_fence_sz(vgpu)))
		return;

	wakeref = intel_runtime_pm_get(uncore->rpm);

	mutex_lock(&gvt->gt->ggtt->vm.mutex);
	_clear_vgpu_fence(vgpu);
	for (i = 0; i < vgpu_fence_sz(vgpu); i++) {
		reg = vgpu->fence.regs[i];
		i915_unreserve_fence(reg);
		vgpu->fence.regs[i] = NULL;
	}
	mutex_unlock(&gvt->gt->ggtt->vm.mutex);

	intel_runtime_pm_put(uncore->rpm, wakeref);
}

static int alloc_vgpu_fence(struct intel_vgpu *vgpu)
{
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_uncore *uncore = gvt->gt->uncore;
	struct i915_fence_reg *reg;
	intel_wakeref_t wakeref;
	int i;

	wakeref = intel_runtime_pm_get(uncore->rpm);

	/* Request fences from host */
	mutex_lock(&gvt->gt->ggtt->vm.mutex);

	for (i = 0; i < vgpu_fence_sz(vgpu); i++) {
		reg = i915_reserve_fence(gvt->gt->ggtt);
		if (IS_ERR(reg))
			goto out_free_fence;

		vgpu->fence.regs[i] = reg;
	}

	_clear_vgpu_fence(vgpu);

	mutex_unlock(&gvt->gt->ggtt->vm.mutex);
	intel_runtime_pm_put(uncore->rpm, wakeref);
	return 0;

out_free_fence:
	gvt_vgpu_err("Failed to alloc fences\n");
	/* Return already-reserved fences to the host on failure */
	for (i = 0; i < vgpu_fence_sz(vgpu); i++) {
		reg = vgpu->fence.regs[i];
		if (!reg)
			continue;
		i915_unreserve_fence(reg);
		vgpu->fence.regs[i] = NULL;
	}
	mutex_unlock(&gvt->gt->ggtt->vm.mutex);
	intel_runtime_pm_put_unchecked(uncore->rpm);
	return -ENOSPC;
}

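/* Accounting only: return the vGPU's quota to the global GM and fence pools. */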
static void free_resource(struct intel_vgpu *vgpu)
{
	struct intel_gvt *gvt = vgpu->gvt;

	gvt->gm.vgpu_allocated_low_gm_size -= vgpu_aperture_sz(vgpu);
	gvt->gm.vgpu_allocated_high_gm_size -= vgpu_hidden_sz(vgpu);
	gvt->fence.vgpu_allocated_fence_num -= vgpu_fence_sz(vgpu);
}

static int alloc_resource(struct intel_vgpu *vgpu,
		const struct intel_vgpu_config *conf)
{
	struct intel_gvt *gvt = vgpu->gvt;
	unsigned long request, avail, max, taken;
	const char *item;

	if (!conf->low_mm || !conf->high_mm || !conf->fence) {
		gvt_vgpu_err("Invalid vGPU creation params\n");
		return -EINVAL;
	}

	item = "low GM space";
	max = gvt_aperture_sz(gvt) - HOST_LOW_GM_SIZE;
	taken = gvt->gm.vgpu_allocated_low_gm_size;
	avail = max - taken;
	request = conf->low_mm;

	if (request > avail)
		goto not_enough_resource;

	vgpu_aperture_sz(vgpu) = ALIGN(request, I915_GTT_PAGE_SIZE);

	item = "high GM space";
	max = gvt_hidden_sz(gvt) - HOST_HIGH_GM_SIZE;
	taken = gvt->gm.vgpu_allocated_high_gm_size;
	avail = max - taken;
	request = conf->high_mm;

	if (request > avail)
		goto not_enough_resource;

	vgpu_hidden_sz(vgpu) = ALIGN(request, I915_GTT_PAGE_SIZE);

	item = "fence";
	max = gvt_fence_sz(gvt) - HOST_FENCE;
	taken = gvt->fence.vgpu_allocated_fence_num;
	avail = max - taken;
	request = conf->fence;

	if (request > avail)
		goto not_enough_resource;

	vgpu_fence_sz(vgpu) = request;

	gvt->gm.vgpu_allocated_low_gm_size += conf->low_mm;
	gvt->gm.vgpu_allocated_high_gm_size += conf->high_mm;
	gvt->fence.vgpu_allocated_fence_num += conf->fence;
	return 0;

not_enough_resource:
	gvt_err("failed to allocate resource %s\n", item);
	gvt_err("request %luMB avail %luMB max %luMB taken %luMB\n",
		BYTES_TO_MB(request), BYTES_TO_MB(avail),
		BYTES_TO_MB(max), BYTES_TO_MB(taken));
	return -ENOSPC;
}
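
/*
 * Worked example with hypothetical numbers: if gvt_aperture_sz() is 256MB
 * and HOST_LOW_GM_SIZE reserves 128MB for the host, max = 128MB. With 96MB
 * already handed to other vGPUs (taken), avail = 32MB, so a conf->low_mm
 * request of 64MB fails with -ENOSPC while a 32MB request succeeds.
 */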

/**
 * intel_vgpu_free_resource() - free HW resource owned by a vGPU
 * @vgpu: a vGPU
 *
 * This function is used to free the HW resource owned by a vGPU.
 */
void intel_vgpu_free_resource(struct intel_vgpu *vgpu)
{
	free_vgpu_gm(vgpu);
	free_vgpu_fence(vgpu);
	free_resource(vgpu);
}

/**
 * intel_vgpu_reset_resource - reset resource state owned by a vGPU
 * @vgpu: a vGPU
 *
 * This function is used to reset resource state owned by a vGPU. Today that
 * amounts to clearing all of the vGPU's fence registers under a runtime PM
 * wakeref.
 */
void intel_vgpu_reset_resource(struct intel_vgpu *vgpu)
{
	struct intel_gvt *gvt = vgpu->gvt;
	intel_wakeref_t wakeref;

	with_intel_runtime_pm(gvt->gt->uncore->rpm, wakeref)
		_clear_vgpu_fence(vgpu);
}

/**
 * intel_vgpu_alloc_resource() - allocate HW resource for a vGPU
 * @vgpu: vGPU
 * @conf: vGPU creation params
 *
 * This function is used to allocate HW resource for a vGPU. User specifies
 * the resource configuration through the creation params.
 *
 * Returns:
 * zero on success, a negative error code on failure.
 */
int intel_vgpu_alloc_resource(struct intel_vgpu *vgpu,
		const struct intel_vgpu_config *conf)
{
	int ret;

	ret = alloc_resource(vgpu, conf);
	if (ret)
		return ret;

	ret = alloc_vgpu_gm(vgpu);
	if (ret)
		goto out_free_resource;

	ret = alloc_vgpu_fence(vgpu);
	if (ret)
		goto out_free_vgpu_gm;

	return 0;

out_free_vgpu_gm:
	free_vgpu_gm(vgpu);
out_free_resource:
	free_resource(vgpu);
	return ret;
}
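
/*
 * Illustrative lifecycle (sketch; the actual callers live in the vGPU
 * create/reset/destroy paths outside this file):
 *
 *	ret = intel_vgpu_alloc_resource(vgpu, conf);	at vGPU creation
 *	if (ret)
 *		return ret;
 *	...
 *	intel_vgpu_reset_resource(vgpu);		on vGPU reset
 *	...
 *	intel_vgpu_free_resource(vgpu);			at vGPU destruction
 */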