// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2015 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * This file implements the vmwgfx context binding manager.
 * The sole reason for having to use this code is that vmware guest
 * backed contexts can be swapped out to their backing mobs by the device
 * at any time, and swapped in again at any time. At swapin time, the device
 * validates the context bindings to make sure they point to valid resources.
 * It's this outside-of-drawcall validation, which can happen at any time,
 * that makes this code necessary.
 *
 * We therefore need to kill any context bindings pointing to a resource
 * when the resource is swapped out. Furthermore, if the vmwgfx driver has
 * swapped out the context we can't swap it in again to kill bindings because
 * of backing mob reservation lockdep violations. So as part of context
 * swapout, we also kill all bindings of the context, so that they are
 * already killed if a resource to which a binding points
 * needs to be swapped out.
 *
 * Note that a resource can be pointed to by bindings from multiple contexts.
 * Therefore we can't easily protect this data with a per-context mutex
 * (unless we use deadlock-safe WW mutexes). So we use a global binding_mutex
 * to protect all binding manager data.
 *
 * Finally, any association between a context and a global resource
 * (surface, shader or even DX query) is conceptually a context binding that
 * needs to be tracked by this code.
 */
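
/*
 * Minimal lifecycle sketch (illustrative only; the real call sites live in
 * the context, resource and execbuf code, not in this file):
 *
 *	cbs = vmw_binding_state_alloc(dev_priv);	// per-context tracker
 *	vmw_binding_add(cbs, &bi, shader_slot, slot);	// start tracking
 *	vmw_binding_res_list_scrub(&res->binding_head);	// resource evicted
 *	vmw_binding_rebind_all(cbs);			// context swapin
 *	vmw_binding_state_free(cbs);
 */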

#include "vmwgfx_drv.h"
#include "vmwgfx_binding.h"
#include "device_include/svga3d_reg.h"

#define VMW_BINDING_RT_BIT     0
#define VMW_BINDING_PS_BIT     1
#define VMW_BINDING_SO_T_BIT   2
#define VMW_BINDING_VB_BIT     3
#define VMW_BINDING_UAV_BIT    4
#define VMW_BINDING_CS_UAV_BIT 5
#define VMW_BINDING_NUM_BITS   6

#define VMW_BINDING_PS_SR_BIT  0

/**
 * struct vmw_ctx_binding_state - per context binding state
 *
 * @dev_priv: Pointer to device private structure.
 * @list: linked list of individual active bindings.
 * @render_targets: Render target bindings.
 * @texture_units: Texture units bindings.
 * @ds_view: Depth-stencil view binding.
 * @so_targets: StreamOutput target bindings.
 * @vertex_buffers: Vertex buffer bindings.
 * @index_buffer: Index buffer binding.
 * @per_shader: Per shader-type bindings.
 * @ua_views: UAV bindings.
 * @so_state: StreamOutput bindings.
 * @dirty: Bitmap tracking per binding-type changes that have not yet
 * been emitted to the device.
 * @dirty_vb: Bitmap tracking individual vertex buffer binding changes that
 * have not yet been emitted to the device.
 * @bind_cmd_buffer: Scratch space used to construct binding commands.
 * @bind_cmd_count: Number of binding command data entries in @bind_cmd_buffer.
 * @bind_first_slot: Used together with @bind_cmd_buffer to indicate the
 * device binding slot of the first command data entry in @bind_cmd_buffer.
 *
 * Note that this structure also provides storage space for the individual
 * struct vmw_ctx_binding objects, so that no dynamic allocation is needed
 * for individual bindings.
 */
struct vmw_ctx_binding_state {
	struct vmw_private *dev_priv;
	struct list_head list;
	struct vmw_ctx_bindinfo_view render_targets[SVGA3D_RT_MAX];
	struct vmw_ctx_bindinfo_tex texture_units[SVGA3D_NUM_TEXTURE_UNITS];
	struct vmw_ctx_bindinfo_view ds_view;
	struct vmw_ctx_bindinfo_so_target so_targets[SVGA3D_DX_MAX_SOTARGETS];
	struct vmw_ctx_bindinfo_vb vertex_buffers[SVGA3D_DX_MAX_VERTEXBUFFERS];
	struct vmw_ctx_bindinfo_ib index_buffer;
	struct vmw_dx_shader_bindings per_shader[SVGA3D_NUM_SHADERTYPE];
	struct vmw_ctx_bindinfo_uav ua_views[VMW_MAX_UAV_BIND_TYPE];
	struct vmw_ctx_bindinfo_so so_state;

	unsigned long dirty;
	DECLARE_BITMAP(dirty_vb, SVGA3D_DX_MAX_VERTEXBUFFERS);

	u32 bind_cmd_buffer[VMW_MAX_VIEW_BINDINGS];
	u32 bind_cmd_count;
	u32 bind_first_slot;
};

static int vmw_binding_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind);
static int vmw_binding_scrub_render_target(struct vmw_ctx_bindinfo *bi,
					   bool rebind);
static int vmw_binding_scrub_texture(struct vmw_ctx_bindinfo *bi, bool rebind);
static int vmw_binding_scrub_cb(struct vmw_ctx_bindinfo *bi, bool rebind);
static int vmw_binding_scrub_dx_rt(struct vmw_ctx_bindinfo *bi, bool rebind);
static int vmw_binding_scrub_sr(struct vmw_ctx_bindinfo *bi, bool rebind);
static int vmw_binding_scrub_so_target(struct vmw_ctx_bindinfo *bi, bool rebind);
static int vmw_binding_emit_dirty(struct vmw_ctx_binding_state *cbs);
static int vmw_binding_scrub_dx_shader(struct vmw_ctx_bindinfo *bi,
				       bool rebind);
static int vmw_binding_scrub_ib(struct vmw_ctx_bindinfo *bi, bool rebind);
static int vmw_binding_scrub_vb(struct vmw_ctx_bindinfo *bi, bool rebind);
static int vmw_binding_scrub_uav(struct vmw_ctx_bindinfo *bi, bool rebind);
static int vmw_binding_scrub_cs_uav(struct vmw_ctx_bindinfo *bi, bool rebind);
static int vmw_binding_scrub_so(struct vmw_ctx_bindinfo *bi, bool rebind);

static void vmw_binding_build_asserts(void) __attribute__ ((unused));

typedef int (*vmw_scrub_func)(struct vmw_ctx_bindinfo *, bool);

/**
 * struct vmw_binding_info - Per binding type information for the binding
 * manager
 *
 * @size: The size of the struct binding derived from a struct vmw_ctx_bindinfo.
 * @offsets: array[shader_slot] of offsets to the array[slot]
 * of struct bindings for the binding type.
 * @scrub_func: Pointer to the scrub function for this binding type.
 *
 * Holds static information to help optimize the binding manager and avoid
 * an excessive amount of switch statements.
 */
struct vmw_binding_info {
	size_t size;
	const size_t *offsets;
	vmw_scrub_func scrub_func;
};

/*
 * A number of static variables that help determine the scrub func and the
 * location of the struct vmw_ctx_bindinfo slots for each binding type.
 */
static const size_t vmw_binding_shader_offsets[] = {
	offsetof(struct vmw_ctx_binding_state, per_shader[0].shader),
	offsetof(struct vmw_ctx_binding_state, per_shader[1].shader),
	offsetof(struct vmw_ctx_binding_state, per_shader[2].shader),
	offsetof(struct vmw_ctx_binding_state, per_shader[3].shader),
	offsetof(struct vmw_ctx_binding_state, per_shader[4].shader),
	offsetof(struct vmw_ctx_binding_state, per_shader[5].shader),
};
static const size_t vmw_binding_rt_offsets[] = {
	offsetof(struct vmw_ctx_binding_state, render_targets),
};
static const size_t vmw_binding_tex_offsets[] = {
	offsetof(struct vmw_ctx_binding_state, texture_units),
};
static const size_t vmw_binding_cb_offsets[] = {
	offsetof(struct vmw_ctx_binding_state, per_shader[0].const_buffers),
	offsetof(struct vmw_ctx_binding_state, per_shader[1].const_buffers),
	offsetof(struct vmw_ctx_binding_state, per_shader[2].const_buffers),
	offsetof(struct vmw_ctx_binding_state, per_shader[3].const_buffers),
	offsetof(struct vmw_ctx_binding_state, per_shader[4].const_buffers),
	offsetof(struct vmw_ctx_binding_state, per_shader[5].const_buffers),
};
static const size_t vmw_binding_dx_ds_offsets[] = {
	offsetof(struct vmw_ctx_binding_state, ds_view),
};
static const size_t vmw_binding_sr_offsets[] = {
	offsetof(struct vmw_ctx_binding_state, per_shader[0].shader_res),
	offsetof(struct vmw_ctx_binding_state, per_shader[1].shader_res),
	offsetof(struct vmw_ctx_binding_state, per_shader[2].shader_res),
	offsetof(struct vmw_ctx_binding_state, per_shader[3].shader_res),
	offsetof(struct vmw_ctx_binding_state, per_shader[4].shader_res),
	offsetof(struct vmw_ctx_binding_state, per_shader[5].shader_res),
};
static const size_t vmw_binding_so_target_offsets[] = {
	offsetof(struct vmw_ctx_binding_state, so_targets),
};
static const size_t vmw_binding_vb_offsets[] = {
	offsetof(struct vmw_ctx_binding_state, vertex_buffers),
};
static const size_t vmw_binding_ib_offsets[] = {
	offsetof(struct vmw_ctx_binding_state, index_buffer),
};
static const size_t vmw_binding_uav_offsets[] = {
	offsetof(struct vmw_ctx_binding_state, ua_views[0].views),
};
static const size_t vmw_binding_cs_uav_offsets[] = {
	offsetof(struct vmw_ctx_binding_state, ua_views[1].views),
};
static const size_t vmw_binding_so_offsets[] = {
	offsetof(struct vmw_ctx_binding_state, so_state),
};
static const struct vmw_binding_info vmw_binding_infos[] = {
	[vmw_ctx_binding_shader] = {
		.size = sizeof(struct vmw_ctx_bindinfo_shader),
		.offsets = vmw_binding_shader_offsets,
		.scrub_func = vmw_binding_scrub_shader},
	[vmw_ctx_binding_rt] = {
		.size = sizeof(struct vmw_ctx_bindinfo_view),
		.offsets = vmw_binding_rt_offsets,
		.scrub_func = vmw_binding_scrub_render_target},
	[vmw_ctx_binding_tex] = {
		.size = sizeof(struct vmw_ctx_bindinfo_tex),
		.offsets = vmw_binding_tex_offsets,
		.scrub_func = vmw_binding_scrub_texture},
	[vmw_ctx_binding_cb] = {
		.size = sizeof(struct vmw_ctx_bindinfo_cb),
		.offsets = vmw_binding_cb_offsets,
		.scrub_func = vmw_binding_scrub_cb},
	[vmw_ctx_binding_dx_shader] = {
		.size = sizeof(struct vmw_ctx_bindinfo_shader),
		.offsets = vmw_binding_shader_offsets,
		.scrub_func = vmw_binding_scrub_dx_shader},
	[vmw_ctx_binding_dx_rt] = {
		.size = sizeof(struct vmw_ctx_bindinfo_view),
		.offsets = vmw_binding_rt_offsets,
		.scrub_func = vmw_binding_scrub_dx_rt},
	[vmw_ctx_binding_sr] = {
		.size = sizeof(struct vmw_ctx_bindinfo_view),
		.offsets = vmw_binding_sr_offsets,
		.scrub_func = vmw_binding_scrub_sr},
	[vmw_ctx_binding_ds] = {
		.size = sizeof(struct vmw_ctx_bindinfo_view),
		.offsets = vmw_binding_dx_ds_offsets,
		.scrub_func = vmw_binding_scrub_dx_rt},
	[vmw_ctx_binding_so_target] = {
		.size = sizeof(struct vmw_ctx_bindinfo_so_target),
		.offsets = vmw_binding_so_target_offsets,
		.scrub_func = vmw_binding_scrub_so_target},
	[vmw_ctx_binding_vb] = {
		.size = sizeof(struct vmw_ctx_bindinfo_vb),
		.offsets = vmw_binding_vb_offsets,
		.scrub_func = vmw_binding_scrub_vb},
	[vmw_ctx_binding_ib] = {
		.size = sizeof(struct vmw_ctx_bindinfo_ib),
		.offsets = vmw_binding_ib_offsets,
		.scrub_func = vmw_binding_scrub_ib},
	[vmw_ctx_binding_uav] = {
		.size = sizeof(struct vmw_ctx_bindinfo_view),
		.offsets = vmw_binding_uav_offsets,
		.scrub_func = vmw_binding_scrub_uav},
	[vmw_ctx_binding_cs_uav] = {
		.size = sizeof(struct vmw_ctx_bindinfo_view),
		.offsets = vmw_binding_cs_uav_offsets,
		.scrub_func = vmw_binding_scrub_cs_uav},
	[vmw_ctx_binding_so] = {
		.size = sizeof(struct vmw_ctx_bindinfo_so),
		.offsets = vmw_binding_so_offsets,
		.scrub_func = vmw_binding_scrub_so},
};

/**
 * vmw_cbs_context - Return a pointer to the context resource of a
 * context binding state tracker.
 *
 * @cbs: The context binding state tracker.
 *
 * Provided there are any active bindings, this function will return an
 * unreferenced pointer to the context resource that owns the context
 * binding state tracker. If there are no active bindings, this function
 * will return NULL. Note that the caller must somehow ensure that a reference
 * is held on the context resource prior to calling this function.
 */
static const struct vmw_resource *
vmw_cbs_context(const struct vmw_ctx_binding_state *cbs)
{
	if (list_empty(&cbs->list))
		return NULL;

	return list_first_entry(&cbs->list, struct vmw_ctx_bindinfo,
				ctx_list)->ctx;
}

/**
 * vmw_binding_loc - determine the struct vmw_ctx_bindinfo slot location.
 *
 * @cbs: Pointer to a struct vmw_ctx_binding state which holds the slot.
 * @bt: The binding type.
 * @shader_slot: The shader slot of the binding. If none, then set to 0.
 * @slot: The slot of the binding.
 */
static struct vmw_ctx_bindinfo *
vmw_binding_loc(struct vmw_ctx_binding_state *cbs,
		enum vmw_ctx_binding_type bt, u32 shader_slot, u32 slot)
{
	const struct vmw_binding_info *b = &vmw_binding_infos[bt];
	size_t offset = b->offsets[shader_slot] + b->size*slot;

	return (struct vmw_ctx_bindinfo *)((u8 *) cbs + offset);
}
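
/*
 * Example of the slot arithmetic above (values purely illustrative): for
 * bt == vmw_ctx_binding_cb, shader_slot == 1 and slot == 2, the returned
 * pointer is
 *
 *	(u8 *)cbs + offsetof(struct vmw_ctx_binding_state,
 *			     per_shader[1].const_buffers)
 *		  + 2 * sizeof(struct vmw_ctx_bindinfo_cb)
 *
 * i.e. the third constant-buffer slot of the second shader type, resolved
 * with table lookups instead of a switch on the binding type.
 */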

/**
 * vmw_binding_drop: Stop tracking a context binding
 *
 * @bi: Pointer to binding tracker storage.
 *
 * Stops tracking a context binding, and re-initializes its storage.
 * Typically used when the context binding is replaced with a binding to
 * another (or the same, for that matter) resource.
 */
static void vmw_binding_drop(struct vmw_ctx_bindinfo *bi)
{
	list_del(&bi->ctx_list);
	if (!list_empty(&bi->res_list))
		list_del(&bi->res_list);
	bi->ctx = NULL;
}

/**
 * vmw_binding_add: Start tracking a context binding
 *
 * @cbs: Pointer to the context binding state tracker.
 * @bi: Information about the binding to track.
 * @shader_slot: The shader slot of the binding.
 * @slot: The slot of the binding.
 *
 * Starts tracking the binding in the context binding
 * state structure @cbs.
 */
void vmw_binding_add(struct vmw_ctx_binding_state *cbs,
		     const struct vmw_ctx_bindinfo *bi,
		     u32 shader_slot, u32 slot)
{
	struct vmw_ctx_bindinfo *loc =
		vmw_binding_loc(cbs, bi->bt, shader_slot, slot);
	const struct vmw_binding_info *b = &vmw_binding_infos[bi->bt];

	if (loc->ctx != NULL)
		vmw_binding_drop(loc);

	memcpy(loc, bi, b->size);
	loc->scrubbed = false;
	list_add(&loc->ctx_list, &cbs->list);
	INIT_LIST_HEAD(&loc->res_list);
}
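
/*
 * Minimal usage sketch (hypothetical caller; the field names are those used
 * by the bindinfo structs elsewhere in this file):
 *
 *	struct vmw_ctx_bindinfo_tex binding;
 *
 *	binding.bi.ctx = ctx_res;		// owning context resource
 *	binding.bi.res = tex_res;		// bound texture resource
 *	binding.bi.bt = vmw_ctx_binding_tex;
 *	binding.texture_stage = stage;
 *	vmw_binding_add(cbs, &binding.bi, 0, stage);
 *
 * @bi is copied into the per-context storage, so callers may build it on
 * the stack.
 */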

/**
 * vmw_binding_cb_offset_update: Update the offset of a cb binding
 *
 * @cbs: Pointer to the context binding state tracker.
 * @shader_slot: The shader slot of the binding.
 * @slot: The slot of the binding.
 * @offsetInBytes: The new offset of the binding.
 *
 * Updates the offset of an existing cb binding in the context binding
 * state structure @cbs.
 */
void vmw_binding_cb_offset_update(struct vmw_ctx_binding_state *cbs,
				  u32 shader_slot, u32 slot, u32 offsetInBytes)
{
	struct vmw_ctx_bindinfo *loc =
		vmw_binding_loc(cbs, vmw_ctx_binding_cb, shader_slot, slot);
	struct vmw_ctx_bindinfo_cb *loc_cb =
		(struct vmw_ctx_bindinfo_cb *)((u8 *) loc);

	loc_cb->offset = offsetInBytes;
}

/**
 * vmw_binding_add_uav_index - Add UAV index for tracking.
 * @cbs: Pointer to the context binding state tracker.
 * @slot: UAV type to which to bind this index.
 * @index: The splice index to track.
 */
void vmw_binding_add_uav_index(struct vmw_ctx_binding_state *cbs, uint32 slot,
			       uint32 index)
{
	cbs->ua_views[slot].index = index;
}

/**
 * vmw_binding_transfer: Transfer a context binding tracking entry.
 *
 * @cbs: Pointer to the persistent context binding state tracker.
 * @from: Staged binding info built during execbuf.
 * @bi: Information about the binding to track.
 */
static void vmw_binding_transfer(struct vmw_ctx_binding_state *cbs,
				 const struct vmw_ctx_binding_state *from,
				 const struct vmw_ctx_bindinfo *bi)
{
	size_t offset = (unsigned long)bi - (unsigned long)from;
	struct vmw_ctx_bindinfo *loc = (struct vmw_ctx_bindinfo *)
		((unsigned long) cbs + offset);

	if (loc->ctx != NULL) {
		WARN_ON(bi->scrubbed);

		vmw_binding_drop(loc);
	}

	if (bi->res != NULL) {
		memcpy(loc, bi, vmw_binding_infos[bi->bt].size);
		list_add_tail(&loc->ctx_list, &cbs->list);
		list_add_tail(&loc->res_list, &loc->res->binding_head);
	}
}

/**
 * vmw_binding_state_kill - Kill all bindings associated with a
 * struct vmw_ctx_binding state structure, and re-initialize the structure.
 *
 * @cbs: Pointer to the context binding state tracker.
 *
 * Emits commands to scrub all bindings associated with the
 * context binding state tracker. Then re-initializes the whole structure.
 */
void vmw_binding_state_kill(struct vmw_ctx_binding_state *cbs)
{
	struct vmw_ctx_bindinfo *entry, *next;

	vmw_binding_state_scrub(cbs);
	list_for_each_entry_safe(entry, next, &cbs->list, ctx_list)
		vmw_binding_drop(entry);
}

/**
 * vmw_binding_state_scrub - Scrub all bindings associated with a
 * struct vmw_ctx_binding state structure.
 *
 * @cbs: Pointer to the context binding state tracker.
 *
 * Emits commands to scrub all bindings associated with the
 * context binding state tracker.
 */
void vmw_binding_state_scrub(struct vmw_ctx_binding_state *cbs)
{
	struct vmw_ctx_bindinfo *entry;

	list_for_each_entry(entry, &cbs->list, ctx_list) {
		if (!entry->scrubbed) {
			(void) vmw_binding_infos[entry->bt].scrub_func
				(entry, false);
			entry->scrubbed = true;
		}
	}

	(void) vmw_binding_emit_dirty(cbs);
}

/**
 * vmw_binding_res_list_kill - Kill all bindings on a
 * resource binding list
 *
 * @head: list head of resource binding list
 *
 * Kills all bindings associated with a specific resource. Typically
 * called before the resource is destroyed.
 */
void vmw_binding_res_list_kill(struct list_head *head)
{
	struct vmw_ctx_bindinfo *entry, *next;

	vmw_binding_res_list_scrub(head);
	list_for_each_entry_safe(entry, next, head, res_list)
		vmw_binding_drop(entry);
}

/**
 * vmw_binding_res_list_scrub - Scrub all bindings on a
 * resource binding list
 *
 * @head: list head of resource binding list
 *
 * Scrub all bindings associated with a specific resource. Typically
 * called before the resource is evicted.
 */
void vmw_binding_res_list_scrub(struct list_head *head)
{
	struct vmw_ctx_bindinfo *entry;

	list_for_each_entry(entry, head, res_list) {
		if (!entry->scrubbed) {
			(void) vmw_binding_infos[entry->bt].scrub_func
				(entry, false);
			entry->scrubbed = true;
		}
	}

	list_for_each_entry(entry, head, res_list) {
		struct vmw_ctx_binding_state *cbs =
			vmw_context_binding_state(entry->ctx);

		(void) vmw_binding_emit_dirty(cbs);
	}
}

/**
 * vmw_binding_state_commit - Commit staged binding info
 *
 * @to: Persistent binding info area to copy into.
 * @from: Staged binding info built during execbuf.
 *
 * Transfers binding info from a temporary structure
 * (typically used by execbuf) to the persistent
 * structure in the context. This can be done once commands have been
 * submitted to hardware.
 */
void vmw_binding_state_commit(struct vmw_ctx_binding_state *to,
			      struct vmw_ctx_binding_state *from)
{
	struct vmw_ctx_bindinfo *entry, *next;

	list_for_each_entry_safe(entry, next, &from->list, ctx_list) {
		vmw_binding_transfer(to, from, entry);
		vmw_binding_drop(entry);
	}

	/* Also transfer uav splice indices */
	to->ua_views[0].index = from->ua_views[0].index;
	to->ua_views[1].index = from->ua_views[1].index;
}
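
/*
 * Sketch of the commit flow (illustrative only; the real sequence is driven
 * by the execbuf code):
 *
 *	// build bindings into a staged tracker while validating commands
 *	vmw_binding_add(staged_cbs, &bi, shader_slot, slot);
 *	// ... submit the command buffer to the device ...
 *	// then make the staged bindings the context's persistent state
 *	vmw_binding_state_commit(vmw_context_binding_state(ctx), staged_cbs);
 */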

/**
 * vmw_binding_rebind_all - Rebind all scrubbed bindings of a context
 *
 * @cbs: Pointer to the context binding state tracker.
 *
 * Walks through the context binding list and rebinds all scrubbed
 * resources.
 */
int vmw_binding_rebind_all(struct vmw_ctx_binding_state *cbs)
{
	struct vmw_ctx_bindinfo *entry;
	int ret;

	list_for_each_entry(entry, &cbs->list, ctx_list) {
		if (likely(!entry->scrubbed))
			continue;

		if (entry->res == NULL || entry->res->id == SVGA3D_INVALID_ID)
			continue;

		ret = vmw_binding_infos[entry->bt].scrub_func(entry, true);
		if (unlikely(ret != 0))
			return ret;

		entry->scrubbed = false;
	}

	return vmw_binding_emit_dirty(cbs);
}

/**
 * vmw_binding_scrub_shader - scrub a shader binding from a context.
 *
 * @bi: single binding information.
 * @rebind: Whether to issue a bind instead of scrub command.
 */
static int vmw_binding_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind)
{
	struct vmw_ctx_bindinfo_shader *binding =
		container_of(bi, typeof(*binding), bi);
	struct vmw_private *dev_priv = bi->ctx->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetShader body;
	} *cmd;

	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL))
		return -ENOMEM;

	cmd->header.id = SVGA_3D_CMD_SET_SHADER;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = bi->ctx->id;
	cmd->body.type = binding->shader_slot + SVGA3D_SHADERTYPE_MIN;
	cmd->body.shid = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
	vmw_cmd_commit(dev_priv, sizeof(*cmd));

	return 0;
}

/**
 * vmw_binding_scrub_render_target - scrub a render target binding
 * from a context.
 *
 * @bi: single binding information.
 * @rebind: Whether to issue a bind instead of scrub command.
 */
static int vmw_binding_scrub_render_target(struct vmw_ctx_bindinfo *bi,
					   bool rebind)
{
	struct vmw_ctx_bindinfo_view *binding =
		container_of(bi, typeof(*binding), bi);
	struct vmw_private *dev_priv = bi->ctx->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetRenderTarget body;
	} *cmd;

	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL))
		return -ENOMEM;

	cmd->header.id = SVGA_3D_CMD_SETRENDERTARGET;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = bi->ctx->id;
	cmd->body.type = binding->slot;
	cmd->body.target.sid = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
	cmd->body.target.face = 0;
	cmd->body.target.mipmap = 0;
	vmw_cmd_commit(dev_priv, sizeof(*cmd));

	return 0;
}

/**
 * vmw_binding_scrub_texture - scrub a texture binding from a context.
 *
 * @bi: single binding information.
 * @rebind: Whether to issue a bind instead of scrub command.
 *
 * TODO: Possibly complement this function with a function that takes
 * a list of texture bindings and combines them to a single command.
 */
static int vmw_binding_scrub_texture(struct vmw_ctx_bindinfo *bi,
				     bool rebind)
{
	struct vmw_ctx_bindinfo_tex *binding =
		container_of(bi, typeof(*binding), bi);
	struct vmw_private *dev_priv = bi->ctx->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		struct {
			SVGA3dCmdSetTextureState c;
			SVGA3dTextureState s1;
		} body;
	} *cmd;

	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL))
		return -ENOMEM;

	cmd->header.id = SVGA_3D_CMD_SETTEXTURESTATE;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.c.cid = bi->ctx->id;
	cmd->body.s1.stage = binding->texture_stage;
	cmd->body.s1.name = SVGA3D_TS_BIND_TEXTURE;
	cmd->body.s1.value = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
	vmw_cmd_commit(dev_priv, sizeof(*cmd));

	return 0;
}

/**
 * vmw_binding_scrub_dx_shader - scrub a dx shader binding from a context.
 *
 * @bi: single binding information.
 * @rebind: Whether to issue a bind instead of scrub command.
 */
static int vmw_binding_scrub_dx_shader(struct vmw_ctx_bindinfo *bi, bool rebind)
{
	struct vmw_ctx_bindinfo_shader *binding =
		container_of(bi, typeof(*binding), bi);
	struct vmw_private *dev_priv = bi->ctx->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetShader body;
	} *cmd;

	cmd = VMW_CMD_CTX_RESERVE(dev_priv, sizeof(*cmd), bi->ctx->id);
	if (unlikely(cmd == NULL))
		return -ENOMEM;

	cmd->header.id = SVGA_3D_CMD_DX_SET_SHADER;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.type = binding->shader_slot + SVGA3D_SHADERTYPE_MIN;
	cmd->body.shaderId = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
	vmw_cmd_commit(dev_priv, sizeof(*cmd));

	return 0;
}

/**
 * vmw_binding_scrub_cb - scrub a constant buffer binding from a context.
 *
 * @bi: single binding information.
 * @rebind: Whether to issue a bind instead of scrub command.
 */
static int vmw_binding_scrub_cb(struct vmw_ctx_bindinfo *bi, bool rebind)
{
	struct vmw_ctx_bindinfo_cb *binding =
		container_of(bi, typeof(*binding), bi);
	struct vmw_private *dev_priv = bi->ctx->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetSingleConstantBuffer body;
	} *cmd;

	cmd = VMW_CMD_CTX_RESERVE(dev_priv, sizeof(*cmd), bi->ctx->id);
	if (unlikely(cmd == NULL))
		return -ENOMEM;

	cmd->header.id = SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.slot = binding->slot;
	cmd->body.type = binding->shader_slot + SVGA3D_SHADERTYPE_MIN;
	if (rebind) {
		cmd->body.offsetInBytes = binding->offset;
		cmd->body.sizeInBytes = binding->size;
		cmd->body.sid = bi->res->id;
	} else {
		cmd->body.offsetInBytes = 0;
		cmd->body.sizeInBytes = 0;
		cmd->body.sid = SVGA3D_INVALID_ID;
	}
	vmw_cmd_commit(dev_priv, sizeof(*cmd));

	return 0;
}

/**
 * vmw_collect_view_ids - Build view id data for a view binding command
 * without checking which bindings actually need to be emitted
 *
 * @cbs: Pointer to the context's struct vmw_ctx_binding_state
 * @biv: Pointer to where the binding info array is stored in @cbs
 * @max_num: Maximum number of entries in the @biv array.
 *
 * Scans the @biv array for bindings and builds a buffer of view id data.
 * Stops at the first non-existing binding in the @biv array.
 * On output, @cbs->bind_cmd_count contains the number of bindings to be
 * emitted, @cbs->bind_first_slot is set to zero, and @cbs->bind_cmd_buffer
 * contains the command data.
 */
static void vmw_collect_view_ids(struct vmw_ctx_binding_state *cbs,
				 const struct vmw_ctx_bindinfo_view *biv,
				 u32 max_num)
{
	unsigned long i;

	cbs->bind_cmd_count = 0;
	cbs->bind_first_slot = 0;

	for (i = 0; i < max_num; ++i, ++biv) {
		if (!biv->bi.ctx)
			break;

		cbs->bind_cmd_buffer[cbs->bind_cmd_count++] =
			((biv->bi.scrubbed) ?
			 SVGA3D_INVALID_ID : biv->bi.res->id);
	}
}

/**
 * vmw_collect_dirty_view_ids - Build view id data for a view binding command
 *
 * @cbs: Pointer to the context's struct vmw_ctx_binding_state
 * @bi: Pointer to where the binding info array is stored in @cbs
 * @dirty: Bitmap indicating which bindings need to be emitted.
 * @max_num: Maximum number of entries in the @bi array.
 *
 * Scans the @bi array for bindings that need to be emitted and
 * builds a buffer of view id data.
 * On output, @cbs->bind_cmd_count contains the number of bindings to be
 * emitted, @cbs->bind_first_slot indicates the index of the first emitted
 * binding, and @cbs->bind_cmd_buffer contains the command data.
 */
static void vmw_collect_dirty_view_ids(struct vmw_ctx_binding_state *cbs,
				       const struct vmw_ctx_bindinfo *bi,
				       unsigned long *dirty,
				       u32 max_num)
{
	const struct vmw_ctx_bindinfo_view *biv =
		container_of(bi, struct vmw_ctx_bindinfo_view, bi);
	unsigned long i, next_bit;

	cbs->bind_cmd_count = 0;
	i = find_first_bit(dirty, max_num);
	next_bit = i;
	cbs->bind_first_slot = i;

	biv += i;
	for (; i < max_num; ++i, ++biv) {
		cbs->bind_cmd_buffer[cbs->bind_cmd_count++] =
			((!biv->bi.ctx || biv->bi.scrubbed) ?
			 SVGA3D_INVALID_ID : biv->bi.res->id);

		if (next_bit == i) {
			next_bit = find_next_bit(dirty, max_num, i + 1);
			if (next_bit >= max_num)
				break;
		}
	}
}
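
/*
 * Worked example of the collection above: with max_num == 8 and bits 2 and 4
 * set in the dirty bitmap, the loop emits one contiguous run covering slots
 * 2..4, so a single command can carry all dirty slots:
 *
 *	cbs->bind_first_slot == 2
 *	cbs->bind_cmd_count == 3	// slot 3 is re-emitted unchanged
 */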

/**
 * vmw_emit_set_sr - Issue delayed DX shader resource binding commands
 *
 * @cbs: Pointer to the context's struct vmw_ctx_binding_state
 * @shader_slot: The shader slot of the binding.
 */
static int vmw_emit_set_sr(struct vmw_ctx_binding_state *cbs,
			   int shader_slot)
{
	const struct vmw_ctx_bindinfo *loc =
		&cbs->per_shader[shader_slot].shader_res[0].bi;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetShaderResources body;
	} *cmd;
	size_t cmd_size, view_id_size;
	const struct vmw_resource *ctx = vmw_cbs_context(cbs);

	vmw_collect_dirty_view_ids(cbs, loc,
				   cbs->per_shader[shader_slot].dirty_sr,
				   SVGA3D_DX_MAX_SRVIEWS);
	if (cbs->bind_cmd_count == 0)
		return 0;

	view_id_size = cbs->bind_cmd_count*sizeof(uint32);
	cmd_size = sizeof(*cmd) + view_id_size;
	cmd = VMW_CMD_CTX_RESERVE(ctx->dev_priv, cmd_size, ctx->id);
	if (unlikely(cmd == NULL))
		return -ENOMEM;

	cmd->header.id = SVGA_3D_CMD_DX_SET_SHADER_RESOURCES;
	cmd->header.size = sizeof(cmd->body) + view_id_size;
	cmd->body.type = shader_slot + SVGA3D_SHADERTYPE_MIN;
	cmd->body.startView = cbs->bind_first_slot;

	memcpy(&cmd[1], cbs->bind_cmd_buffer, view_id_size);

	vmw_cmd_commit(ctx->dev_priv, cmd_size);
	bitmap_clear(cbs->per_shader[shader_slot].dirty_sr,
		     cbs->bind_first_slot, cbs->bind_cmd_count);

	return 0;
}

/**
 * vmw_emit_set_rt - Issue delayed DX rendertarget binding commands
 *
 * @cbs: Pointer to the context's struct vmw_ctx_binding_state
 */
static int vmw_emit_set_rt(struct vmw_ctx_binding_state *cbs)
{
	const struct vmw_ctx_bindinfo_view *loc = &cbs->render_targets[0];
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetRenderTargets body;
	} *cmd;
	size_t cmd_size, view_id_size;
	const struct vmw_resource *ctx = vmw_cbs_context(cbs);

	vmw_collect_view_ids(cbs, loc, SVGA3D_DX_MAX_RENDER_TARGETS);
	view_id_size = cbs->bind_cmd_count*sizeof(uint32);
	cmd_size = sizeof(*cmd) + view_id_size;
	cmd = VMW_CMD_CTX_RESERVE(ctx->dev_priv, cmd_size, ctx->id);
	if (unlikely(cmd == NULL))
		return -ENOMEM;

	cmd->header.id = SVGA_3D_CMD_DX_SET_RENDERTARGETS;
	cmd->header.size = sizeof(cmd->body) + view_id_size;

	if (cbs->ds_view.bi.ctx && !cbs->ds_view.bi.scrubbed)
		cmd->body.depthStencilViewId = cbs->ds_view.bi.res->id;
	else
		cmd->body.depthStencilViewId = SVGA3D_INVALID_ID;

	memcpy(&cmd[1], cbs->bind_cmd_buffer, view_id_size);

	vmw_cmd_commit(ctx->dev_priv, cmd_size);

	return 0;
}

/**
 * vmw_collect_so_targets - Build SVGA3dSoTarget data for a binding command
 * without checking which bindings actually need to be emitted
 *
 * @cbs: Pointer to the context's struct vmw_ctx_binding_state
 * @biso: Pointer to where the binding info array is stored in @cbs
 * @max_num: Maximum number of entries in the @biso array.
 *
 * Scans the @biso array for bindings and builds a buffer of SVGA3dSoTarget
 * data. Stops at the first non-existing binding in the @biso array.
 * On output, @cbs->bind_cmd_count contains the number of bindings to be
 * emitted, @cbs->bind_first_slot is set to zero, and @cbs->bind_cmd_buffer
 * contains the command data.
 */
static void vmw_collect_so_targets(struct vmw_ctx_binding_state *cbs,
				   const struct vmw_ctx_bindinfo_so_target *biso,
				   u32 max_num)
{
	unsigned long i;
	SVGA3dSoTarget *so_buffer = (SVGA3dSoTarget *) cbs->bind_cmd_buffer;

	cbs->bind_cmd_count = 0;
	cbs->bind_first_slot = 0;

	for (i = 0; i < max_num; ++i, ++biso, ++so_buffer,
		    ++cbs->bind_cmd_count) {
		if (!biso->bi.ctx)
			break;

		if (!biso->bi.scrubbed) {
			so_buffer->sid = biso->bi.res->id;
			so_buffer->offset = biso->offset;
			so_buffer->sizeInBytes = biso->size;
		} else {
			so_buffer->sid = SVGA3D_INVALID_ID;
			so_buffer->offset = 0;
			so_buffer->sizeInBytes = 0;
		}
	}
}

/**
 * vmw_emit_set_so_target - Issue delayed streamout binding commands
 *
 * @cbs: Pointer to the context's struct vmw_ctx_binding_state
 */
static int vmw_emit_set_so_target(struct vmw_ctx_binding_state *cbs)
{
	const struct vmw_ctx_bindinfo_so_target *loc = &cbs->so_targets[0];
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetSOTargets body;
	} *cmd;
	size_t cmd_size, so_target_size;
	const struct vmw_resource *ctx = vmw_cbs_context(cbs);

	vmw_collect_so_targets(cbs, loc, SVGA3D_DX_MAX_SOTARGETS);
	if (cbs->bind_cmd_count == 0)
		return 0;

	so_target_size = cbs->bind_cmd_count*sizeof(SVGA3dSoTarget);
	cmd_size = sizeof(*cmd) + so_target_size;
	cmd = VMW_CMD_CTX_RESERVE(ctx->dev_priv, cmd_size, ctx->id);
	if (unlikely(cmd == NULL))
		return -ENOMEM;

	cmd->header.id = SVGA_3D_CMD_DX_SET_SOTARGETS;
	cmd->header.size = sizeof(cmd->body) + so_target_size;
	memcpy(&cmd[1], cbs->bind_cmd_buffer, so_target_size);

	vmw_cmd_commit(ctx->dev_priv, cmd_size);

	return 0;
}

/**
 * vmw_binding_emit_dirty_ps - Issue delayed per shader binding commands
 *
 * @cbs: Pointer to the context's struct vmw_ctx_binding_state
 */
static int vmw_binding_emit_dirty_ps(struct vmw_ctx_binding_state *cbs)
{
	struct vmw_dx_shader_bindings *sb = &cbs->per_shader[0];
	u32 i;
	int ret = 0;

	for (i = 0; i < SVGA3D_NUM_SHADERTYPE_DX10; ++i, ++sb) {
		if (!test_bit(VMW_BINDING_PS_SR_BIT, &sb->dirty))
			continue;

		ret = vmw_emit_set_sr(cbs, i);
		if (ret)
			break;

		__clear_bit(VMW_BINDING_PS_SR_BIT, &sb->dirty);
	}

	/* Propagate any emit failure to the caller instead of dropping it. */
	return ret;
}

/**
 * vmw_collect_dirty_vbs - Build SVGA3dVertexBuffer data for a
 * SVGA3dCmdDXSetVertexBuffers command
 *
 * @cbs: Pointer to the context's struct vmw_ctx_binding_state
 * @bi: Pointer to where the binding info array is stored in @cbs
 * @dirty: Bitmap indicating which bindings need to be emitted.
 * @max_num: Maximum number of entries in the @bi array.
 *
 * Scans the @bi array for bindings that need to be emitted and
 * builds a buffer of SVGA3dVertexBuffer data.
 * On output, @cbs->bind_cmd_count contains the number of bindings to be
 * emitted, @cbs->bind_first_slot indicates the index of the first emitted
 * binding, and @cbs->bind_cmd_buffer contains the command data.
 */
static void vmw_collect_dirty_vbs(struct vmw_ctx_binding_state *cbs,
				  const struct vmw_ctx_bindinfo *bi,
				  unsigned long *dirty,
				  u32 max_num)
{
	const struct vmw_ctx_bindinfo_vb *biv =
		container_of(bi, struct vmw_ctx_bindinfo_vb, bi);
	unsigned long i, next_bit;
	SVGA3dVertexBuffer *vbs = (SVGA3dVertexBuffer *) &cbs->bind_cmd_buffer;

	cbs->bind_cmd_count = 0;
	i = find_first_bit(dirty, max_num);
	next_bit = i;
	cbs->bind_first_slot = i;

	biv += i;
	for (; i < max_num; ++i, ++biv, ++vbs) {
		if (!biv->bi.ctx || biv->bi.scrubbed) {
			vbs->sid = SVGA3D_INVALID_ID;
			vbs->stride = 0;
			vbs->offset = 0;
		} else {
			vbs->sid = biv->bi.res->id;
			vbs->stride = biv->stride;
			vbs->offset = biv->offset;
		}
		cbs->bind_cmd_count++;
		if (next_bit == i) {
			next_bit = find_next_bit(dirty, max_num, i + 1);
			if (next_bit >= max_num)
				break;
		}
	}
}

/**
 * vmw_emit_set_vb - Issue delayed vertex buffer binding commands
 *
 * @cbs: Pointer to the context's struct vmw_ctx_binding_state
 */
static int vmw_emit_set_vb(struct vmw_ctx_binding_state *cbs)
{
	const struct vmw_ctx_bindinfo *loc =
		&cbs->vertex_buffers[0].bi;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetVertexBuffers body;
	} *cmd;
	size_t cmd_size, set_vb_size;
	const struct vmw_resource *ctx = vmw_cbs_context(cbs);

	vmw_collect_dirty_vbs(cbs, loc, cbs->dirty_vb,
			      SVGA3D_DX_MAX_VERTEXBUFFERS);
	if (cbs->bind_cmd_count == 0)
		return 0;

	set_vb_size = cbs->bind_cmd_count*sizeof(SVGA3dVertexBuffer);
	cmd_size = sizeof(*cmd) + set_vb_size;
	cmd = VMW_CMD_CTX_RESERVE(ctx->dev_priv, cmd_size, ctx->id);
	if (unlikely(cmd == NULL))
		return -ENOMEM;

	cmd->header.id = SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS;
	cmd->header.size = sizeof(cmd->body) + set_vb_size;
	cmd->body.startBuffer = cbs->bind_first_slot;

	memcpy(&cmd[1], cbs->bind_cmd_buffer, set_vb_size);

	vmw_cmd_commit(ctx->dev_priv, cmd_size);
	bitmap_clear(cbs->dirty_vb,
		     cbs->bind_first_slot, cbs->bind_cmd_count);

	return 0;
}

static int vmw_emit_set_uav(struct vmw_ctx_binding_state *cbs)
{
	const struct vmw_ctx_bindinfo_view *loc = &cbs->ua_views[0].views[0];
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetUAViews body;
	} *cmd;
	size_t cmd_size, view_id_size;
	const struct vmw_resource *ctx = vmw_cbs_context(cbs);

	vmw_collect_view_ids(cbs, loc, vmw_max_num_uavs(cbs->dev_priv));
	view_id_size = cbs->bind_cmd_count*sizeof(uint32);
	cmd_size = sizeof(*cmd) + view_id_size;
	cmd = VMW_CMD_CTX_RESERVE(ctx->dev_priv, cmd_size, ctx->id);
	if (!cmd)
		return -ENOMEM;

	cmd->header.id = SVGA_3D_CMD_DX_SET_UA_VIEWS;
	cmd->header.size = sizeof(cmd->body) + view_id_size;

	/* Splice index is specified by user-space */
	cmd->body.uavSpliceIndex = cbs->ua_views[0].index;

	memcpy(&cmd[1], cbs->bind_cmd_buffer, view_id_size);

	vmw_cmd_commit(ctx->dev_priv, cmd_size);

	return 0;
}

static int vmw_emit_set_cs_uav(struct vmw_ctx_binding_state *cbs)
{
	const struct vmw_ctx_bindinfo_view *loc = &cbs->ua_views[1].views[0];
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetCSUAViews body;
	} *cmd;
	size_t cmd_size, view_id_size;
	const struct vmw_resource *ctx = vmw_cbs_context(cbs);

	vmw_collect_view_ids(cbs, loc, vmw_max_num_uavs(cbs->dev_priv));
	view_id_size = cbs->bind_cmd_count*sizeof(uint32);
	cmd_size = sizeof(*cmd) + view_id_size;
	cmd = VMW_CMD_CTX_RESERVE(ctx->dev_priv, cmd_size, ctx->id);
	if (!cmd)
		return -ENOMEM;

	cmd->header.id = SVGA_3D_CMD_DX_SET_CS_UA_VIEWS;
	cmd->header.size = sizeof(cmd->body) + view_id_size;

	/* Start index is specified by user-space */
	cmd->body.startIndex = cbs->ua_views[1].index;

	memcpy(&cmd[1], cbs->bind_cmd_buffer, view_id_size);

	vmw_cmd_commit(ctx->dev_priv, cmd_size);

	return 0;
}

/**
 * vmw_binding_emit_dirty - Issue delayed binding commands
 *
 * @cbs: Pointer to the context's struct vmw_ctx_binding_state
 *
 * This function issues the delayed binding commands that arise from
 * previous scrub / unscrub calls. These binding commands are typically
 * commands that batch a number of bindings and therefore it makes sense
 * to delay them.
 */
static int vmw_binding_emit_dirty(struct vmw_ctx_binding_state *cbs)
{
	int ret = 0;
	unsigned long hit = 0;

	while ((hit = find_next_bit(&cbs->dirty, VMW_BINDING_NUM_BITS, hit))
	       < VMW_BINDING_NUM_BITS) {

		switch (hit) {
		case VMW_BINDING_RT_BIT:
			ret = vmw_emit_set_rt(cbs);
			break;
		case VMW_BINDING_PS_BIT:
			ret = vmw_binding_emit_dirty_ps(cbs);
			break;
		case VMW_BINDING_SO_T_BIT:
			ret = vmw_emit_set_so_target(cbs);
			break;
		case VMW_BINDING_VB_BIT:
			ret = vmw_emit_set_vb(cbs);
			break;
		case VMW_BINDING_UAV_BIT:
			ret = vmw_emit_set_uav(cbs);
			break;
		case VMW_BINDING_CS_UAV_BIT:
			ret = vmw_emit_set_cs_uav(cbs);
			break;
		default:
			BUG();
		}
		if (ret)
			return ret;

		__clear_bit(hit, &cbs->dirty);
		hit++;
	}

	return 0;
}

/**
 * vmw_binding_scrub_sr - Schedule a dx shader resource binding
 * scrub from a context
 *
 * @bi: single binding information.
 * @rebind: Whether to issue a bind instead of scrub command.
 */
static int vmw_binding_scrub_sr(struct vmw_ctx_bindinfo *bi, bool rebind)
{
	struct vmw_ctx_bindinfo_view *biv =
		container_of(bi, struct vmw_ctx_bindinfo_view, bi);
	struct vmw_ctx_binding_state *cbs =
		vmw_context_binding_state(bi->ctx);

	__set_bit(biv->slot, cbs->per_shader[biv->shader_slot].dirty_sr);
	__set_bit(VMW_BINDING_PS_SR_BIT,
		  &cbs->per_shader[biv->shader_slot].dirty);
	__set_bit(VMW_BINDING_PS_BIT, &cbs->dirty);

	return 0;
}

/**
 * vmw_binding_scrub_dx_rt - Schedule a dx rendertarget binding
 * scrub from a context
 *
 * @bi: single binding information.
 * @rebind: Whether to issue a bind instead of scrub command.
 */
static int vmw_binding_scrub_dx_rt(struct vmw_ctx_bindinfo *bi, bool rebind)
{
	struct vmw_ctx_binding_state *cbs =
		vmw_context_binding_state(bi->ctx);

	__set_bit(VMW_BINDING_RT_BIT, &cbs->dirty);

	return 0;
}

/**
 * vmw_binding_scrub_so_target - Schedule a dx streamoutput buffer binding
 * scrub from a context
 *
 * @bi: single binding information.
 * @rebind: Whether to issue a bind instead of scrub command.
 */
static int vmw_binding_scrub_so_target(struct vmw_ctx_bindinfo *bi, bool rebind)
{
	struct vmw_ctx_binding_state *cbs =
		vmw_context_binding_state(bi->ctx);

	__set_bit(VMW_BINDING_SO_T_BIT, &cbs->dirty);

	return 0;
}

/**
 * vmw_binding_scrub_vb - Schedule a dx vertex buffer binding
 * scrub from a context
 *
 * @bi: single binding information.
 * @rebind: Whether to issue a bind instead of scrub command.
 */
static int vmw_binding_scrub_vb(struct vmw_ctx_bindinfo *bi, bool rebind)
{
	struct vmw_ctx_bindinfo_vb *bivb =
		container_of(bi, struct vmw_ctx_bindinfo_vb, bi);
	struct vmw_ctx_binding_state *cbs =
		vmw_context_binding_state(bi->ctx);

	__set_bit(bivb->slot, cbs->dirty_vb);
	__set_bit(VMW_BINDING_VB_BIT, &cbs->dirty);

	return 0;
}

/**
 * vmw_binding_scrub_ib - scrub a dx index buffer binding from a context
 *
 * @bi: single binding information.
 * @rebind: Whether to issue a bind instead of scrub command.
 */
static int vmw_binding_scrub_ib(struct vmw_ctx_bindinfo *bi, bool rebind)
{
	struct vmw_ctx_bindinfo_ib *binding =
		container_of(bi, typeof(*binding), bi);
	struct vmw_private *dev_priv = bi->ctx->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetIndexBuffer body;
	} *cmd;

	cmd = VMW_CMD_CTX_RESERVE(dev_priv, sizeof(*cmd), bi->ctx->id);
	if (unlikely(cmd == NULL))
		return -ENOMEM;

	cmd->header.id = SVGA_3D_CMD_DX_SET_INDEX_BUFFER;
	cmd->header.size = sizeof(cmd->body);
	if (rebind) {
		cmd->body.sid = bi->res->id;
		cmd->body.format = binding->format;
		cmd->body.offset = binding->offset;
	} else {
		cmd->body.sid = SVGA3D_INVALID_ID;
		cmd->body.format = 0;
		cmd->body.offset = 0;
	}

	vmw_cmd_commit(dev_priv, sizeof(*cmd));

	return 0;
}

static int vmw_binding_scrub_uav(struct vmw_ctx_bindinfo *bi, bool rebind)
{
	struct vmw_ctx_binding_state *cbs = vmw_context_binding_state(bi->ctx);

	__set_bit(VMW_BINDING_UAV_BIT, &cbs->dirty);
	return 0;
}

static int vmw_binding_scrub_cs_uav(struct vmw_ctx_bindinfo *bi, bool rebind)
{
	struct vmw_ctx_binding_state *cbs = vmw_context_binding_state(bi->ctx);

	__set_bit(VMW_BINDING_CS_UAV_BIT, &cbs->dirty);
	return 0;
}

/**
 * vmw_binding_scrub_so - Scrub a streamoutput binding from context.
 * @bi: Single binding information.
 * @rebind: Whether to issue a bind instead of scrub command.
 */
static int vmw_binding_scrub_so(struct vmw_ctx_bindinfo *bi, bool rebind)
{
	struct vmw_ctx_bindinfo_so *binding =
		container_of(bi, typeof(*binding), bi);
	struct vmw_private *dev_priv = bi->ctx->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetStreamOutput body;
	} *cmd;

	cmd = VMW_CMD_CTX_RESERVE(dev_priv, sizeof(*cmd), bi->ctx->id);
	if (!cmd)
		return -ENOMEM;

	cmd->header.id = SVGA_3D_CMD_DX_SET_STREAMOUTPUT;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.soid = rebind ? bi->res->id : SVGA3D_INVALID_ID;
	vmw_cmd_commit(dev_priv, sizeof(*cmd));

	return 0;
}

/**
 * vmw_binding_state_alloc - Allocate a struct vmw_ctx_binding_state.
 *
 * @dev_priv: Pointer to a device private structure.
 *
 * Returns a pointer to a newly allocated struct or an error pointer on error.
 */
struct vmw_ctx_binding_state *
vmw_binding_state_alloc(struct vmw_private *dev_priv)
{
	struct vmw_ctx_binding_state *cbs;

	cbs = vzalloc(sizeof(*cbs));
	if (!cbs)
		return ERR_PTR(-ENOMEM);

	cbs->dev_priv = dev_priv;
	INIT_LIST_HEAD(&cbs->list);

	return cbs;
}

/**
 * vmw_binding_state_free - Free a struct vmw_ctx_binding_state.
 *
 * @cbs: Pointer to the struct vmw_ctx_binding_state to be freed.
 */
void vmw_binding_state_free(struct vmw_ctx_binding_state *cbs)
{
	vfree(cbs);
}

/**
 * vmw_binding_state_list - Get the binding list of a
 * struct vmw_ctx_binding_state
 *
 * @cbs: Pointer to the struct vmw_ctx_binding_state
 *
 * Returns the binding list which can be used to traverse through the bindings
 * and access the resource information of all bindings.
 */
struct list_head *vmw_binding_state_list(struct vmw_ctx_binding_state *cbs)
{
	return &cbs->list;
}

/**
 * vmw_binding_state_reset - clear a struct vmw_ctx_binding_state
 *
 * @cbs: Pointer to the struct vmw_ctx_binding_state to be cleared
 *
 * Drops all bindings registered in @cbs. No device binding actions are
 * performed.
 */
void vmw_binding_state_reset(struct vmw_ctx_binding_state *cbs)
{
	struct vmw_ctx_bindinfo *entry, *next;

	list_for_each_entry_safe(entry, next, &cbs->list, ctx_list)
		vmw_binding_drop(entry);
}

/**
 * vmw_binding_dirtying - Return whether a binding type is dirtying its resource
 * @binding_type: The binding type
 *
 * Each time a resource is put on the validation list as the result of a
 * context binding referencing it, we need to determine whether that resource
 * will be dirtied (written to by the GPU) as a result of the corresponding
 * GPU operation. Currently rendertarget-, depth-stencil-, stream-output-target
 * and unordered access view bindings are capable of dirtying their resources.
 *
 * Return: Whether the binding type dirties the resource its binding points to.
 */
u32 vmw_binding_dirtying(enum vmw_ctx_binding_type binding_type)
{
	static u32 is_binding_dirtying[vmw_ctx_binding_max] = {
		[vmw_ctx_binding_rt] = VMW_RES_DIRTY_SET,
		[vmw_ctx_binding_dx_rt] = VMW_RES_DIRTY_SET,
		[vmw_ctx_binding_ds] = VMW_RES_DIRTY_SET,
		[vmw_ctx_binding_so_target] = VMW_RES_DIRTY_SET,
		[vmw_ctx_binding_uav] = VMW_RES_DIRTY_SET,
		[vmw_ctx_binding_cs_uav] = VMW_RES_DIRTY_SET,
	};

	/* Review this function as new bindings are added. */
	BUILD_BUG_ON(vmw_ctx_binding_max != 14);
	return is_binding_dirtying[binding_type];
}

/*
 * This function is unused at run-time, and only used to hold various build
 * asserts important for code optimization assumptions.
 */
static void vmw_binding_build_asserts(void)
{
	BUILD_BUG_ON(SVGA3D_NUM_SHADERTYPE_DX10 != 3);
	BUILD_BUG_ON(SVGA3D_DX_MAX_RENDER_TARGETS > SVGA3D_RT_MAX);
	BUILD_BUG_ON(sizeof(uint32) != sizeof(u32));

	/*
	 * struct vmw_ctx_binding_state::bind_cmd_buffer is used for various
	 * view id arrays.
	 */
	BUILD_BUG_ON(VMW_MAX_VIEW_BINDINGS < SVGA3D_RT_MAX);
	BUILD_BUG_ON(VMW_MAX_VIEW_BINDINGS < SVGA3D_DX_MAX_SRVIEWS);
	BUILD_BUG_ON(VMW_MAX_VIEW_BINDINGS < SVGA3D_DX_MAX_CONSTBUFFERS);

	/*
	 * struct vmw_ctx_binding_state::bind_cmd_buffer is used for
	 * u32 view ids, SVGA3dSoTargets and SVGA3dVertexBuffers
	 */
	BUILD_BUG_ON(SVGA3D_DX_MAX_SOTARGETS*sizeof(SVGA3dSoTarget) >
		     VMW_MAX_VIEW_BINDINGS*sizeof(u32));
	BUILD_BUG_ON(SVGA3D_DX_MAX_VERTEXBUFFERS*sizeof(SVGA3dVertexBuffer) >
		     VMW_MAX_VIEW_BINDINGS*sizeof(u32));
}