// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/


#include "vmwgfx_drv.h"

#define VMW_PPN_SIZE (sizeof(unsigned long))
/* A future safe maximum remap size. */
#define VMW_PPN_PER_REMAP ((31 * 1024) / VMW_PPN_SIZE)
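/*
 * VMW_PPN_PER_REMAP caps the number of page numbers carried by a single
 * SVGA_CMD_REMAP_GMR2 command, keeping its PPN payload at or below 31 KiB
 * (comfortably under 32 KiB). With 8-byte PPNs (64-bit unsigned long) this
 * works out to (31 * 1024) / 8 = 3968 pages per remap command.
 */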
#define DMA_ADDR_INVALID ((dma_addr_t) 0)
#define DMA_PAGE_INVALID 0UL

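/*
 * Build and commit the FIFO command sequence that binds num_pages pages,
 * walked via iter, to GMR gmr_id: one SVGA_CMD_DEFINE_GMR2 command followed
 * by as many SVGA_CMD_REMAP_GMR2 commands as needed to cover every page
 * number.
 */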
static int vmw_gmr2_bind(struct vmw_private *dev_priv,
			 struct vmw_piter *iter,
			 unsigned long num_pages,
			 int gmr_id)
{
	SVGAFifoCmdDefineGMR2 define_cmd;
	SVGAFifoCmdRemapGMR2 remap_cmd;
	uint32_t *cmd;
	uint32_t *cmd_orig;
	uint32_t define_size = sizeof(define_cmd) + sizeof(*cmd);
	uint32_t remap_num = num_pages / VMW_PPN_PER_REMAP + ((num_pages % VMW_PPN_PER_REMAP) > 0);
	uint32_t remap_size = VMW_PPN_SIZE * num_pages + (sizeof(remap_cmd) + sizeof(*cmd)) * remap_num;
	uint32_t remap_pos = 0;
	uint32_t cmd_size = define_size + remap_size;
	uint32_t i;
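	/*
	 * remap_num is ceil(num_pages / VMW_PPN_PER_REMAP), and cmd_size
	 * covers the DEFINE command, each REMAP command body plus its
	 * 32-bit command id, and the PPN payload for all pages. For
	 * example, 10000 pages with 8-byte PPNs need remap_num = 3
	 * REMAP commands.
	 */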

	cmd_orig = cmd = VMW_CMD_RESERVE(dev_priv, cmd_size);
	if (unlikely(cmd == NULL))
		return -ENOMEM;

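	/*
	 * Emit the DEFINE_GMR2 command: a 32-bit command id word followed by
	 * the command body. cmd always advances in units of uint32_t.
	 */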
	define_cmd.gmrId = gmr_id;
	define_cmd.numPages = num_pages;

	*cmd++ = SVGA_CMD_DEFINE_GMR2;
	memcpy(cmd, &define_cmd, sizeof(define_cmd));
	cmd += sizeof(define_cmd) / sizeof(*cmd);

	/*
	 * The command stream must be split into multiple REMAP commands
	 * if too many pages go into the GMR at once.
	 */

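	/*
	 * Page numbers are written either as 32-bit or as 64-bit values,
	 * matching the kernel's unsigned long size (VMW_PPN_SIZE); the REMAP
	 * flags tell the device which encoding to expect.
	 */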
	remap_cmd.gmrId = gmr_id;
	remap_cmd.flags = (VMW_PPN_SIZE > sizeof(*cmd)) ?
		SVGA_REMAP_GMR2_PPN64 : SVGA_REMAP_GMR2_PPN32;

	while (num_pages > 0) {
		unsigned long nr = min_t(unsigned long, num_pages, VMW_PPN_PER_REMAP);

		remap_cmd.offsetPages = remap_pos;
		remap_cmd.numPages = nr;

		*cmd++ = SVGA_CMD_REMAP_GMR2;
		memcpy(cmd, &remap_cmd, sizeof(remap_cmd));
		cmd += sizeof(remap_cmd) / sizeof(*cmd);

		for (i = 0; i < nr; ++i) {
			if (VMW_PPN_SIZE <= 4)
				*cmd = vmw_piter_dma_addr(iter) >> PAGE_SHIFT;
			else
				*((uint64_t *)cmd) = vmw_piter_dma_addr(iter) >>
					PAGE_SHIFT;

			cmd += VMW_PPN_SIZE / sizeof(*cmd);
			vmw_piter_next(iter);
		}

		num_pages -= nr;
		remap_pos += nr;
	}

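	/*
	 * Sanity check: the write pointer must have advanced over exactly
	 * cmd_size bytes of the reserved command space.
	 */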
	BUG_ON(cmd != cmd_orig + cmd_size / sizeof(*cmd));

	vmw_cmd_commit(dev_priv, cmd_size);

	return 0;
}

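/*
 * Unbind by redefining the GMR with zero pages, which releases whatever
 * backing the GMR id currently describes. The reserved size is the command
 * body plus 4 bytes for the 32-bit SVGA_CMD_DEFINE_GMR2 id word.
 */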
static void vmw_gmr2_unbind(struct vmw_private *dev_priv,
			    int gmr_id)
{
	SVGAFifoCmdDefineGMR2 define_cmd;
	uint32_t define_size = sizeof(define_cmd) + 4;
	uint32_t *cmd;

	cmd = VMW_CMD_RESERVE(dev_priv, define_size);
	if (unlikely(cmd == NULL))
		return;

	define_cmd.gmrId = gmr_id;
	define_cmd.numPages = 0;

	*cmd++ = SVGA_CMD_DEFINE_GMR2;
	memcpy(cmd, &define_cmd, sizeof(define_cmd));

	vmw_cmd_commit(dev_priv, define_size);
}


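/*
 * Bind the page list described by vsgt to GMR gmr_id. An empty page list is
 * a successful no-op; the bind fails with -EINVAL if the device does not
 * advertise SVGA_CAP_GMR2.
 */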
int vmw_gmr_bind(struct vmw_private *dev_priv,
		 const struct vmw_sg_table *vsgt,
		 unsigned long num_pages,
		 int gmr_id)
{
	struct vmw_piter data_iter;

	vmw_piter_start(&data_iter, vsgt, 0);

	if (unlikely(!vmw_piter_next(&data_iter)))
		return 0;

	if (unlikely(!(dev_priv->capabilities & SVGA_CAP_GMR2)))
		return -EINVAL;

	return vmw_gmr2_bind(dev_priv, &data_iter, num_pages, gmr_id);
}


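/* Release the GMR binding; a silent no-op if the device lacks GMR2 support. */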
void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id)
{
	if (likely(dev_priv->capabilities & SVGA_CAP_GMR2))
		vmw_gmr2_unbind(dev_priv, gmr_id);
}
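
/*
 * Illustrative usage (a sketch, not code taken from this driver): a caller
 * that already holds a populated struct vmw_sg_table and has allocated a
 * GMR id would pair the two entry points roughly as below; the variable
 * names and surrounding error handling are assumptions for the example.
 *
 *	ret = vmw_gmr_bind(dev_priv, vsgt, num_pages, gmr_id);
 *	if (unlikely(ret != 0))
 *		return ret;
 *	...
 *	vmw_gmr_unbind(dev_priv, gmr_id);
 */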