/*
 * SN Platform GRU Driver
 *
 *            Dump GRU State
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 2008 Silicon Graphics, Inc.  All Rights Reserved.
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <asm/uv/uv_hub.h>
#include "gru.h"
#include "grutables.h"
#include "gruhandles.h"
#include "grulib.h"

#define CCH_LOCK_ATTEMPTS	10

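/*
 * Copy a single GRU handle (GRU_HANDLE_BYTES) out to user space,
 * advancing the caller's destination pointer on success.
 */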
static int gru_user_copy_handle(void __user **dp, void *s)
{
	if (copy_to_user(*dp, s, GRU_HANDLE_BYTES))
		return -1;
	*dp += GRU_HANDLE_BYTES;
	return 0;
}

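/*
 * Dump the resources of one context: for each allocated CBR, the control
 * block (CB), TLB fault handle (TFH) and control block extended (CBE)
 * are copied out as a triplet, followed by the context's data segment
 * (DSR) lines when dsrcnt is nonzero.
 */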
static int gru_dump_context_data(void *grubase,
			struct gru_context_configuration_handle *cch,
			void __user *ubuf, int ctxnum, int dsrcnt)
{
	void *cb, *cbe, *tfh, *gseg;
	int i, scr;

	gseg = grubase + ctxnum * GRU_GSEG_STRIDE;
	cb = gseg + GRU_CB_BASE;
	cbe = grubase + GRU_CBE_BASE;
	tfh = grubase + GRU_TFH_BASE;

	for_each_cbr_in_allocation_map(i, &cch->cbr_allocation_map, scr) {
		if (gru_user_copy_handle(&ubuf, cb))
			goto fail;
		if (gru_user_copy_handle(&ubuf, tfh + i * GRU_HANDLE_STRIDE))
			goto fail;
		if (gru_user_copy_handle(&ubuf, cbe + i * GRU_HANDLE_STRIDE))
			goto fail;
		cb += GRU_HANDLE_STRIDE;
	}

	/*
	 * ubuf is a user pointer; a plain memcpy() would bypass the
	 * access checks, so copy the DSR lines with copy_to_user().
	 */
	if (dsrcnt)
		if (copy_to_user(ubuf, gseg + GRU_DS_BASE,
				 dsrcnt * GRU_HANDLE_STRIDE))
			goto fail;
	return 0;

fail:
	return -EFAULT;
}

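/*
 * Dump all TLB fault map (TFM) handles of the chiplet.  Returns the
 * number of bytes copied to the user buffer, or -EFBIG/-EFAULT on error.
 */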
static int gru_dump_tfm(struct gru_state *gru,
		void __user *ubuf, void __user *ubufend)
{
	struct gru_tlb_fault_map *tfm;
	int i, bytes;

	bytes = GRU_NUM_TFM * GRU_CACHE_LINE_BYTES;
	if (bytes > ubufend - ubuf)
		return -EFBIG;

	for (i = 0; i < GRU_NUM_TFM; i++) {
		tfm = get_tfm(gru->gs_gru_base_vaddr, i);
		if (gru_user_copy_handle(&ubuf, tfm))
			goto fail;
	}
	return bytes;

fail:
	return -EFAULT;
}

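/*
 * Dump all TLB global handles (TGH) of the chiplet; same contract as
 * gru_dump_tfm() above.
 */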
static int gru_dump_tgh(struct gru_state *gru,
		void __user *ubuf, void __user *ubufend)
{
	struct gru_tlb_global_handle *tgh;
	int i, bytes;

	bytes = GRU_NUM_TGH * GRU_CACHE_LINE_BYTES;
	if (bytes > ubufend - ubuf)
		return -EFBIG;

	for (i = 0; i < GRU_NUM_TGH; i++) {
		tgh = get_tgh(gru->gs_gru_base_vaddr, i);
		if (gru_user_copy_handle(&ubuf, tgh))
			goto fail;
	}
	return bytes;

fail:
	return -EFAULT;
}

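/*
 * Dump one context: a gru_dump_context_header, the context configuration
 * handle (CCH), and optionally the context's CBR/DSR state.  The CCH is
 * locked (best effort, CCH_LOCK_ATTEMPTS tries) so a consistent snapshot
 * can be taken; if the lock cannot be obtained and the caller insisted
 * on locking, only the header and CCH are dumped.
 */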
static int gru_dump_context(struct gru_state *gru, int ctxnum,
		void __user *ubuf, void __user *ubufend, char data_opt,
		char lock_cch)
{
	struct gru_dump_context_header hdr;
	struct gru_dump_context_header __user *uhdr = ubuf;
	struct gru_context_configuration_handle *cch, *ubufcch;
	struct gru_thread_state *gts;
	int try, cch_locked, cbrcnt = 0, dsrcnt = 0, bytes = 0, ret = 0;
	void *grubase;

	memset(&hdr, 0, sizeof(hdr));
	grubase = gru->gs_gru_base_vaddr;
	cch = get_cch(grubase, ctxnum);
	for (try = 0; try < CCH_LOCK_ATTEMPTS; try++) {
		cch_locked = trylock_cch_handle(cch);
		if (cch_locked)
			break;
		msleep(1);
	}

	ubuf += sizeof(hdr);
	ubufcch = ubuf;
	if (gru_user_copy_handle(&ubuf, cch)) {
		/* only drop the lock if we actually acquired it */
		if (cch_locked)
			unlock_cch_handle(cch);
		return -EFAULT;
	}
	if (cch_locked)
		ubufcch->delresp = 0;
	bytes = sizeof(hdr) + GRU_CACHE_LINE_BYTES;

	if (cch_locked || !lock_cch) {
		gts = gru->gs_gts[ctxnum];
		if (gts && gts->ts_vma) {
			hdr.pid = gts->ts_tgid_owner;
			hdr.vaddr = gts->ts_vma->vm_start;
		}
		if (cch->state != CCHSTATE_INACTIVE) {
			cbrcnt = hweight64(cch->cbr_allocation_map) *
						GRU_CBR_AU_SIZE;
			dsrcnt = data_opt ? hweight32(cch->dsr_allocation_map) *
						GRU_DSR_AU_CL : 0;
		}
		bytes += (3 * cbrcnt + dsrcnt) * GRU_CACHE_LINE_BYTES;
		if (bytes > ubufend - ubuf)
			ret = -EFBIG;
		else
			ret = gru_dump_context_data(grubase, cch, ubuf, ctxnum,
							dsrcnt);
	}
	if (cch_locked)
		unlock_cch_handle(cch);
	if (ret)
		return ret;

	hdr.magic = GRU_DUMP_MAGIC;
	hdr.gid = gru->gs_gid;
	hdr.ctxnum = ctxnum;
	hdr.cbrcnt = cbrcnt;
	hdr.dsrcnt = dsrcnt;
	hdr.cch_locked = cch_locked;
	if (copy_to_user(uhdr, &hdr, sizeof(hdr)))
		return -EFAULT;

	return bytes;
}

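/*
 * Entry point for a chiplet-state dump request issued through the GRU
 * device's ioctl interface.  Dumps the chiplet's TFM and TGH handles,
 * then every selected context, and returns the number of contexts
 * dumped (or a negative errno).
 */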
int gru_dump_chiplet_request(unsigned long arg)
{
	struct gru_state *gru;
	struct gru_dump_chiplet_state_req req;
	void __user *ubuf;
	void __user *ubufend;
	int ctxnum, ret, cnt = 0;

	if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
		return -EFAULT;

	/* Currently, only dump by gid is implemented */
	if (req.gid >= gru_max_gids || req.gid < 0)
		return -EINVAL;

	gru = GID_TO_GRU(req.gid);
	ubuf = req.buf;
	ubufend = req.buf + req.buflen;

	ret = gru_dump_tfm(gru, ubuf, ubufend);
	if (ret < 0)
		goto fail;
	ubuf += ret;

	ret = gru_dump_tgh(gru, ubuf, ubufend);
	if (ret < 0)
		goto fail;
	ubuf += ret;

	for (ctxnum = 0; ctxnum < GRU_NUM_CCH; ctxnum++) {
		if (req.ctxnum == ctxnum || req.ctxnum < 0) {
			ret = gru_dump_context(gru, ctxnum, ubuf, ubufend,
						req.data_opt, req.lock_cch);
			if (ret < 0)
				goto fail;
			ubuf += ret;
			cnt++;
		}
	}

	if (copy_to_user((void __user *)arg, &req, sizeof(req)))
		return -EFAULT;
	return cnt;

fail:
	return ret;
}
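
/*
 * A minimal sketch of how user space might drive this dump path.  It is
 * illustrative only: the GRU_DUMP_CHIPLET_STATE ioctl number, the exact
 * layout of struct gru_dump_chiplet_state_req and the GRU device node
 * name are defined by grulib.h and the driver registration and should
 * be checked there; the field names below simply mirror the ones used
 * above.
 *
 *	struct gru_dump_chiplet_state_req req = {0};
 *
 *	req.gid = 0;			dump chiplet (GRU) 0
 *	req.ctxnum = -1;		negative: dump every context
 *	req.data_opt = 1;		include DSR data lines
 *	req.lock_cch = 0;		do not insist on the CCH lock
 *	req.buf = buf;			user-allocated dump buffer
 *	req.buflen = buflen;
 *
 *	ncontexts = ioctl(fd, GRU_DUMP_CHIPLET_STATE, &req);
 *
 * On success the return value is the number of contexts dumped, and the
 * buffer holds the TFM and TGH handles followed by one
 * gru_dump_context_header + CCH (+ optional CBR/DSR data) per context.
 */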