/*
 * SN Platform GRU Driver
 *
 * Dump GRU State
 *
 * Copyright (c) 2008 Silicon Graphics, Inc.  All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <asm/uv/uv_hub.h>
#include "gru.h"
#include "grutables.h"
#include "gruhandles.h"
#include "grulib.h"

#define CCH_LOCK_ATTEMPTS	10

/* Copy one GRU handle to user space and advance the destination pointer. */
static int gru_user_copy_handle(void __user **dp, void *s)
{
	if (copy_to_user(*dp, s, GRU_HANDLE_BYTES))
		return -1;
	*dp += GRU_HANDLE_BYTES;
	return 0;
}

/*
 * Copy the CBR, TFH, and CBE handles allocated to a context, and optionally
 * its data segment (DSR) cache lines, to the user buffer.
 */
static int gru_dump_context_data(void *grubase,
			struct gru_context_configuration_handle *cch,
			void __user *ubuf, int ctxnum, int dsrcnt,
			int flush_cbrs)
{
	void *cb, *cbe, *tfh, *gseg;
	int i, scr;

	gseg = grubase + ctxnum * GRU_GSEG_STRIDE;
	cb = gseg + GRU_CB_BASE;
	cbe = grubase + GRU_CBE_BASE;
	tfh = grubase + GRU_TFH_BASE;

	for_each_cbr_in_allocation_map(i, &cch->cbr_allocation_map, scr) {
		if (flush_cbrs)
			gru_flush_cache(cb);
		if (gru_user_copy_handle(&ubuf, cb))
			goto fail;
		if (gru_user_copy_handle(&ubuf, tfh + i * GRU_HANDLE_STRIDE))
			goto fail;
		if (gru_user_copy_handle(&ubuf, cbe + i * GRU_HANDLE_STRIDE))
			goto fail;
		cb += GRU_HANDLE_STRIDE;
	}
	/* ubuf is a user pointer; copy_to_user, not memcpy, must be used */
	if (dsrcnt &&
	    copy_to_user(ubuf, gseg + GRU_DS_BASE, dsrcnt * GRU_HANDLE_STRIDE))
		goto fail;
	return 0;

fail:
	return -EFAULT;
}

/* Dump the TLB fault map (TFM) handles of a chiplet to the user buffer. */
static int gru_dump_tfm(struct gru_state *gru,
		void __user *ubuf, void __user *ubufend)
{
	struct gru_tlb_fault_map *tfm;
	int i;

	if (GRU_NUM_TFM * GRU_CACHE_LINE_BYTES > ubufend - ubuf)
		return -EFBIG;

	for (i = 0; i < GRU_NUM_TFM; i++) {
		tfm = get_tfm(gru->gs_gru_base_vaddr, i);
		if (gru_user_copy_handle(&ubuf, tfm))
			return -EFAULT;
	}
	return GRU_NUM_TFM * GRU_CACHE_LINE_BYTES;
}

/* Dump the TLB global handles (TGH) of a chiplet to the user buffer. */
static int gru_dump_tgh(struct gru_state *gru,
		void __user *ubuf, void __user *ubufend)
{
	struct gru_tlb_global_handle *tgh;
	int i;

	if (GRU_NUM_TGH * GRU_CACHE_LINE_BYTES > ubufend - ubuf)
		return -EFBIG;

	for (i = 0; i < GRU_NUM_TGH; i++) {
		tgh = get_tgh(gru->gs_gru_base_vaddr, i);
		if (gru_user_copy_handle(&ubuf, tgh))
			return -EFAULT;
	}
	return GRU_NUM_TGH * GRU_CACHE_LINE_BYTES;
}

/*
 * Dump the state of a single context: a gru_dump_context_header, the CCH,
 * and the handles allocated to the context.  Returns the number of bytes
 * written to the user buffer or a negative errno.
 */
static int gru_dump_context(struct gru_state *gru, int ctxnum,
		void __user *ubuf, void __user *ubufend, char data_opt,
		char lock_cch, char flush_cbrs)
{
	struct gru_dump_context_header hdr;
	struct gru_dump_context_header __user *uhdr = ubuf;
	struct gru_context_configuration_handle *cch, *ubufcch;
	struct gru_thread_state *gts;
	int try, cch_locked, cbrcnt = 0, dsrcnt = 0, bytes = 0, ret = 0;
	void *grubase;

	memset(&hdr, 0, sizeof(hdr));
	grubase = gru->gs_gru_base_vaddr;
	cch = get_cch(grubase, ctxnum);
	for (try = 0; try < CCH_LOCK_ATTEMPTS; try++) {
		cch_locked = trylock_cch_handle(cch);
		if (cch_locked)
			break;
		msleep(1);
	}

	ubuf += sizeof(hdr);
	ubufcch = ubuf;
	if (gru_user_copy_handle(&ubuf, cch)) {
		if (cch_locked)
			unlock_cch_handle(cch);
		return -EFAULT;
	}
	if (cch_locked)
		ubufcch->delresp = 0;
	bytes = sizeof(hdr) + GRU_CACHE_LINE_BYTES;

	if (cch_locked || !lock_cch) {
		gts = gru->gs_gts[ctxnum];
		if (gts && gts->ts_vma) {
			hdr.pid = gts->ts_tgid_owner;
			hdr.vaddr = gts->ts_vma->vm_start;
		}
		if (cch->state != CCHSTATE_INACTIVE) {
			cbrcnt = hweight64(cch->cbr_allocation_map) *
						GRU_CBR_AU_SIZE;
			dsrcnt = data_opt ? hweight32(cch->dsr_allocation_map) *
						GRU_DSR_AU_CL : 0;
		}
		bytes += (3 * cbrcnt + dsrcnt) * GRU_CACHE_LINE_BYTES;
		if (bytes > ubufend - ubuf)
			ret = -EFBIG;
		else
			ret = gru_dump_context_data(grubase, cch, ubuf, ctxnum,
							dsrcnt, flush_cbrs);
	}
	if (cch_locked)
		unlock_cch_handle(cch);
	if (ret)
		return ret;

	hdr.magic = GRU_DUMP_MAGIC;
	hdr.gid = gru->gs_gid;
	hdr.ctxnum = ctxnum;
	hdr.cbrcnt = cbrcnt;
	hdr.dsrcnt = dsrcnt;
	hdr.cch_locked = cch_locked;
	if (copy_to_user(uhdr, &hdr, sizeof(hdr)))
		return -EFAULT;

	return bytes;
}

/*
 * Dump the state of one GRU chiplet (TFMs, TGHs, then the requested
 * contexts) to a user-supplied buffer.  Returns the number of contexts
 * dumped or a negative errno.
 */
int gru_dump_chiplet_request(unsigned long arg)
{
	struct gru_state *gru;
	struct gru_dump_chiplet_state_req req;
	void __user *ubuf;
	void __user *ubufend;
	int ctxnum, ret, cnt = 0;

	if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
		return -EFAULT;

	/* Currently, only dump by gid is implemented */
	if (req.gid >= gru_max_gids || req.gid < 0)
		return -EINVAL;

	gru = GID_TO_GRU(req.gid);
	ubuf = req.buf;
	ubufend = req.buf + req.buflen;

	ret = gru_dump_tfm(gru, ubuf, ubufend);
	if (ret < 0)
		goto fail;
	ubuf += ret;

	ret = gru_dump_tgh(gru, ubuf, ubufend);
	if (ret < 0)
		goto fail;
	ubuf += ret;

	for (ctxnum = 0; ctxnum < GRU_NUM_CCH; ctxnum++) {
		if (req.ctxnum == ctxnum || req.ctxnum < 0) {
			ret = gru_dump_context(gru, ctxnum, ubuf, ubufend,
						req.data_opt, req.lock_cch,
						req.flush_cbrs);
			if (ret < 0)
				goto fail;
			ubuf += ret;
			cnt++;
		}
	}

	if (copy_to_user((void __user *)arg, &req, sizeof(req)))
		return -EFAULT;
	return cnt;

fail:
	return ret;
}
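/*
 * Illustrative user-space usage (sketch only, not part of this driver):
 * a debugging tool could drive this dump path through the GRU device.
 * The ioctl name GRU_DUMP_CHIPLET_STATE and the device node path below are
 * assumptions taken from grulib.h conventions; the request fields and the
 * returned buffer layout follow directly from the code above.
 *
 *	struct gru_dump_chiplet_state_req req = { 0 };
 *	static char buf[1024 * 1024];
 *	int fd = open("/dev/gru", O_RDWR);	// assumed device node
 *
 *	req.gid = 0;		// chiplet to dump
 *	req.ctxnum = -1;	// < 0 dumps all contexts (see loop above)
 *	req.data_opt = 1;	// also copy data segment (DSR) lines
 *	req.lock_cch = 0;	// dump even if the CCH cannot be locked
 *	req.flush_cbrs = 0;	// no cache flush before copying CBRs
 *	req.buf = buf;
 *	req.buflen = sizeof(buf);
 *
 *	int cnt = ioctl(fd, GRU_DUMP_CHIPLET_STATE, &req);
 *
 * On success, cnt is the number of contexts dumped and the buffer holds the
 * TFM handles, the TGH handles, and then, per context, a
 * gru_dump_context_header (magic, gid, ctxnum, cbrcnt, dsrcnt, cch_locked,
 * pid, vaddr) followed by the CCH and the context's copied handles.
 */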