/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2004, 2005 MIPS Technologies, Inc. All rights reserved.
 * Copyright (C) 2013 Imagination Technologies Ltd.
 */
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/export.h>

#include <asm/mipsregs.h>
#include <asm/mipsmtregs.h>
#include <asm/mips_mt.h>
#include <asm/vpe.h>

static int major;

/* The number of TCs and VPEs physically available on the core */
static int hw_tcs, hw_vpes;

/* We are prepared, so configure and start the VPE... */
int vpe_run(struct vpe *v)
{
        unsigned long flags, val, dmt_flag;
        struct vpe_notifications *notifier;
        unsigned int vpeflags;
        struct tc *t;

        /* check we are the Master VPE */
        local_irq_save(flags);
        val = read_c0_vpeconf0();
        if (!(val & VPECONF0_MVP)) {
                pr_warn("VPE loader: only Master VPEs are able to configure MT\n");
                local_irq_restore(flags);

                return -1;
        }

        dmt_flag = dmt();
        vpeflags = dvpe();

        if (list_empty(&v->tc)) {
                evpe(vpeflags);
                emt(dmt_flag);
                local_irq_restore(flags);

                pr_warn("VPE loader: No TCs associated with VPE %d\n",
                        v->minor);

                return -ENOEXEC;
        }

        t = list_first_entry(&v->tc, struct tc, tc);

        /* Put MVPEs into 'configuration state' */
        set_c0_mvpcontrol(MVPCONTROL_VPC);

        settc(t->index);

        /* should check it is halted, and not activated */
        if ((read_tc_c0_tcstatus() & TCSTATUS_A) ||
            !(read_tc_c0_tchalt() & TCHALT_H)) {
                evpe(vpeflags);
                emt(dmt_flag);
                local_irq_restore(flags);

                pr_warn("VPE loader: TC %d is already active!\n",
                        t->index);

                return -ENOEXEC;
        }

        /*
         * Write the address we want it to start running from into the TCPC
         * register.
         */
        write_tc_c0_tcrestart((unsigned long)v->__start);
        write_tc_c0_tccontext((unsigned long)0);

        /*
         * Mark the TC as activated, not interrupt exempt and not dynamically
         * allocatable
         */
        val = read_tc_c0_tcstatus();
        val = (val & ~(TCSTATUS_DA | TCSTATUS_IXMT)) | TCSTATUS_A;
        write_tc_c0_tcstatus(val);

        write_tc_c0_tchalt(read_tc_c0_tchalt() & ~TCHALT_H);

        /*
         * The sde-kit passes 'memsize' to __start in $a3, so set something
         * here... Or set $a3 to zero and define DFLT_STACK_SIZE and
         * DFLT_HEAP_SIZE when you compile your program
         */
        mttgpr(6, v->ntcs);
        mttgpr(7, physical_memsize);
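        /*
         * Illustrative only (an assumption about the AP program, not part
         * of this loader): under the o32 calling convention the two GPRs
         * set above arrive as the third and fourth arguments ($a2/$a3) of
         * the entry point, so a bare-metal program could declare something
         * like
         *
         *      void __start(long unused0, long unused1,
         *                   unsigned int ntcs, unsigned long memsize);
         *
         * to pick up its TC count and memory size directly.
         */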
        /* set up VPE1 */
        /*
         * bind the TC to VPE 1 as late as possible so we only have the final
         * VPE registers to set up, and so an EJTAG probe can trigger on it
         */
        write_tc_c0_tcbind((read_tc_c0_tcbind() & ~TCBIND_CURVPE) | 1);

        write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() & ~(VPECONF0_VPA));

        back_to_back_c0_hazard();

        /* Set up the XTC bit in vpeconf0 to point at our tc */
        write_vpe_c0_vpeconf0((read_vpe_c0_vpeconf0() & ~(VPECONF0_XTC))
                              | (t->index << VPECONF0_XTC_SHIFT));

        back_to_back_c0_hazard();

        /* enable this VPE */
        write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | VPECONF0_VPA);

        /* clear out any leftovers from a previous program */
        write_vpe_c0_status(0);
        write_vpe_c0_cause(0);

        /* take system out of configuration state */
        clear_c0_mvpcontrol(MVPCONTROL_VPC);

        /*
         * SMVP kernels manage VPE enable independently, but uniprocessor
         * kernels need to turn it on, even if that wasn't the pre-dvpe() state.
         */
#ifdef CONFIG_SMP
        evpe(vpeflags);
#else
        evpe(EVPE_ENABLE);
#endif
        emt(dmt_flag);
        local_irq_restore(flags);

        list_for_each_entry(notifier, &v->notify, list)
                notifier->start(VPE_MODULE_MINOR);

        return 0;
}

void cleanup_tc(struct tc *tc)
{
        unsigned long flags;
        unsigned int mtflags, vpflags;
        int tmp;

        local_irq_save(flags);
        mtflags = dmt();
        vpflags = dvpe();
        /* Put MVPEs into 'configuration state' */
        set_c0_mvpcontrol(MVPCONTROL_VPC);

        settc(tc->index);
        tmp = read_tc_c0_tcstatus();

        /* mark not allocated and not dynamically allocatable */
        tmp &= ~(TCSTATUS_A | TCSTATUS_DA);
        tmp |= TCSTATUS_IXMT;   /* interrupt exempt */
        write_tc_c0_tcstatus(tmp);

        write_tc_c0_tchalt(TCHALT_H);
        mips_ihb();

        clear_c0_mvpcontrol(MVPCONTROL_VPC);
        evpe(vpflags);
        emt(mtflags);
        local_irq_restore(flags);
}

/* module wrapper entry points */
/* give me a vpe */
void *vpe_alloc(void)
{
        int i;
        struct vpe *v;

        /* find a vpe */
        for (i = 1; i < MAX_VPES; i++) {
                v = get_vpe(i);
                if (v != NULL) {
                        v->state = VPE_STATE_INUSE;
                        return v;
                }
        }
        return NULL;
}
EXPORT_SYMBOL(vpe_alloc);

/* start running from here */
int vpe_start(void *vpe, unsigned long start)
{
        struct vpe *v = vpe;

        v->__start = start;
        return vpe_run(v);
}
EXPORT_SYMBOL(vpe_start);

/* halt it for now */
int vpe_stop(void *vpe)
{
        struct vpe *v = vpe;
        struct tc *t;
        unsigned int evpe_flags;

        evpe_flags = dvpe();

        /*
         * list_entry() on an empty list yields a bogus pointer, so use
         * the _or_null variant to make the NULL check below meaningful.
         */
        t = list_first_entry_or_null(&v->tc, struct tc, tc);
        if (t != NULL) {
                settc(t->index);
                write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() & ~VPECONF0_VPA);
        }

        evpe(evpe_flags);

        return 0;
}
EXPORT_SYMBOL(vpe_stop);
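/*
 * A minimal sketch of how a kernel-side client might drive the wrapper
 * entry points exported above (illustrative only; error handling and the
 * loading of the program image are elided):
 *
 *      void *v = vpe_alloc();          // claim a free VPE
 *      if (v != NULL) {
 *              vpe_start(v, entry);    // entry: address the AP program runs from
 *              ...
 *              vpe_stop(v);            // deactivate the VPE
 *              vpe_free(v);            // halt its TC and release it
 *      }
 */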
/* I'm done with it, thank you */
int vpe_free(void *vpe)
{
        struct vpe *v = vpe;
        struct tc *t;
        unsigned int evpe_flags;

        t = list_first_entry_or_null(&v->tc, struct tc, tc);
        if (t == NULL)
                return -ENOEXEC;

        evpe_flags = dvpe();

        /* Put MVPEs into 'configuration state' */
        set_c0_mvpcontrol(MVPCONTROL_VPC);

        settc(t->index);
        write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() & ~VPECONF0_VPA);

        /* halt the TC */
        write_tc_c0_tchalt(TCHALT_H);
        mips_ihb();

        /* mark the TC unallocated */
        write_tc_c0_tcstatus(read_tc_c0_tcstatus() & ~TCSTATUS_A);

        v->state = VPE_STATE_UNUSED;

        clear_c0_mvpcontrol(MVPCONTROL_VPC);
        evpe(evpe_flags);

        return 0;
}
EXPORT_SYMBOL(vpe_free);

static ssize_t store_kill(struct device *dev, struct device_attribute *attr,
                          const char *buf, size_t len)
{
        struct vpe *vpe = get_vpe(aprp_cpu_index());
        struct vpe_notifications *notifier;

        list_for_each_entry(notifier, &vpe->notify, list)
                notifier->stop(aprp_cpu_index());

        release_progmem(vpe->load_addr);
        cleanup_tc(get_tc(aprp_cpu_index()));
        vpe_stop(vpe);
        vpe_free(vpe);

        return len;
}
static DEVICE_ATTR(kill, S_IWUSR, NULL, store_kill);

static ssize_t ntcs_show(struct device *cd, struct device_attribute *attr,
                         char *buf)
{
        struct vpe *vpe = get_vpe(aprp_cpu_index());

        return sprintf(buf, "%d\n", vpe->ntcs);
}

static ssize_t ntcs_store(struct device *dev, struct device_attribute *attr,
                          const char *buf, size_t len)
{
        struct vpe *vpe = get_vpe(aprp_cpu_index());
        unsigned long new;
        int ret;

        ret = kstrtoul(buf, 0, &new);
        if (ret < 0)
                return ret;

        if (new == 0 || new > (hw_tcs - aprp_cpu_index()))
                return -EINVAL;

        vpe->ntcs = new;

        return len;
}
static DEVICE_ATTR_RW(ntcs);

static struct attribute *vpe_attrs[] = {
        &dev_attr_kill.attr,
        &dev_attr_ntcs.attr,
        NULL,
};
ATTRIBUTE_GROUPS(vpe);

static void vpe_device_release(struct device *cd)
{
}

static struct class vpe_class = {
        .name = "vpe",
        .owner = THIS_MODULE,
        .dev_release = vpe_device_release,
        .dev_groups = vpe_groups,
};

static struct device vpe_device;

int __init vpe_module_init(void)
{
        unsigned int mtflags, vpflags;
        unsigned long flags, val;
        struct vpe *v = NULL;
        struct tc *t;
        int tc, err;

        if (!cpu_has_mipsmt) {
                pr_warn("VPE loader: not a MIPS MT capable processor\n");
                return -ENODEV;
        }

        if (vpelimit == 0) {
                pr_warn("No VPEs reserved for AP/SP, not initializing VPE loader\n"
                        "Pass maxvpes=<n> as a kernel argument\n");

                return -ENODEV;
        }

        if (aprp_cpu_index() == 0) {
                pr_warn("No TCs reserved for AP/SP, not initializing VPE loader\n"
                        "Pass maxtcs=<n> as a kernel argument\n");

                return -ENODEV;
        }

        major = register_chrdev(0, VPE_MODULE_NAME, &vpe_fops);
        if (major < 0) {
                pr_warn("VPE loader: unable to register character device\n");
                return major;
        }

        err = class_register(&vpe_class);
        if (err) {
                pr_err("vpe_class registration failed\n");
                goto out_chrdev;
        }

        device_initialize(&vpe_device);
        vpe_device.class = &vpe_class;
        vpe_device.parent = NULL;
        dev_set_name(&vpe_device, "vpe1");
        vpe_device.devt = MKDEV(major, VPE_MODULE_MINOR);
        err = device_add(&vpe_device);
        if (err) {
                pr_err("Adding vpe_device failed\n");
                goto out_class;
        }

        local_irq_save(flags);
        mtflags = dmt();
        vpflags = dvpe();

        /* Put MVPEs into 'configuration state' */
        set_c0_mvpcontrol(MVPCONTROL_VPC);

        val = read_c0_mvpconf0();
        hw_tcs = (val & MVPCONF0_PTC) + 1;
        hw_vpes = ((val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;
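        /*
         * PTC and PVPE hold "count minus one": for example, a core with
         * 2 VPEs and 5 TCs (an assumed configuration, purely for
         * illustration) reports PVPE = 1 and PTC = 4, so hw_vpes = 2 and
         * hw_tcs = 5 here.
         */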
        for (tc = aprp_cpu_index(); tc < hw_tcs; tc++) {
                /*
                 * Must re-enable multithreading temporarily; if we
                 * reschedule and send IPIs or similar while it is
                 * disabled, we might hang.
                 */
                clear_c0_mvpcontrol(MVPCONTROL_VPC);
                evpe(vpflags);
                emt(mtflags);
                local_irq_restore(flags);
                t = alloc_tc(tc);
                if (!t) {
                        err = -ENOMEM;
                        goto out_dev;
                }

                local_irq_save(flags);
                mtflags = dmt();
                vpflags = dvpe();
                set_c0_mvpcontrol(MVPCONTROL_VPC);

                /* VPEs */
                if (tc < hw_tcs) {
                        settc(tc);

                        v = alloc_vpe(tc);
                        if (v == NULL) {
                                pr_warn("VPE: unable to allocate VPE\n");
                                goto out_reenable;
                        }

                        v->ntcs = hw_tcs - aprp_cpu_index();

                        /* add the tc to the list of this vpe's TCs. */
                        list_add(&t->tc, &v->tc);

                        /* deactivate all but vpe0 */
                        if (tc >= aprp_cpu_index()) {
                                unsigned long tmp = read_vpe_c0_vpeconf0();

                                tmp &= ~VPECONF0_VPA;

                                /* master VPE */
                                tmp |= VPECONF0_MVP;
                                write_vpe_c0_vpeconf0(tmp);
                        }

                        /* disable multi-threading with TCs */
                        write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() &
                                                ~VPECONTROL_TE);

                        if (tc >= vpelimit) {
                                /*
                                 * Set config to be the same as vpe0,
                                 * particularly kseg0 coherency alg
                                 */
                                write_vpe_c0_config(read_c0_config());
                        }
                }

                /* TCs */
                t->pvpe = v;    /* set the parent vpe */

                if (tc >= aprp_cpu_index()) {
                        unsigned long tmp;

                        settc(tc);

                        /*
                         * A TC that is bound to any other VPE gets bound to
                         * VPE0; ideally I'd like to make it homeless, but the
                         * hardware doesn't appear to allow binding a TC to a
                         * non-existent VPE. Which is perfectly reasonable.
                         *
                         * The (un)bound state is visible to an EJTAG probe,
                         * so it may notify GDB...
                         */
                        tmp = read_tc_c0_tcbind();
                        if (tmp & TCBIND_CURVPE) {
                                /* TC is bound to a VPE other than VPE0 */
                                write_tc_c0_tcbind(tmp & ~TCBIND_CURVPE);

                                t->pvpe = get_vpe(0);   /* set the parent vpe */
                        }

                        /* halt the TC */
                        write_tc_c0_tchalt(TCHALT_H);
                        mips_ihb();

                        tmp = read_tc_c0_tcstatus();

                        /* mark not activated and not dynamically allocatable */
                        tmp &= ~(TCSTATUS_A | TCSTATUS_DA);
                        tmp |= TCSTATUS_IXMT;   /* interrupt exempt */
                        write_tc_c0_tcstatus(tmp);
                }
        }

out_reenable:
        /* release config state */
        clear_c0_mvpcontrol(MVPCONTROL_VPC);

        evpe(vpflags);
        emt(mtflags);
        local_irq_restore(flags);

        return 0;

out_dev:
        device_del(&vpe_device);

out_class:
        put_device(&vpe_device);
        class_unregister(&vpe_class);

out_chrdev:
        unregister_chrdev(major, VPE_MODULE_NAME);

        return err;
}

void __exit vpe_module_exit(void)
{
        struct vpe *v, *n;

        device_unregister(&vpe_device);
        class_unregister(&vpe_class);
        unregister_chrdev(major, VPE_MODULE_NAME);

        /* No locking needed here */
        list_for_each_entry_safe(v, n, &vpecontrol.vpe_list, list) {
                if (v->state != VPE_STATE_UNUSED)
                        release_vpe(v);
        }
}
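/*
 * Rough user-space view of what this file registers, assuming the usual
 * /dev node for the dynamically allocated major (an illustrative shell
 * session, not part of the kernel sources; loading/running on write and
 * release is handled by vpe_fops elsewhere):
 *
 *      cat program.elf > /dev/vpe1             # load and run the AP program
 *      cat /sys/class/vpe/vpe1/ntcs            # TCs available to the program
 *      echo 1 > /sys/class/vpe/vpe1/kill       # stop it and release resources
 */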