/*
 * PS3 Platform spu routines.
 *
 * Copyright (C) 2006 Sony Computer Entertainment Inc.
 * Copyright 2006 Sony Corp.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/mmzone.h>
#include <linux/io.h>
#include <linux/mm.h>

#include <asm/spu.h>
#include <asm/spu_priv1.h>
#include <asm/lv1call.h>

#include "../cell/spufs/spufs.h"
#include "platform.h"

/* spu_management_ops */

/**
 * enum spe_type - Type of spe to create.
 * @spe_type_logical: Standard logical spe.
 *
 * For use with lv1_construct_logical_spe().  The current HV does not support
 * any types other than those listed.
 */

enum spe_type {
	SPE_TYPE_LOGICAL = 0,
};

/**
 * struct spe_shadow - logical spe shadow register area.
 *
 * Read-only shadow of spe registers.  The padding arrays size the layout so
 * that each register member lands at the fixed offset given in its trailing
 * comment (offsets presumably dictated by the HV shadow page layout --
 * NOTE(review): confirm against the HV documentation).
 */

struct spe_shadow {
	u8 padding_0140[0x0140];
	u64 int_status_class0_RW;	/* 0x0140 */
	u64 int_status_class1_RW;	/* 0x0148 */
	u64 int_status_class2_RW;	/* 0x0150 */
	u8 padding_0158[0x0610-0x0158];
	u64 mfc_dsisr_RW;		/* 0x0610 */
	u8 padding_0618[0x0620-0x0618];
	u64 mfc_dar_RW;			/* 0x0620 */
	u8 padding_0628[0x0800-0x0628];
	u64 mfc_dsipr_R;		/* 0x0800 */
	u8 padding_0808[0x0810-0x0808];
	u64 mfc_lscrr_R;		/* 0x0810 */
	u8 padding_0818[0x0c00-0x0818];
	u64 mfc_cer_R;			/* 0x0c00 */
	u8 padding_0c08[0x0f00-0x0c08];
	u64 spe_execution_status;	/* 0x0f00 */
	u8 padding_0f08[0x1000-0x0f08];
};

/**
 * enum spe_ex_state - Logical spe execution state.
 * @spe_ex_state_unexecutable: Uninitialized.
 * @spe_ex_state_executable: Enabled, not ready.
 * @spe_ex_state_executed: Ready for use.
 *
 * The execution state (status) of the logical spe as reported in
 * struct spe_shadow:spe_execution_status.
 */

enum spe_ex_state {
	SPE_EX_STATE_UNEXECUTABLE = 0,
	SPE_EX_STATE_EXECUTABLE = 2,
	SPE_EX_STATE_EXECUTED = 3,
};

/**
 * struct priv1_cache - Cached values of priv1 registers.
 * @masks[]: Array of cached spe interrupt masks, indexed by class.
 * @sr1: Cached mfc_sr1 register.
 * @tclass_id: Cached mfc_tclass_id register.
 *
 * The HV does not give direct read access to these priv1 registers, so the
 * last value written is cached here and returned by the *_get() ops below.
 */

struct priv1_cache {
	u64 masks[3];
	u64 sr1;
	u64 tclass_id;
};

/**
 * struct spu_pdata - Platform state variables.
 * @spe_id: HV spe id returned by lv1_construct_logical_spe().
 * @resource_id: HV spe resource id returned by
 * 	ps3_repository_read_spe_resource_id().
 * @priv2_addr: lpar address of spe priv2 area returned by
 * 	lv1_construct_logical_spe().
 * @shadow_addr: lpar address of spe register shadow area returned by
 * 	lv1_construct_logical_spe().
 * @shadow: Virtual (ioremap) address of spe register shadow area.
 * @cache: Cached values of priv1 registers.
114 */ 115 116 struct spu_pdata { 117 u64 spe_id; 118 u64 resource_id; 119 u64 priv2_addr; 120 u64 shadow_addr; 121 struct spe_shadow __iomem *shadow; 122 struct priv1_cache cache; 123 }; 124 125 static struct spu_pdata *spu_pdata(struct spu *spu) 126 { 127 return spu->pdata; 128 } 129 130 #define dump_areas(_a, _b, _c, _d, _e) \ 131 _dump_areas(_a, _b, _c, _d, _e, __func__, __LINE__) 132 static void _dump_areas(unsigned int spe_id, unsigned long priv2, 133 unsigned long problem, unsigned long ls, unsigned long shadow, 134 const char* func, int line) 135 { 136 pr_debug("%s:%d: spe_id: %xh (%u)\n", func, line, spe_id, spe_id); 137 pr_debug("%s:%d: priv2: %lxh\n", func, line, priv2); 138 pr_debug("%s:%d: problem: %lxh\n", func, line, problem); 139 pr_debug("%s:%d: ls: %lxh\n", func, line, ls); 140 pr_debug("%s:%d: shadow: %lxh\n", func, line, shadow); 141 } 142 143 static unsigned long get_vas_id(void) 144 { 145 unsigned long id; 146 147 lv1_get_logical_ppe_id(&id); 148 lv1_get_virtual_address_space_id_of_ppe(id, &id); 149 150 return id; 151 } 152 153 static int __init construct_spu(struct spu *spu) 154 { 155 int result; 156 unsigned long unused; 157 158 result = lv1_construct_logical_spe(PAGE_SHIFT, PAGE_SHIFT, PAGE_SHIFT, 159 PAGE_SHIFT, PAGE_SHIFT, get_vas_id(), SPE_TYPE_LOGICAL, 160 &spu_pdata(spu)->priv2_addr, &spu->problem_phys, 161 &spu->local_store_phys, &unused, 162 &spu_pdata(spu)->shadow_addr, 163 &spu_pdata(spu)->spe_id); 164 165 if (result) { 166 pr_debug("%s:%d: lv1_construct_logical_spe failed: %s\n", 167 __func__, __LINE__, ps3_result(result)); 168 return result; 169 } 170 171 return result; 172 } 173 174 static void spu_unmap(struct spu *spu) 175 { 176 iounmap(spu->priv2); 177 iounmap(spu->problem); 178 iounmap((__force u8 __iomem *)spu->local_store); 179 iounmap(spu_pdata(spu)->shadow); 180 } 181 182 static int __init setup_areas(struct spu *spu) 183 { 184 struct table {char* name; unsigned long addr; unsigned long size;}; 185 186 
	/* The shadow regs are read-only; map them uncached and read-only. */
	spu_pdata(spu)->shadow = ioremap_flags(spu_pdata(spu)->shadow_addr,
		sizeof(struct spe_shadow),
		pgprot_val(PAGE_READONLY) |
		_PAGE_NO_CACHE);
	if (!spu_pdata(spu)->shadow) {
		pr_debug("%s:%d: ioremap shadow failed\n", __func__, __LINE__);
		goto fail_ioremap;
	}

	spu->local_store = (__force void *)ioremap_flags(spu->local_store_phys,
		LS_SIZE, _PAGE_NO_CACHE);

	if (!spu->local_store) {
		pr_debug("%s:%d: ioremap local_store failed\n",
			__func__, __LINE__);
		goto fail_ioremap;
	}

	spu->problem = ioremap(spu->problem_phys,
		sizeof(struct spu_problem));

	if (!spu->problem) {
		pr_debug("%s:%d: ioremap problem failed\n", __func__, __LINE__);
		goto fail_ioremap;
	}

	spu->priv2 = ioremap(spu_pdata(spu)->priv2_addr,
		sizeof(struct spu_priv2));

	if (!spu->priv2) {
		pr_debug("%s:%d: ioremap priv2 failed\n", __func__, __LINE__);
		goto fail_ioremap;
	}

	/* Log both the lpar addresses and the ioremapped addresses. */
	dump_areas(spu_pdata(spu)->spe_id, spu_pdata(spu)->priv2_addr,
		spu->problem_phys, spu->local_store_phys,
		spu_pdata(spu)->shadow_addr);
	dump_areas(spu_pdata(spu)->spe_id, (unsigned long)spu->priv2,
		(unsigned long)spu->problem, (unsigned long)spu->local_store,
		(unsigned long)spu_pdata(spu)->shadow);

	return 0;

fail_ioremap:
	/* NOTE(review): this unmaps all four areas even when only some were
	 * mapped -- presumably the unset pointers are still NULL and
	 * iounmap() tolerates that here; confirm.
	 */
	spu_unmap(spu);

	return -ENOMEM;
}

/*
 * setup_interrupts - Connect the three spe interrupt classes (0, 1, 2).
 *
 * On failure any irqs already set up are destroyed and all three irq
 * numbers are reset to NO_IRQ.
 */
static int __init setup_interrupts(struct spu *spu)
{
	int result;

	result = ps3_spe_irq_setup(PS3_BINDING_CPU_ANY, spu_pdata(spu)->spe_id,
		0, &spu->irqs[0]);

	if (result)
		goto fail_alloc_0;

	result = ps3_spe_irq_setup(PS3_BINDING_CPU_ANY, spu_pdata(spu)->spe_id,
		1, &spu->irqs[1]);

	if (result)
		goto fail_alloc_1;

	result = ps3_spe_irq_setup(PS3_BINDING_CPU_ANY, spu_pdata(spu)->spe_id,
		2, &spu->irqs[2]);

	if (result)
		goto fail_alloc_2;

	return result;

fail_alloc_2:
	ps3_spe_irq_destroy(spu->irqs[1]);
fail_alloc_1:
	ps3_spe_irq_destroy(spu->irqs[0]);
fail_alloc_0:
	spu->irqs[0] = spu->irqs[1] = spu->irqs[2] = NO_IRQ;
	return result;
}

/*
 * enable_spu - Enable the logical spe, then map its areas and connect its
 * interrupts.  Each step is undone in reverse order on failure.
 */
static int __init enable_spu(struct spu *spu)
{
	int result;

	result = lv1_enable_logical_spe(spu_pdata(spu)->spe_id,
		spu_pdata(spu)->resource_id);

	if (result) {
		pr_debug("%s:%d: lv1_enable_logical_spe failed: %s\n",
			__func__, __LINE__, ps3_result(result));
		goto fail_enable;
	}

	result = setup_areas(spu);

	if (result)
		goto fail_areas;

	result = setup_interrupts(spu);

	if (result)
		goto fail_interrupts;

	return 0;

fail_interrupts:
	spu_unmap(spu);
fail_areas:
	lv1_disable_logical_spe(spu_pdata(spu)->spe_id, 0);
fail_enable:
	return result;
}

/*
 * ps3_destroy_spu - Tear down a logical spe: disable it, destroy its irqs,
 * unmap its areas, destruct it in the HV and free the platform data.
 */
static int ps3_destroy_spu(struct spu *spu)
{
	int result;

	pr_debug("%s:%d spu_%d\n", __func__, __LINE__, spu->number);

	result = lv1_disable_logical_spe(spu_pdata(spu)->spe_id, 0);
	BUG_ON(result);

	ps3_spe_irq_destroy(spu->irqs[2]);
	ps3_spe_irq_destroy(spu->irqs[1]);
	ps3_spe_irq_destroy(spu->irqs[0]);

	spu->irqs[0] = spu->irqs[1] = spu->irqs[2] = NO_IRQ;

	spu_unmap(spu);

	result = lv1_destruct_logical_spe(spu_pdata(spu)->spe_id);
	BUG_ON(result);

	kfree(spu->pdata);
	spu->pdata = NULL;

	return 0;
}

/*
 * ps3_create_spu - Construct and enable a logical spe.
 * @data: The HV spe resource id, cast to a void pointer by the enumerator.
 */
static int __init ps3_create_spu(struct spu *spu, void *data)
{
	int result;

	pr_debug("%s:%d spu_%d\n", __func__, __LINE__, spu->number);

	spu->pdata = kzalloc(sizeof(struct spu_pdata),
		GFP_KERNEL);

	if (!spu->pdata) {
		result = -ENOMEM;
		goto fail_malloc;
	}

	spu_pdata(spu)->resource_id = (unsigned long)data;

	/* Init cached reg values to HV defaults. */

	spu_pdata(spu)->cache.sr1 = 0x33;

	result = construct_spu(spu);

	/* NOTE(review): the fail_construct path runs the full
	 * ps3_destroy_spu() teardown even though construction failed --
	 * verify the lv1 disable/destruct calls (and their BUG_ON()s)
	 * are safe with an unconstructed spe.
	 */
	if (result)
		goto fail_construct;

	/* For now, just go ahead and enable it.
*/ 353 354 result = enable_spu(spu); 355 356 if (result) 357 goto fail_enable; 358 359 /* Make sure the spu is in SPE_EX_STATE_EXECUTED. */ 360 361 /* need something better here!!! */ 362 while (in_be64(&spu_pdata(spu)->shadow->spe_execution_status) 363 != SPE_EX_STATE_EXECUTED) 364 (void)0; 365 366 return result; 367 368 fail_enable: 369 fail_construct: 370 ps3_destroy_spu(spu); 371 fail_malloc: 372 return result; 373 } 374 375 static int __init ps3_enumerate_spus(int (*fn)(void *data)) 376 { 377 int result; 378 unsigned int num_resource_id; 379 unsigned int i; 380 381 result = ps3_repository_read_num_spu_resource_id(&num_resource_id); 382 383 pr_debug("%s:%d: num_resource_id %u\n", __func__, __LINE__, 384 num_resource_id); 385 386 /* 387 * For now, just create logical spus equal to the number 388 * of physical spus reserved for the partition. 389 */ 390 391 for (i = 0; i < num_resource_id; i++) { 392 enum ps3_spu_resource_type resource_type; 393 unsigned int resource_id; 394 395 result = ps3_repository_read_spu_resource_id(i, 396 &resource_type, &resource_id); 397 398 if (result) 399 break; 400 401 if (resource_type == PS3_SPU_RESOURCE_TYPE_EXCLUSIVE) { 402 result = fn((void*)(unsigned long)resource_id); 403 404 if (result) 405 break; 406 } 407 } 408 409 if (result) { 410 printk(KERN_WARNING "%s:%d: Error initializing spus\n", 411 __func__, __LINE__); 412 return result; 413 } 414 415 return num_resource_id; 416 } 417 418 static int ps3_init_affinity(void) 419 { 420 return 0; 421 } 422 423 /** 424 * ps3_enable_spu - Enable SPU run control. 425 * 426 * An outstanding enhancement for the PS3 would be to add a guard to check 427 * for incorrect access to the spu problem state when the spu context is 428 * disabled. This check could be implemented with a flag added to the spu 429 * context that would inhibit mapping problem state pages, and a routine 430 * to unmap spu problem state pages. 
 * When the spu is enabled with
 * ps3_enable_spu() the flag would be set allowing pages to be mapped,
 * and when the spu is disabled with ps3_disable_spu() the flag would be
 * cleared and the mapped problem state pages would be unmapped.
 */

static void ps3_enable_spu(struct spu_context *ctx)
{
	/* Nothing to do -- no run-control gating is implemented. */
}

static void ps3_disable_spu(struct spu_context *ctx)
{
	ctx->ops->runcntl_stop(ctx);
}

const struct spu_management_ops spu_management_ps3_ops = {
	.enumerate_spus = ps3_enumerate_spus,
	.create_spu = ps3_create_spu,
	.destroy_spu = ps3_destroy_spu,
	.enable_spu = ps3_enable_spu,
	.disable_spu = ps3_disable_spu,
	.init_affinity = ps3_init_affinity,
};

/* spu_priv1_ops */

/* Clear mask bits: read-modify-write via the cached mask. */
static void int_mask_and(struct spu *spu, int class, u64 mask)
{
	u64 old_mask;

	/* are these serialized by caller??? */
	old_mask = spu_int_mask_get(spu, class);
	spu_int_mask_set(spu, class, old_mask & mask);
}

/* Set mask bits: read-modify-write via the cached mask. */
static void int_mask_or(struct spu *spu, int class, u64 mask)
{
	u64 old_mask;

	old_mask = spu_int_mask_get(spu, class);
	spu_int_mask_set(spu, class, old_mask | mask);
}

/* Set the interrupt mask: update the local cache, then tell the HV. */
static void int_mask_set(struct spu *spu, int class, u64 mask)
{
	spu_pdata(spu)->cache.masks[class] = mask;
	lv1_set_spe_interrupt_mask(spu_pdata(spu)->spe_id, class,
		spu_pdata(spu)->cache.masks[class]);
}

/* Get the interrupt mask from the local cache (no HV call). */
static u64 int_mask_get(struct spu *spu, int class)
{
	return spu_pdata(spu)->cache.masks[class];
}

static void int_stat_clear(struct spu *spu, int class, u64 stat)
{
	/* Note that MFC_DSISR will be cleared when class1[MF] is set.
	 */

	lv1_clear_spe_interrupt_status(spu_pdata(spu)->spe_id, class,
		stat, 0);
}

/* Read the interrupt status for @class from the HV. */
static u64 int_stat_get(struct spu *spu, int class)
{
	u64 stat;

	lv1_get_spe_interrupt_status(spu_pdata(spu)->spe_id, class, &stat);
	return stat;
}

static void cpu_affinity_set(struct spu *spu, int cpu)
{
	/* No support. */
}

/* Read mfc_dar from the read-only shadow register area. */
static u64 mfc_dar_get(struct spu *spu)
{
	return in_be64(&spu_pdata(spu)->shadow->mfc_dar_RW);
}

static void mfc_dsisr_set(struct spu *spu, u64 dsisr)
{
	/* Nothing to do, cleared in int_stat_clear(). */
}

/* Read mfc_dsisr from the read-only shadow register area. */
static u64 mfc_dsisr_get(struct spu *spu)
{
	return in_be64(&spu_pdata(spu)->shadow->mfc_dsisr_RW);
}

static void mfc_sdr_setup(struct spu *spu)
{
	/* Nothing to do. */
}

/*
 * Set mfc_sr1 through the HV, caching the value locally.
 *
 * Only the LOCAL_STORAGE_DECODE and PROBLEM_STATE bits may change; any
 * attempt to alter other bits relative to the cached value is a BUG.
 */
static void mfc_sr1_set(struct spu *spu, u64 sr1)
{
	/* Check bits allowed by HV. */

	static const u64 allowed = ~(MFC_STATE1_LOCAL_STORAGE_DECODE_MASK
		| MFC_STATE1_PROBLEM_STATE_MASK);

	BUG_ON((sr1 & allowed) != (spu_pdata(spu)->cache.sr1 & allowed));

	spu_pdata(spu)->cache.sr1 = sr1;
	lv1_set_spe_privilege_state_area_1_register(
		spu_pdata(spu)->spe_id,
		offsetof(struct spu_priv1, mfc_sr1_RW),
		spu_pdata(spu)->cache.sr1);
}

/* Return the cached mfc_sr1 (the HV gives no direct read access). */
static u64 mfc_sr1_get(struct spu *spu)
{
	return spu_pdata(spu)->cache.sr1;
}

/* Set mfc_tclass_id through the HV, caching the value locally. */
static void mfc_tclass_id_set(struct spu *spu, u64 tclass_id)
{
	spu_pdata(spu)->cache.tclass_id = tclass_id;
	lv1_set_spe_privilege_state_area_1_register(
		spu_pdata(spu)->spe_id,
		offsetof(struct spu_priv1, mfc_tclass_id_RW),
		spu_pdata(spu)->cache.tclass_id);
}

/* Return the cached mfc_tclass_id. */
static u64 mfc_tclass_id_get(struct spu *spu)
{
	return spu_pdata(spu)->cache.tclass_id;
}

static void tlb_invalidate(struct spu *spu)
{
	/* Nothing to do.
*/ 564 } 565 566 static void resource_allocation_groupID_set(struct spu *spu, u64 id) 567 { 568 /* No support. */ 569 } 570 571 static u64 resource_allocation_groupID_get(struct spu *spu) 572 { 573 return 0; /* No support. */ 574 } 575 576 static void resource_allocation_enable_set(struct spu *spu, u64 enable) 577 { 578 /* No support. */ 579 } 580 581 static u64 resource_allocation_enable_get(struct spu *spu) 582 { 583 return 0; /* No support. */ 584 } 585 586 const struct spu_priv1_ops spu_priv1_ps3_ops = { 587 .int_mask_and = int_mask_and, 588 .int_mask_or = int_mask_or, 589 .int_mask_set = int_mask_set, 590 .int_mask_get = int_mask_get, 591 .int_stat_clear = int_stat_clear, 592 .int_stat_get = int_stat_get, 593 .cpu_affinity_set = cpu_affinity_set, 594 .mfc_dar_get = mfc_dar_get, 595 .mfc_dsisr_set = mfc_dsisr_set, 596 .mfc_dsisr_get = mfc_dsisr_get, 597 .mfc_sdr_setup = mfc_sdr_setup, 598 .mfc_sr1_set = mfc_sr1_set, 599 .mfc_sr1_get = mfc_sr1_get, 600 .mfc_tclass_id_set = mfc_tclass_id_set, 601 .mfc_tclass_id_get = mfc_tclass_id_get, 602 .tlb_invalidate = tlb_invalidate, 603 .resource_allocation_groupID_set = resource_allocation_groupID_set, 604 .resource_allocation_groupID_get = resource_allocation_groupID_get, 605 .resource_allocation_enable_set = resource_allocation_enable_set, 606 .resource_allocation_enable_get = resource_allocation_enable_get, 607 }; 608 609 void ps3_spu_set_platform(void) 610 { 611 spu_priv1_ops = &spu_priv1_ps3_ops; 612 spu_management_ops = &spu_management_ps3_ops; 613 } 614