/*
 * PS3 Platform spu routines.
 *
 * Copyright (C) 2006 Sony Computer Entertainment Inc.
 * Copyright 2006 Sony Corp.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/mmzone.h>
#include <linux/io.h>
#include <linux/mm.h>

#include <asm/spu.h>
#include <asm/spu_priv1.h>
#include <asm/lv1call.h>

#include "platform.h"

/* spu_management_ops */

/**
 * enum spe_type - Type of spe to create.
 * @spe_type_logical: Standard logical spe.
 *
 * For use with lv1_construct_logical_spe().  The current HV does not support
 * any types other than those listed.
 */

enum spe_type {
	SPE_TYPE_LOGICAL = 0,
};

/**
 * struct spe_shadow - logical spe shadow register area.
 *
 * Read-only shadow of spe registers.
 */

struct spe_shadow {
	u8 padding_0140[0x0140];
	u64 int_status_class0_RW;	/* 0x0140 */
	u64 int_status_class1_RW;	/* 0x0148 */
	u64 int_status_class2_RW;	/* 0x0150 */
	u8 padding_0158[0x0610-0x0158];
	u64 mfc_dsisr_RW;		/* 0x0610 */
	u8 padding_0618[0x0620-0x0618];
	u64 mfc_dar_RW;			/* 0x0620 */
	u8 padding_0628[0x0800-0x0628];
	u64 mfc_dsipr_R;		/* 0x0800 */
	u8 padding_0808[0x0810-0x0808];
	u64 mfc_lscrr_R;		/* 0x0810 */
	u8 padding_0818[0x0c00-0x0818];
	u64 mfc_cer_R;			/* 0x0c00 */
	u8 padding_0c08[0x0f00-0x0c08];
	u64 spe_execution_status;	/* 0x0f00 */
	u8 padding_0f08[0x1000-0x0f08];
};

/**
 * enum spe_ex_state - Logical spe execution state.
 * @spe_ex_state_unexecutable: Uninitialized.
 * @spe_ex_state_executable: Enabled, not ready.
 * @spe_ex_state_executed: Ready for use.
 *
 * The execution state (status) of the logical spe as reported in
 * struct spe_shadow:spe_execution_status.
 */

enum spe_ex_state {
	SPE_EX_STATE_UNEXECUTABLE = 0,
	SPE_EX_STATE_EXECUTABLE = 2,
	SPE_EX_STATE_EXECUTED = 3,
};

/**
 * struct priv1_cache - Cached values of priv1 registers.
 * @masks[]: Array of cached spe interrupt masks, indexed by class.
 * @sr1: Cached mfc_sr1 register.
 * @tclass_id: Cached mfc_tclass_id register.
 */

struct priv1_cache {
	u64 masks[3];
	u64 sr1;
	u64 tclass_id;
};
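
/*
 * The logical spe's priv1 area is owned by the hypervisor and is never
 * mapped by this file (only the priv2, problem, local store and read-only
 * shadow areas are ioremapped in setup_areas()).  The priv1 "set" accessors
 * below therefore write registers through lv1 calls and record the value in
 * this cache, and the matching "get" accessors return the cached copy rather
 * than reading the register back.
 */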

/**
 * struct spu_pdata - Platform state variables.
 * @spe_id: HV spe id returned by lv1_construct_logical_spe().
 * @resource_id: HV spe resource id returned by
 *	ps3_repository_read_spe_resource_id().
 * @priv2_addr: lpar address of spe priv2 area returned by
 *	lv1_construct_logical_spe().
 * @shadow_addr: lpar address of spe register shadow area returned by
 *	lv1_construct_logical_spe().
 * @shadow: Virtual (ioremap) address of spe register shadow area.
 * @cache: Cached values of priv1 registers.
 */

struct spu_pdata {
	u64 spe_id;
	u64 resource_id;
	u64 priv2_addr;
	u64 shadow_addr;
	struct spe_shadow __iomem *shadow;
	struct priv1_cache cache;
};

static struct spu_pdata *spu_pdata(struct spu *spu)
{
	return spu->pdata;
}

#define dump_areas(_a, _b, _c, _d, _e) \
	_dump_areas(_a, _b, _c, _d, _e, __func__, __LINE__)
static void _dump_areas(unsigned int spe_id, unsigned long priv2,
	unsigned long problem, unsigned long ls, unsigned long shadow,
	const char* func, int line)
{
	pr_debug("%s:%d: spe_id:  %xh (%u)\n", func, line, spe_id, spe_id);
	pr_debug("%s:%d: priv2:   %lxh\n", func, line, priv2);
	pr_debug("%s:%d: problem: %lxh\n", func, line, problem);
	pr_debug("%s:%d: ls:      %lxh\n", func, line, ls);
	pr_debug("%s:%d: shadow:  %lxh\n", func, line, shadow);
}

static unsigned long get_vas_id(void)
{
	unsigned long id;

	lv1_get_logical_ppe_id(&id);
	lv1_get_virtual_address_space_id_of_ppe(id, &id);

	return id;
}

static int __init construct_spu(struct spu *spu)
{
	int result;
	unsigned long unused;

	result = lv1_construct_logical_spe(PAGE_SHIFT, PAGE_SHIFT, PAGE_SHIFT,
		PAGE_SHIFT, PAGE_SHIFT, get_vas_id(), SPE_TYPE_LOGICAL,
		&spu_pdata(spu)->priv2_addr, &spu->problem_phys,
		&spu->local_store_phys, &unused,
		&spu_pdata(spu)->shadow_addr,
		&spu_pdata(spu)->spe_id);

	if (result) {
		pr_debug("%s:%d: lv1_construct_logical_spe failed: %s\n",
			__func__, __LINE__, ps3_result(result));
		return result;
	}

	return result;
}

static void spu_unmap(struct spu *spu)
{
	iounmap(spu->priv2);
	iounmap(spu->problem);
	iounmap((__force u8 __iomem *)spu->local_store);
	iounmap(spu_pdata(spu)->shadow);
}
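
/*
 * The HV exposes some spe registers only through the read-only shadow area
 * described by struct spe_shadow, so setup_areas() maps that area read-only,
 * uncached and guarded, alongside the local store, problem and priv2 areas.
 * The shadow is what mfc_dar_get(), mfc_dsisr_get() and the execution-status
 * poll in ps3_create_spu() read from.
 */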

static int __init setup_areas(struct spu *spu)
{
	struct table {char* name; unsigned long addr; unsigned long size;};

	spu_pdata(spu)->shadow = __ioremap(
		spu_pdata(spu)->shadow_addr, sizeof(struct spe_shadow),
		pgprot_val(PAGE_READONLY) | _PAGE_NO_CACHE | _PAGE_GUARDED);
	if (!spu_pdata(spu)->shadow) {
		pr_debug("%s:%d: ioremap shadow failed\n", __func__, __LINE__);
		goto fail_ioremap;
	}

	spu->local_store = ioremap(spu->local_store_phys, LS_SIZE);
	if (!spu->local_store) {
		pr_debug("%s:%d: ioremap local_store failed\n",
			__func__, __LINE__);
		goto fail_ioremap;
	}

	spu->problem = ioremap(spu->problem_phys,
		sizeof(struct spu_problem));
	if (!spu->problem) {
		pr_debug("%s:%d: ioremap problem failed\n", __func__, __LINE__);
		goto fail_ioremap;
	}

	spu->priv2 = ioremap(spu_pdata(spu)->priv2_addr,
		sizeof(struct spu_priv2));
	if (!spu->priv2) {
		pr_debug("%s:%d: ioremap priv2 failed\n", __func__, __LINE__);
		goto fail_ioremap;
	}

	dump_areas(spu_pdata(spu)->spe_id, spu_pdata(spu)->priv2_addr,
		spu->problem_phys, spu->local_store_phys,
		spu_pdata(spu)->shadow_addr);
	dump_areas(spu_pdata(spu)->spe_id, (unsigned long)spu->priv2,
		(unsigned long)spu->problem, (unsigned long)spu->local_store,
		(unsigned long)spu_pdata(spu)->shadow);

	return 0;

fail_ioremap:
	spu_unmap(spu);

	return -ENOMEM;
}

static int __init setup_interrupts(struct spu *spu)
{
	int result;

	result = ps3_spe_irq_setup(PS3_BINDING_CPU_ANY, spu_pdata(spu)->spe_id,
		0, &spu->irqs[0]);

	if (result)
		goto fail_alloc_0;

	result = ps3_spe_irq_setup(PS3_BINDING_CPU_ANY, spu_pdata(spu)->spe_id,
		1, &spu->irqs[1]);

	if (result)
		goto fail_alloc_1;

	result = ps3_spe_irq_setup(PS3_BINDING_CPU_ANY, spu_pdata(spu)->spe_id,
		2, &spu->irqs[2]);

	if (result)
		goto fail_alloc_2;

	return result;

fail_alloc_2:
	ps3_spe_irq_destroy(spu->irqs[1]);
fail_alloc_1:
	ps3_spe_irq_destroy(spu->irqs[0]);
fail_alloc_0:
	spu->irqs[0] = spu->irqs[1] = spu->irqs[2] = NO_IRQ;
	return result;
}

static int __init enable_spu(struct spu *spu)
{
	int result;

	result = lv1_enable_logical_spe(spu_pdata(spu)->spe_id,
		spu_pdata(spu)->resource_id);

	if (result) {
		pr_debug("%s:%d: lv1_enable_logical_spe failed: %s\n",
			__func__, __LINE__, ps3_result(result));
		goto fail_enable;
	}

	result = setup_areas(spu);

	if (result)
		goto fail_areas;

	result = setup_interrupts(spu);

	if (result)
		goto fail_interrupts;

	return 0;

fail_interrupts:
	spu_unmap(spu);
fail_areas:
	lv1_disable_logical_spe(spu_pdata(spu)->spe_id, 0);
fail_enable:
	return result;
}

static int ps3_destroy_spu(struct spu *spu)
{
	int result;

	pr_debug("%s:%d spu_%d\n", __func__, __LINE__, spu->number);

	result = lv1_disable_logical_spe(spu_pdata(spu)->spe_id, 0);
	BUG_ON(result);

	ps3_spe_irq_destroy(spu->irqs[2]);
	ps3_spe_irq_destroy(spu->irqs[1]);
	ps3_spe_irq_destroy(spu->irqs[0]);

	spu->irqs[0] = spu->irqs[1] = spu->irqs[2] = NO_IRQ;

	spu_unmap(spu);

	result = lv1_destruct_logical_spe(spu_pdata(spu)->spe_id);
	BUG_ON(result);

	kfree(spu->pdata);
	spu->pdata = NULL;

	return 0;
}

static int __init ps3_create_spu(struct spu *spu, void *data)
{
	int result;

	pr_debug("%s:%d spu_%d\n", __func__, __LINE__, spu->number);

	spu->pdata = kzalloc(sizeof(struct spu_pdata),
		GFP_KERNEL);

	if (!spu->pdata) {
		result = -ENOMEM;
		goto fail_malloc;
	}

	spu_pdata(spu)->resource_id = (unsigned long)data;

	/* Init cached reg values to HV defaults. */

	spu_pdata(spu)->cache.sr1 = 0x33;

	result = construct_spu(spu);

	if (result)
		goto fail_construct;

	/* For now, just go ahead and enable it. */

	result = enable_spu(spu);

	if (result)
		goto fail_enable;

	/* Make sure the spu is in SPE_EX_STATE_EXECUTED. */

	/* need something better here!!! */
	while (in_be64(&spu_pdata(spu)->shadow->spe_execution_status)
		!= SPE_EX_STATE_EXECUTED)
		(void)0;

	return result;

fail_enable:
fail_construct:
	ps3_destroy_spu(spu);
fail_malloc:
	return result;
}

static int __init ps3_enumerate_spus(int (*fn)(void *data))
{
	int result;
	unsigned int num_resource_id;
	unsigned int i;

	result = ps3_repository_read_num_spu_resource_id(&num_resource_id);

	pr_debug("%s:%d: num_resource_id %u\n", __func__, __LINE__,
		num_resource_id);

	/*
	 * For now, just create logical spus equal to the number
	 * of physical spus reserved for the partition.
	 */

	for (i = 0; i < num_resource_id; i++) {
		enum ps3_spu_resource_type resource_type;
		unsigned int resource_id;

		result = ps3_repository_read_spu_resource_id(i,
			&resource_type, &resource_id);

		if (result)
			break;

		if (resource_type == PS3_SPU_RESOURCE_TYPE_EXCLUSIVE) {
			result = fn((void*)(unsigned long)resource_id);

			if (result)
				break;
		}
	}

	if (result)
		printk(KERN_WARNING "%s:%d: Error initializing spus\n",
			__func__, __LINE__);

	return result;
}

const struct spu_management_ops spu_management_ps3_ops = {
	.enumerate_spus = ps3_enumerate_spus,
	.create_spu = ps3_create_spu,
	.destroy_spu = ps3_destroy_spu,
};

/* spu_priv1_ops */

static void int_mask_and(struct spu *spu, int class, u64 mask)
{
	u64 old_mask;

	/* are these serialized by caller??? */
	old_mask = spu_int_mask_get(spu, class);
	spu_int_mask_set(spu, class, old_mask & mask);
}

static void int_mask_or(struct spu *spu, int class, u64 mask)
{
	u64 old_mask;

	old_mask = spu_int_mask_get(spu, class);
	spu_int_mask_set(spu, class, old_mask | mask);
}

static void int_mask_set(struct spu *spu, int class, u64 mask)
{
	spu_pdata(spu)->cache.masks[class] = mask;
	lv1_set_spe_interrupt_mask(spu_pdata(spu)->spe_id, class,
		spu_pdata(spu)->cache.masks[class]);
}

static u64 int_mask_get(struct spu *spu, int class)
{
	return spu_pdata(spu)->cache.masks[class];
}

static void int_stat_clear(struct spu *spu, int class, u64 stat)
{
	/* Note that MFC_DSISR will be cleared when class1[MF] is set. */

	lv1_clear_spe_interrupt_status(spu_pdata(spu)->spe_id, class,
		stat, 0);
}

static u64 int_stat_get(struct spu *spu, int class)
{
	u64 stat;

	lv1_get_spe_interrupt_status(spu_pdata(spu)->spe_id, class, &stat);
	return stat;
}

static void cpu_affinity_set(struct spu *spu, int cpu)
{
	/* No support. */
}

static u64 mfc_dar_get(struct spu *spu)
{
	return in_be64(&spu_pdata(spu)->shadow->mfc_dar_RW);
}

static void mfc_dsisr_set(struct spu *spu, u64 dsisr)
{
	/* Nothing to do, cleared in int_stat_clear(). */
}

static u64 mfc_dsisr_get(struct spu *spu)
{
	return in_be64(&spu_pdata(spu)->shadow->mfc_dsisr_RW);
}

static void mfc_sdr_setup(struct spu *spu)
{
	/* Nothing to do. */
}

static void mfc_sr1_set(struct spu *spu, u64 sr1)
{
	/* Check bits allowed by HV. */

	static const u64 allowed = ~(MFC_STATE1_LOCAL_STORAGE_DECODE_MASK
		| MFC_STATE1_PROBLEM_STATE_MASK);

	BUG_ON((sr1 & allowed) != (spu_pdata(spu)->cache.sr1 & allowed));

	spu_pdata(spu)->cache.sr1 = sr1;
	lv1_set_spe_privilege_state_area_1_register(
		spu_pdata(spu)->spe_id,
		offsetof(struct spu_priv1, mfc_sr1_RW),
		spu_pdata(spu)->cache.sr1);
}

static u64 mfc_sr1_get(struct spu *spu)
{
	return spu_pdata(spu)->cache.sr1;
}

static void mfc_tclass_id_set(struct spu *spu, u64 tclass_id)
{
	spu_pdata(spu)->cache.tclass_id = tclass_id;
	lv1_set_spe_privilege_state_area_1_register(
		spu_pdata(spu)->spe_id,
		offsetof(struct spu_priv1, mfc_tclass_id_RW),
		spu_pdata(spu)->cache.tclass_id);
}

static u64 mfc_tclass_id_get(struct spu *spu)
{
	return spu_pdata(spu)->cache.tclass_id;
}

static void tlb_invalidate(struct spu *spu)
{
	/* Nothing to do. */
}

static void resource_allocation_groupID_set(struct spu *spu, u64 id)
{
	/* No support. */
}

static u64 resource_allocation_groupID_get(struct spu *spu)
{
	return 0; /* No support. */
}

static void resource_allocation_enable_set(struct spu *spu, u64 enable)
{
	/* No support. */
}

static u64 resource_allocation_enable_get(struct spu *spu)
{
	return 0; /* No support. */
}

const struct spu_priv1_ops spu_priv1_ps3_ops = {
	.int_mask_and = int_mask_and,
	.int_mask_or = int_mask_or,
	.int_mask_set = int_mask_set,
	.int_mask_get = int_mask_get,
	.int_stat_clear = int_stat_clear,
	.int_stat_get = int_stat_get,
	.cpu_affinity_set = cpu_affinity_set,
	.mfc_dar_get = mfc_dar_get,
	.mfc_dsisr_set = mfc_dsisr_set,
	.mfc_dsisr_get = mfc_dsisr_get,
	.mfc_sdr_setup = mfc_sdr_setup,
	.mfc_sr1_set = mfc_sr1_set,
	.mfc_sr1_get = mfc_sr1_get,
	.mfc_tclass_id_set = mfc_tclass_id_set,
	.mfc_tclass_id_get = mfc_tclass_id_get,
	.tlb_invalidate = tlb_invalidate,
	.resource_allocation_groupID_set = resource_allocation_groupID_set,
	.resource_allocation_groupID_get = resource_allocation_groupID_get,
	.resource_allocation_enable_set = resource_allocation_enable_set,
	.resource_allocation_enable_get = resource_allocation_enable_get,
};

void ps3_spu_set_platform(void)
{
	spu_priv1_ops = &spu_priv1_ps3_ops;
	spu_management_ops = &spu_management_ps3_ops;
}
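
/*
 * ps3_spu_set_platform() only installs the ops tables above.  It is expected
 * to be called from the PS3 platform setup code before the generic Cell SPU
 * base code initializes, so that all spu_priv1_* and spu_management_*
 * operations dispatch to the lv1-backed implementations in this file.
 */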