/*
 * SN Platform GRU Driver
 *
 * GRU HANDLE DEFINITION
 *
 * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#ifndef __GRUHANDLES_H__
#define __GRUHANDLES_H__
#include "gru_instructions.h"

/*
 * Manifest constants for GRU Memory Map
 *
 * Each GRU chiplet occupies a 128MB window (GRU_SIZE).  User GSEGs start
 * at offset 0; the memory-mapped control space (MCS), which holds the
 * kernel-only handles, starts 64MB into the window.
 */
#define GRU_GSEG0_BASE		0
#define GRU_MCS_BASE		(64 * 1024 * 1024)
#define GRU_SIZE		(128UL * 1024 * 1024)

/* Handle & resource counts (per chiplet) */
#define GRU_NUM_CB		128
#define GRU_NUM_DSR_BYTES	(32 * 1024)
#define GRU_NUM_TFM		16
#define GRU_NUM_TGH		24
#define GRU_NUM_CBE		128
#define GRU_NUM_TFH		128
#define GRU_NUM_CCH		16
#define GRU_NUM_GSH		1

/* Maximum resource counts that can be reserved by user programs */
#define GRU_NUM_USER_CBR	GRU_NUM_CBE
#define GRU_NUM_USER_DSR_BYTES	GRU_NUM_DSR_BYTES

/* Bytes per handle & handle stride. Code assumes all cb, tfh, cbe handles
 * are the same */
#define GRU_HANDLE_BYTES	64
#define GRU_HANDLE_STRIDE	256

/* Base addresses of handles within the MCS window */
#define GRU_TFM_BASE		(GRU_MCS_BASE + 0x00000)
#define GRU_TGH_BASE		(GRU_MCS_BASE + 0x08000)
#define GRU_CBE_BASE		(GRU_MCS_BASE + 0x10000)
#define GRU_TFH_BASE		(GRU_MCS_BASE + 0x18000)
#define GRU_CCH_BASE		(GRU_MCS_BASE + 0x20000)
#define GRU_GSH_BASE		(GRU_MCS_BASE + 0x30000)

/* User gseg constants.  GRU_GSEG_PAGESIZE comes from gru_instructions.h. */
#define GRU_GSEG_STRIDE		(4 * 1024 * 1024)
#define GSEG_BASE(a)		((a) & ~(GRU_GSEG_PAGESIZE - 1))

/* Data segment constants (CL = cache lines, AU = allocation units) */
#define GRU_DSR_AU_BYTES	1024
#define GRU_DSR_CL		(GRU_NUM_DSR_BYTES / GRU_CACHE_LINE_BYTES)
#define GRU_DSR_AU_CL		(GRU_DSR_AU_BYTES / GRU_CACHE_LINE_BYTES)
#define GRU_DSR_AU		(GRU_NUM_DSR_BYTES / GRU_DSR_AU_BYTES)

/* Control block constants: CBRs are allocated in pairs */
#define GRU_CBR_AU_SIZE		2
#define GRU_CBR_AU		(GRU_NUM_CBE / GRU_CBR_AU_SIZE)

/* Convert resource counts to the number of AU */
#define GRU_DS_BYTES_TO_AU(n)	DIV_ROUND_UP(n, GRU_DSR_AU_BYTES)
#define GRU_CB_COUNT_TO_AU(n)	DIV_ROUND_UP(n, GRU_CBR_AU_SIZE)

/* UV limits */
#define GRU_CHIPLETS_PER_HUB	2
#define GRU_HUBS_PER_BLADE	1
#define GRU_CHIPLETS_PER_BLADE	(GRU_HUBS_PER_BLADE * GRU_CHIPLETS_PER_HUB)

/* User GRU Gseg offsets: CBs at 0, data segment at 128K within a gseg */
#define GRU_CB_BASE		0
#define GRU_CB_LIMIT		(GRU_CB_BASE + GRU_HANDLE_STRIDE * GRU_NUM_CBE)
#define GRU_DS_BASE		0x20000
#define GRU_DS_LIMIT		(GRU_DS_BASE + GRU_NUM_DSR_BYTES)

/* Convert a GRU physical address to the chiplet offset */
#define GSEGPOFF(h)		((h) & (GRU_SIZE - 1))

/* Convert an arbitrary handle address to the beginning of the GRU segment */
#ifndef __PLUGIN__
#define GRUBASE(h)		((void *)((unsigned long)(h) & ~(GRU_SIZE - 1)))
#else
extern void *gmu_grubase(void *h);
#define GRUBASE(h)		gmu_grubase(h)
#endif

/* General addressing macros.
*/ 102 static inline void *get_gseg_base_address(void *base, int ctxnum) 103 { 104 return (void *)(base + GRU_GSEG0_BASE + GRU_GSEG_STRIDE * ctxnum); 105 } 106 107 static inline void *get_gseg_base_address_cb(void *base, int ctxnum, int line) 108 { 109 return (void *)(get_gseg_base_address(base, ctxnum) + 110 GRU_CB_BASE + GRU_HANDLE_STRIDE * line); 111 } 112 113 static inline void *get_gseg_base_address_ds(void *base, int ctxnum, int line) 114 { 115 return (void *)(get_gseg_base_address(base, ctxnum) + GRU_DS_BASE + 116 GRU_CACHE_LINE_BYTES * line); 117 } 118 119 static inline struct gru_tlb_fault_map *get_tfm(void *base, int ctxnum) 120 { 121 return (struct gru_tlb_fault_map *)(base + GRU_TFM_BASE + 122 ctxnum * GRU_HANDLE_STRIDE); 123 } 124 125 static inline struct gru_tlb_global_handle *get_tgh(void *base, int ctxnum) 126 { 127 return (struct gru_tlb_global_handle *)(base + GRU_TGH_BASE + 128 ctxnum * GRU_HANDLE_STRIDE); 129 } 130 131 static inline struct gru_control_block_extended *get_cbe(void *base, int ctxnum) 132 { 133 return (struct gru_control_block_extended *)(base + GRU_CBE_BASE + 134 ctxnum * GRU_HANDLE_STRIDE); 135 } 136 137 static inline struct gru_tlb_fault_handle *get_tfh(void *base, int ctxnum) 138 { 139 return (struct gru_tlb_fault_handle *)(base + GRU_TFH_BASE + 140 ctxnum * GRU_HANDLE_STRIDE); 141 } 142 143 static inline struct gru_context_configuration_handle *get_cch(void *base, 144 int ctxnum) 145 { 146 return (struct gru_context_configuration_handle *)(base + 147 GRU_CCH_BASE + ctxnum * GRU_HANDLE_STRIDE); 148 } 149 150 static inline unsigned long get_cb_number(void *cb) 151 { 152 return (((unsigned long)cb - GRU_CB_BASE) % GRU_GSEG_PAGESIZE) / 153 GRU_HANDLE_STRIDE; 154 } 155 156 /* byte offset to a specific GRU chiplet. 
(p = pnode, c = chiplet (0 or 1)) */
static inline unsigned long gru_chiplet_paddr(unsigned long paddr, int pnode,
						int chiplet)
{
	/* two chiplets per pnode, GRU_SIZE bytes apart */
	return paddr + GRU_SIZE * (2 * pnode + chiplet);
}

static inline void *gru_chiplet_vaddr(void *vaddr, int pnode, int chiplet)
{
	return vaddr + GRU_SIZE * (2 * pnode + chiplet);
}



/*
 * Global TLB Fault Map
 * Bitmap of outstanding TLB misses needing interrupt/polling service.
 * One bit per CB in fault_bits/done_bits; fill words pad the layout.
 *
 */
struct gru_tlb_fault_map {
	unsigned long fault_bits[BITS_TO_LONGS(GRU_NUM_CBE)];
	unsigned long fill0[2];
	unsigned long done_bits[BITS_TO_LONGS(GRU_NUM_CBE)];
	unsigned long fill1[2];
};

/*
 * TGH - TLB Global Handle
 * Used for TLB flushing.  Field writes are issued by tgh_invalidate();
 * bitfield positions match the hardware layout (fillN fields are padding).
 *
 */
struct gru_tlb_global_handle {
	unsigned int cmd:1;		/* DW 0 - set by start_instruction() */
	unsigned int delresp:1;
	unsigned int opc:1;		/* TGHOP_TLBNOP / TGHOP_TLBINV */
	unsigned int fill1:5;

	unsigned int fill2:8;

	unsigned int status:2;		/* bits 17:16, see GET_MSEG_HANDLE_STATUS */
	unsigned long fill3:2;
	unsigned int state:3;
	unsigned long fill4:1;

	unsigned int cause:3;
	unsigned long fill5:37;

	unsigned long vaddr:64;		/* DW 1 - start vaddr to invalidate */

	unsigned int asid:24;		/* DW 2 */
	unsigned int fill6:8;

	unsigned int pagesize:5;	/* GRU-encoded pagesize (GRU_PAGESIZE) */
	unsigned int fill7:11;

	unsigned int global:1;
	unsigned int fill8:15;

	unsigned long vaddrmask:39;	/* DW 3 - mask applied to vaddr match */
	unsigned int fill9:9;
	unsigned int n:10;		/* number of pages to invalidate */
	unsigned int fill10:6;

	unsigned int ctxbitmap:16;	/* DW4 - contexts whose TLBs to purge */
	unsigned long fill11[3];
};

enum gru_tgh_cmd {
	TGHCMD_START
};

enum gru_tgh_opc {
	TGHOP_TLBNOP,
	TGHOP_TLBINV
};

enum gru_tgh_status {
	TGHSTATUS_IDLE,
	TGHSTATUS_EXCEPTION,
	TGHSTATUS_ACTIVE
};

enum gru_tgh_state {
	TGHSTATE_IDLE,
	TGHSTATE_PE_INVAL,
	TGHSTATE_INTERRUPT_INVAL,
	TGHSTATE_WAITDONE,
	TGHSTATE_RESTART_CTX,
};

/*
 * TFH - TLB Fault Handle
 * Used for TLB dropins into
 the GRU TLB.
 * Written by the tfh_* helpers below; fillN fields are hardware padding.
 *
 */
struct gru_tlb_fault_handle {
	unsigned int cmd:1;		/* DW 0 - low 32 - set by start_instruction() */
	unsigned int delresp:1;
	unsigned int fill0:2;
	unsigned int opc:3;		/* enum gru_tfh_opc */
	unsigned int fill1:9;

	unsigned int status:2;		/* bits 17:16, see GET_MSEG_HANDLE_STATUS */
	unsigned int fill2:1;
	unsigned int color:1;
	unsigned int state:3;		/* enum tfh_state */
	unsigned int fill3:1;

	unsigned int cause:7;		/* DW 0 - high 32 - enum tfh_cause */
	unsigned int fill4:1;

	unsigned int indexway:12;
	unsigned int fill5:4;

	unsigned int ctxnum:4;
	unsigned int fill6:12;

	unsigned long missvaddr:64;	/* DW 1 - vaddr that faulted */

	unsigned int missasid:24;	/* DW 2 */
	unsigned int fill7:8;
	unsigned int fillasid:24;	/* asid for the dropin entry */
	unsigned int dirty:1;
	unsigned int gaa:2;		/* GAA_* value */
	unsigned long fill8:5;

	unsigned long pfn:41;		/* DW 3 - paddr >> GRU_PADDR_SHIFT */
	unsigned int fill9:7;
	unsigned int pagesize:5;	/* GRU-encoded pagesize */
	unsigned int fill10:11;

	unsigned long fillvaddr:64;	/* DW 4 - vaddr for the dropin entry */

	unsigned long fill11[3];
};

enum gru_tfh_opc {
	TFHOP_NOOP,
	TFHOP_RESTART,
	TFHOP_WRITE_ONLY,
	TFHOP_WRITE_RESTART,
	TFHOP_EXCEPTION,
	TFHOP_USER_POLLING_MODE = 7,
};

enum tfh_status {
	TFHSTATUS_IDLE,
	TFHSTATUS_EXCEPTION,
	TFHSTATUS_ACTIVE,
};

enum tfh_state {
	TFHSTATE_INACTIVE,
	TFHSTATE_IDLE,
	TFHSTATE_MISS_UPM,
	TFHSTATE_MISS_FMM,
	TFHSTATE_HW_ERR,
	TFHSTATE_WRITE_TLB,
	TFHSTATE_RESTART_CBR,
};

/* TFH cause bits */
enum tfh_cause {
	TFHCAUSE_NONE,
	TFHCAUSE_TLB_MISS,
	TFHCAUSE_TLB_MOD,
	TFHCAUSE_HW_ERROR_RR,
	TFHCAUSE_HW_ERROR_MAIN_ARRAY,
	TFHCAUSE_HW_ERROR_VALID,
	TFHCAUSE_HW_ERROR_PAGESIZE,
	TFHCAUSE_INSTRUCTION_EXCEPTION,
	TFHCAUSE_UNCORRECTIBLE_ERROR,
};

/* GAA values - memory space targeted by a TLB dropin */
#define GAA_RAM				0x0
#define GAA_NCRAM			0x2
#define GAA_MMIO			0x1
#define GAA_REGISTER			0x3

/* GRU paddr shift for pfn.
 (NOTE: shift is NOT by actual pagesize) */
#define GRU_PADDR_SHIFT			12

/*
 * Context Configuration handle
 * Used to allocate resources to a GSEG context.
 * Packed hardware layout - do not reorder fields.
 *
 */
struct gru_context_configuration_handle {
	unsigned int cmd:1;			/* DW0 - set by start_instruction() */
	unsigned int delresp:1;
	unsigned int opc:3;			/* enum gru_cch_opc */
	unsigned int unmap_enable:1;
	unsigned int req_slice_set_enable:1;
	unsigned int req_slice:2;
	unsigned int cb_int_enable:1;
	unsigned int tlb_int_enable:1;
	unsigned int tfm_fault_bit_enable:1;
	unsigned int tlb_int_select:4;

	unsigned int status:2;			/* bits 17:16, see GET_MSEG_HANDLE_STATUS */
	unsigned int state:2;			/* enum gru_cch_state */
	unsigned int reserved2:4;

	unsigned int cause:4;			/* enum gru_cch_cause */
	unsigned int tfm_done_bit_enable:1;
	unsigned int unused:3;

	unsigned int dsr_allocation_map;	/* DSR AUs owned by this context */

	unsigned long cbr_allocation_map;	/* DW1 - CBR AUs owned by this context */

	unsigned int asid[8];			/* DW 2 - 5, one asid per region */
	unsigned short sizeavail[8];		/* DW 6 - 7, GRU_SIZEAVAIL bitmasks */
} __attribute__ ((packed));

enum gru_cch_opc {
	CCHOP_START = 1,
	CCHOP_ALLOCATE,
	CCHOP_INTERRUPT,
	CCHOP_DEALLOCATE,
	CCHOP_INTERRUPT_SYNC,
};

enum gru_cch_status {
	CCHSTATUS_IDLE,
	CCHSTATUS_EXCEPTION,
	CCHSTATUS_ACTIVE,
};

enum gru_cch_state {
	CCHSTATE_INACTIVE,
	CCHSTATE_MAPPED,
	CCHSTATE_ACTIVE,
	CCHSTATE_INTERRUPTED,
};

/* CCH Exception cause */
enum gru_cch_cause {
	CCHCAUSE_REGION_REGISTER_WRITE_ERROR = 1,
	CCHCAUSE_ILLEGAL_OPCODE = 2,
	CCHCAUSE_INVALID_START_REQUEST = 3,
	CCHCAUSE_INVALID_ALLOCATION_REQUEST = 4,
	CCHCAUSE_INVALID_DEALLOCATION_REQUEST = 5,
	CCHCAUSE_INVALID_INTERRUPT_REQUEST = 6,
	CCHCAUSE_CCH_BUSY = 7,
	CCHCAUSE_NO_CBRS_TO_ALLOCATE = 8,
	CCHCAUSE_BAD_TFM_CONFIG = 9,
	CCHCAUSE_CBR_RESOURCES_OVERSUBSCRIPED = 10,
	CCHCAUSE_DSR_RESOURCES_OVERSUBSCRIPED = 11,
	CCHCAUSE_CBR_DEALLOCATION_ERROR = 12,
};
/*
 * CBE - Control Block Extended
 * Maintains internal GRU state for active CBs.
411 * 412 */ 413 struct gru_control_block_extended { 414 unsigned int reserved0:1; /* DW 0 - low */ 415 unsigned int imacpy:3; 416 unsigned int reserved1:4; 417 unsigned int xtypecpy:3; 418 unsigned int iaa0cpy:2; 419 unsigned int iaa1cpy:2; 420 unsigned int reserved2:1; 421 unsigned int opccpy:8; 422 unsigned int exopccpy:8; 423 424 unsigned int idef2cpy:22; /* DW 0 - high */ 425 unsigned int reserved3:10; 426 427 unsigned int idef4cpy:22; /* DW 1 */ 428 unsigned int reserved4:10; 429 unsigned int idef4upd:22; 430 unsigned int reserved5:10; 431 432 unsigned long idef1upd:64; /* DW 2 */ 433 434 unsigned long idef5cpy:64; /* DW 3 */ 435 436 unsigned long idef6cpy:64; /* DW 4 */ 437 438 unsigned long idef3upd:64; /* DW 5 */ 439 440 unsigned long idef5upd:64; /* DW 6 */ 441 442 unsigned int idef2upd:22; /* DW 7 */ 443 unsigned int reserved6:10; 444 445 unsigned int ecause:20; 446 unsigned int cbrstate:4; 447 unsigned int cbrexecstatus:8; 448 }; 449 450 enum gru_cbr_state { 451 CBRSTATE_INACTIVE, 452 CBRSTATE_IDLE, 453 CBRSTATE_PE_CHECK, 454 CBRSTATE_QUEUED, 455 CBRSTATE_WAIT_RESPONSE, 456 CBRSTATE_INTERRUPTED, 457 CBRSTATE_INTERRUPTED_MISS_FMM, 458 CBRSTATE_BUSY_INTERRUPT_MISS_FMM, 459 CBRSTATE_INTERRUPTED_MISS_UPM, 460 CBRSTATE_BUSY_INTERRUPTED_MISS_UPM, 461 CBRSTATE_REQUEST_ISSUE, 462 CBRSTATE_BUSY_INTERRUPT, 463 }; 464 465 /* CBE cbrexecstatus bits */ 466 #define CBR_EXS_ABORT_OCC_BIT 0 467 #define CBR_EXS_INT_OCC_BIT 1 468 #define CBR_EXS_PENDING_BIT 2 469 #define CBR_EXS_QUEUED_BIT 3 470 #define CBR_EXS_TLBHW_BIT 4 471 #define CBR_EXS_EXCEPTION_BIT 5 472 473 #define CBR_EXS_ABORT_OCC (1 << CBR_EXS_ABORT_OCC_BIT) 474 #define CBR_EXS_INT_OCC (1 << CBR_EXS_INT_OCC_BIT) 475 #define CBR_EXS_PENDING (1 << CBR_EXS_PENDING_BIT) 476 #define CBR_EXS_QUEUED (1 << CBR_EXS_QUEUED_BIT) 477 #define CBR_EXS_TLBHW (1 << CBR_EXS_TLBHW_BIT) 478 #define CBR_EXS_EXCEPTION (1 << CBR_EXS_EXCEPTION_BIT) 479 480 /* CBE ecause bits - defined in gru_instructions.h */ 481 482 /* 483 * 
 * Convert a processor pagesize into the strange encoded pagesize used by the
 * GRU. Processor pagesize is encoded as log of bytes per page. (or PAGE_SHIFT)
 * 	pagesize	log pagesize	grupagesize
 * 	4k		12		0
 * 	16k		14		1
 * 	64k		16		2
 * 	256k		18		3
 * 	1m		20		4
 * 	2m		21		5
 * 	4m		22		6
 * 	16m		24		7
 * 	64m		26		8
 * 	...
 */
#define GRU_PAGESIZE(sh)	((((sh) > 20 ? (sh) + 2: (sh)) >> 1) - 6)
#define GRU_SIZEAVAIL(sh)	(1UL << GRU_PAGESIZE(sh))

/* minimum TLB purge count to ensure a full purge */
#define GRUMAXINVAL		1024UL


/*
 * Extract the status field from a kernel handle.  Works for TGH, TFH and
 * CCH alike: all three layouts place their 2-bit status field at bits 17:16
 * of DW0.
 */
#define GET_MSEG_HANDLE_STATUS(h)	(((*(unsigned long *)(h)) >> 16) & 3)

/*
 * Start the instruction currently programmed into handle @h by setting the
 * CMD bit (bit 0 of DW0), then flushing the handle so the GRU sees it.
 */
static inline void start_instruction(void *h)
{
	unsigned long *w0 = h;

	wmb();		/* setting CMD bit must be last */
	*w0 = *w0 | 1;
	gru_flush_cache(h);	/* push update to the GRU (gru_instructions.h) */
}

/*
 * Spin until the handle leaves the ACTIVE state; returns the final status.
 * CCHSTATUS_ACTIVE (2) equals TGHSTATUS_ACTIVE and TFHSTATUS_ACTIVE, so
 * this works for any handle type.
 */
static inline int wait_instruction_complete(void *h)
{
	int status;

	do {
		cpu_relax();
		barrier();
		status = GET_MSEG_HANDLE_STATUS(h);
	} while (status == CCHSTATUS_ACTIVE);
	return status;
}

/*
 * Fill in the per-region asid[] and sizeavail[] arrays of a CCH prior to a
 * CCHOP_ALLOCATE.  Arch-specific: IA64 uses its region registers, x86_64
 * fills all 8 slots.
 */
#if defined CONFIG_IA64
static inline void cch_allocate_set_asids(
		struct gru_context_configuration_handle *cch, int asidval)
{
	int i;

	for (i = 0; i <= RGN_HPAGE; i++) {  /*  assume HPAGE is last region */
		cch->asid[i] = (asidval++);
#if 0
		/* ZZZ hugepages not supported yet */
		if (i == RGN_HPAGE)
			cch->sizeavail[i] = GRU_SIZEAVAIL(hpage_shift);
		else
#endif
			cch->sizeavail[i] = GRU_SIZEAVAIL(PAGE_SHIFT);
	}
}
#elif defined CONFIG_X86_64
static inline void cch_allocate_set_asids(
		struct gru_context_configuration_handle *cch, int asidval)
{
	int i;

	for (i = 0; i < 8; i++) {
		cch->asid[i] = asidval++;
		/* base pages plus 2MB pages (shift 21) */
		cch->sizeavail[i] = GRU_SIZEAVAIL(PAGE_SHIFT) |
			GRU_SIZEAVAIL(21);
	}
}
#endif

/*
 * Allocate CBR/DSR resources (given as AU bitmaps) and asids to a context.
 * Synchronous: returns the final handle status.
 */
static inline int cch_allocate(struct gru_context_configuration_handle *cch,
			       int asidval, unsigned long cbrmap,
			       unsigned long dsrmap)
{
	cch_allocate_set_asids(cch, asidval);
	cch->dsr_allocation_map = dsrmap;
	cch->cbr_allocation_map = cbrmap;
	cch->opc = CCHOP_ALLOCATE;
	start_instruction(cch);
	return wait_instruction_complete(cch);
}

/* Start a context; synchronous, returns final status. */
static inline int cch_start(struct gru_context_configuration_handle *cch)
{
	cch->opc = CCHOP_START;
	start_instruction(cch);
	return wait_instruction_complete(cch);
}

/* Interrupt a running context; synchronous, returns final status. */
static inline int cch_interrupt(struct gru_context_configuration_handle *cch)
{
	cch->opc = CCHOP_INTERRUPT;
	start_instruction(cch);
	return wait_instruction_complete(cch);
}

/* Deallocate a context's resources; synchronous, returns final status. */
static inline int cch_deallocate(struct gru_context_configuration_handle *cch)
{
	cch->opc = CCHOP_DEALLOCATE;
	start_instruction(cch);
	return wait_instruction_complete(cch);
}

/* Interrupt a context and wait for quiescence; returns final status. */
static inline int cch_interrupt_sync(struct gru_context_configuration_handle
				     *cch)
{
	cch->opc = CCHOP_INTERRUPT_SYNC;
	start_instruction(cch);
	return wait_instruction_complete(cch);
}

/*
 * Purge TLB entries via a TGH.
 * @vaddr/@vaddrmask select the address range, @n the page count,
 * @ctxbitmap the contexts affected; @pagesize is GRU-encoded.
 * Synchronous: returns the final handle status.
 */
static inline int tgh_invalidate(struct gru_tlb_global_handle *tgh,
				 unsigned long vaddr, unsigned long vaddrmask,
				 int asid, int pagesize, int global, int n,
				 unsigned short ctxbitmap)
{
	tgh->vaddr = vaddr;
	tgh->asid = asid;
	tgh->pagesize = pagesize;
	tgh->n = n;
	tgh->global = global;
	tgh->vaddrmask = vaddrmask;
	tgh->ctxbitmap = ctxbitmap;
	tgh->opc = TGHOP_TLBINV;
	start_instruction(tgh);
	return wait_instruction_complete(tgh);
}

/*
 * Drop a TLB entry in without restarting the CB.  Note: takes a pre-shifted
 * @pfn, unlike tfh_write_restart() which takes a paddr.  Fire-and-forget:
 * does not wait for completion.
 */
static inline void tfh_write_only(struct gru_tlb_fault_handle *tfh,
				  unsigned long pfn, unsigned long vaddr,
				  int asid, int dirty, int pagesize)
{
	tfh->fillasid = asid;
	tfh->fillvaddr = vaddr;
	tfh->pfn = pfn;
	tfh->dirty = dirty;
	tfh->pagesize = pagesize;
	tfh->opc = TFHOP_WRITE_ONLY;
	start_instruction(tfh);
}

/*
 * Drop a TLB entry in and restart the faulting CB.  Takes a physical
 * address (@paddr) and converts it to a pfn via GRU_PADDR_SHIFT; also sets
 * the target memory space (@gaa, one of the GAA_* values).
 * Fire-and-forget: does not wait for completion.
 */
static inline void tfh_write_restart(struct gru_tlb_fault_handle *tfh,
				     unsigned long paddr, int gaa,
				     unsigned long vaddr, int asid, int dirty,
				     int pagesize)
{
	tfh->fillasid = asid;
	tfh->fillvaddr = vaddr;
	tfh->pfn = paddr >> GRU_PADDR_SHIFT;
	tfh->gaa = gaa;
	tfh->dirty = dirty;
	tfh->pagesize = pagesize;
	tfh->opc = TFHOP_WRITE_RESTART;
	start_instruction(tfh);
}

/* Restart the faulting CB without a TLB dropin.  Does not wait. */
static inline void tfh_restart(struct gru_tlb_fault_handle *tfh)
{
	tfh->opc = TFHOP_RESTART;
	start_instruction(tfh);
}

/* Switch the faulting CB to user polling mode.  Does not wait. */
static inline void tfh_user_polling_mode(struct gru_tlb_fault_handle *tfh)
{
	tfh->opc = TFHOP_USER_POLLING_MODE;
	start_instruction(tfh);
}

/* Raise an exception on the faulting CB.  Does not wait. */
static inline void tfh_exception(struct gru_tlb_fault_handle *tfh)
{
	tfh->opc = TFHOP_EXCEPTION;
	start_instruction(tfh);
}

#endif /* __GRUHANDLES_H__ */