/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#ifndef __MSM_GPU_H__
#define __MSM_GPU_H__

#include <linux/adreno-smmu-priv.h>
#include <linux/clk.h>
#include <linux/devfreq.h>
#include <linux/interconnect.h>
#include <linux/pm_opp.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>

#include "msm_drv.h"
#include "msm_fence.h"
#include "msm_ringbuffer.h"
#include "msm_gem.h"

struct msm_gem_submit;
struct msm_gpu_perfcntr;
struct msm_gpu_state;
struct msm_file_private;

struct msm_gpu_config {
	const char *ioname;
	unsigned int nr_rings;
};

/* So far, with hardware that I've seen to date, we can have:
 *  + zero, one, or two z180 2d cores
 *  + a3xx or a2xx 3d core, which share a common CP (the firmware
 *    for the CP seems to implement some different PM4 packet types
 *    but the basics of cmdstream submission are the same)
 *
 * Which means that the eventual complete "class" hierarchy, once
 * support for all past and present hw is in place, becomes:
 *  + msm_gpu
 *    + adreno_gpu
 *      + a3xx_gpu
 *      + a2xx_gpu
 *    + z180_gpu
 */
struct msm_gpu_funcs {
	int (*get_param)(struct msm_gpu *gpu, struct msm_file_private *ctx,
			 uint32_t param, uint64_t *value, uint32_t *len);
	int (*set_param)(struct msm_gpu *gpu, struct msm_file_private *ctx,
			 uint32_t param, uint64_t value, uint32_t len);
	int (*hw_init)(struct msm_gpu *gpu);
	int (*pm_suspend)(struct msm_gpu *gpu);
	int (*pm_resume)(struct msm_gpu *gpu);
	void (*submit)(struct msm_gpu *gpu, struct msm_gem_submit *submit);
	void (*flush)(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
	irqreturn_t (*irq)(struct msm_gpu *gpu);
	struct msm_ringbuffer *(*active_ring)(struct msm_gpu *gpu);
	void (*recover)(struct msm_gpu *gpu);
	void (*destroy)(struct msm_gpu *gpu);
#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
	/* show GPU status in debugfs: */
	void (*show)(struct msm_gpu *gpu, struct msm_gpu_state *state,
			struct drm_printer *p);
	/* for generation specific debugfs: */
	void (*debugfs_init)(struct msm_gpu *gpu, struct drm_minor *minor);
#endif
	/* note: gpu_busy() can assume that we have been pm_resumed */
	u64 (*gpu_busy)(struct msm_gpu *gpu, unsigned long *out_sample_rate);
	struct msm_gpu_state *(*gpu_state_get)(struct msm_gpu *gpu);
	int (*gpu_state_put)(struct msm_gpu_state *state);
	unsigned long (*gpu_get_freq)(struct msm_gpu *gpu);
	/* note: gpu_set_freq() can assume that we have been pm_resumed */
	void (*gpu_set_freq)(struct msm_gpu *gpu, struct dev_pm_opp *opp,
			     bool suspended);
	struct msm_gem_address_space *(*create_address_space)
		(struct msm_gpu *gpu, struct platform_device *pdev);
	struct msm_gem_address_space *(*create_private_address_space)
		(struct msm_gpu *gpu);
	uint32_t (*get_rptr)(struct msm_gpu *gpu, struct msm_ringbuffer *ring);

	/**
	 * progress: Has the GPU made progress?
	 *
	 * Return true if GPU position in cmdstream has advanced (or changed)
	 * since the last call.  To avoid false negatives, this should account
	 * for cmdstream that is buffered in the FIFO upstream of the CP fw.
	 */
	bool (*progress)(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
};

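/*
 * For illustration only (not part of the driver): a minimal sketch of how a
 * hypothetical generation-specific backend could embed struct msm_gpu and
 * wire up a msm_gpu_funcs table.  The foo_* names below are made up.
 *
 *	struct foo_gpu {
 *		struct msm_gpu base;
 *		// generation specific state goes here
 *	};
 *	#define to_foo_gpu(x) container_of(x, struct foo_gpu, base)
 *
 *	static int foo_hw_init(struct msm_gpu *gpu)
 *	{
 *		struct foo_gpu *foo = to_foo_gpu(gpu);
 *		// program registers, load firmware, etc.
 *		return 0;
 *	}
 *
 *	static const struct msm_gpu_funcs foo_funcs = {
 *		.hw_init = foo_hw_init,
 *		// .submit, .flush, .irq, .recover, ... as needed
 *	};
 */
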
/* Additional state for iommu faults: */
struct msm_gpu_fault_info {
	u64 ttbr0;
	unsigned long iova;
	int flags;
	const char *type;
	const char *block;
};

/**
 * struct msm_gpu_devfreq - devfreq related state
 */
struct msm_gpu_devfreq {
	/** devfreq: devfreq instance */
	struct devfreq *devfreq;

	/** lock: lock for "suspended", "busy_cycles", and "time" */
	struct mutex lock;

	/**
	 * idle_freq:
	 *
	 * A PM QoS constraint to limit max freq while the GPU is idle.
	 */
	struct dev_pm_qos_request idle_freq;

	/**
	 * boost_freq:
	 *
	 * A PM QoS constraint to boost min freq for a period of time
	 * until the boost expires.
	 */
	struct dev_pm_qos_request boost_freq;

	/**
	 * busy_cycles: Last busy counter value, for calculating elapsed busy
	 * cycles since last sampling period.
	 */
	u64 busy_cycles;

	/** time: Time of last sampling period. */
	ktime_t time;

	/** idle_time: Time of last transition to idle. */
	ktime_t idle_time;

	struct devfreq_dev_status average_status;

	/**
	 * idle_work:
	 *
	 * Used to delay clamping to idle freq on active->idle transition.
	 */
	struct msm_hrtimer_work idle_work;

	/**
	 * boost_work:
	 *
	 * Used to reset the boost_freq constraint after the boost period
	 * has elapsed.
	 */
	struct msm_hrtimer_work boost_work;

	/** suspended: tracks if we're suspended */
	bool suspended;
};

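/*
 * For illustration only: a rough sketch (not the actual implementation) of
 * how busy_cycles/time feed a devfreq busy/total sample.  The gpu_busy()
 * callback returns a cumulative cycle counter plus the rate it ticks at;
 * the delta since the last sample divided by that rate gives busy time,
 * which is compared against the wall-clock time elapsed since "time".
 *
 *	mutex_lock(&df->lock);
 *	cycles = gpu->funcs->gpu_busy(gpu, &sample_rate);
 *	busy_time = (cycles - df->busy_cycles) * USEC_PER_SEC / sample_rate;
 *	df->busy_cycles = cycles;
 *	now = ktime_get();
 *	total_time = ktime_us_delta(now, df->time);
 *	df->time = now;
 *	mutex_unlock(&df->lock);
 *	// utilization is roughly busy_time / total_time
 */
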
struct msm_gpu {
	const char *name;
	struct drm_device *dev;
	struct platform_device *pdev;
	const struct msm_gpu_funcs *funcs;

	struct adreno_smmu_priv adreno_smmu;

	/* performance counters (hw & sw): */
	spinlock_t perf_lock;
	bool perfcntr_active;
	struct {
		bool active;
		ktime_t time;
	} last_sample;
	uint32_t totaltime, activetime;    /* sw counters */
	uint32_t last_cntrs[5];            /* hw counters */
	const struct msm_gpu_perfcntr *perfcntrs;
	uint32_t num_perfcntrs;

	struct msm_ringbuffer *rb[MSM_GPU_MAX_RINGS];
	int nr_rings;

	/**
	 * sysprof_active:
	 *
	 * The count of contexts that have enabled system profiling.
	 */
	refcount_t sysprof_active;

	/**
	 * cur_ctx_seqno:
	 *
	 * The ctx->seqno value of the last context to submit rendering,
	 * and the one with current pgtables installed (for generations
	 * that support per-context pgtables).  Tracked by seqno rather
	 * than pointer value to avoid dangling pointers, and cases where
	 * a ctx can be freed and a new one created with the same address.
	 */
	int cur_ctx_seqno;

	/**
	 * lock:
	 *
	 * General lock for serializing all the gpu things.
	 *
	 * TODO move to per-ring locking where feasible (ie. submit/retire
	 * path, etc)
	 */
	struct mutex lock;

	/**
	 * active_submits:
	 *
	 * The number of submitted but not yet retired submits, used to
	 * determine transitions between active and idle.
	 *
	 * Protected by active_lock
	 */
	int active_submits;

	/** active_lock: protects active_submits and idle/active transitions */
	struct mutex active_lock;

	/* does gpu need hw_init? */
	bool needs_hw_init;

	/**
	 * global_faults: number of GPU hangs not attributed to a particular
	 * address space
	 */
	int global_faults;

	void __iomem *mmio;
	int irq;

	struct msm_gem_address_space *aspace;

	/* Power Control: */
	struct regulator *gpu_reg, *gpu_cx;
	struct clk_bulk_data *grp_clks;
	int nr_clocks;
	struct clk *ebi1_clk, *core_clk, *rbbmtimer_clk;
	uint32_t fast_rate;

	/* Hang and Inactivity Detection:
	 */
#define DRM_MSM_INACTIVE_PERIOD  66 /* in ms (roughly four frames) */

#define DRM_MSM_HANGCHECK_DEFAULT_PERIOD 500 /* in ms */
#define DRM_MSM_HANGCHECK_PROGRESS_RETRIES 3
	struct timer_list hangcheck_timer;

	/* Fault info for most recent iova fault: */
	struct msm_gpu_fault_info fault_info;

	/* work for handling GPU iova faults: */
	struct kthread_work fault_work;

	/* work for handling GPU recovery: */
	struct kthread_work recover_work;

	/** retire_event: notified when submits are retired */
	wait_queue_head_t retire_event;

	/* work for handling active-list retiring: */
	struct kthread_work retire_work;

	/* worker for retire/recover: */
	struct kthread_worker *worker;

	struct drm_gem_object *memptrs_bo;

	struct msm_gpu_devfreq devfreq;

	uint32_t suspend_count;

	struct msm_gpu_state *crashstate;

	/* Enable clamping to idle freq when inactive: */
	bool clamp_to_idle;

	/* True if the hardware supports expanded apriv (a650 and newer) */
	bool hw_apriv;

	struct thermal_cooling_device *cooling;

	/* To poll for cx gdsc collapse during gpu recovery */
	struct reset_control *cx_collapse;
};

static inline struct msm_gpu *dev_to_gpu(struct device *dev)
{
	struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(dev);

	return container_of(adreno_smmu, struct msm_gpu, adreno_smmu);
}

/* It turns out that all targets use the same ringbuffer size */
#define MSM_GPU_RINGBUFFER_SZ SZ_32K
#define MSM_GPU_RINGBUFFER_BLKSIZE 32

#define MSM_GPU_RB_CNTL_DEFAULT \
		(AXXX_CP_RB_CNTL_BUFSZ(ilog2(MSM_GPU_RINGBUFFER_SZ / 8)) | \
		AXXX_CP_RB_CNTL_BLKSZ(ilog2(MSM_GPU_RINGBUFFER_BLKSIZE / 8)))

static inline bool msm_gpu_active(struct msm_gpu *gpu)
{
	int i;

	for (i = 0; i < gpu->nr_rings; i++) {
		struct msm_ringbuffer *ring = gpu->rb[i];

		if (fence_after(ring->fctx->last_fence, ring->memptrs->fence))
			return true;
	}

	return false;
}

/* Perf-Counters:
 * The select_reg and select_val are just there for the benefit of the child
 * class that actually enables the perf counter, but the msm_gpu base class
 * will handle sampling/displaying the counters.
 */

struct msm_gpu_perfcntr {
	uint32_t select_reg;
	uint32_t sample_reg;
	uint32_t select_val;
	const char *name;
};

/*
 * The number of priority levels provided by drm gpu scheduler.  The
 * DRM_SCHED_PRIORITY_KERNEL priority level is treated specially in some
 * cases, so we don't use it (no need for kernel generated jobs).
 */
#define NR_SCHED_PRIORITIES (1 + DRM_SCHED_PRIORITY_HIGH - DRM_SCHED_PRIORITY_MIN)

/**
 * struct msm_file_private - per-drm_file context
 *
 * @queuelock: synchronizes access to submitqueues list
 * @submitqueues: list of &msm_gpu_submitqueue created by userspace
 * @queueid: counter incremented each time a submitqueue is created,
 *    used to assign &msm_gpu_submitqueue.id
 * @aspace: the per-process GPU address-space
 * @ref: reference count
 * @seqno: unique per process seqno
 */
struct msm_file_private {
	rwlock_t queuelock;
	struct list_head submitqueues;
	int queueid;
	struct msm_gem_address_space *aspace;
	struct kref ref;
	int seqno;

	/**
	 * sysprof:
	 *
	 * The value of MSM_PARAM_SYSPROF set by userspace.  This is
	 * intended to be used by system profiling tools like Mesa's
	 * pps-producer (perfetto), and restricted to CAP_SYS_ADMIN.
	 *
	 * Setting a value of 1 will preserve performance counters across
	 * context switches.  Setting a value of 2 will in addition
	 * suppress suspend.  (Performance counters lose state across
	 * power collapse, which is undesirable for profiling in some
	 * cases.)
	 *
	 * The value automatically reverts to zero when the drm device
	 * file is closed.
	 */
	int sysprof;

	/** comm: Overridden task comm, see MSM_PARAM_COMM */
	char *comm;

	/** cmdline: Overridden task cmdline, see MSM_PARAM_CMDLINE */
	char *cmdline;

	/**
	 * elapsed_ns:
	 *
	 * The total (cumulative) elapsed time GPU was busy with rendering
	 * from this context in ns.
	 */
	uint64_t elapsed_ns;

	/**
	 * cycles:
	 *
	 * The total (cumulative) GPU cycles elapsed attributed to this
	 * context.
	 */
	uint64_t cycles;

	/**
	 * entities:
	 *
	 * Table of per-priority-level sched entities used by submitqueues
	 * associated with this &drm_file.  Because some userspace apps
	 * make assumptions about rendering from multiple gl contexts
	 * (of the same priority) within the process happening in FIFO
	 * order without requiring any fencing beyond MakeCurrent(), we
	 * create at most one &drm_sched_entity per-process per-priority-level.
	 */
	struct drm_sched_entity *entities[NR_SCHED_PRIORITIES * MSM_GPU_MAX_RINGS];
};

/**
 * msm_gpu_convert_priority - Map userspace priority to ring # and sched priority
 *
 * @gpu: the gpu instance
 * @prio: the userspace priority level
 * @ring_nr: [out] the ringbuffer the userspace priority maps to
 * @sched_prio: [out] the gpu scheduler priority level which the userspace
 *    priority maps to
 *
 * With drm/scheduler providing its own level of prioritization, our total
 * number of available priority levels is (nr_rings * NR_SCHED_PRIORITIES).
 * Each ring is associated with its own scheduler instance.  However, our
 * UABI is that lower numerical values are higher priority.  So mapping the
 * single userspace priority level into ring_nr and sched_prio takes some
 * care.  The userspace provided priority (when a submitqueue is created)
 * is mapped to ring nr and scheduler priority as such:
 *
 *   ring_nr    = userspace_prio / NR_SCHED_PRIORITIES
 *   sched_prio = NR_SCHED_PRIORITIES -
 *                (userspace_prio % NR_SCHED_PRIORITIES) - 1
 *
 * This allows generations without preemption (nr_rings==1) to have some
 * amount of prioritization, and provides more priority levels for gens
 * that do have preemption.
 */
static inline int msm_gpu_convert_priority(struct msm_gpu *gpu, int prio,
		unsigned *ring_nr, enum drm_sched_priority *sched_prio)
{
	unsigned rn, sp;

	rn = div_u64_rem(prio, NR_SCHED_PRIORITIES, &sp);

	/* invert sched priority to map to higher-numeric-is-higher-
	 * priority convention
	 */
	sp = NR_SCHED_PRIORITIES - sp - 1;

	if (rn >= gpu->nr_rings)
		return -EINVAL;

	*ring_nr = rn;
	*sched_prio = sp;

	return 0;
}

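/*
 * Worked example (assuming NR_SCHED_PRIORITIES == 3, i.e. MIN/NORMAL/HIGH):
 *
 *	userspace_prio   ring_nr   sched_prio
 *	      0             0         2 (highest)
 *	      1             0         1
 *	      2             0         0 (lowest)
 *	      3             1         2
 *	      ...
 *
 * So a single-ring GPU accepts only priorities 0..2 (rn >= nr_rings rejects
 * the rest), while e.g. a 4-ring GPU exposes 12 levels, with lower numbers
 * always meaning higher priority.
 */
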
/**
 * struct msm_gpu_submitqueue - Userspace created context.
 *
 * A submitqueue is associated with a gl context or vk queue (or equiv)
 * in userspace.
 *
 * @id: userspace id for the submitqueue, unique within the drm_file
 * @flags: userspace flags for the submitqueue, specified at creation
 *    (currently unused)
 * @ring_nr: the ringbuffer used by this submitqueue, which is determined
 *    by the submitqueue's priority
 * @faults: the number of GPU hangs associated with this submitqueue
 * @last_fence: the sequence number of the last allocated fence (for error
 *    checking)
 * @ctx: the per-drm_file context associated with the submitqueue (ie.
 *    which set of pgtables do submits on this submitqueue use)
 * @node: node in the context's list of submitqueues
 * @fence_idr: maps fence-id to dma_fence for userspace visible fence
 *    seqno, protected by submitqueue lock
 * @idr_lock: for serializing access to fence_idr
 * @lock: submitqueue lock for serializing submits on a queue
 * @ref: reference count
 * @entity: the submit job-queue
 */
struct msm_gpu_submitqueue {
	int id;
	u32 flags;
	u32 ring_nr;
	int faults;
	uint32_t last_fence;
	struct msm_file_private *ctx;
	struct list_head node;
	struct idr fence_idr;
	struct mutex idr_lock;
	struct mutex lock;
	struct kref ref;
	struct drm_sched_entity *entity;
};

struct msm_gpu_state_bo {
	u64 iova;
	size_t size;
	void *data;
	bool encoded;
	char name[32];
};

struct msm_gpu_state {
	struct kref ref;
	struct timespec64 time;

	struct {
		u64 iova;
		u32 fence;
		u32 seqno;
		u32 rptr;
		u32 wptr;
		void *data;
		int data_size;
		bool encoded;
	} ring[MSM_GPU_MAX_RINGS];

	int nr_registers;
	u32 *registers;

	u32 rbbm_status;

	char *comm;
	char *cmd;

	struct msm_gpu_fault_info fault_info;

	int nr_bos;
	struct msm_gpu_state_bo *bos;
};

static inline void gpu_write(struct msm_gpu *gpu, u32 reg, u32 data)
{
	msm_writel(data, gpu->mmio + (reg << 2));
}

static inline u32 gpu_read(struct msm_gpu *gpu, u32 reg)
{
	return msm_readl(gpu->mmio + (reg << 2));
}

static inline void gpu_rmw(struct msm_gpu *gpu, u32 reg, u32 mask, u32 or)
{
	msm_rmw(gpu->mmio + (reg << 2), mask, or);
}

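/*
 * For illustration only (the REG_FOO_* names are made up): the accessors
 * above take dword register offsets, i.e. "reg" is shifted left by 2 to
 * form the byte offset into the MMIO region.
 *
 *	gpu_write(gpu, REG_FOO_CTRL, 0x1);
 *	status = gpu_read(gpu, REG_FOO_STATUS);
 *	// read-modify-write a field within a register:
 *	gpu_rmw(gpu, REG_FOO_CTRL, FOO_CTRL_MODE__MASK, FOO_CTRL_MODE(2));
 */
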
static inline u64 gpu_read64(struct msm_gpu *gpu, u32 reg)
{
	u64 val;

	/*
	 * Why not a readq here?  Two reasons: 1) many of the LO registers are
	 * not quad word aligned and 2) the GPU hardware designers have a bit
	 * of a history of putting registers where they fit, especially in
	 * spins.  The longer a GPU family goes the higher the chance that
	 * we'll get burned.  We could do a series of validity checks if we
	 * wanted to, but really is a readq() that much better?  Nah.
	 */

	/*
	 * For some lo/hi registers (like perfcounters), the hi value is latched
	 * when the lo is read, so make sure to read the lo first to trigger
	 * that
	 */
	val = (u64) msm_readl(gpu->mmio + (reg << 2));
	val |= ((u64) msm_readl(gpu->mmio + ((reg + 1) << 2)) << 32);

	return val;
}

static inline void gpu_write64(struct msm_gpu *gpu, u32 reg, u64 val)
{
	/* Why not a writeq here?  Read the screed above */
	msm_writel(lower_32_bits(val), gpu->mmio + (reg << 2));
	msm_writel(upper_32_bits(val), gpu->mmio + ((reg + 1) << 2));
}

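/*
 * For illustration only (the register name is made up): reading a 64-bit
 * counter exposed as an adjacent LO/HI register pair.  Passing the LO dword
 * offset is enough; gpu_read64() reads LO then LO+1, which also satisfies
 * counters that latch the HI half when the LO half is read.
 *
 *	u64 ticks = gpu_read64(gpu, REG_FOO_ALWAYSON_COUNTER_LO);
 */
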
int msm_gpu_pm_suspend(struct msm_gpu *gpu);
int msm_gpu_pm_resume(struct msm_gpu *gpu);

void msm_gpu_show_fdinfo(struct msm_gpu *gpu, struct msm_file_private *ctx,
			 struct drm_printer *p);

int msm_submitqueue_init(struct drm_device *drm, struct msm_file_private *ctx);
struct msm_gpu_submitqueue *msm_submitqueue_get(struct msm_file_private *ctx,
		u32 id);
int msm_submitqueue_create(struct drm_device *drm,
		struct msm_file_private *ctx,
		u32 prio, u32 flags, u32 *id);
int msm_submitqueue_query(struct drm_device *drm, struct msm_file_private *ctx,
		struct drm_msm_submitqueue_query *args);
int msm_submitqueue_remove(struct msm_file_private *ctx, u32 id);
void msm_submitqueue_close(struct msm_file_private *ctx);

void msm_submitqueue_destroy(struct kref *kref);

int msm_file_private_set_sysprof(struct msm_file_private *ctx,
				 struct msm_gpu *gpu, int sysprof);
void __msm_file_private_destroy(struct kref *kref);

static inline void msm_file_private_put(struct msm_file_private *ctx)
{
	kref_put(&ctx->ref, __msm_file_private_destroy);
}

static inline struct msm_file_private *msm_file_private_get(
	struct msm_file_private *ctx)
{
	kref_get(&ctx->ref);
	return ctx;
}

void msm_devfreq_init(struct msm_gpu *gpu);
void msm_devfreq_cleanup(struct msm_gpu *gpu);
void msm_devfreq_resume(struct msm_gpu *gpu);
void msm_devfreq_suspend(struct msm_gpu *gpu);
void msm_devfreq_boost(struct msm_gpu *gpu, unsigned factor);
void msm_devfreq_active(struct msm_gpu *gpu);
void msm_devfreq_idle(struct msm_gpu *gpu);

int msm_gpu_hw_init(struct msm_gpu *gpu);

void msm_gpu_perfcntr_start(struct msm_gpu *gpu);
void msm_gpu_perfcntr_stop(struct msm_gpu *gpu);
int msm_gpu_perfcntr_sample(struct msm_gpu *gpu, uint32_t *activetime,
		uint32_t *totaltime, uint32_t ncntrs, uint32_t *cntrs);

void msm_gpu_retire(struct msm_gpu *gpu);
void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit);

int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
		struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs,
		const char *name, struct msm_gpu_config *config);

struct msm_gem_address_space *
msm_gpu_create_private_address_space(struct msm_gpu *gpu, struct task_struct *task);

void msm_gpu_cleanup(struct msm_gpu *gpu);

struct msm_gpu *adreno_load_gpu(struct drm_device *dev);
void __init adreno_register(void);
void __exit adreno_unregister(void);

static inline void msm_submitqueue_put(struct msm_gpu_submitqueue *queue)
{
	if (queue)
		kref_put(&queue->ref, msm_submitqueue_destroy);
}

static inline struct msm_gpu_state *msm_gpu_crashstate_get(struct msm_gpu *gpu)
{
	struct msm_gpu_state *state = NULL;

	mutex_lock(&gpu->lock);

	if (gpu->crashstate) {
		kref_get(&gpu->crashstate->ref);
		state = gpu->crashstate;
	}

	mutex_unlock(&gpu->lock);

	return state;
}

static inline void msm_gpu_crashstate_put(struct msm_gpu *gpu)
{
	mutex_lock(&gpu->lock);

	if (gpu->crashstate) {
		if (gpu->funcs->gpu_state_put(gpu->crashstate))
			gpu->crashstate = NULL;
	}

	mutex_unlock(&gpu->lock);
}

/*
 * Simple macro to semi-cleanly add the MAP_PRIV flag for targets that can
 * support expanded privileges
 */
#define check_apriv(gpu, flags) \
	(((gpu)->hw_apriv ? MSM_BO_MAP_PRIV : 0) | (flags))

#endif /* __MSM_GPU_H__ */