/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Ben Widawsky <ben@bwidawsk.net>
 *
 */

#include <linux/device.h>
#include <linux/module.h>
#include <linux/stat.h>
#include <linux/sysfs.h>

#include "gt/intel_rc6.h"
#include "gt/intel_rps.h"
#include "gt/sysfs_engines.h"

#include "i915_drv.h"
#include "i915_sysfs.h"
#include "intel_pm.h"
#include "intel_sideband.h"

/* The sysfs device is the DRM minor's kdev, whose drvdata is the minor. */
static inline struct drm_i915_private *kdev_minor_to_i915(struct device *kdev)
{
	struct drm_minor *minor = dev_get_drvdata(kdev);
	return to_i915(minor->dev);
}

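/*
 * RC6 residency counters are exposed under the power/ sysfs group. They
 * are only compiled in when CONFIG_PM is set, since reading them takes a
 * runtime PM wakeref.
 */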
#ifdef CONFIG_PM
/* Read an RC6 residency counter (reported in us) and convert it to ms. */
static u32 calc_residency(struct drm_i915_private *dev_priv,
			  i915_reg_t reg)
{
	intel_wakeref_t wakeref;
	u64 res = 0;

	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
		res = intel_rc6_residency_us(&dev_priv->gt.rc6, reg);

	return DIV_ROUND_CLOSEST_ULL(res, 1000);
}

static ssize_t
show_rc6_mask(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
	unsigned int mask;

	mask = 0;
	if (HAS_RC6(dev_priv))
		mask |= BIT(0);
	if (HAS_RC6p(dev_priv))
		mask |= BIT(1);
	if (HAS_RC6pp(dev_priv))
		mask |= BIT(2);

	return snprintf(buf, PAGE_SIZE, "%x\n", mask);
}

static ssize_t
show_rc6_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
	u32 rc6_residency = calc_residency(dev_priv, GEN6_GT_GFX_RC6);
	return snprintf(buf, PAGE_SIZE, "%u\n", rc6_residency);
}

static ssize_t
show_rc6p_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
	u32 rc6p_residency = calc_residency(dev_priv, GEN6_GT_GFX_RC6p);
	return snprintf(buf, PAGE_SIZE, "%u\n", rc6p_residency);
}

static ssize_t
show_rc6pp_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
	u32 rc6pp_residency = calc_residency(dev_priv, GEN6_GT_GFX_RC6pp);
	return snprintf(buf, PAGE_SIZE, "%u\n", rc6pp_residency);
}

static ssize_t
show_media_rc6_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
	u32 rc6_residency = calc_residency(dev_priv, VLV_GT_MEDIA_RC6);
	return snprintf(buf, PAGE_SIZE, "%u\n", rc6_residency);
}

static DEVICE_ATTR(rc6_enable, S_IRUGO, show_rc6_mask, NULL);
static DEVICE_ATTR(rc6_residency_ms, S_IRUGO, show_rc6_ms, NULL);
static DEVICE_ATTR(rc6p_residency_ms, S_IRUGO, show_rc6p_ms, NULL);
static DEVICE_ATTR(rc6pp_residency_ms, S_IRUGO, show_rc6pp_ms, NULL);
static DEVICE_ATTR(media_rc6_residency_ms, S_IRUGO, show_media_rc6_ms, NULL);

static struct attribute *rc6_attrs[] = {
	&dev_attr_rc6_enable.attr,
	&dev_attr_rc6_residency_ms.attr,
	NULL
};

static const struct attribute_group rc6_attr_group = {
	.name = power_group_name,
	.attrs = rc6_attrs
};

static struct attribute *rc6p_attrs[] = {
	&dev_attr_rc6p_residency_ms.attr,
	&dev_attr_rc6pp_residency_ms.attr,
	NULL
};

static const struct attribute_group rc6p_attr_group = {
	.name = power_group_name,
	.attrs = rc6p_attrs
};

static struct attribute *media_rc6_attrs[] = {
	&dev_attr_media_rc6_residency_ms.attr,
	NULL
};

static const struct attribute_group media_rc6_attr_group = {
	.name = power_group_name,
	.attrs = media_rc6_attrs
};
#endif

static int l3_access_valid(struct drm_i915_private *i915, loff_t offset)
{
	if (!HAS_L3_DPF(i915))
		return -EPERM;

	if (!IS_ALIGNED(offset, sizeof(u32)))
		return -EINVAL;

	if (offset >= GEN7_L3LOG_SIZE)
		return -ENXIO;

	return 0;
}

static ssize_t
i915_l3_read(struct file *filp, struct kobject *kobj,
	     struct bin_attribute *attr, char *buf,
	     loff_t offset, size_t count)
{
	struct device *kdev = kobj_to_dev(kobj);
	struct drm_i915_private *i915 = kdev_minor_to_i915(kdev);
	int slice = (int)(uintptr_t)attr->private;
	int ret;

	ret = l3_access_valid(i915, offset);
	if (ret)
		return ret;

	count = round_down(count, sizeof(u32));
	count = min_t(size_t, GEN7_L3LOG_SIZE - offset, count);
	memset(buf, 0, count);

	spin_lock(&i915->gem.contexts.lock);
	if (i915->l3_parity.remap_info[slice])
		memcpy(buf,
		       i915->l3_parity.remap_info[slice] + offset / sizeof(u32),
		       count);
	spin_unlock(&i915->gem.contexts.lock);

	return count;
}

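/*
 * Writes update the per-slice L3 remap table. A replacement table is
 * allocated up front so the copy can be done under the contexts spinlock
 * without sleeping; if a table was already installed, the spare
 * allocation is freed once the lock is dropped.
 */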
static ssize_t
i915_l3_write(struct file *filp, struct kobject *kobj,
	      struct bin_attribute *attr, char *buf,
	      loff_t offset, size_t count)
{
	struct device *kdev = kobj_to_dev(kobj);
	struct drm_i915_private *i915 = kdev_minor_to_i915(kdev);
	int slice = (int)(uintptr_t)attr->private;
	u32 *remap_info, *freeme = NULL;
	struct i915_gem_context *ctx;
	int ret;

	ret = l3_access_valid(i915, offset);
	if (ret)
		return ret;

	if (count < sizeof(u32))
		return -EINVAL;

	remap_info = kzalloc(GEN7_L3LOG_SIZE, GFP_KERNEL);
	if (!remap_info)
		return -ENOMEM;

	spin_lock(&i915->gem.contexts.lock);

	if (i915->l3_parity.remap_info[slice]) {
		freeme = remap_info;
		remap_info = i915->l3_parity.remap_info[slice];
	} else {
		i915->l3_parity.remap_info[slice] = remap_info;
	}

	count = round_down(count, sizeof(u32));
	memcpy(remap_info + offset / sizeof(u32), buf, count);

	/* NB: We defer the remapping until we switch to the context */
	list_for_each_entry(ctx, &i915->gem.contexts.list, link)
		ctx->remap_slice |= BIT(slice);

	spin_unlock(&i915->gem.contexts.lock);
	kfree(freeme);

	/*
	 * TODO: Ideally we really want a GPU reset here to make sure errors
	 * aren't propagated. Since I cannot find a stable way to reset the GPU
	 * at this point it is left as a TODO.
	 */

	return count;
}

static const struct bin_attribute dpf_attrs = {
	.attr = {.name = "l3_parity", .mode = (S_IRUSR | S_IWUSR)},
	.size = GEN7_L3LOG_SIZE,
	.read = i915_l3_read,
	.write = i915_l3_write,
	.mmap = NULL,
	.private = (void *)0
};

static const struct bin_attribute dpf_attrs_1 = {
	.attr = {.name = "l3_parity_slice_1", .mode = (S_IRUSR | S_IWUSR)},
	.size = GEN7_L3LOG_SIZE,
	.read = i915_l3_read,
	.write = i915_l3_write,
	.mmap = NULL,
	.private = (void *)1
};

static ssize_t gt_act_freq_mhz_show(struct device *kdev,
				    struct device_attribute *attr, char *buf)
{
	struct drm_i915_private *i915 = kdev_minor_to_i915(kdev);
	struct intel_rps *rps = &i915->gt.rps;

	return snprintf(buf, PAGE_SIZE, "%d\n",
			intel_rps_read_actual_frequency(rps));
}

static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
				    struct device_attribute *attr, char *buf)
{
	struct drm_i915_private *i915 = kdev_minor_to_i915(kdev);
	struct intel_rps *rps = &i915->gt.rps;

	return snprintf(buf, PAGE_SIZE, "%d\n",
			intel_gpu_freq(rps, rps->cur_freq));
}

static ssize_t gt_boost_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_i915_private *i915 = kdev_minor_to_i915(kdev);
	struct intel_rps *rps = &i915->gt.rps;

	return snprintf(buf, PAGE_SIZE, "%d\n",
			intel_gpu_freq(rps, rps->boost_freq));
}

static ssize_t gt_boost_freq_mhz_store(struct device *kdev,
				       struct device_attribute *attr,
				       const char *buf, size_t count)
{
	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
	struct intel_rps *rps = &dev_priv->gt.rps;
	bool boost = false;
	ssize_t ret;
	u32 val;

	ret = kstrtou32(buf, 0, &val);
	if (ret)
		return ret;

	/* Validate against (static) hardware limits */
	val = intel_freq_opcode(rps, val);
	if (val < rps->min_freq || val > rps->max_freq)
		return -EINVAL;

	mutex_lock(&rps->lock);
	if (val != rps->boost_freq) {
		rps->boost_freq = val;
		/* Only kick the worker if someone is waiting on a boost. */
		boost = atomic_read(&rps->num_waiters);
	}
	mutex_unlock(&rps->lock);
	if (boost)
		schedule_work(&rps->work);

	return count;
}

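/*
 * RPe, the "efficient" frequency, is only exposed on Valleyview and
 * Cherryview; the attribute is registered via vlv_attrs below.
 */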
static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev,
				     struct device_attribute *attr, char *buf)
{
	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
	struct intel_rps *rps = &dev_priv->gt.rps;

	return snprintf(buf, PAGE_SIZE, "%d\n",
			intel_gpu_freq(rps, rps->efficient_freq));
}

static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
	struct intel_rps *rps = &dev_priv->gt.rps;

	return snprintf(buf, PAGE_SIZE, "%d\n",
			intel_gpu_freq(rps, rps->max_freq_softlimit));
}

static ssize_t gt_max_freq_mhz_store(struct device *kdev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
	struct intel_rps *rps = &dev_priv->gt.rps;
	ssize_t ret;
	u32 val;

	ret = kstrtou32(buf, 0, &val);
	if (ret)
		return ret;

	mutex_lock(&rps->lock);

	val = intel_freq_opcode(rps, val);
	if (val < rps->min_freq ||
	    val > rps->max_freq ||
	    val < rps->min_freq_softlimit) {
		ret = -EINVAL;
		goto unlock;
	}

	if (val > rps->rp0_freq)
		DRM_DEBUG("User requested overclocking to %d\n",
			  intel_gpu_freq(rps, val));

	rps->max_freq_softlimit = val;

	val = clamp_t(int, rps->cur_freq,
		      rps->min_freq_softlimit,
		      rps->max_freq_softlimit);

	/*
	 * We still need *_set_rps to process the new max_delay and
	 * update the interrupt limits and PMINTRMSK even though
	 * frequency request may be unchanged.
	 */
	intel_rps_set(rps, val);

unlock:
	mutex_unlock(&rps->lock);

	return ret ?: count;
}

static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
	struct intel_rps *rps = &dev_priv->gt.rps;

	return snprintf(buf, PAGE_SIZE, "%d\n",
			intel_gpu_freq(rps, rps->min_freq_softlimit));
}

static ssize_t gt_min_freq_mhz_store(struct device *kdev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
	struct intel_rps *rps = &dev_priv->gt.rps;
	ssize_t ret;
	u32 val;

	ret = kstrtou32(buf, 0, &val);
	if (ret)
		return ret;

	mutex_lock(&rps->lock);

	val = intel_freq_opcode(rps, val);
	if (val < rps->min_freq ||
	    val > rps->max_freq ||
	    val > rps->max_freq_softlimit) {
		ret = -EINVAL;
		goto unlock;
	}

	rps->min_freq_softlimit = val;

	val = clamp_t(int, rps->cur_freq,
		      rps->min_freq_softlimit,
		      rps->max_freq_softlimit);

	/*
	 * We still need *_set_rps to process the new min_delay and
	 * update the interrupt limits and PMINTRMSK even though
	 * frequency request may be unchanged.
	 */
	intel_rps_set(rps, val);

unlock:
	mutex_unlock(&rps->lock);

	return ret ?: count;
}

static DEVICE_ATTR_RO(gt_act_freq_mhz);
static DEVICE_ATTR_RO(gt_cur_freq_mhz);
static DEVICE_ATTR_RW(gt_boost_freq_mhz);
static DEVICE_ATTR_RW(gt_max_freq_mhz);
static DEVICE_ATTR_RW(gt_min_freq_mhz);

static DEVICE_ATTR_RO(vlv_rpe_freq_mhz);

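/*
 * gt_rp_mhz_show() is forward declared so the attribute definitions below
 * can reference it; it distinguishes RP0/RP1/RPn by comparing the
 * attribute pointer.
 */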
static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf);
static DEVICE_ATTR(gt_RP0_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
static DEVICE_ATTR(gt_RP1_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
static DEVICE_ATTR(gt_RPn_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);

/* For now we have a static number of RP states */
static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
	struct intel_rps *rps = &dev_priv->gt.rps;
	u32 val;

	if (attr == &dev_attr_gt_RP0_freq_mhz)
		val = intel_gpu_freq(rps, rps->rp0_freq);
	else if (attr == &dev_attr_gt_RP1_freq_mhz)
		val = intel_gpu_freq(rps, rps->rp1_freq);
	else if (attr == &dev_attr_gt_RPn_freq_mhz)
		val = intel_gpu_freq(rps, rps->min_freq);
	else
		BUG();

	return snprintf(buf, PAGE_SIZE, "%d\n", val);
}

static const struct attribute * const gen6_attrs[] = {
	&dev_attr_gt_act_freq_mhz.attr,
	&dev_attr_gt_cur_freq_mhz.attr,
	&dev_attr_gt_boost_freq_mhz.attr,
	&dev_attr_gt_max_freq_mhz.attr,
	&dev_attr_gt_min_freq_mhz.attr,
	&dev_attr_gt_RP0_freq_mhz.attr,
	&dev_attr_gt_RP1_freq_mhz.attr,
	&dev_attr_gt_RPn_freq_mhz.attr,
	NULL,
};

static const struct attribute * const vlv_attrs[] = {
	&dev_attr_gt_act_freq_mhz.attr,
	&dev_attr_gt_cur_freq_mhz.attr,
	&dev_attr_gt_boost_freq_mhz.attr,
	&dev_attr_gt_max_freq_mhz.attr,
	&dev_attr_gt_min_freq_mhz.attr,
	&dev_attr_gt_RP0_freq_mhz.attr,
	&dev_attr_gt_RP1_freq_mhz.attr,
	&dev_attr_gt_RPn_freq_mhz.attr,
	&dev_attr_vlv_rpe_freq_mhz.attr,
	NULL,
};

#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)

static ssize_t error_state_read(struct file *filp, struct kobject *kobj,
				struct bin_attribute *attr, char *buf,
				loff_t off, size_t count)
{
	struct device *kdev = kobj_to_dev(kobj);
	struct drm_i915_private *i915 = kdev_minor_to_i915(kdev);
	struct i915_gpu_coredump *gpu;
	ssize_t ret;

	gpu = i915_first_error_state(i915);
	if (IS_ERR(gpu)) {
		ret = PTR_ERR(gpu);
	} else if (gpu) {
		ret = i915_gpu_coredump_copy_to_buffer(gpu, buf, off, count);
		i915_gpu_coredump_put(gpu);
	} else {
		const char *str = "No error state collected\n";
		size_t len = strlen(str);

		ret = min_t(size_t, count, len - off);
		memcpy(buf, str + off, ret);
	}

	return ret;
}

static ssize_t error_state_write(struct file *file, struct kobject *kobj,
				 struct bin_attribute *attr, char *buf,
				 loff_t off, size_t count)
{
	struct device *kdev = kobj_to_dev(kobj);
	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);

	drm_dbg(&dev_priv->drm, "Resetting error state\n");
	i915_reset_error_state(dev_priv);

	return count;
}

static const struct bin_attribute error_state_attr = {
	.attr.name = "error",
	.attr.mode = S_IRUSR | S_IWUSR,
	.size = 0,
	.read = error_state_read,
	.write = error_state_write,
};

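/*
 * Reading the binary "error" file returns the first captured GPU
 * coredump; writing any value discards the captured state.
 */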
static void i915_setup_error_capture(struct device *kdev)
{
	if (sysfs_create_bin_file(&kdev->kobj, &error_state_attr))
		DRM_ERROR("error_state sysfs setup failed\n");
}

static void i915_teardown_error_capture(struct device *kdev)
{
	sysfs_remove_bin_file(&kdev->kobj, &error_state_attr);
}
#else
static void i915_setup_error_capture(struct device *kdev) {}
static void i915_teardown_error_capture(struct device *kdev) {}
#endif

void i915_setup_sysfs(struct drm_i915_private *dev_priv)
{
	struct device *kdev = dev_priv->drm.primary->kdev;
	int ret;

#ifdef CONFIG_PM
	if (HAS_RC6(dev_priv)) {
		ret = sysfs_merge_group(&kdev->kobj,
					&rc6_attr_group);
		if (ret)
			drm_err(&dev_priv->drm,
				"RC6 residency sysfs setup failed\n");
	}
	if (HAS_RC6p(dev_priv)) {
		ret = sysfs_merge_group(&kdev->kobj,
					&rc6p_attr_group);
		if (ret)
			drm_err(&dev_priv->drm,
				"RC6p residency sysfs setup failed\n");
	}
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		ret = sysfs_merge_group(&kdev->kobj,
					&media_rc6_attr_group);
		if (ret)
			drm_err(&dev_priv->drm,
				"Media RC6 residency sysfs setup failed\n");
	}
#endif
	if (HAS_L3_DPF(dev_priv)) {
		ret = device_create_bin_file(kdev, &dpf_attrs);
		if (ret)
			drm_err(&dev_priv->drm,
				"l3 parity sysfs setup failed\n");

		if (NUM_L3_SLICES(dev_priv) > 1) {
			ret = device_create_bin_file(kdev,
						     &dpf_attrs_1);
			if (ret)
				drm_err(&dev_priv->drm,
					"l3 parity slice 1 setup failed\n");
		}
	}

	ret = 0;
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		ret = sysfs_create_files(&kdev->kobj, vlv_attrs);
	else if (INTEL_GEN(dev_priv) >= 6)
		ret = sysfs_create_files(&kdev->kobj, gen6_attrs);
	if (ret)
		drm_err(&dev_priv->drm, "RPS sysfs setup failed\n");

	i915_setup_error_capture(kdev);

	intel_engines_add_sysfs(dev_priv);
}

void i915_teardown_sysfs(struct drm_i915_private *dev_priv)
{
	struct device *kdev = dev_priv->drm.primary->kdev;

	i915_teardown_error_capture(kdev);

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		sysfs_remove_files(&kdev->kobj, vlv_attrs);
	else
		sysfs_remove_files(&kdev->kobj, gen6_attrs);
	device_remove_bin_file(kdev, &dpf_attrs_1);
	device_remove_bin_file(kdev, &dpf_attrs);
#ifdef CONFIG_PM
	sysfs_unmerge_group(&kdev->kobj, &rc6_attr_group);
	sysfs_unmerge_group(&kdev->kobj, &rc6p_attr_group);
#endif
}