/*
 * Copyright (c) 2006, 2017 Oracle and/or its affiliates. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/rculist.h>
#include <linux/llist.h>

#include "rds_single_path.h"
#include "ib_mr.h"

struct workqueue_struct *rds_ib_mr_wq;

static DEFINE_PER_CPU(unsigned long, clean_list_grace);
#define CLEAN_LIST_BUSY_BIT 0

static struct rds_ib_device *rds_ib_get_device(__be32 ipaddr)
{
	struct rds_ib_device *rds_ibdev;
	struct rds_ib_ipaddr *i_ipaddr;

	rcu_read_lock();
	list_for_each_entry_rcu(rds_ibdev, &rds_ib_devices, list) {
		list_for_each_entry_rcu(i_ipaddr, &rds_ibdev->ipaddr_list, list) {
			if (i_ipaddr->ipaddr == ipaddr) {
				refcount_inc(&rds_ibdev->refcount);
				rcu_read_unlock();
				return rds_ibdev;
			}
		}
	}
	rcu_read_unlock();

	return NULL;
}

static int rds_ib_add_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr)
{
	struct rds_ib_ipaddr *i_ipaddr;

	i_ipaddr = kmalloc(sizeof *i_ipaddr, GFP_KERNEL);
	if (!i_ipaddr)
		return -ENOMEM;

	i_ipaddr->ipaddr = ipaddr;

	spin_lock_irq(&rds_ibdev->spinlock);
	list_add_tail_rcu(&i_ipaddr->list, &rds_ibdev->ipaddr_list);
	spin_unlock_irq(&rds_ibdev->spinlock);

	return 0;
}

static void rds_ib_remove_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr)
{
	struct rds_ib_ipaddr *i_ipaddr;
	struct rds_ib_ipaddr *to_free = NULL;

	spin_lock_irq(&rds_ibdev->spinlock);
	list_for_each_entry_rcu(i_ipaddr, &rds_ibdev->ipaddr_list, list) {
		if (i_ipaddr->ipaddr == ipaddr) {
			list_del_rcu(&i_ipaddr->list);
			to_free = i_ipaddr;
			break;
		}
	}
	spin_unlock_irq(&rds_ibdev->spinlock);

	if (to_free)
		kfree_rcu(to_free, rcu);
}

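/* Bind an IP address to @rds_ibdev.  Only the last 32 bits of @ipaddr (the
 * IPv4 part) are used for the lookup; if the address is currently associated
 * with a different device, the association is moved over to @rds_ibdev.
 */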
int rds_ib_update_ipaddr(struct rds_ib_device *rds_ibdev,
			 struct in6_addr *ipaddr)
{
	struct rds_ib_device *rds_ibdev_old;

	rds_ibdev_old = rds_ib_get_device(ipaddr->s6_addr32[3]);
	if (!rds_ibdev_old)
		return rds_ib_add_ipaddr(rds_ibdev, ipaddr->s6_addr32[3]);

	if (rds_ibdev_old != rds_ibdev) {
		rds_ib_remove_ipaddr(rds_ibdev_old, ipaddr->s6_addr32[3]);
		rds_ib_dev_put(rds_ibdev_old);
		return rds_ib_add_ipaddr(rds_ibdev, ipaddr->s6_addr32[3]);
	}
	rds_ib_dev_put(rds_ibdev_old);

	return 0;
}

void rds_ib_add_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn)
{
	struct rds_ib_connection *ic = conn->c_transport_data;

	/* conn was previously on the nodev_conns_list */
	spin_lock_irq(&ib_nodev_conns_lock);
	BUG_ON(list_empty(&ib_nodev_conns));
	BUG_ON(list_empty(&ic->ib_node));
	list_del(&ic->ib_node);

	spin_lock(&rds_ibdev->spinlock);
	list_add_tail(&ic->ib_node, &rds_ibdev->conn_list);
	spin_unlock(&rds_ibdev->spinlock);
	spin_unlock_irq(&ib_nodev_conns_lock);

	ic->rds_ibdev = rds_ibdev;
	refcount_inc(&rds_ibdev->refcount);
}

void rds_ib_remove_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn)
{
	struct rds_ib_connection *ic = conn->c_transport_data;

	/* place conn on nodev_conns_list */
	spin_lock(&ib_nodev_conns_lock);

	spin_lock_irq(&rds_ibdev->spinlock);
	BUG_ON(list_empty(&ic->ib_node));
	list_del(&ic->ib_node);
	spin_unlock_irq(&rds_ibdev->spinlock);

	list_add_tail(&ic->ib_node, &ib_nodev_conns);

	spin_unlock(&ib_nodev_conns_lock);

	ic->rds_ibdev = NULL;
	rds_ib_dev_put(rds_ibdev);
}

void rds_ib_destroy_nodev_conns(void)
{
	struct rds_ib_connection *ic, *_ic;
	LIST_HEAD(tmp_list);

	/* avoid calling conn_destroy with irqs off */
	spin_lock_irq(&ib_nodev_conns_lock);
	list_splice(&ib_nodev_conns, &tmp_list);
	spin_unlock_irq(&ib_nodev_conns_lock);

	list_for_each_entry_safe(ic, _ic, &tmp_list, ib_node)
		rds_conn_destroy(ic->conn);
}

void rds_ib_get_mr_info(struct rds_ib_device *rds_ibdev, struct rds_info_rdma_connection *iinfo)
{
	struct rds_ib_mr_pool *pool_1m = rds_ibdev->mr_1m_pool;

	iinfo->rdma_mr_max = pool_1m->max_items;
	iinfo->rdma_mr_size = pool_1m->fmr_attr.max_pages;
}

struct rds_ib_mr *rds_ib_reuse_mr(struct rds_ib_mr_pool *pool)
{
	struct rds_ib_mr *ibmr = NULL;
	struct llist_node *ret;
	unsigned long *flag;

	preempt_disable();
	flag = this_cpu_ptr(&clean_list_grace);
	set_bit(CLEAN_LIST_BUSY_BIT, flag);
	ret = llist_del_first(&pool->clean_list);
	if (ret) {
		ibmr = llist_entry(ret, struct rds_ib_mr, llnode);
		if (pool->pool_type == RDS_IB_MR_8K_POOL)
			rds_ib_stats_inc(s_ib_rdma_mr_8k_reused);
		else
			rds_ib_stats_inc(s_ib_rdma_mr_1m_reused);
	}

	clear_bit(CLEAN_LIST_BUSY_BIT, flag);
	preempt_enable();
	return ibmr;
}

static inline void wait_clean_list_grace(void)
{
	int cpu;
	unsigned long *flag;

	for_each_online_cpu(cpu) {
		flag = &per_cpu(clean_list_grace, cpu);
		while (test_bit(CLEAN_LIST_BUSY_BIT, flag))
			cpu_relax();
	}
}

void rds_ib_sync_mr(void *trans_private, int direction)
{
	struct rds_ib_mr *ibmr = trans_private;
	struct rds_ib_device *rds_ibdev = ibmr->device;

	switch (direction) {
	case DMA_FROM_DEVICE:
		ib_dma_sync_sg_for_cpu(rds_ibdev->dev, ibmr->sg,
			ibmr->sg_dma_len, DMA_BIDIRECTIONAL);
		break;
	case DMA_TO_DEVICE:
		ib_dma_sync_sg_for_device(rds_ibdev->dev, ibmr->sg,
			ibmr->sg_dma_len, DMA_BIDIRECTIONAL);
		break;
	}
}

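/* Unmap and release the pages backing @ibmr.  The pages are always marked
 * dirty before they are put, since there is currently no way to tell a
 * read-only mapping from a read-write one (see the FIXME below).
 */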
void __rds_ib_teardown_mr(struct rds_ib_mr *ibmr)
{
	struct rds_ib_device *rds_ibdev = ibmr->device;

	if (ibmr->sg_dma_len) {
		ib_dma_unmap_sg(rds_ibdev->dev,
				ibmr->sg, ibmr->sg_len,
				DMA_BIDIRECTIONAL);
		ibmr->sg_dma_len = 0;
	}

	/* Release the s/g list */
	if (ibmr->sg_len) {
		unsigned int i;

		for (i = 0; i < ibmr->sg_len; ++i) {
			struct page *page = sg_page(&ibmr->sg[i]);

			/* FIXME we need a way to tell a r/w MR
			 * from a r/o MR */
			WARN_ON(!page->mapping && irqs_disabled());
			set_page_dirty(page);
			put_page(page);
		}
		kfree(ibmr->sg);

		ibmr->sg = NULL;
		ibmr->sg_len = 0;
	}
}

void rds_ib_teardown_mr(struct rds_ib_mr *ibmr)
{
	unsigned int pinned = ibmr->sg_len;

	__rds_ib_teardown_mr(ibmr);
	if (pinned) {
		struct rds_ib_mr_pool *pool = ibmr->pool;

		atomic_sub(pinned, &pool->free_pinned);
	}
}

static inline unsigned int rds_ib_flush_goal(struct rds_ib_mr_pool *pool, int free_all)
{
	unsigned int item_count;

	item_count = atomic_read(&pool->item_count);
	if (free_all)
		return item_count;

	return 0;
}

/*
 * given an llist of mrs, put them all into the list_head for more processing
 */
static unsigned int llist_append_to_list(struct llist_head *llist,
					 struct list_head *list)
{
	struct rds_ib_mr *ibmr;
	struct llist_node *node;
	struct llist_node *next;
	unsigned int count = 0;

	node = llist_del_all(llist);
	while (node) {
		next = node->next;
		ibmr = llist_entry(node, struct rds_ib_mr, llnode);
		list_add_tail(&ibmr->unmap_list, list);
		node = next;
		count++;
	}
	return count;
}

/*
 * this takes a list head of mrs and turns it into linked llist nodes
 * of clusters.  Each cluster has linked llist nodes of
 * MR_CLUSTER_SIZE mrs that are ready for reuse.
 */
static void list_to_llist_nodes(struct rds_ib_mr_pool *pool,
				struct list_head *list,
				struct llist_node **nodes_head,
				struct llist_node **nodes_tail)
{
	struct rds_ib_mr *ibmr;
	struct llist_node *cur = NULL;
	struct llist_node **next = nodes_head;

	list_for_each_entry(ibmr, list, unmap_list) {
		cur = &ibmr->llnode;
		*next = cur;
		next = &cur->next;
	}
	*next = NULL;
	*nodes_tail = cur;
}

/*
 * Flush our pool of MRs.
 * At a minimum, all currently unused MRs are unmapped.
 * If the number of MRs allocated exceeds the limit, we also try
 * to free as many MRs as needed to get back to this limit.
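 *
 * If @ibmr_ret is non-NULL, the caller is itself waiting for an MR: while
 * racing other flushers for flush_lock we keep trying to pull an entry off
 * clean_list so that caller can make progress, and the first MR recovered by
 * this flush is handed back through @ibmr_ret instead of being put back on
 * the clean list.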
 */
int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool,
			 int free_all, struct rds_ib_mr **ibmr_ret)
{
	struct rds_ib_mr *ibmr;
	struct llist_node *clean_nodes;
	struct llist_node *clean_tail;
	LIST_HEAD(unmap_list);
	unsigned long unpinned = 0;
	unsigned int nfreed = 0, dirty_to_clean = 0, free_goal;

	if (pool->pool_type == RDS_IB_MR_8K_POOL)
		rds_ib_stats_inc(s_ib_rdma_mr_8k_pool_flush);
	else
		rds_ib_stats_inc(s_ib_rdma_mr_1m_pool_flush);

	if (ibmr_ret) {
		DEFINE_WAIT(wait);
		while (!mutex_trylock(&pool->flush_lock)) {
			ibmr = rds_ib_reuse_mr(pool);
			if (ibmr) {
				*ibmr_ret = ibmr;
				finish_wait(&pool->flush_wait, &wait);
				goto out_nolock;
			}

			prepare_to_wait(&pool->flush_wait, &wait,
					TASK_UNINTERRUPTIBLE);
			if (llist_empty(&pool->clean_list))
				schedule();

			ibmr = rds_ib_reuse_mr(pool);
			if (ibmr) {
				*ibmr_ret = ibmr;
				finish_wait(&pool->flush_wait, &wait);
				goto out_nolock;
			}
		}
		finish_wait(&pool->flush_wait, &wait);
	} else
		mutex_lock(&pool->flush_lock);

	if (ibmr_ret) {
		ibmr = rds_ib_reuse_mr(pool);
		if (ibmr) {
			*ibmr_ret = ibmr;
			goto out;
		}
	}

	/* Get the list of all MRs to be dropped. Ordering matters -
	 * we want to put drop_list ahead of free_list.
	 */
	dirty_to_clean = llist_append_to_list(&pool->drop_list, &unmap_list);
	dirty_to_clean += llist_append_to_list(&pool->free_list, &unmap_list);
	if (free_all)
		llist_append_to_list(&pool->clean_list, &unmap_list);

	free_goal = rds_ib_flush_goal(pool, free_all);

	if (list_empty(&unmap_list))
		goto out;

	if (pool->use_fastreg)
		rds_ib_unreg_frmr(&unmap_list, &nfreed, &unpinned, free_goal);
	else
		rds_ib_unreg_fmr(&unmap_list, &nfreed, &unpinned, free_goal);

	if (!list_empty(&unmap_list)) {
		/* we have to make sure that none of the things we're about
		 * to put on the clean list would race with other cpus trying
		 * to pull items off.  The llist would explode if we managed to
		 * remove something from the clean list and then add it back again
		 * while another CPU was spinning on that same item in llist_del_first.
		 *
		 * This is pretty unlikely, but just in case wait for an llist grace period
		 * here before adding anything back into the clean list.
		 */
		wait_clean_list_grace();

		list_to_llist_nodes(pool, &unmap_list, &clean_nodes, &clean_tail);
		if (ibmr_ret)
			*ibmr_ret = llist_entry(clean_nodes, struct rds_ib_mr, llnode);

		/* more than one entry in llist nodes */
		if (clean_nodes->next)
			llist_add_batch(clean_nodes->next, clean_tail, &pool->clean_list);

	}

	atomic_sub(unpinned, &pool->free_pinned);
	atomic_sub(dirty_to_clean, &pool->dirty_count);
	atomic_sub(nfreed, &pool->item_count);

out:
	mutex_unlock(&pool->flush_lock);
	if (waitqueue_active(&pool->flush_wait))
		wake_up(&pool->flush_wait);
out_nolock:
	return 0;
}

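/* Grab an MR for a new mapping.  Reuse one off the clean_list if possible;
 * otherwise reserve a slot in the pool (item_count) and return NULL to signal
 * that the caller should allocate a fresh MR.  If the pool is full, flush it
 * and retry, giving up with -EAGAIN after a few attempts.
 */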
struct rds_ib_mr *rds_ib_try_reuse_ibmr(struct rds_ib_mr_pool *pool)
{
	struct rds_ib_mr *ibmr = NULL;
	int iter = 0;

	if (atomic_read(&pool->dirty_count) >= pool->max_items_soft / 10)
		queue_delayed_work(rds_ib_mr_wq, &pool->flush_worker, 10);

	while (1) {
		ibmr = rds_ib_reuse_mr(pool);
		if (ibmr)
			return ibmr;

		if (atomic_inc_return(&pool->item_count) <= pool->max_items)
			break;

		atomic_dec(&pool->item_count);

		if (++iter > 2) {
			if (pool->pool_type == RDS_IB_MR_8K_POOL)
				rds_ib_stats_inc(s_ib_rdma_mr_8k_pool_depleted);
			else
				rds_ib_stats_inc(s_ib_rdma_mr_1m_pool_depleted);
			return ERR_PTR(-EAGAIN);
		}

		/* We do have some empty MRs. Flush them out. */
		if (pool->pool_type == RDS_IB_MR_8K_POOL)
			rds_ib_stats_inc(s_ib_rdma_mr_8k_pool_wait);
		else
			rds_ib_stats_inc(s_ib_rdma_mr_1m_pool_wait);

		rds_ib_flush_mr_pool(pool, 0, &ibmr);
		if (ibmr)
			return ibmr;
	}

	return ibmr;
}

static void rds_ib_mr_pool_flush_worker(struct work_struct *work)
{
	struct rds_ib_mr_pool *pool = container_of(work, struct rds_ib_mr_pool, flush_worker.work);

	rds_ib_flush_mr_pool(pool, 0, NULL);
}

void rds_ib_free_mr(void *trans_private, int invalidate)
{
	struct rds_ib_mr *ibmr = trans_private;
	struct rds_ib_mr_pool *pool = ibmr->pool;
	struct rds_ib_device *rds_ibdev = ibmr->device;

	rdsdebug("RDS/IB: free_mr nents %u\n", ibmr->sg_len);

	/* Return it to the pool's free list */
	if (rds_ibdev->use_fastreg)
		rds_ib_free_frmr_list(ibmr);
	else
		rds_ib_free_fmr_list(ibmr);

	atomic_add(ibmr->sg_len, &pool->free_pinned);
	atomic_inc(&pool->dirty_count);

	/* If we've pinned too many pages, request a flush */
	if (atomic_read(&pool->free_pinned) >= pool->max_free_pinned ||
	    atomic_read(&pool->dirty_count) >= pool->max_items / 5)
		queue_delayed_work(rds_ib_mr_wq, &pool->flush_worker, 10);

	if (invalidate) {
		if (likely(!in_interrupt())) {
			rds_ib_flush_mr_pool(pool, 0, NULL);
		} else {
			/* We get here if the user created a MR marked
			 * as use_once and invalidate at the same time.
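			 * rds_ib_flush_mr_pool() can sleep, so it must not
			 * be called from interrupt context; defer the flush
			 * to the workqueue instead.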
			 */
			queue_delayed_work(rds_ib_mr_wq,
					   &pool->flush_worker, 10);
		}
	}

	rds_ib_dev_put(rds_ibdev);
}

void rds_ib_flush_mrs(void)
{
	struct rds_ib_device *rds_ibdev;

	down_read(&rds_ib_devices_lock);
	list_for_each_entry(rds_ibdev, &rds_ib_devices, list) {
		if (rds_ibdev->mr_8k_pool)
			rds_ib_flush_mr_pool(rds_ibdev->mr_8k_pool, 0, NULL);

		if (rds_ibdev->mr_1m_pool)
			rds_ib_flush_mr_pool(rds_ibdev->mr_1m_pool, 0, NULL);
	}
	up_read(&rds_ib_devices_lock);
}

void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents,
		    struct rds_sock *rs, u32 *key_ret)
{
	struct rds_ib_device *rds_ibdev;
	struct rds_ib_mr *ibmr = NULL;
	struct rds_ib_connection *ic = rs->rs_conn->c_transport_data;
	int ret;

	rds_ibdev = rds_ib_get_device(rs->rs_bound_addr.s6_addr32[3]);
	if (!rds_ibdev) {
		ret = -ENODEV;
		goto out;
	}

	if (!rds_ibdev->mr_8k_pool || !rds_ibdev->mr_1m_pool) {
		ret = -ENODEV;
		goto out;
	}

	if (rds_ibdev->use_fastreg)
		ibmr = rds_ib_reg_frmr(rds_ibdev, ic, sg, nents, key_ret);
	else
		ibmr = rds_ib_reg_fmr(rds_ibdev, sg, nents, key_ret);
	if (ibmr)
		rds_ibdev = NULL;

out:
	if (!ibmr)
		pr_warn("RDS/IB: rds_ib_get_mr failed (errno=%d)\n", ret);

	if (rds_ibdev)
		rds_ib_dev_put(rds_ibdev);

	return ibmr;
}

void rds_ib_destroy_mr_pool(struct rds_ib_mr_pool *pool)
{
	cancel_delayed_work_sync(&pool->flush_worker);
	rds_ib_flush_mr_pool(pool, 1, NULL);
	WARN_ON(atomic_read(&pool->item_count));
	WARN_ON(atomic_read(&pool->free_pinned));
	kfree(pool);
}

struct rds_ib_mr_pool *rds_ib_create_mr_pool(struct rds_ib_device *rds_ibdev,
					     int pool_type)
{
	struct rds_ib_mr_pool *pool;

	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool)
		return ERR_PTR(-ENOMEM);

	pool->pool_type = pool_type;
	init_llist_head(&pool->free_list);
	init_llist_head(&pool->drop_list);
	init_llist_head(&pool->clean_list);
	mutex_init(&pool->flush_lock);
	init_waitqueue_head(&pool->flush_wait);
	INIT_DELAYED_WORK(&pool->flush_worker, rds_ib_mr_pool_flush_worker);

	if (pool_type == RDS_IB_MR_1M_POOL) {
		/* +1 allows for unaligned MRs */
		pool->fmr_attr.max_pages = RDS_MR_1M_MSG_SIZE + 1;
		pool->max_items = rds_ibdev->max_1m_mrs;
	} else {
		/* pool_type == RDS_IB_MR_8K_POOL */
		pool->fmr_attr.max_pages = RDS_MR_8K_MSG_SIZE + 1;
		pool->max_items = rds_ibdev->max_8k_mrs;
	}

	pool->max_free_pinned = pool->max_items * pool->fmr_attr.max_pages / 4;
	pool->fmr_attr.max_maps = rds_ibdev->fmr_max_remaps;
	pool->fmr_attr.page_shift = PAGE_SHIFT;
	pool->max_items_soft = rds_ibdev->max_mrs * 3 / 4;
	pool->use_fastreg = rds_ibdev->use_fastreg;

	return pool;
}

int rds_ib_mr_init(void)
{
	rds_ib_mr_wq = alloc_workqueue("rds_mr_flushd", WQ_MEM_RECLAIM, 0);
	if (!rds_ib_mr_wq)
		return -ENOMEM;
	return 0;
}

/* By the time this is called all the IB devices should have been torn down and
 * had their pools freed. As each pool is freed its work struct is waited on,
 * so the pool flushing work queue should be idle by the time we get here.
 */
void rds_ib_mr_exit(void)
{
	destroy_workqueue(rds_ib_mr_wq);
}