1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * (C) 2001 Clemson University and The University of Chicago 4 * 5 * See COPYING in top-level directory. 6 */ 7 #include "protocol.h" 8 #include "orangefs-kernel.h" 9 #include "orangefs-bufmap.h" 10 11 struct slot_map { 12 int c; 13 wait_queue_head_t q; 14 int count; 15 unsigned long *map; 16 }; 17 18 static struct slot_map rw_map = { 19 .c = -1, 20 .q = __WAIT_QUEUE_HEAD_INITIALIZER(rw_map.q) 21 }; 22 static struct slot_map readdir_map = { 23 .c = -1, 24 .q = __WAIT_QUEUE_HEAD_INITIALIZER(readdir_map.q) 25 }; 26 27 28 static void install(struct slot_map *m, int count, unsigned long *map) 29 { 30 spin_lock(&m->q.lock); 31 m->c = m->count = count; 32 m->map = map; 33 wake_up_all_locked(&m->q); 34 spin_unlock(&m->q.lock); 35 } 36 37 static void mark_killed(struct slot_map *m) 38 { 39 spin_lock(&m->q.lock); 40 m->c -= m->count + 1; 41 spin_unlock(&m->q.lock); 42 } 43 44 static void run_down(struct slot_map *m) 45 { 46 DEFINE_WAIT(wait); 47 spin_lock(&m->q.lock); 48 if (m->c != -1) { 49 for (;;) { 50 if (likely(list_empty(&wait.entry))) 51 __add_wait_queue_entry_tail(&m->q, &wait); 52 set_current_state(TASK_UNINTERRUPTIBLE); 53 54 if (m->c == -1) 55 break; 56 57 spin_unlock(&m->q.lock); 58 schedule(); 59 spin_lock(&m->q.lock); 60 } 61 __remove_wait_queue(&m->q, &wait); 62 __set_current_state(TASK_RUNNING); 63 } 64 m->map = NULL; 65 spin_unlock(&m->q.lock); 66 } 67 68 static void put(struct slot_map *m, int slot) 69 { 70 int v; 71 spin_lock(&m->q.lock); 72 __clear_bit(slot, m->map); 73 v = ++m->c; 74 if (v > 0) 75 wake_up_locked(&m->q); 76 if (unlikely(v == -1)) /* finished dying */ 77 wake_up_all_locked(&m->q); 78 spin_unlock(&m->q.lock); 79 } 80 81 static int wait_for_free(struct slot_map *m) 82 { 83 long left = slot_timeout_secs * HZ; 84 DEFINE_WAIT(wait); 85 86 do { 87 long n = left, t; 88 if (likely(list_empty(&wait.entry))) 89 __add_wait_queue_entry_tail_exclusive(&m->q, &wait); 90 set_current_state(TASK_INTERRUPTIBLE); 91 92 if (m->c > 0) 93 break; 94 95 if (m->c < 0) { 96 /* we are waiting for map to be installed */ 97 /* it would better be there soon, or we go away */ 98 if (n > ORANGEFS_BUFMAP_WAIT_TIMEOUT_SECS * HZ) 99 n = ORANGEFS_BUFMAP_WAIT_TIMEOUT_SECS * HZ; 100 } 101 spin_unlock(&m->q.lock); 102 t = schedule_timeout(n); 103 spin_lock(&m->q.lock); 104 if (unlikely(!t) && n != left && m->c < 0) 105 left = t; 106 else 107 left = t + (left - n); 108 if (signal_pending(current)) 109 left = -EINTR; 110 } while (left > 0); 111 112 if (!list_empty(&wait.entry)) 113 list_del(&wait.entry); 114 else if (left <= 0 && waitqueue_active(&m->q)) 115 __wake_up_locked_key(&m->q, TASK_INTERRUPTIBLE, NULL); 116 __set_current_state(TASK_RUNNING); 117 118 if (likely(left > 0)) 119 return 0; 120 121 return left < 0 ? -EINTR : -ETIMEDOUT; 122 } 123 124 static int get(struct slot_map *m) 125 { 126 int res = 0; 127 spin_lock(&m->q.lock); 128 if (unlikely(m->c <= 0)) 129 res = wait_for_free(m); 130 if (likely(!res)) { 131 m->c--; 132 res = find_first_zero_bit(m->map, m->count); 133 __set_bit(res, m->map); 134 } 135 spin_unlock(&m->q.lock); 136 return res; 137 } 138 139 /* used to describe mapped buffers */ 140 struct orangefs_bufmap_desc { 141 void __user *uaddr; /* user space address pointer */ 142 struct page **page_array; /* array of mapped pages */ 143 int array_count; /* size of above arrays */ 144 struct list_head list_link; 145 }; 146 147 static struct orangefs_bufmap { 148 int desc_size; 149 int desc_shift; 150 int desc_count; 151 int total_size; 152 int page_count; 153 154 struct page **page_array; 155 struct orangefs_bufmap_desc *desc_array; 156 157 /* array to track usage of buffer descriptors */ 158 unsigned long *buffer_index_array; 159 160 /* array to track usage of buffer descriptors for readdir */ 161 #define N DIV_ROUND_UP(ORANGEFS_READDIR_DEFAULT_DESC_COUNT, BITS_PER_LONG) 162 unsigned long readdir_index_array[N]; 163 #undef N 164 } *__orangefs_bufmap; 165 166 static DEFINE_SPINLOCK(orangefs_bufmap_lock); 167 168 static void 169 orangefs_bufmap_unmap(struct orangefs_bufmap *bufmap) 170 { 171 int i; 172 173 for (i = 0; i < bufmap->page_count; i++) 174 put_page(bufmap->page_array[i]); 175 } 176 177 static void 178 orangefs_bufmap_free(struct orangefs_bufmap *bufmap) 179 { 180 kfree(bufmap->page_array); 181 kfree(bufmap->desc_array); 182 kfree(bufmap->buffer_index_array); 183 kfree(bufmap); 184 } 185 186 /* 187 * XXX: Can the size and shift change while the caller gives up the 188 * XXX: lock between calling this and doing something useful? 189 */ 190 191 int orangefs_bufmap_size_query(void) 192 { 193 struct orangefs_bufmap *bufmap; 194 int size = 0; 195 spin_lock(&orangefs_bufmap_lock); 196 bufmap = __orangefs_bufmap; 197 if (bufmap) 198 size = bufmap->desc_size; 199 spin_unlock(&orangefs_bufmap_lock); 200 return size; 201 } 202 203 int orangefs_bufmap_shift_query(void) 204 { 205 struct orangefs_bufmap *bufmap; 206 int shift = 0; 207 spin_lock(&orangefs_bufmap_lock); 208 bufmap = __orangefs_bufmap; 209 if (bufmap) 210 shift = bufmap->desc_shift; 211 spin_unlock(&orangefs_bufmap_lock); 212 return shift; 213 } 214 215 static DECLARE_WAIT_QUEUE_HEAD(bufmap_waitq); 216 static DECLARE_WAIT_QUEUE_HEAD(readdir_waitq); 217 218 static struct orangefs_bufmap * 219 orangefs_bufmap_alloc(struct ORANGEFS_dev_map_desc *user_desc) 220 { 221 struct orangefs_bufmap *bufmap; 222 223 bufmap = kzalloc(sizeof(*bufmap), GFP_KERNEL); 224 if (!bufmap) 225 goto out; 226 227 bufmap->total_size = user_desc->total_size; 228 bufmap->desc_count = user_desc->count; 229 bufmap->desc_size = user_desc->size; 230 bufmap->desc_shift = ilog2(bufmap->desc_size); 231 232 bufmap->buffer_index_array = 233 kzalloc(DIV_ROUND_UP(bufmap->desc_count, BITS_PER_LONG), GFP_KERNEL); 234 if (!bufmap->buffer_index_array) 235 goto out_free_bufmap; 236 237 bufmap->desc_array = 238 kcalloc(bufmap->desc_count, sizeof(struct orangefs_bufmap_desc), 239 GFP_KERNEL); 240 if (!bufmap->desc_array) 241 goto out_free_index_array; 242 243 bufmap->page_count = bufmap->total_size / PAGE_SIZE; 244 245 /* allocate storage to track our page mappings */ 246 bufmap->page_array = 247 kcalloc(bufmap->page_count, sizeof(struct page *), GFP_KERNEL); 248 if (!bufmap->page_array) 249 goto out_free_desc_array; 250 251 return bufmap; 252 253 out_free_desc_array: 254 kfree(bufmap->desc_array); 255 out_free_index_array: 256 kfree(bufmap->buffer_index_array); 257 out_free_bufmap: 258 kfree(bufmap); 259 out: 260 return NULL; 261 } 262 263 static int 264 orangefs_bufmap_map(struct orangefs_bufmap *bufmap, 265 struct ORANGEFS_dev_map_desc *user_desc) 266 { 267 int pages_per_desc = bufmap->desc_size / PAGE_SIZE; 268 int offset = 0, ret, i; 269 270 /* map the pages */ 271 ret = get_user_pages_fast((unsigned long)user_desc->ptr, 272 bufmap->page_count, 1, bufmap->page_array); 273 274 if (ret < 0) 275 return ret; 276 277 if (ret != bufmap->page_count) { 278 gossip_err("orangefs error: asked for %d pages, only got %d.\n", 279 bufmap->page_count, ret); 280 281 for (i = 0; i < ret; i++) { 282 SetPageError(bufmap->page_array[i]); 283 put_page(bufmap->page_array[i]); 284 } 285 return -ENOMEM; 286 } 287 288 /* 289 * ideally we want to get kernel space pointers for each page, but 290 * we can't kmap that many pages at once if highmem is being used. 291 * so instead, we just kmap/kunmap the page address each time the 292 * kaddr is needed. 293 */ 294 for (i = 0; i < bufmap->page_count; i++) 295 flush_dcache_page(bufmap->page_array[i]); 296 297 /* build a list of available descriptors */ 298 for (offset = 0, i = 0; i < bufmap->desc_count; i++) { 299 bufmap->desc_array[i].page_array = &bufmap->page_array[offset]; 300 bufmap->desc_array[i].array_count = pages_per_desc; 301 bufmap->desc_array[i].uaddr = 302 (user_desc->ptr + (i * pages_per_desc * PAGE_SIZE)); 303 offset += pages_per_desc; 304 } 305 306 return 0; 307 } 308 309 /* 310 * orangefs_bufmap_initialize() 311 * 312 * initializes the mapped buffer interface 313 * 314 * returns 0 on success, -errno on failure 315 */ 316 int orangefs_bufmap_initialize(struct ORANGEFS_dev_map_desc *user_desc) 317 { 318 struct orangefs_bufmap *bufmap; 319 int ret = -EINVAL; 320 321 gossip_debug(GOSSIP_BUFMAP_DEBUG, 322 "orangefs_bufmap_initialize: called (ptr (" 323 "%p) sz (%d) cnt(%d).\n", 324 user_desc->ptr, 325 user_desc->size, 326 user_desc->count); 327 328 if (user_desc->total_size < 0 || 329 user_desc->size < 0 || 330 user_desc->count < 0) 331 goto out; 332 333 /* 334 * sanity check alignment and size of buffer that caller wants to 335 * work with 336 */ 337 if (PAGE_ALIGN((unsigned long)user_desc->ptr) != 338 (unsigned long)user_desc->ptr) { 339 gossip_err("orangefs error: memory alignment (front). %p\n", 340 user_desc->ptr); 341 goto out; 342 } 343 344 if (PAGE_ALIGN(((unsigned long)user_desc->ptr + user_desc->total_size)) 345 != (unsigned long)(user_desc->ptr + user_desc->total_size)) { 346 gossip_err("orangefs error: memory alignment (back).(%p + %d)\n", 347 user_desc->ptr, 348 user_desc->total_size); 349 goto out; 350 } 351 352 if (user_desc->total_size != (user_desc->size * user_desc->count)) { 353 gossip_err("orangefs error: user provided an oddly sized buffer: (%d, %d, %d)\n", 354 user_desc->total_size, 355 user_desc->size, 356 user_desc->count); 357 goto out; 358 } 359 360 if ((user_desc->size % PAGE_SIZE) != 0) { 361 gossip_err("orangefs error: bufmap size not page size divisible (%d).\n", 362 user_desc->size); 363 goto out; 364 } 365 366 ret = -ENOMEM; 367 bufmap = orangefs_bufmap_alloc(user_desc); 368 if (!bufmap) 369 goto out; 370 371 ret = orangefs_bufmap_map(bufmap, user_desc); 372 if (ret) 373 goto out_free_bufmap; 374 375 376 spin_lock(&orangefs_bufmap_lock); 377 if (__orangefs_bufmap) { 378 spin_unlock(&orangefs_bufmap_lock); 379 gossip_err("orangefs: error: bufmap already initialized.\n"); 380 ret = -EINVAL; 381 goto out_unmap_bufmap; 382 } 383 __orangefs_bufmap = bufmap; 384 install(&rw_map, 385 bufmap->desc_count, 386 bufmap->buffer_index_array); 387 install(&readdir_map, 388 ORANGEFS_READDIR_DEFAULT_DESC_COUNT, 389 bufmap->readdir_index_array); 390 spin_unlock(&orangefs_bufmap_lock); 391 392 gossip_debug(GOSSIP_BUFMAP_DEBUG, 393 "orangefs_bufmap_initialize: exiting normally\n"); 394 return 0; 395 396 out_unmap_bufmap: 397 orangefs_bufmap_unmap(bufmap); 398 out_free_bufmap: 399 orangefs_bufmap_free(bufmap); 400 out: 401 return ret; 402 } 403 404 /* 405 * orangefs_bufmap_finalize() 406 * 407 * shuts down the mapped buffer interface and releases any resources 408 * associated with it 409 * 410 * no return value 411 */ 412 void orangefs_bufmap_finalize(void) 413 { 414 struct orangefs_bufmap *bufmap = __orangefs_bufmap; 415 if (!bufmap) 416 return; 417 gossip_debug(GOSSIP_BUFMAP_DEBUG, "orangefs_bufmap_finalize: called\n"); 418 mark_killed(&rw_map); 419 mark_killed(&readdir_map); 420 gossip_debug(GOSSIP_BUFMAP_DEBUG, 421 "orangefs_bufmap_finalize: exiting normally\n"); 422 } 423 424 void orangefs_bufmap_run_down(void) 425 { 426 struct orangefs_bufmap *bufmap = __orangefs_bufmap; 427 if (!bufmap) 428 return; 429 run_down(&rw_map); 430 run_down(&readdir_map); 431 spin_lock(&orangefs_bufmap_lock); 432 __orangefs_bufmap = NULL; 433 spin_unlock(&orangefs_bufmap_lock); 434 orangefs_bufmap_unmap(bufmap); 435 orangefs_bufmap_free(bufmap); 436 } 437 438 /* 439 * orangefs_bufmap_get() 440 * 441 * gets a free mapped buffer descriptor, will sleep until one becomes 442 * available if necessary 443 * 444 * returns slot on success, -errno on failure 445 */ 446 int orangefs_bufmap_get(void) 447 { 448 return get(&rw_map); 449 } 450 451 /* 452 * orangefs_bufmap_put() 453 * 454 * returns a mapped buffer descriptor to the collection 455 * 456 * no return value 457 */ 458 void orangefs_bufmap_put(int buffer_index) 459 { 460 put(&rw_map, buffer_index); 461 } 462 463 /* 464 * orangefs_readdir_index_get() 465 * 466 * gets a free descriptor, will sleep until one becomes 467 * available if necessary. 468 * Although the readdir buffers are not mapped into kernel space 469 * we could do that at a later point of time. Regardless, these 470 * indices are used by the client-core. 471 * 472 * returns slot on success, -errno on failure 473 */ 474 int orangefs_readdir_index_get(void) 475 { 476 return get(&readdir_map); 477 } 478 479 void orangefs_readdir_index_put(int buffer_index) 480 { 481 put(&readdir_map, buffer_index); 482 } 483 484 /* 485 * we've been handed an iovec, we need to copy it to 486 * the shared memory descriptor at "buffer_index". 487 */ 488 int orangefs_bufmap_copy_from_iovec(struct iov_iter *iter, 489 int buffer_index, 490 size_t size) 491 { 492 struct orangefs_bufmap_desc *to; 493 int i; 494 495 gossip_debug(GOSSIP_BUFMAP_DEBUG, 496 "%s: buffer_index:%d: size:%zu:\n", 497 __func__, buffer_index, size); 498 499 to = &__orangefs_bufmap->desc_array[buffer_index]; 500 for (i = 0; size; i++) { 501 struct page *page = to->page_array[i]; 502 size_t n = size; 503 if (n > PAGE_SIZE) 504 n = PAGE_SIZE; 505 if (copy_page_from_iter(page, 0, n, iter) != n) 506 return -EFAULT; 507 size -= n; 508 } 509 return 0; 510 } 511 512 /* 513 * we've been handed an iovec, we need to fill it from 514 * the shared memory descriptor at "buffer_index". 515 */ 516 int orangefs_bufmap_copy_to_iovec(struct iov_iter *iter, 517 int buffer_index, 518 size_t size) 519 { 520 struct orangefs_bufmap_desc *from; 521 int i; 522 523 from = &__orangefs_bufmap->desc_array[buffer_index]; 524 gossip_debug(GOSSIP_BUFMAP_DEBUG, 525 "%s: buffer_index:%d: size:%zu:\n", 526 __func__, buffer_index, size); 527 528 529 for (i = 0; size; i++) { 530 struct page *page = from->page_array[i]; 531 size_t n = size; 532 if (n > PAGE_SIZE) 533 n = PAGE_SIZE; 534 n = copy_page_to_iter(page, 0, n, iter); 535 if (!n) 536 return -EFAULT; 537 size -= n; 538 } 539 return 0; 540 } 541