/*
 * Copyright (c) 2006-2007 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_mru_cache.h"

/*
 * The MRU Cache data structure consists of a data store, an array of lists and
 * a lock to protect its internal state. At initialisation time, the client
 * supplies an element lifetime in milliseconds and a group count, as well as a
 * function pointer to call when deleting elements. A data structure for
 * queueing up work in the form of timed callbacks is also included.
 *
 * The group count controls how many lists are created, and thereby how finely
 * the elements are grouped in time. When reaping occurs, all the elements in
 * all the lists whose time has expired are deleted.
 *
 * To give an example of how this works in practice, consider a client that
 * initialises an MRU Cache with a lifetime of ten seconds and a group count of
 * five. Five internal lists will be created, each representing a two second
 * period in time. When the first element is added, time zero for the data
 * structure is initialised to the current time.
 *
 * All the elements added in the first two seconds are appended to the first
 * list. Elements added in the third second go into the second list, and so on.
 * If an element is accessed at any point, it is removed from its list and
 * inserted at the tail of the current most-recently-used list.
 *
 * The reaper function will have nothing to do until at least twelve seconds
 * have elapsed since the first element was added. The reason for this is that
 * if it were called at t=11s, there could be elements in the first list that
 * have only been inactive for nine seconds, so it still does nothing. If it is
 * called anywhere between t=12 and t=14 seconds, it will delete all the
 * elements that remain in the first list. It's therefore possible for elements
 * to remain in the data store even after they've been inactive for up to
 * (t + t/g) seconds, where t is the inactive element lifetime and g is the
 * number of groups.
 *
 * The above example assumes that the reaper function gets called at least once
 * every (t/g) seconds. If it is called less frequently, unused elements will
 * accumulate in the reap list until the reaper function is eventually called.
 * The current implementation uses work queue callbacks to carefully time the
 * reaper function calls, so this should happen rarely, if at all.
 *
 * From a design perspective, the primary reason for the choice of a list array
 * representing discrete time intervals is that it's only practical to reap
 * expired elements in groups of some appreciable size.
 * This automatically introduces a granularity to element lifetimes, so
 * there's no point storing an individual timeout with each element that
 * specifies a more precise reap time. The bonus is a saving of sizeof(long)
 * bytes of memory per element stored.
 *
 * The elements could have been stored in just one list, but an array of
 * counters or pointers would need to be maintained to allow them to be divided
 * up into discrete time groups. More critically, the process of touching or
 * removing an element would involve walking large portions of the entire list,
 * which would have a detrimental effect on performance. The additional memory
 * requirement for the array of list heads is minimal.
 *
 * When an element is touched or deleted, it needs to be removed from its
 * current list. Doubly linked lists are used to make the list maintenance
 * portion of these operations O(1). Since reaper timing can be imprecise,
 * inserts and lookups can occur when there are no free lists available. When
 * this happens, all the elements on the LRU list need to be migrated to the end
 * of the reap list. To keep the list maintenance portion of these operations
 * O(1) also, list tails need to be accessible without walking the entire list.
 * This is the reason why doubly linked list heads are used.
 */

/*
 * An MRU Cache is a dynamic data structure that stores its elements in a way
 * that allows efficient lookups, but also groups them into discrete time
 * intervals based on insertion time. This allows elements to be efficiently
 * and automatically reaped after a fixed period of inactivity.
 *
 * When a client data pointer is stored in the MRU Cache it needs to be added to
 * both the data store and to one of the lists. It must also be possible to
 * access each of these entries via the other, i.e. to:
 *
 *    a) Walk a list, removing the corresponding data store entry for each item.
 *    b) Look up a data store entry, then access its list entry directly.
 *
 * To achieve both of these goals, each entry must contain both a list entry and
 * a key, in addition to the user's data pointer. Note that it's not a good
 * idea to have the client embed one of these structures at the top of their own
 * data structure, because inserting the same item more than once would most
 * likely result in a loop in one of the lists. That's a sure-fire recipe for
 * an infinite loop in the code.
 */
typedef struct xfs_mru_cache_elem
{
	struct list_head list_node;
	unsigned long	key;
	void		*value;
} xfs_mru_cache_elem_t;

static kmem_zone_t		*xfs_mru_elem_zone;
static struct workqueue_struct	*xfs_mru_reap_wq;
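
/*
 * An illustrative sketch of how a client might drive the public API defined
 * below; it is fenced out with #if 0 and is not part of this file. The
 * example_* identifiers, struct example_item and the lifetime and group
 * count values are hypothetical.
 */
#if 0
STATIC void
example_free(
	unsigned long	key,
	void		*value)
{
	/* The client owns the value pointer, so it is freed here. */
	kmem_free(value, sizeof(struct example_item));
}

STATIC int
example_setup(
	xfs_mru_cache_t		**cachep,
	struct example_item	*item)
{
	int	error;

	/* A ten second lifetime tracked in five two-second groups. */
	error = xfs_mru_cache_create(cachep, 10000, 5, example_free);
	if (error)
		return error;

	/* Any unique unsigned long will do as a key; reuse the pointer. */
	return xfs_mru_cache_insert(*cachep, (unsigned long)item, item);
}
#endif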

/*
 * When inserting, destroying or reaping, it's first necessary to update the
 * lists relative to a particular time. In the case of destroying, that time
 * will be well in the future to ensure that all items are moved to the reap
 * list. In all other cases though, the time will be the current time.
 *
 * This function enters a loop, moving the contents of the LRU list to the reap
 * list again and again until either a) the lists are all empty, or b) time zero
 * has been advanced sufficiently to be within the immediate element lifetime.
 *
 * Case a) above is detected by counting how many groups are migrated and
 * stopping when they've all been moved. Case b) is detected by monitoring the
 * time_zero field, which is updated as each group is migrated.
 *
 * The return value is the earliest time that more migration could be needed, or
 * zero if there's no need to schedule more work because the lists are empty.
 */
STATIC unsigned long
_xfs_mru_cache_migrate(
	xfs_mru_cache_t	*mru,
	unsigned long	now)
{
	unsigned int	grp;
	unsigned int	migrated = 0;
	struct list_head *lru_list;

	/* Nothing to do if the data store is empty. */
	if (!mru->time_zero)
		return 0;

	/* While time zero is older than the time spanned by all the lists. */
	while (mru->time_zero <= now - mru->grp_count * mru->grp_time) {

		/*
		 * If the LRU list isn't empty, migrate its elements to the
		 * tail of the reap list.
		 */
		lru_list = mru->lists + mru->lru_grp;
		if (!list_empty(lru_list))
			list_splice_init(lru_list, mru->reap_list.prev);

		/*
		 * Advance the LRU group number, freeing the old LRU list to
		 * become the new MRU list; advance time zero accordingly.
		 */
		mru->lru_grp = (mru->lru_grp + 1) % mru->grp_count;
		mru->time_zero += mru->grp_time;

		/*
		 * If reaping is so far behind that all the elements on all
		 * the lists have been migrated to the reap list, it's now
		 * empty.
		 */
		if (++migrated == mru->grp_count) {
			mru->lru_grp = 0;
			mru->time_zero = 0;
			return 0;
		}
	}

	/* Find the first non-empty list from the LRU end. */
	for (grp = 0; grp < mru->grp_count; grp++) {

		/* Check the grp'th list from the LRU end. */
		lru_list = mru->lists + ((mru->lru_grp + grp) % mru->grp_count);
		if (!list_empty(lru_list))
			return mru->time_zero +
			       (mru->grp_count + grp) * mru->grp_time;
	}

	/* All the lists must be empty. */
	mru->lru_grp = 0;
	mru->time_zero = 0;
	return 0;
}

/*
 * When inserting or doing a lookup, an element needs to be inserted into the
 * MRU list. The lists must be migrated first to ensure that they're
 * up-to-date, otherwise the new element could be given a shorter lifetime in
 * the cache than it should.
 */
STATIC void
_xfs_mru_cache_list_insert(
	xfs_mru_cache_t		*mru,
	xfs_mru_cache_elem_t	*elem)
{
	unsigned int	grp = 0;
	unsigned long	now = jiffies;

	/*
	 * If the data store is empty, initialise time zero, leave grp set to
	 * zero and start the work queue timer if necessary. Otherwise, set
	 * grp to the number of group times that have elapsed since time zero.
	 */
	if (!_xfs_mru_cache_migrate(mru, now)) {
		mru->time_zero = now;
		if (!mru->queued) {
			mru->queued = 1;
			queue_delayed_work(xfs_mru_reap_wq, &mru->work,
					   mru->grp_count * mru->grp_time);
		}
	} else {
		grp = (now - mru->time_zero) / mru->grp_time;
		grp = (mru->lru_grp + grp) % mru->grp_count;
	}

	/* Insert the element at the tail of the corresponding list. */
	list_add_tail(&elem->list_node, mru->lists + grp);
}
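
/*
 * A worked example of the group selection above, restated as a pure function;
 * it is fenced out with #if 0 and example_pick_group() is hypothetical. With
 * grp_time equal to two seconds of jiffies and six lists (five groups plus
 * the extra list added at create time), an element inserted five seconds
 * after time_zero lands two lists past the current LRU list.
 */
#if 0
STATIC unsigned int
example_pick_group(
	unsigned long	now,
	unsigned long	time_zero,
	unsigned int	lru_grp,
	unsigned int	grp_count,
	unsigned long	grp_time)
{
	/* Whole group periods elapsed since the data store's time zero. */
	unsigned int	elapsed = (now - time_zero) / grp_time;

	/* The lists form a circular array, so wrap past the last one. */
	return (lru_grp + elapsed) % grp_count;
}
#endif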

/*
 * When destroying or reaping, all the elements that were migrated to the reap
 * list need to be deleted. For each element this involves removing it from
 * the data store, removing it from the reap list, calling the client's free
 * function and deleting the element from the element zone.
 */
STATIC void
_xfs_mru_cache_clear_reap_list(
	xfs_mru_cache_t	*mru)
{
	xfs_mru_cache_elem_t	*elem, *next;
	struct list_head	tmp;

	INIT_LIST_HEAD(&tmp);
	list_for_each_entry_safe(elem, next, &mru->reap_list, list_node) {

		/* Remove the element from the data store. */
		radix_tree_delete(&mru->store, elem->key);

		/*
		 * Move the element to a temporary list so it can be freed
		 * without needing to hold the lock.
		 */
		list_move(&elem->list_node, &tmp);
	}
	mutex_spinunlock(&mru->lock, 0);

	list_for_each_entry_safe(elem, next, &tmp, list_node) {

		/* Remove the element from the temporary list. */
		list_del_init(&elem->list_node);

		/* Call the client's free function with the key and value. */
		mru->free_func(elem->key, elem->value);

		/* Free the element structure. */
		kmem_zone_free(xfs_mru_elem_zone, elem);
	}

	mutex_spinlock(&mru->lock);
}

/*
 * We fire the reap timer every group expiry interval so we always have a
 * reaper ready to run. This makes shutdown and flushing of the reaper easy to
 * do. Hence we need to keep track of when the next reap must occur so that we
 * can determine at each interval whether there is anything we need to do.
 */
STATIC void
_xfs_mru_cache_reap(
	struct work_struct	*work)
{
	xfs_mru_cache_t	*mru = container_of(work, xfs_mru_cache_t, work.work);
	unsigned long	now, next;

	ASSERT(mru && mru->lists);
	if (!mru || !mru->lists)
		return;

	mutex_spinlock(&mru->lock);
	next = _xfs_mru_cache_migrate(mru, jiffies);
	_xfs_mru_cache_clear_reap_list(mru);

	mru->queued = next;
	if (mru->queued > 0) {
		now = jiffies;
		if (next <= now)
			next = 0;
		else
			next -= now;
		queue_delayed_work(xfs_mru_reap_wq, &mru->work, next);
	}

	mutex_spinunlock(&mru->lock, 0);
}

int
xfs_mru_cache_init(void)
{
	xfs_mru_elem_zone = kmem_zone_init(sizeof(xfs_mru_cache_elem_t),
					   "xfs_mru_cache_elem");
	if (!xfs_mru_elem_zone)
		return ENOMEM;

	xfs_mru_reap_wq = create_singlethread_workqueue("xfs_mru_cache");
	if (!xfs_mru_reap_wq) {
		kmem_zone_destroy(xfs_mru_elem_zone);
		return ENOMEM;
	}

	return 0;
}

void
xfs_mru_cache_uninit(void)
{
	destroy_workqueue(xfs_mru_reap_wq);
	kmem_zone_destroy(xfs_mru_elem_zone);
}
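
/*
 * An illustrative sketch of how the init/uninit pair above is meant to
 * bracket all cache usage, e.g. from subsystem setup and teardown paths; it
 * is fenced out with #if 0 and the example_* names are hypothetical.
 */
#if 0
STATIC int
example_subsystem_init(void)
{
	int	error;

	/* Set up the shared element zone and reaper workqueue once. */
	error = xfs_mru_cache_init();
	if (error)
		return error;

	/* ... create and use individual caches here ... */
	return 0;
}

STATIC void
example_subsystem_exit(void)
{
	/* Every cache must be destroyed before the reaper is torn down. */
	xfs_mru_cache_uninit();
}
#endif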

/*
 * To initialise a struct xfs_mru_cache pointer, call xfs_mru_cache_create()
 * with the address of the pointer, a lifetime value in milliseconds, a group
 * count and a free function to use when deleting elements. This function
 * returns 0 if the initialisation was successful.
 */
int
xfs_mru_cache_create(
	xfs_mru_cache_t		**mrup,
	unsigned int		lifetime_ms,
	unsigned int		grp_count,
	xfs_mru_cache_free_func_t free_func)
{
	xfs_mru_cache_t	*mru = NULL;
	int		err = 0, grp;
	unsigned int	grp_time;

	if (mrup)
		*mrup = NULL;

	if (!mrup || !grp_count || !lifetime_ms || !free_func)
		return EINVAL;

	if (!(grp_time = msecs_to_jiffies(lifetime_ms) / grp_count))
		return EINVAL;

	if (!(mru = kmem_zalloc(sizeof(*mru), KM_SLEEP)))
		return ENOMEM;

	/* An extra list is needed to avoid reaping up to a grp_time early. */
	mru->grp_count = grp_count + 1;
	mru->lists = kmem_zalloc(mru->grp_count * sizeof(*mru->lists),
				 KM_SLEEP);

	if (!mru->lists) {
		err = ENOMEM;
		goto exit;
	}

	for (grp = 0; grp < mru->grp_count; grp++)
		INIT_LIST_HEAD(mru->lists + grp);

	/*
	 * We use GFP_KERNEL radix tree preload and do inserts under a
	 * spinlock so GFP_ATOMIC is appropriate for the radix tree itself.
	 */
	INIT_RADIX_TREE(&mru->store, GFP_ATOMIC);
	INIT_LIST_HEAD(&mru->reap_list);
	spinlock_init(&mru->lock, "xfs_mru_cache");
	INIT_DELAYED_WORK(&mru->work, _xfs_mru_cache_reap);

	mru->grp_time = grp_time;
	mru->free_func = free_func;

	*mrup = mru;

exit:
	if (err && mru && mru->lists)
		kmem_free(mru->lists, mru->grp_count * sizeof(*mru->lists));
	if (err && mru)
		kmem_free(mru, sizeof(*mru));

	return err;
}

/*
 * Call xfs_mru_cache_flush() to flush out all cached entries, calling their
 * free functions as they're deleted. When this function returns, the caller
 * is guaranteed that all the free functions for all the elements have finished
 * executing and the reaper is not running.
 */
void
xfs_mru_cache_flush(
	xfs_mru_cache_t	*mru)
{
	if (!mru || !mru->lists)
		return;

	mutex_spinlock(&mru->lock);
	if (mru->queued) {
		mutex_spinunlock(&mru->lock, 0);
		cancel_rearming_delayed_workqueue(xfs_mru_reap_wq, &mru->work);
		mutex_spinlock(&mru->lock);
	}

	_xfs_mru_cache_migrate(mru, jiffies + mru->grp_count * mru->grp_time);
	_xfs_mru_cache_clear_reap_list(mru);

	mutex_spinunlock(&mru->lock, 0);
}

void
xfs_mru_cache_destroy(
	xfs_mru_cache_t	*mru)
{
	if (!mru || !mru->lists)
		return;

	xfs_mru_cache_flush(mru);

	kmem_free(mru->lists, mru->grp_count * sizeof(*mru->lists));
	kmem_free(mru, sizeof(*mru));
}

/*
 * To insert an element, call xfs_mru_cache_insert() with the data store, the
 * element's key and the client data pointer. This function returns 0 on
 * success or ENOMEM if memory for the data element couldn't be allocated.
 */
int
xfs_mru_cache_insert(
	xfs_mru_cache_t	*mru,
	unsigned long	key,
	void		*value)
{
	xfs_mru_cache_elem_t	*elem;

	ASSERT(mru && mru->lists);
	if (!mru || !mru->lists)
		return EINVAL;

	elem = kmem_zone_zalloc(xfs_mru_elem_zone, KM_SLEEP);
	if (!elem)
		return ENOMEM;

	if (radix_tree_preload(GFP_KERNEL)) {
		kmem_zone_free(xfs_mru_elem_zone, elem);
		return ENOMEM;
	}

	INIT_LIST_HEAD(&elem->list_node);
	elem->key = key;
	elem->value = value;

	mutex_spinlock(&mru->lock);

	radix_tree_insert(&mru->store, key, elem);
	radix_tree_preload_end();
	_xfs_mru_cache_list_insert(mru, elem);

	mutex_spinunlock(&mru->lock, 0);

	return 0;
}
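
/*
 * An illustrative sketch of insertion under this file's positive errno
 * convention; it is fenced out with #if 0 and example_insert() is
 * hypothetical.
 */
#if 0
STATIC int
example_insert(
	xfs_mru_cache_t	*cache,
	unsigned long	key,
	void		*value)
{
	int	error;

	/* May sleep: the element zone allocation uses KM_SLEEP. */
	error = xfs_mru_cache_insert(cache, key, value);
	if (error)
		return error;	/* Positive errno, e.g. ENOMEM. */

	return 0;
}
#endif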

/*
 * To remove an element without calling the free function, call
 * xfs_mru_cache_remove() with the data store and the element's key. On
 * success the client data pointer for the removed element is returned,
 * otherwise this function will return a NULL pointer.
 */
void *
xfs_mru_cache_remove(
	xfs_mru_cache_t	*mru,
	unsigned long	key)
{
	xfs_mru_cache_elem_t	*elem;
	void			*value = NULL;

	ASSERT(mru && mru->lists);
	if (!mru || !mru->lists)
		return NULL;

	mutex_spinlock(&mru->lock);
	elem = radix_tree_delete(&mru->store, key);
	if (elem) {
		value = elem->value;
		list_del(&elem->list_node);
	}

	mutex_spinunlock(&mru->lock, 0);

	if (elem)
		kmem_zone_free(xfs_mru_elem_zone, elem);

	return value;
}

/*
 * To remove an element and call the free function, call
 * xfs_mru_cache_delete() with the data store and the element's key.
 */
void
xfs_mru_cache_delete(
	xfs_mru_cache_t	*mru,
	unsigned long	key)
{
	void	*value = xfs_mru_cache_remove(mru, key);

	if (value)
		mru->free_func(key, value);
}

/*
 * To look up an element using its key, call xfs_mru_cache_lookup() with the
 * data store and the element's key. If found, the element will be moved to
 * the tail of the MRU list to indicate that it's been touched.
 *
 * The internal data structures are protected by a spinlock that is STILL HELD
 * when this function returns. Call xfs_mru_cache_done() to release it. Note
 * that it is not safe to call any function that might sleep in the interim.
 *
 * The implementation could have used reference counting to avoid this
 * restriction, but since most clients simply want to get, set or test a member
 * of the returned data structure, the extra per-element memory isn't
 * warranted.
 *
 * If the element isn't found, this function returns NULL and the spinlock is
 * released. xfs_mru_cache_done() should NOT be called when this occurs.
 */
void *
xfs_mru_cache_lookup(
	xfs_mru_cache_t	*mru,
	unsigned long	key)
{
	xfs_mru_cache_elem_t	*elem;

	ASSERT(mru && mru->lists);
	if (!mru || !mru->lists)
		return NULL;

	mutex_spinlock(&mru->lock);
	elem = radix_tree_lookup(&mru->store, key);
	if (elem) {
		list_del(&elem->list_node);
		_xfs_mru_cache_list_insert(mru, elem);
	} else
		mutex_spinunlock(&mru->lock, 0);

	return elem ? elem->value : NULL;
}

/*
 * To look up an element using its key, but leave its location in the internal
 * lists alone, call xfs_mru_cache_peek(). If the element isn't found, this
 * function returns NULL.
 *
 * See the comments above the declaration of the xfs_mru_cache_lookup()
 * function for important locking information pertaining to this call.
 */
void *
xfs_mru_cache_peek(
	xfs_mru_cache_t	*mru,
	unsigned long	key)
{
	xfs_mru_cache_elem_t	*elem;

	ASSERT(mru && mru->lists);
	if (!mru || !mru->lists)
		return NULL;

	mutex_spinlock(&mru->lock);
	elem = radix_tree_lookup(&mru->store, key);
	if (!elem)
		mutex_spinunlock(&mru->lock, 0);

	return elem ? elem->value : NULL;
}

/*
 * To release the internal data structure spinlock after having performed an
 * xfs_mru_cache_lookup() or an xfs_mru_cache_peek(), call xfs_mru_cache_done()
 * with the data store pointer.
 */
void
xfs_mru_cache_done(
	xfs_mru_cache_t	*mru)
{
	mutex_spinunlock(&mru->lock, 0);
}
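
/*
 * An illustrative sketch of the locking discipline that xfs_mru_cache_lookup()
 * above requires: on a hit the cache spinlock is still held, so the critical
 * section must not sleep and must end with xfs_mru_cache_done(). The block is
 * fenced out with #if 0; example_touch() and struct example_item (with its
 * hits member) are hypothetical.
 */
#if 0
STATIC int
example_touch(
	xfs_mru_cache_t	*cache,
	unsigned long	key)
{
	struct example_item	*item;

	item = xfs_mru_cache_lookup(cache, key);
	if (!item)
		return ENOENT;	/* Miss: the spinlock was already released. */

	/* Spinlock held: touch the element, but do not sleep here. */
	item->hits++;

	/* Hit: drop the spinlock. */
	xfs_mru_cache_done(cache);
	return 0;
}
#endif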