/*
 * Register cache access API - rbtree caching support
 *
 * Copyright 2011 Wolfson Microelectronics plc
 *
 * Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/rbtree.h>
#include <linux/seq_file.h>
#include <linux/slab.h>

#include "internal.h"

static int regcache_rbtree_write(struct regmap *map, unsigned int reg,
				 unsigned int value);
static int regcache_rbtree_exit(struct regmap *map);

struct regcache_rbtree_node {
	/* block of adjacent registers */
	void *block;
	/* which registers are present */
	unsigned long *cache_present;
	/* base register handled by this block */
	unsigned int base_reg;
	/* number of registers available in the block */
	unsigned int blklen;
	/* the actual rbtree node holding this block */
	struct rb_node node;
};

struct regcache_rbtree_ctx {
	struct rb_root root;
	struct regcache_rbtree_node *cached_rbnode;
};

static inline void regcache_rbtree_get_base_top_reg(
	struct regmap *map,
	struct regcache_rbtree_node *rbnode,
	unsigned int *base, unsigned int *top)
{
	*base = rbnode->base_reg;
	*top = rbnode->base_reg + ((rbnode->blklen - 1) * map->reg_stride);
}

static unsigned int regcache_rbtree_get_register(struct regmap *map,
	struct regcache_rbtree_node *rbnode, unsigned int idx)
{
	return regcache_get_val(map, rbnode->block, idx);
}

static void regcache_rbtree_set_register(struct regmap *map,
					 struct regcache_rbtree_node *rbnode,
					 unsigned int idx, unsigned int val)
{
	set_bit(idx, rbnode->cache_present);
	regcache_set_val(map, rbnode->block, idx, val);
}

static struct regcache_rbtree_node *regcache_rbtree_lookup(struct regmap *map,
							   unsigned int reg)
{
	struct regcache_rbtree_ctx *rbtree_ctx = map->cache;
	struct rb_node *node;
	struct regcache_rbtree_node *rbnode;
	unsigned int base_reg, top_reg;

	/* fast path: check the most recently used node first */
	rbnode = rbtree_ctx->cached_rbnode;
	if (rbnode) {
		regcache_rbtree_get_base_top_reg(map, rbnode, &base_reg,
						 &top_reg);
		if (reg >= base_reg && reg <= top_reg)
			return rbnode;
	}

	node = rbtree_ctx->root.rb_node;
	while (node) {
		rbnode = rb_entry(node, struct regcache_rbtree_node, node);
		regcache_rbtree_get_base_top_reg(map, rbnode, &base_reg,
						 &top_reg);
		if (reg >= base_reg && reg <= top_reg) {
			rbtree_ctx->cached_rbnode = rbnode;
			return rbnode;
		} else if (reg > top_reg) {
			node = node->rb_right;
		} else if (reg < base_reg) {
			node = node->rb_left;
		}
	}

	return NULL;
}

static int regcache_rbtree_insert(struct regmap *map, struct rb_root *root,
				  struct regcache_rbtree_node *rbnode)
{
	struct rb_node **new, *parent;
	struct regcache_rbtree_node *rbnode_tmp;
	unsigned int base_reg_tmp, top_reg_tmp;
	unsigned int base_reg;

	parent = NULL;
	new = &root->rb_node;
	while (*new) {
		rbnode_tmp = rb_entry(*new, struct regcache_rbtree_node, node);
		/* base and top registers of the current rbnode */
		regcache_rbtree_get_base_top_reg(map, rbnode_tmp, &base_reg_tmp,
						 &top_reg_tmp);
		/* base register of the rbnode to be added */
		base_reg = rbnode->base_reg;
		parent = *new;
		/* if this register has already been inserted, just return */
		if (base_reg >= base_reg_tmp &&
		    base_reg <= top_reg_tmp)
			return 0;
		else if (base_reg > top_reg_tmp)
			new = &((*new)->rb_right);
		else if (base_reg < base_reg_tmp)
			new = &((*new)->rb_left);
	}

	/* insert the node into the rbtree */
	rb_link_node(&rbnode->node, parent, new);
	rb_insert_color(&rbnode->node, root);

	return 1;
}
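
/*
 * When built with CONFIG_DEBUG_FS the cache exposes a read-only "rbtree"
 * file which prints one "base-top (count)" line per block, followed by a
 * summary of node count, cached registers and memory consumed.
 */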

#ifdef CONFIG_DEBUG_FS
static int rbtree_show(struct seq_file *s, void *ignored)
{
	struct regmap *map = s->private;
	struct regcache_rbtree_ctx *rbtree_ctx = map->cache;
	struct regcache_rbtree_node *n;
	struct rb_node *node;
	unsigned int base, top;
	size_t mem_size;
	int nodes = 0;
	int registers = 0;
	int this_registers, average;

	map->lock(map->lock_arg);

	mem_size = sizeof(*rbtree_ctx);

	for (node = rb_first(&rbtree_ctx->root); node != NULL;
	     node = rb_next(node)) {
		n = rb_entry(node, struct regcache_rbtree_node, node);
		mem_size += sizeof(*n);
		mem_size += (n->blklen * map->cache_word_size);
		mem_size += BITS_TO_LONGS(n->blklen) * sizeof(long);

		regcache_rbtree_get_base_top_reg(map, n, &base, &top);
		this_registers = ((top - base) / map->reg_stride) + 1;
		seq_printf(s, "%x-%x (%d)\n", base, top, this_registers);

		nodes++;
		registers += this_registers;
	}

	if (nodes)
		average = registers / nodes;
	else
		average = 0;

	seq_printf(s, "%d nodes, %d registers, average %d registers, used %zu bytes\n",
		   nodes, registers, average, mem_size);

	map->unlock(map->lock_arg);

	return 0;
}

static int rbtree_open(struct inode *inode, struct file *file)
{
	return single_open(file, rbtree_show, inode->i_private);
}

static const struct file_operations rbtree_fops = {
	.open		= rbtree_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static void rbtree_debugfs_init(struct regmap *map)
{
	debugfs_create_file("rbtree", 0400, map->debugfs, map, &rbtree_fops);
}
#endif

static int regcache_rbtree_init(struct regmap *map)
{
	struct regcache_rbtree_ctx *rbtree_ctx;
	int i;
	int ret;

	map->cache = kmalloc(sizeof(*rbtree_ctx), GFP_KERNEL);
	if (!map->cache)
		return -ENOMEM;

	rbtree_ctx = map->cache;
	rbtree_ctx->root = RB_ROOT;
	rbtree_ctx->cached_rbnode = NULL;

	for (i = 0; i < map->num_reg_defaults; i++) {
		ret = regcache_rbtree_write(map,
					    map->reg_defaults[i].reg,
					    map->reg_defaults[i].def);
		if (ret)
			goto err;
	}

	return 0;

err:
	regcache_rbtree_exit(map);
	return ret;
}

static int regcache_rbtree_exit(struct regmap *map)
{
	struct rb_node *next;
	struct regcache_rbtree_ctx *rbtree_ctx;
	struct regcache_rbtree_node *rbtree_node;

	/* if we've already been called then just return */
	rbtree_ctx = map->cache;
	if (!rbtree_ctx)
		return 0;

	/* free up the rbtree */
	next = rb_first(&rbtree_ctx->root);
	while (next) {
		rbtree_node = rb_entry(next, struct regcache_rbtree_node, node);
		next = rb_next(&rbtree_node->node);
		rb_erase(&rbtree_node->node, &rbtree_ctx->root);
		kfree(rbtree_node->cache_present);
		kfree(rbtree_node->block);
		kfree(rbtree_node);
	}

	/* release the resources */
	kfree(map->cache);
	map->cache = NULL;

	return 0;
}
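
/*
 * Cache reads return -ENOENT both when no block covers the register and
 * when a covering block exists but the register was never written, so
 * the regmap core can tell a cache miss from a cached zero and fall
 * back to reading the hardware.
 */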

static int regcache_rbtree_read(struct regmap *map,
				unsigned int reg, unsigned int *value)
{
	struct regcache_rbtree_node *rbnode;
	unsigned int reg_tmp;

	rbnode = regcache_rbtree_lookup(map, reg);
	if (rbnode) {
		reg_tmp = (reg - rbnode->base_reg) / map->reg_stride;
		if (!test_bit(reg_tmp, rbnode->cache_present))
			return -ENOENT;
		*value = regcache_rbtree_get_register(map, rbnode, reg_tmp);
	} else {
		return -ENOENT;
	}

	return 0;
}

static int regcache_rbtree_insert_to_block(struct regmap *map,
					   struct regcache_rbtree_node *rbnode,
					   unsigned int base_reg,
					   unsigned int top_reg,
					   unsigned int reg,
					   unsigned int value)
{
	unsigned int blklen;
	unsigned int pos, offset;
	unsigned long *present;
	u8 *blk;

	blklen = (top_reg - base_reg) / map->reg_stride + 1;
	pos = (reg - base_reg) / map->reg_stride;
	offset = (rbnode->base_reg - base_reg) / map->reg_stride;

	blk = krealloc(rbnode->block,
		       blklen * map->cache_word_size,
		       GFP_KERNEL);
	if (!blk)
		return -ENOMEM;

	/*
	 * krealloc() may have freed the old block, so update rbnode->block
	 * immediately; freeing blk on a later error would leave a dangling
	 * pointer behind and cause a double free on cache exit.
	 */
	rbnode->block = blk;

	if (BITS_TO_LONGS(blklen) > BITS_TO_LONGS(rbnode->blklen)) {
		present = krealloc(rbnode->cache_present,
				   BITS_TO_LONGS(blklen) * sizeof(*present),
				   GFP_KERNEL);
		if (!present)
			return -ENOMEM;

		memset(present + BITS_TO_LONGS(rbnode->blklen), 0,
		       (BITS_TO_LONGS(blklen) - BITS_TO_LONGS(rbnode->blklen))
		       * sizeof(*present));
	} else {
		present = rbnode->cache_present;
	}

	/* insert the register value in the correct place in the rbnode block */
	if (pos == 0) {
		memmove(blk + offset * map->cache_word_size,
			blk, rbnode->blklen * map->cache_word_size);
		bitmap_shift_left(present, present, offset, blklen);
	}

	/* update the rbnode block size and the base register */
	rbnode->blklen = blklen;
	rbnode->base_reg = base_reg;
	rbnode->cache_present = present;

	regcache_rbtree_set_register(map, rbnode, pos, value);
	return 0;
}

static struct regcache_rbtree_node *
regcache_rbtree_node_alloc(struct regmap *map, unsigned int reg)
{
	struct regcache_rbtree_node *rbnode;
	const struct regmap_range *range;
	int i;

	rbnode = kzalloc(sizeof(*rbnode), GFP_KERNEL);
	if (!rbnode)
		return NULL;

	/* If there is a read table then use it to guess at an allocation */
	if (map->rd_table) {
		for (i = 0; i < map->rd_table->n_yes_ranges; i++) {
			if (regmap_reg_in_range(reg,
						&map->rd_table->yes_ranges[i]))
				break;
		}

		if (i != map->rd_table->n_yes_ranges) {
			range = &map->rd_table->yes_ranges[i];
			rbnode->blklen = (range->range_max - range->range_min) /
				map->reg_stride + 1;
			rbnode->base_reg = range->range_min;
		}
	}

	if (!rbnode->blklen) {
		rbnode->blklen = 1;
		rbnode->base_reg = reg;
	}

	rbnode->block = kmalloc_array(rbnode->blklen, map->cache_word_size,
				      GFP_KERNEL);
	if (!rbnode->block)
		goto err_free;

	rbnode->cache_present = kcalloc(BITS_TO_LONGS(rbnode->blklen),
					sizeof(*rbnode->cache_present),
					GFP_KERNEL);
	if (!rbnode->cache_present)
		goto err_free_block;

	return rbnode;

err_free_block:
	kfree(rbnode->block);
err_free:
	kfree(rbnode);
	return NULL;
}
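
/*
 * Write strategy: update in place when a block already covers the
 * register; otherwise grow the closest block within max_dist registers
 * of it, and only when neither is possible allocate a new rbnode.
 * max_dist (sizeof(rbnode) scaled by stride and word size) is the
 * break-even point beyond which padding an existing block would cost
 * more memory than the per-node bookkeeping it avoids.
 */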

static int regcache_rbtree_write(struct regmap *map, unsigned int reg,
				 unsigned int value)
{
	struct regcache_rbtree_ctx *rbtree_ctx;
	struct regcache_rbtree_node *rbnode, *rbnode_tmp;
	struct rb_node *node;
	unsigned int reg_tmp;
	int ret;

	rbtree_ctx = map->cache;

	/* if we can't locate it in the cached rbnode we'll have
	 * to traverse the rbtree looking for it.
	 */
	rbnode = regcache_rbtree_lookup(map, reg);
	if (rbnode) {
		reg_tmp = (reg - rbnode->base_reg) / map->reg_stride;
		regcache_rbtree_set_register(map, rbnode, reg_tmp, value);
	} else {
		unsigned int base_reg, top_reg;
		unsigned int new_base_reg, new_top_reg;
		unsigned int min, max;
		unsigned int max_dist;
		unsigned int dist, best_dist = UINT_MAX;

		max_dist = map->reg_stride * sizeof(*rbnode_tmp) /
			map->cache_word_size;
		if (reg < max_dist)
			min = 0;
		else
			min = reg - max_dist;
		max = reg + max_dist;

		/* look for an adjacent register to the one we are about to add */
		node = rbtree_ctx->root.rb_node;
		while (node) {
			rbnode_tmp = rb_entry(node, struct regcache_rbtree_node,
					      node);

			regcache_rbtree_get_base_top_reg(map, rbnode_tmp,
							 &base_reg, &top_reg);

			if (base_reg <= max && top_reg >= min) {
				if (reg < base_reg)
					dist = base_reg - reg;
				else if (reg > top_reg)
					dist = reg - top_reg;
				else
					dist = 0;
				if (dist < best_dist) {
					rbnode = rbnode_tmp;
					best_dist = dist;
					new_base_reg = min(reg, base_reg);
					new_top_reg = max(reg, top_reg);
				}
			}

			/*
			 * Keep looking, we want to choose the closest block,
			 * otherwise we might end up creating overlapping
			 * blocks, which breaks the rbtree.
			 */
			if (reg < base_reg)
				node = node->rb_left;
			else if (reg > top_reg)
				node = node->rb_right;
			else
				break;
		}

		if (rbnode) {
			ret = regcache_rbtree_insert_to_block(map, rbnode,
							      new_base_reg,
							      new_top_reg, reg,
							      value);
			if (ret)
				return ret;
			rbtree_ctx->cached_rbnode = rbnode;
			return 0;
		}

		/* We did not manage to find a place to insert it in
		 * an existing block so create a new rbnode.
		 */
		rbnode = regcache_rbtree_node_alloc(map, reg);
		if (!rbnode)
			return -ENOMEM;
		regcache_rbtree_set_register(map, rbnode,
					     reg - rbnode->base_reg, value);
		regcache_rbtree_insert(map, &rbtree_ctx->root, rbnode);
		rbtree_ctx->cached_rbnode = rbnode;
	}

	return 0;
}
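
/*
 * Sync walks the blocks in ascending register order and hands each
 * range overlapping [min, max] to regcache_sync_block(), which writes
 * back the present registers that need syncing, then waits for any
 * outstanding asynchronous writes to complete.
 */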

static int regcache_rbtree_sync(struct regmap *map, unsigned int min,
				unsigned int max)
{
	struct regcache_rbtree_ctx *rbtree_ctx;
	struct rb_node *node;
	struct regcache_rbtree_node *rbnode;
	unsigned int base_reg, top_reg;
	unsigned int start, end;
	int ret;

	rbtree_ctx = map->cache;
	for (node = rb_first(&rbtree_ctx->root); node; node = rb_next(node)) {
		rbnode = rb_entry(node, struct regcache_rbtree_node, node);

		regcache_rbtree_get_base_top_reg(map, rbnode, &base_reg,
						 &top_reg);
		if (base_reg > max)
			break;
		if (top_reg < min)
			continue;

		if (min > base_reg)
			start = (min - base_reg) / map->reg_stride;
		else
			start = 0;

		if (max < top_reg)
			end = (max - base_reg) / map->reg_stride + 1;
		else
			end = rbnode->blklen;

		ret = regcache_sync_block(map, rbnode->block,
					  rbnode->cache_present,
					  rbnode->base_reg, start, end);
		if (ret != 0)
			return ret;
	}

	return regmap_async_complete(map);
}

static int regcache_rbtree_drop(struct regmap *map, unsigned int min,
				unsigned int max)
{
	struct regcache_rbtree_ctx *rbtree_ctx;
	struct regcache_rbtree_node *rbnode;
	struct rb_node *node;
	unsigned int base_reg, top_reg;
	unsigned int start, end;

	rbtree_ctx = map->cache;
	for (node = rb_first(&rbtree_ctx->root); node; node = rb_next(node)) {
		rbnode = rb_entry(node, struct regcache_rbtree_node, node);

		regcache_rbtree_get_base_top_reg(map, rbnode, &base_reg,
						 &top_reg);
		if (base_reg > max)
			break;
		if (top_reg < min)
			continue;

		if (min > base_reg)
			start = (min - base_reg) / map->reg_stride;
		else
			start = 0;

		if (max < top_reg)
			end = (max - base_reg) / map->reg_stride + 1;
		else
			end = rbnode->blklen;

		bitmap_clear(rbnode->cache_present, start, end - start);
	}

	return 0;
}

struct regcache_ops regcache_rbtree_ops = {
	.type = REGCACHE_RBTREE,
	.name = "rbtree",
	.init = regcache_rbtree_init,
	.exit = regcache_rbtree_exit,
#ifdef CONFIG_DEBUG_FS
	.debugfs_init = rbtree_debugfs_init,
#endif
	.read = regcache_rbtree_read,
	.write = regcache_rbtree_write,
	.sync = regcache_rbtree_sync,
	.drop = regcache_rbtree_drop,
};