/*
 * Register cache access API - rbtree caching support
 *
 * Copyright 2011 Wolfson Microelectronics plc
 *
 * Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/slab.h>
#include <linux/device.h>
#include <linux/debugfs.h>
#include <linux/rbtree.h>
#include <linux/seq_file.h>

#include "internal.h"

static int regcache_rbtree_write(struct regmap *map, unsigned int reg,
				 unsigned int value);
static int regcache_rbtree_exit(struct regmap *map);

struct regcache_rbtree_node {
	/* the actual rbtree node holding this block */
	struct rb_node node;
	/* base register handled by this block */
	unsigned int base_reg;
	/* block of adjacent registers */
	void *block;
	/* number of registers available in the block */
	unsigned int blklen;
} __attribute__ ((packed));

struct regcache_rbtree_ctx {
	struct rb_root root;
	struct regcache_rbtree_node *cached_rbnode;
};

static inline void regcache_rbtree_get_base_top_reg(
	struct regcache_rbtree_node *rbnode,
	unsigned int *base, unsigned int *top)
{
	*base = rbnode->base_reg;
	*top = rbnode->base_reg + rbnode->blklen - 1;
}

static unsigned int regcache_rbtree_get_register(
	struct regcache_rbtree_node *rbnode, unsigned int idx,
	unsigned int word_size)
{
	return regcache_get_val(rbnode->block, idx, word_size);
}

static void regcache_rbtree_set_register(struct regcache_rbtree_node *rbnode,
					 unsigned int idx, unsigned int val,
					 unsigned int word_size)
{
	regcache_set_val(rbnode->block, idx, val, word_size);
}

static struct regcache_rbtree_node *regcache_rbtree_lookup(struct regmap *map,
							    unsigned int reg)
{
	struct regcache_rbtree_ctx *rbtree_ctx = map->cache;
	struct rb_node *node;
	struct regcache_rbtree_node *rbnode;
	unsigned int base_reg, top_reg;

	rbnode = rbtree_ctx->cached_rbnode;
	if (rbnode) {
		regcache_rbtree_get_base_top_reg(rbnode, &base_reg, &top_reg);
		if (reg >= base_reg && reg <= top_reg)
			return rbnode;
	}

	node = rbtree_ctx->root.rb_node;
	while (node) {
		rbnode = container_of(node, struct regcache_rbtree_node, node);
		regcache_rbtree_get_base_top_reg(rbnode, &base_reg, &top_reg);
		if (reg >= base_reg && reg <= top_reg) {
			rbtree_ctx->cached_rbnode = rbnode;
			return rbnode;
		} else if (reg > top_reg) {
			node = node->rb_right;
		} else if (reg < base_reg) {
			node = node->rb_left;
		}
	}

	return NULL;
}

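/*
 * Link a new block into the rbtree, keyed by its base register.
 * Returns 1 once the node has been inserted, or 0 without inserting
 * if the base register is already covered by an existing block.
 */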
static int regcache_rbtree_insert(struct rb_root *root,
				  struct regcache_rbtree_node *rbnode)
{
	struct rb_node **new, *parent;
	struct regcache_rbtree_node *rbnode_tmp;
	unsigned int base_reg_tmp, top_reg_tmp;
	unsigned int base_reg;

	parent = NULL;
	new = &root->rb_node;
	while (*new) {
		rbnode_tmp = container_of(*new, struct regcache_rbtree_node,
					  node);
		/* base and top registers of the current rbnode */
		regcache_rbtree_get_base_top_reg(rbnode_tmp, &base_reg_tmp,
						 &top_reg_tmp);
		/* base register of the rbnode to be added */
		base_reg = rbnode->base_reg;
		parent = *new;
		/* if this register has already been inserted, just return */
		if (base_reg >= base_reg_tmp &&
		    base_reg <= top_reg_tmp)
			return 0;
		else if (base_reg > top_reg_tmp)
			new = &((*new)->rb_right);
		else if (base_reg < base_reg_tmp)
			new = &((*new)->rb_left);
	}

	/* insert the node into the rbtree */
	rb_link_node(&rbnode->node, parent, new);
	rb_insert_color(&rbnode->node, root);

	return 1;
}

#ifdef CONFIG_DEBUG_FS
static int rbtree_show(struct seq_file *s, void *ignored)
{
	struct regmap *map = s->private;
	struct regcache_rbtree_ctx *rbtree_ctx = map->cache;
	struct regcache_rbtree_node *n;
	struct rb_node *node;
	unsigned int base, top;
	int nodes = 0;
	int registers = 0;

	mutex_lock(&map->lock);

	for (node = rb_first(&rbtree_ctx->root); node != NULL;
	     node = rb_next(node)) {
		n = container_of(node, struct regcache_rbtree_node, node);

		regcache_rbtree_get_base_top_reg(n, &base, &top);
		seq_printf(s, "%x-%x (%d)\n", base, top, top - base + 1);

		nodes++;
		registers += top - base + 1;
	}

	/* guard against an empty cache when computing the average */
	seq_printf(s, "%d nodes, %d registers, average %d registers\n",
		   nodes, registers, nodes ? registers / nodes : 0);

	mutex_unlock(&map->lock);

	return 0;
}

static int rbtree_open(struct inode *inode, struct file *file)
{
	return single_open(file, rbtree_show, inode->i_private);
}

static const struct file_operations rbtree_fops = {
	.open = rbtree_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static void rbtree_debugfs_init(struct regmap *map)
{
	debugfs_create_file("rbtree", 0400, map->debugfs, map, &rbtree_fops);
}
#else
static void rbtree_debugfs_init(struct regmap *map)
{
}
#endif

static int regcache_rbtree_init(struct regmap *map)
{
	struct regcache_rbtree_ctx *rbtree_ctx;
	int i;
	int ret;

	map->cache = kmalloc(sizeof *rbtree_ctx, GFP_KERNEL);
	if (!map->cache)
		return -ENOMEM;

	rbtree_ctx = map->cache;
	rbtree_ctx->root = RB_ROOT;
	rbtree_ctx->cached_rbnode = NULL;

	for (i = 0; i < map->num_reg_defaults; i++) {
		ret = regcache_rbtree_write(map,
					    map->reg_defaults[i].reg,
					    map->reg_defaults[i].def);
		if (ret)
			goto err;
	}

	rbtree_debugfs_init(map);

	return 0;

err:
	regcache_rbtree_exit(map);
	return ret;
}

static int regcache_rbtree_exit(struct regmap *map)
{
	struct rb_node *next;
	struct regcache_rbtree_ctx *rbtree_ctx;
	struct regcache_rbtree_node *rbtree_node;

	/* if we've already been called then just return */
	rbtree_ctx = map->cache;
	if (!rbtree_ctx)
		return 0;

	/* free up the rbtree */
	next = rb_first(&rbtree_ctx->root);
	while (next) {
		rbtree_node = rb_entry(next, struct regcache_rbtree_node, node);
		next = rb_next(&rbtree_node->node);
		rb_erase(&rbtree_node->node, &rbtree_ctx->root);
		kfree(rbtree_node->block);
		kfree(rbtree_node);
	}

	/* release the resources */
	kfree(map->cache);
	map->cache = NULL;

	return 0;
}

static int regcache_rbtree_read(struct regmap *map,
				unsigned int reg, unsigned int *value)
{
	struct regcache_rbtree_node *rbnode;
	unsigned int reg_tmp;

	rbnode = regcache_rbtree_lookup(map, reg);
	if (rbnode) {
		reg_tmp = reg - rbnode->base_reg;
		*value = regcache_rbtree_get_register(rbnode, reg_tmp,
						      map->cache_word_size);
	} else {
		return -ENOENT;
	}

	return 0;
}

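/*
 * Grow a block by one register and place the new value at index 'pos',
 * shifting any later entries up by one cache word.  The base register
 * of the block is updated when the value is inserted at the front.
 */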
static int regcache_rbtree_insert_to_block(struct regcache_rbtree_node *rbnode,
					   unsigned int pos, unsigned int reg,
					   unsigned int value, unsigned int word_size)
{
	u8 *blk;

	blk = krealloc(rbnode->block,
		       (rbnode->blklen + 1) * word_size, GFP_KERNEL);
	if (!blk)
		return -ENOMEM;

	/* insert the register value in the correct place in the rbnode block */
	memmove(blk + (pos + 1) * word_size,
		blk + pos * word_size,
		(rbnode->blklen - pos) * word_size);

	/* update the rbnode block, its size and the base register */
	rbnode->block = blk;
	rbnode->blklen++;
	if (!pos)
		rbnode->base_reg = reg;

	regcache_rbtree_set_register(rbnode, pos, value, word_size);
	return 0;
}

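/*
 * Cache a register value: update it in place if a block already covers
 * the register, grow an adjacent block to take it, or fall back to
 * allocating a new single-register block.
 */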
static int regcache_rbtree_write(struct regmap *map, unsigned int reg,
				 unsigned int value)
{
	struct regcache_rbtree_ctx *rbtree_ctx;
	struct regcache_rbtree_node *rbnode, *rbnode_tmp;
	struct rb_node *node;
	unsigned int val;
	unsigned int reg_tmp;
	unsigned int pos;
	int i;
	int ret;

	rbtree_ctx = map->cache;
	/* if we can't locate it in the cached rbnode we'll have
	 * to traverse the rbtree looking for it.
	 */
	rbnode = regcache_rbtree_lookup(map, reg);
	if (rbnode) {
		reg_tmp = reg - rbnode->base_reg;
		val = regcache_rbtree_get_register(rbnode, reg_tmp,
						   map->cache_word_size);
		if (val == value)
			return 0;
		regcache_rbtree_set_register(rbnode, reg_tmp, value,
					     map->cache_word_size);
	} else {
		/* look for an adjacent register to the one we are about to add */
		for (node = rb_first(&rbtree_ctx->root); node;
		     node = rb_next(node)) {
			rbnode_tmp = rb_entry(node, struct regcache_rbtree_node, node);
			for (i = 0; i < rbnode_tmp->blklen; i++) {
				reg_tmp = rbnode_tmp->base_reg + i;
				if (abs(reg_tmp - reg) != 1)
					continue;
				/* decide where in the block to place our register */
				if (reg_tmp + 1 == reg)
					pos = i + 1;
				else
					pos = i;
				ret = regcache_rbtree_insert_to_block(rbnode_tmp, pos,
								      reg, value,
								      map->cache_word_size);
				if (ret)
					return ret;
				rbtree_ctx->cached_rbnode = rbnode_tmp;
				return 0;
			}
		}
		/* we did not manage to find a place to insert it in an existing
		 * block so create a new rbnode with a single register in its block.
		 * This block will get populated further if any other adjacent
		 * registers get modified in the future.
		 */
		rbnode = kzalloc(sizeof *rbnode, GFP_KERNEL);
		if (!rbnode)
			return -ENOMEM;
		rbnode->blklen = 1;
		rbnode->base_reg = reg;
		rbnode->block = kmalloc(rbnode->blklen * map->cache_word_size,
					GFP_KERNEL);
		if (!rbnode->block) {
			kfree(rbnode);
			return -ENOMEM;
		}
		regcache_rbtree_set_register(rbnode, 0, value, map->cache_word_size);
		regcache_rbtree_insert(&rbtree_ctx->root, rbnode);
		rbtree_ctx->cached_rbnode = rbnode;
	}

	return 0;
}

static int regcache_rbtree_sync(struct regmap *map, unsigned int min,
				unsigned int max)
{
	struct regcache_rbtree_ctx *rbtree_ctx;
	struct rb_node *node;
	struct regcache_rbtree_node *rbnode;
	unsigned int regtmp;
	unsigned int val;
	int ret;
	int i, base, end;

	rbtree_ctx = map->cache;
	for (node = rb_first(&rbtree_ctx->root); node; node = rb_next(node)) {
		rbnode = rb_entry(node, struct regcache_rbtree_node, node);

		/* skip blocks that lie entirely outside [min, max] */
		if (rbnode->base_reg > max)
			break;
		if (rbnode->base_reg + rbnode->blklen < min)
			continue;

		/* clamp the portion of the block that falls within [min, max] */
		if (min > rbnode->base_reg)
			base = min - rbnode->base_reg;
		else
			base = 0;

		if (max < rbnode->base_reg + rbnode->blklen)
			end = max - rbnode->base_reg + 1;
		else
			end = rbnode->blklen;

		for (i = base; i < end; i++) {
			regtmp = rbnode->base_reg + i;
			val = regcache_rbtree_get_register(rbnode, i,
							   map->cache_word_size);

			/* Is this the hardware default? If so skip. */
			ret = regcache_lookup_reg(map, regtmp);
			if (ret >= 0 && val == map->reg_defaults[ret].def)
				continue;

			map->cache_bypass = 1;
			ret = _regmap_write(map, regtmp, val);
			map->cache_bypass = 0;
			if (ret)
				return ret;
			dev_dbg(map->dev, "Synced register %#x, value %#x\n",
				regtmp, val);
		}
	}

	return 0;
}

struct regcache_ops regcache_rbtree_ops = {
	.type = REGCACHE_RBTREE,
	.name = "rbtree",
	.init = regcache_rbtree_init,
	.exit = regcache_rbtree_exit,
	.read = regcache_rbtree_read,
	.write = regcache_rbtree_write,
	.sync = regcache_rbtree_sync
};