// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2012 Linutronix GmbH
 * Copyright (c) 2014 sigma star gmbh
 * Author: Richard Weinberger <richard@nod.at>
 */

/**
 * update_fastmap_work_fn - calls ubi_update_fastmap from a work queue
 * @wrk: the work description object
 */
static void update_fastmap_work_fn(struct work_struct *wrk)
{
	struct ubi_device *ubi = container_of(wrk, struct ubi_device, fm_work);

	ubi_update_fastmap(ubi);
	spin_lock(&ubi->wl_lock);
	ubi->fm_work_scheduled = 0;
	spin_unlock(&ubi->wl_lock);
}

/**
 * find_anchor_wl_entry - find the wear-leveling entry to be used as anchor PEB.
 * @root: the RB-tree to search
 */
static struct ubi_wl_entry *find_anchor_wl_entry(struct rb_root *root)
{
	struct rb_node *p;
	struct ubi_wl_entry *e, *victim = NULL;
	int max_ec = UBI_MAX_ERASECOUNTER;

	ubi_rb_for_each_entry(p, e, root, u.rb) {
		if (e->pnum < UBI_FM_MAX_START && e->ec < max_ec) {
			victim = e;
			max_ec = e->ec;
		}
	}

	return victim;
}

static inline void return_unused_peb(struct ubi_device *ubi,
				     struct ubi_wl_entry *e)
{
	wl_tree_add(e, &ubi->free);
	ubi->free_count++;
}

/**
 * return_unused_pool_pebs - returns the unused PEBs of a pool to the free tree.
 * @ubi: UBI device description object
 * @pool: fastmap pool description object
 */
static void return_unused_pool_pebs(struct ubi_device *ubi,
				    struct ubi_fm_pool *pool)
{
	int i;
	struct ubi_wl_entry *e;

	for (i = pool->used; i < pool->size; i++) {
		e = ubi->lookuptbl[pool->pebs[i]];
		return_unused_peb(ubi, e);
	}
}

/**
 * ubi_wl_get_fm_peb - find a free physical erase block for fastmap usage.
 * @ubi: UBI device description object
 * @anchor: if non-zero, the PEB will be used as anchor PEB by fastmap
 *
 * The function returns a suitable free physical erase block (an anchor
 * candidate below UBI_FM_MAX_START, or an entry with a mean erase count
 * otherwise) and removes it from the wl subsystem.
 * Must be called with wl_lock held!
 */
struct ubi_wl_entry *ubi_wl_get_fm_peb(struct ubi_device *ubi, int anchor)
{
	struct ubi_wl_entry *e = NULL;

	if (!ubi->free.rb_node || (ubi->free_count - ubi->beb_rsvd_pebs < 1))
		goto out;

	if (anchor)
		e = find_anchor_wl_entry(&ubi->free);
	else
		e = find_mean_wl_entry(ubi, &ubi->free);

	if (!e)
		goto out;

	self_check_in_wl_tree(ubi, e, &ubi->free);

	/*
	 * Remove it from the free list; the wl subsystem no longer knows
	 * this erase block.
	 */
	rb_erase(&e->u.rb, &ubi->free);
	ubi->free_count--;
out:
	return e;
}
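/*
 * Usage sketch (illustrative only, kept out of the build): callers must
 * hold wl_lock, as ubi_ensure_anchor_pebs() later in this file does when
 * it grabs an anchor candidate from the free tree.
 */
#if 0
	spin_lock(&ubi->wl_lock);
	e = ubi_wl_get_fm_peb(ubi, 1);	/* anchor: lowest-EC PEB below UBI_FM_MAX_START */
	spin_unlock(&ubi->wl_lock);
#endif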
/**
 * has_enough_free_count - whether UBI has enough free PEBs to fill the
 * fastmap pools.
 * @ubi: UBI device description object
 * @is_wl_pool: whether UBI is filling the wear-leveling pool
 *
 * This helper checks whether enough free PEBs remain, after deducting the
 * PEBs needed by fastmap itself, to fill fm_pool and fm_wl_pool. The
 * deduction only takes effect once at least one free PEB has been filled
 * into fm_wl_pool. For the wear-leveling pool, UBI must additionally keep
 * free PEBs reserved for bad block handling, because after new bad PEBs
 * are produced there might not be enough free PEBs left for user volumes.
 */
static bool has_enough_free_count(struct ubi_device *ubi, bool is_wl_pool)
{
	int fm_used = 0;	/* fastmap non-anchor PEBs */
	int beb_rsvd_pebs;

	if (!ubi->free.rb_node)
		return false;

	beb_rsvd_pebs = is_wl_pool ? ubi->beb_rsvd_pebs : 0;
	if (ubi->fm_wl_pool.size > 0 && !(ubi->ro_mode || ubi->fm_disabled))
		fm_used = ubi->fm_size / ubi->leb_size - 1;

	return ubi->free_count - beb_rsvd_pebs > fm_used;
}
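/*
 * Worked example for has_enough_free_count() (illustrative numbers, not
 * from any real device): with fm_size == 3 * leb_size the fastmap spans
 * three PEBs, one of them the anchor, so fm_used = 3 - 1 = 2 non-anchor
 * PEBs are treated as spoken for once fm_wl_pool holds at least one PEB.
 * Filling the wear-leveling pool with free_count == 10 and
 * beb_rsvd_pebs == 4 is then allowed only while 10 - 4 > 2 holds.
 */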
/**
 * ubi_refill_pools - refills all fastmap PEB pools.
 * @ubi: UBI device description object
 */
void ubi_refill_pools(struct ubi_device *ubi)
{
	struct ubi_fm_pool *wl_pool = &ubi->fm_wl_pool;
	struct ubi_fm_pool *pool = &ubi->fm_pool;
	struct ubi_wl_entry *e;
	int enough;

	spin_lock(&ubi->wl_lock);

	return_unused_pool_pebs(ubi, wl_pool);
	return_unused_pool_pebs(ubi, pool);

	wl_pool->size = 0;
	pool->size = 0;

	if (ubi->fm_anchor) {
		wl_tree_add(ubi->fm_anchor, &ubi->free);
		ubi->free_count++;
		ubi->fm_anchor = NULL;
	}

	if (!ubi->fm_disabled)
		/*
		 * All available PEBs are in ubi->free, now is the time to get
		 * the best anchor PEB.
		 */
		ubi->fm_anchor = ubi_wl_get_fm_peb(ubi, 1);

	for (;;) {
		enough = 0;
		if (pool->size < pool->max_size) {
			if (!has_enough_free_count(ubi, false))
				break;

			e = wl_get_wle(ubi);
			if (!e)
				break;

			pool->pebs[pool->size] = e->pnum;
			pool->size++;
		} else
			enough++;

		if (wl_pool->size < wl_pool->max_size) {
			if (!has_enough_free_count(ubi, true))
				break;

			e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
			self_check_in_wl_tree(ubi, e, &ubi->free);
			rb_erase(&e->u.rb, &ubi->free);
			ubi->free_count--;

			wl_pool->pebs[wl_pool->size] = e->pnum;
			wl_pool->size++;
		} else
			enough++;

		if (enough == 2)
			break;
	}

	wl_pool->used = 0;
	pool->used = 0;

	spin_unlock(&ubi->wl_lock);
}

/**
 * produce_free_peb - produce a free physical eraseblock.
 * @ubi: UBI device description object
 *
 * This function tries to make a free PEB by means of synchronous execution of
 * pending works. This may be needed if, for example, the background thread is
 * disabled. Returns zero in case of success and a negative error code in case
 * of failure.
 */
static int produce_free_peb(struct ubi_device *ubi)
{
	int err;

	while (!ubi->free.rb_node && ubi->works_count) {
		dbg_wl("do one work synchronously");
		err = do_work(ubi);

		if (err)
			return err;
	}

	return 0;
}

/**
 * ubi_wl_get_peb - get a physical eraseblock.
 * @ubi: UBI device description object
 *
 * This function returns a physical eraseblock in case of success and a
 * negative error code in case of failure.
 * Returns with ubi->fm_eba_sem held in read mode!
 */
int ubi_wl_get_peb(struct ubi_device *ubi)
{
	int ret, attempts = 0;
	struct ubi_fm_pool *pool = &ubi->fm_pool;
	struct ubi_fm_pool *wl_pool = &ubi->fm_wl_pool;

again:
	down_read(&ubi->fm_eba_sem);
	spin_lock(&ubi->wl_lock);

	/*
	 * We also check the WL pool here, because at this point we can
	 * refill the WL pool synchronously.
	 */
	if (pool->used == pool->size || wl_pool->used == wl_pool->size) {
		spin_unlock(&ubi->wl_lock);
		up_read(&ubi->fm_eba_sem);
		ret = ubi_update_fastmap(ubi);
		if (ret) {
			ubi_msg(ubi, "Unable to write a new fastmap: %i", ret);
			down_read(&ubi->fm_eba_sem);
			return -ENOSPC;
		}
		down_read(&ubi->fm_eba_sem);
		spin_lock(&ubi->wl_lock);
	}

	if (pool->used == pool->size) {
		spin_unlock(&ubi->wl_lock);
		attempts++;
		if (attempts == 10) {
			ubi_err(ubi, "Unable to get a free PEB from user WL pool");
			ret = -ENOSPC;
			goto out;
		}
		up_read(&ubi->fm_eba_sem);
		ret = produce_free_peb(ubi);
		if (ret < 0) {
			down_read(&ubi->fm_eba_sem);
			goto out;
		}
		goto again;
	}

	ubi_assert(pool->used < pool->size);
	ret = pool->pebs[pool->used++];
	prot_queue_add(ubi, ubi->lookuptbl[ret]);
	spin_unlock(&ubi->wl_lock);
out:
	return ret;
}

/**
 * next_peb_for_wl - returns the next PEB to be used internally by the
 * WL sub-system.
 *
 * @ubi: UBI device description object
 * @need_fill: whether to fill the wear-leveling pool when no PEBs are found
 */
static struct ubi_wl_entry *next_peb_for_wl(struct ubi_device *ubi,
					    bool need_fill)
{
	struct ubi_fm_pool *pool = &ubi->fm_wl_pool;
	int pnum;

	if (pool->used == pool->size) {
		if (need_fill && !ubi->fm_work_scheduled) {
			/*
			 * We cannot update the fastmap here because this
			 * function is called in atomic context.
			 * Let's fail here and refill/update it as soon as
			 * possible.
			 */
			ubi->fm_work_scheduled = 1;
			schedule_work(&ubi->fm_work);
		}
		return NULL;
	}

	pnum = pool->pebs[pool->used];
	return ubi->lookuptbl[pnum];
}

/**
 * need_wear_leveling - checks whether to trigger a wear-leveling work.
 * UBI fetches free PEBs from the wl_pool; we check free PEBs from both
 * 'wl_pool' and 'ubi->free', because a free PEB in the 'ubi->free' tree
 * may be moved into the 'wl_pool' by ubi_refill_pools().
 *
 * @ubi: UBI device description object
 */
static bool need_wear_leveling(struct ubi_device *ubi)
{
	int ec;
	struct ubi_wl_entry *e;

	if (!ubi->used.rb_node)
		return false;

	e = next_peb_for_wl(ubi, false);
	if (!e) {
		if (!ubi->free.rb_node)
			return false;
		e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
		ec = e->ec;
	} else {
		ec = e->ec;
		if (ubi->free.rb_node) {
			e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
			ec = max(ec, e->ec);
		}
	}
	e = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);

	return ec - e->ec >= UBI_WL_THRESHOLD;
}
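/*
 * Worked example for need_wear_leveling() (illustrative numbers): with
 * UBI_WL_THRESHOLD at its usual default of 4096 (CONFIG_MTD_UBI_WL_THRESHOLD),
 * a candidate free PEB with ec == 5000 and a least-worn used PEB with
 * ec == 800 give 5000 - 800 = 4200 >= 4096, so wear leveling is triggered;
 * with ec == 1200 vs. 800 it is not.
 */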
/* get_peb_for_wl - returns a PEB to be used internally by the WL sub-system.
 *
 * @ubi: UBI device description object
 */
static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
{
	struct ubi_fm_pool *pool = &ubi->fm_wl_pool;
	int pnum;

	ubi_assert(rwsem_is_locked(&ubi->fm_eba_sem));

	if (pool->used == pool->size) {
		/*
		 * We cannot update the fastmap here because this
		 * function is called in atomic context.
		 * Let's fail here and refill/update it as soon as possible.
		 */
		if (!ubi->fm_work_scheduled) {
			ubi->fm_work_scheduled = 1;
			schedule_work(&ubi->fm_work);
		}
		return NULL;
	}

	pnum = pool->pebs[pool->used++];
	return ubi->lookuptbl[pnum];
}

/**
 * ubi_ensure_anchor_pebs - schedule wear-leveling to produce an anchor PEB.
 * @ubi: UBI device description object
 */
int ubi_ensure_anchor_pebs(struct ubi_device *ubi)
{
	struct ubi_work *wrk;
	struct ubi_wl_entry *anchor;

	spin_lock(&ubi->wl_lock);

	/* Do we already have an anchor? */
	if (ubi->fm_anchor) {
		spin_unlock(&ubi->wl_lock);
		return 0;
	}

	/* See if we can find an anchor PEB on the list of free PEBs */
	anchor = ubi_wl_get_fm_peb(ubi, 1);
	if (anchor) {
		ubi->fm_anchor = anchor;
		spin_unlock(&ubi->wl_lock);
		return 0;
	}

	ubi->fm_do_produce_anchor = 1;
	/* No luck, trigger wear leveling to produce a new anchor PEB. */
	if (ubi->wl_scheduled) {
		spin_unlock(&ubi->wl_lock);
		return 0;
	}
	ubi->wl_scheduled = 1;
	spin_unlock(&ubi->wl_lock);

	wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
	if (!wrk) {
		spin_lock(&ubi->wl_lock);
		ubi->wl_scheduled = 0;
		spin_unlock(&ubi->wl_lock);
		return -ENOMEM;
	}

	wrk->func = &wear_leveling_worker;
	__schedule_ubi_work(ubi, wrk);
	return 0;
}
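/*
 * Caller sketch (illustrative only, kept out of the build):
 * ubi_ensure_anchor_pebs() either grabs an anchor directly from the free
 * tree or schedules a wear-leveling work that will produce one; a non-zero
 * return only means the work item could not be allocated, not that no
 * anchor exists yet.
 */
#if 0
	err = ubi_ensure_anchor_pebs(ubi);
	if (err)
		return err;	/* -ENOMEM from the work item allocation */
#endif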
493 */ 494 static struct ubi_wl_entry *may_reserve_for_fm(struct ubi_device *ubi, 495 struct ubi_wl_entry *e, 496 struct rb_root *root) { 497 if (e && !ubi->fm_disabled && !ubi->fm && 498 e->pnum < UBI_FM_MAX_START) 499 e = rb_entry(rb_next(root->rb_node), 500 struct ubi_wl_entry, u.rb); 501 502 return e; 503 } 504