// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2011 Red Hat, Inc.
 *
 * This file is released under the GPL.
 */
#include "dm-transaction-manager.h"
#include "dm-space-map.h"
#include "dm-space-map-disk.h"
#include "dm-space-map-metadata.h"
#include "dm-persistent-data-internal.h"

#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/device-mapper.h>

#define DM_MSG_PREFIX "transaction manager"

/*----------------------------------------------------------------*/

#define PREFETCH_SIZE 128
#define PREFETCH_BITS 7
#define PREFETCH_SENTINEL ((dm_block_t) -1ULL)

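/*
 * A small, fixed-size set of blocks that non-blocking readers would like
 * prefetched.  Entries are placed by hash; if a slot is already occupied
 * the new block is silently dropped, which is fine because prefetching is
 * only a hint.
 */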
struct prefetch_set {
	struct mutex lock;
	dm_block_t blocks[PREFETCH_SIZE];
};

static unsigned int prefetch_hash(dm_block_t b)
{
	return hash_64(b, PREFETCH_BITS);
}

static void prefetch_wipe(struct prefetch_set *p)
{
	unsigned int i;

	for (i = 0; i < PREFETCH_SIZE; i++)
		p->blocks[i] = PREFETCH_SENTINEL;
}

static void prefetch_init(struct prefetch_set *p)
{
	mutex_init(&p->lock);
	prefetch_wipe(p);
}

static void prefetch_add(struct prefetch_set *p, dm_block_t b)
{
	unsigned int h = prefetch_hash(b);

	mutex_lock(&p->lock);
	if (p->blocks[h] == PREFETCH_SENTINEL)
		p->blocks[h] = b;

	mutex_unlock(&p->lock);
}

static void prefetch_issue(struct prefetch_set *p, struct dm_block_manager *bm)
{
	unsigned int i;

	mutex_lock(&p->lock);

	for (i = 0; i < PREFETCH_SIZE; i++)
		if (p->blocks[i] != PREFETCH_SENTINEL) {
			dm_bm_prefetch(bm, p->blocks[i]);
			p->blocks[i] = PREFETCH_SENTINEL;
		}

	mutex_unlock(&p->lock);
}

/*----------------------------------------------------------------*/

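/*
 * The shadow table records which blocks have already been copied (or newly
 * allocated) within the current transaction, so they can safely be written
 * in place rather than shadowed again.
 */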
struct shadow_info {
	struct hlist_node hlist;
	dm_block_t where;
};

/*
 * It would be nice if this scaled with the size of the transaction.
 */
#define DM_HASH_SIZE 256
#define DM_HASH_MASK (DM_HASH_SIZE - 1)

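/*
 * A transaction manager is either the 'real' thing, or a non-blocking
 * clone of one.  A clone only forwards read requests to the real
 * manager's block manager, recording any blocks that miss the cache in
 * the real manager's prefetch set.
 */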
struct dm_transaction_manager {
	int is_clone;
	struct dm_transaction_manager *real;

	struct dm_block_manager *bm;
	struct dm_space_map *sm;

	spinlock_t lock;
	struct hlist_head buckets[DM_HASH_SIZE];

	struct prefetch_set prefetches;
};

/*----------------------------------------------------------------*/

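/*
 * Returns non-zero if @b has already been shadowed in this transaction.
 */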
static int is_shadow(struct dm_transaction_manager *tm, dm_block_t b)
{
	int r = 0;
	unsigned int bucket = dm_hash_block(b, DM_HASH_MASK);
	struct shadow_info *si;

	spin_lock(&tm->lock);
	hlist_for_each_entry(si, tm->buckets + bucket, hlist)
		if (si->where == b) {
			r = 1;
			break;
		}
	spin_unlock(&tm->lock);

	return r;
}

/*
 * This can silently fail if there's no memory.  We're ok with this since
 * creating redundant shadows causes no harm.
 */
static void insert_shadow(struct dm_transaction_manager *tm, dm_block_t b)
{
	unsigned int bucket;
	struct shadow_info *si;

	si = kmalloc(sizeof(*si), GFP_NOIO);
	if (si) {
		si->where = b;
		bucket = dm_hash_block(b, DM_HASH_MASK);
		spin_lock(&tm->lock);
		hlist_add_head(&si->hlist, tm->buckets + bucket);
		spin_unlock(&tm->lock);
	}
}

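/*
 * Called when a transaction commits (and on destroy); the next transaction
 * starts with an empty shadow table.
 */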
static void wipe_shadow_table(struct dm_transaction_manager *tm)
{
	struct shadow_info *si;
	struct hlist_node *tmp;
	struct hlist_head *bucket;
	int i;

	spin_lock(&tm->lock);
	for (i = 0; i < DM_HASH_SIZE; i++) {
		bucket = tm->buckets + i;
		hlist_for_each_entry_safe(si, tmp, bucket, hlist)
			kfree(si);

		INIT_HLIST_HEAD(bucket);
	}

	spin_unlock(&tm->lock);
}

/*----------------------------------------------------------------*/

static struct dm_transaction_manager *dm_tm_create(struct dm_block_manager *bm,
						   struct dm_space_map *sm)
{
	int i;
	struct dm_transaction_manager *tm;

	tm = kmalloc(sizeof(*tm), GFP_KERNEL);
	if (!tm)
		return ERR_PTR(-ENOMEM);

	tm->is_clone = 0;
	tm->real = NULL;
	tm->bm = bm;
	tm->sm = sm;

	spin_lock_init(&tm->lock);
	for (i = 0; i < DM_HASH_SIZE; i++)
		INIT_HLIST_HEAD(tm->buckets + i);

	prefetch_init(&tm->prefetches);

	return tm;
}

struct dm_transaction_manager *dm_tm_create_non_blocking_clone(struct dm_transaction_manager *real)
{
	struct dm_transaction_manager *tm;

	tm = kmalloc(sizeof(*tm), GFP_KERNEL);
	if (tm) {
		tm->is_clone = 1;
		tm->real = real;
	}

	return tm;
}
EXPORT_SYMBOL_GPL(dm_tm_create_non_blocking_clone);

void dm_tm_destroy(struct dm_transaction_manager *tm)
{
	if (!tm)
		return;

	if (!tm->is_clone)
		wipe_shadow_table(tm);

	kfree(tm);
}
EXPORT_SYMBOL_GPL(dm_tm_destroy);

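/*
 * Commit is two phase: dm_tm_pre_commit() flushes everything except the
 * superblock, then the caller write locks and updates its superblock, and
 * dm_tm_commit() unlocks it and flushes again.  An illustrative sketch of
 * a caller's commit path (sb_location and sb_validator are the caller's
 * own, not defined here):
 *
 *	struct dm_block *sblock;
 *	int r;
 *
 *	r = dm_tm_pre_commit(tm);
 *	if (r < 0)
 *		return r;
 *
 *	r = dm_bm_write_lock(dm_tm_get_bm(tm), sb_location,
 *			     &sb_validator, &sblock);
 *	if (r < 0)
 *		return r;
 *
 *	... update superblock contents via dm_block_data(sblock) ...
 *
 *	return dm_tm_commit(tm, sblock);   (dm_tm_commit unlocks sblock)
 */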
int dm_tm_pre_commit(struct dm_transaction_manager *tm)
{
	int r;

	if (tm->is_clone)
		return -EWOULDBLOCK;

	r = dm_sm_commit(tm->sm);
	if (r < 0)
		return r;

	return dm_bm_flush(tm->bm);
}
EXPORT_SYMBOL_GPL(dm_tm_pre_commit);

int dm_tm_commit(struct dm_transaction_manager *tm, struct dm_block *root)
{
	if (tm->is_clone)
		return -EWOULDBLOCK;

	wipe_shadow_table(tm);
	dm_bm_unlock(root);

	return dm_bm_flush(tm->bm);
}
EXPORT_SYMBOL_GPL(dm_tm_commit);

int dm_tm_new_block(struct dm_transaction_manager *tm,
		    struct dm_block_validator *v,
		    struct dm_block **result)
{
	int r;
	dm_block_t new_block;

	if (tm->is_clone)
		return -EWOULDBLOCK;

	r = dm_sm_new_block(tm->sm, &new_block);
	if (r < 0)
		return r;

	r = dm_bm_write_lock_zero(tm->bm, new_block, v, result);
	if (r < 0) {
		dm_sm_dec_block(tm->sm, new_block);
		return r;
	}

	/*
	 * New blocks count as shadows in that they don't need to be
	 * shadowed again.
	 */
	insert_shadow(tm, new_block);

	return 0;
}

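/*
 * Copy-on-write for a single block: allocate a new block, drop a reference
 * on the original, then copy the original's data into the new block.  The
 * original is read locked for the copy rather than moved; see the comment
 * below for why.
 */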
static int __shadow_block(struct dm_transaction_manager *tm, dm_block_t orig,
			  struct dm_block_validator *v,
			  struct dm_block **result)
{
	int r;
	dm_block_t new;
	struct dm_block *orig_block;

	r = dm_sm_new_block(tm->sm, &new);
	if (r < 0)
		return r;

	r = dm_sm_dec_block(tm->sm, orig);
	if (r < 0)
		return r;

	r = dm_bm_read_lock(tm->bm, orig, v, &orig_block);
	if (r < 0)
		return r;

	/*
	 * It would be tempting to use dm_bm_unlock_move here, but some
	 * code, such as the space maps, keeps using the old data structures
	 * secure in the knowledge they won't be changed until the next
	 * transaction.  Using unlock_move would force a synchronous read
	 * since the old block would no longer be in the cache.
	 */
	r = dm_bm_write_lock_zero(tm->bm, new, v, result);
	if (r) {
		dm_bm_unlock(orig_block);
		return r;
	}

	memcpy(dm_block_data(*result), dm_block_data(orig_block),
	       dm_bm_block_size(tm->bm));

	dm_bm_unlock(orig_block);
	return r;
}

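/*
 * If @orig has already been shadowed in this transaction and isn't shared,
 * it can simply be write locked; otherwise a fresh copy is made.
 * *inc_children is set when the block was shared, telling the caller to
 * increment the reference counts of the copied block's children.
 */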
int dm_tm_shadow_block(struct dm_transaction_manager *tm, dm_block_t orig,
		       struct dm_block_validator *v, struct dm_block **result,
		       int *inc_children)
{
	int r;

	if (tm->is_clone)
		return -EWOULDBLOCK;

	r = dm_sm_count_is_more_than_one(tm->sm, orig, inc_children);
	if (r < 0)
		return r;

	if (is_shadow(tm, orig) && !*inc_children)
		return dm_bm_write_lock(tm->bm, orig, v, result);

	r = __shadow_block(tm, orig, v, result);
	if (r < 0)
		return r;
	insert_shadow(tm, dm_block_location(*result));

	return r;
}
EXPORT_SYMBOL_GPL(dm_tm_shadow_block);

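/*
 * For a non-blocking clone the read is only tried: if the block isn't
 * already in the cache we return -EWOULDBLOCK and record it in the real
 * manager's prefetch set so a later dm_tm_issue_prefetches() can pull it
 * in.
 */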
int dm_tm_read_lock(struct dm_transaction_manager *tm, dm_block_t b,
		    struct dm_block_validator *v,
		    struct dm_block **blk)
{
	if (tm->is_clone) {
		int r = dm_bm_read_try_lock(tm->real->bm, b, v, blk);

		if (r == -EWOULDBLOCK)
			prefetch_add(&tm->real->prefetches, b);

		return r;
	}

	return dm_bm_read_lock(tm->bm, b, v, blk);
}
EXPORT_SYMBOL_GPL(dm_tm_read_lock);

void dm_tm_unlock(struct dm_transaction_manager *tm, struct dm_block *b)
{
	dm_bm_unlock(b);
}
EXPORT_SYMBOL_GPL(dm_tm_unlock);

void dm_tm_inc(struct dm_transaction_manager *tm, dm_block_t b)
{
	/*
	 * The non-blocking clone doesn't support this.
	 */
	BUG_ON(tm->is_clone);

	dm_sm_inc_block(tm->sm, b);
}
EXPORT_SYMBOL_GPL(dm_tm_inc);

void dm_tm_inc_range(struct dm_transaction_manager *tm, dm_block_t b, dm_block_t e)
{
	/*
	 * The non-blocking clone doesn't support this.
	 */
	BUG_ON(tm->is_clone);

	dm_sm_inc_blocks(tm->sm, b, e);
}
EXPORT_SYMBOL_GPL(dm_tm_inc_range);

void dm_tm_dec(struct dm_transaction_manager *tm, dm_block_t b)
{
	/*
	 * The non-blocking clone doesn't support this.
	 */
	BUG_ON(tm->is_clone);

	dm_sm_dec_block(tm->sm, b);
}
EXPORT_SYMBOL_GPL(dm_tm_dec);

void dm_tm_dec_range(struct dm_transaction_manager *tm, dm_block_t b, dm_block_t e)
{
	/*
	 * The non-blocking clone doesn't support this.
	 */
	BUG_ON(tm->is_clone);

	dm_sm_dec_blocks(tm->sm, b, e);
}
EXPORT_SYMBOL_GPL(dm_tm_dec_range);

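/*
 * Coalesces an array of little-endian block numbers into runs of
 * consecutive blocks and calls @fn once per run with a half-open range.
 * For example (illustrative): the values 5, 6, 7, 9 produce fn(tm, 5, 8)
 * followed by fn(tm, 9, 10).
 */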
void dm_tm_with_runs(struct dm_transaction_manager *tm,
		     const __le64 *value_le, unsigned int count, dm_tm_run_fn fn)
{
	uint64_t b, begin, end;
	bool in_run = false;
	unsigned int i;

	for (i = 0; i < count; i++, value_le++) {
		b = le64_to_cpu(*value_le);

		if (in_run) {
			if (b == end)
				end++;
			else {
				fn(tm, begin, end);
				begin = b;
				end = b + 1;
			}
		} else {
			in_run = true;
			begin = b;
			end = b + 1;
		}
	}

	if (in_run)
		fn(tm, begin, end);
}
EXPORT_SYMBOL_GPL(dm_tm_with_runs);

int dm_tm_ref(struct dm_transaction_manager *tm, dm_block_t b,
	      uint32_t *result)
{
	if (tm->is_clone)
		return -EWOULDBLOCK;

	return dm_sm_get_count(tm->sm, b, result);
}

int dm_tm_block_is_shared(struct dm_transaction_manager *tm, dm_block_t b,
			  int *result)
{
	if (tm->is_clone)
		return -EWOULDBLOCK;

	return dm_sm_count_is_more_than_one(tm->sm, b, result);
}

struct dm_block_manager *dm_tm_get_bm(struct dm_transaction_manager *tm)
{
	return tm->bm;
}

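/*
 * Issues any prefetches recorded by non-blocking clones and clears the
 * set.  This is intended to be called on the real (blocking) transaction
 * manager, whose prefetch set the clones share.
 */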
void dm_tm_issue_prefetches(struct dm_transaction_manager *tm)
{
	prefetch_issue(&tm->prefetches, tm->bm);
}
EXPORT_SYMBOL_GPL(dm_tm_issue_prefetches);

/*----------------------------------------------------------------*/

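/*
 * Builds a transaction manager together with its metadata space map.
 * @create selects between formatting a fresh space map and opening an
 * existing one from @sm_root/@sm_len.
 */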
static int dm_tm_create_internal(struct dm_block_manager *bm,
				 dm_block_t sb_location,
				 struct dm_transaction_manager **tm,
				 struct dm_space_map **sm,
				 int create,
				 void *sm_root, size_t sm_len)
{
	int r;

	*sm = dm_sm_metadata_init();
	if (IS_ERR(*sm))
		return PTR_ERR(*sm);

	*tm = dm_tm_create(bm, *sm);
	if (IS_ERR(*tm)) {
		dm_sm_destroy(*sm);
		return PTR_ERR(*tm);
	}

	if (create) {
		r = dm_sm_metadata_create(*sm, *tm, dm_bm_nr_blocks(bm),
					  sb_location);
		if (r) {
			DMERR("couldn't create metadata space map");
			goto bad;
		}

	} else {
		r = dm_sm_metadata_open(*sm, *tm, sm_root, sm_len);
		if (r) {
			DMERR("couldn't open metadata space map");
			goto bad;
		}
	}

	return 0;

bad:
	dm_tm_destroy(*tm);
	dm_sm_destroy(*sm);
	return r;
}

int dm_tm_create_with_sm(struct dm_block_manager *bm, dm_block_t sb_location,
			 struct dm_transaction_manager **tm,
			 struct dm_space_map **sm)
{
	return dm_tm_create_internal(bm, sb_location, tm, sm, 1, NULL, 0);
}
EXPORT_SYMBOL_GPL(dm_tm_create_with_sm);

int dm_tm_open_with_sm(struct dm_block_manager *bm, dm_block_t sb_location,
		       void *sm_root, size_t root_len,
		       struct dm_transaction_manager **tm,
		       struct dm_space_map **sm)
{
	return dm_tm_create_internal(bm, sb_location, tm, sm, 0, sm_root, root_len);
}
EXPORT_SYMBOL_GPL(dm_tm_open_with_sm);

/*----------------------------------------------------------------*/