xref: /openbmc/linux/drivers/md/dm-bio-prison-v1.h (revision 46c30cb8)
1 /* SPDX-License-Identifier: GPL-2.0-only */
2 /*
3  * Copyright (C) 2011-2017 Red Hat, Inc.
4  *
5  * This file is released under the GPL.
6  */
7 
8 #ifndef DM_BIO_PRISON_H
9 #define DM_BIO_PRISON_H
10 
11 #include "persistent-data/dm-block-manager.h" /* FIXME: for dm_block_t */
12 #include "dm-thin-metadata.h" /* FIXME: for dm_thin_id */
13 
14 #include <linux/bio.h>
15 #include <linux/rbtree.h>
16 
17 /*----------------------------------------------------------------*/
18 
19 /*
20  * Sometimes we can't deal with a bio straight away.  We put them in prison
21  * where they can't cause any mischief.  Bios are put in a cell identified
22  * by a key, multiple bios can be in the same cell.  When the cell is
23  * subsequently unlocked the bios become available.
24  */
25 struct dm_bio_prison;
26 
27 /*
28  * Keys define a range of blocks within either a virtual or physical
29  * device.
30  */
struct dm_cell_key {
	int virtual;		/* non-zero => thin (virtual) block space, 0 => data (physical) device space; NB: 'virtual' is a C++ keyword, so this header is C-only */
	dm_thin_id dev;		/* device the range belongs to */
	dm_block_t block_begin, block_end;	/* NOTE(review): presumably a half-open [begin, end) range — confirm against key builders in the .c users */
};
36 
37 /*
38  * Treat this as opaque, only in header so callers can manage allocation
39  * themselves.
40  */
struct dm_bio_prison_cell {
	struct list_head user_list;	/* for client use */
	struct rb_node node;		/* rbtree linkage; cells are looked up by 'key' (see dm_get_cell()) */

	struct dm_cell_key key;		/* the block range this cell locks */
	struct bio *holder;		/* the bio that holds the cell (first one detained) */
	struct bio_list bios;		/* subsequently detained bios — the "inmates" */
};
49 
/*
 * Constructor / destructor for a prison.
 * NOTE(review): dm_bio_prison_create() presumably returns NULL on
 * allocation failure — confirm against dm-bio-prison-v1.c.
 */
struct dm_bio_prison *dm_bio_prison_create(void);
void dm_bio_prison_destroy(struct dm_bio_prison *prison);
52 
/*
 * These two functions just wrap a mempool.  This is a transitory step:
 * Eventually all bio prison clients should manage their own cell memory.
 *
 * Like mempool_alloc(), dm_bio_prison_alloc_cell() can only fail if called
 * in interrupt context or passed GFP_NOWAIT (presumably returning NULL in
 * that case, as mempool_alloc() does).
 */
struct dm_bio_prison_cell *dm_bio_prison_alloc_cell(struct dm_bio_prison *prison,
						    gfp_t gfp);
void dm_bio_prison_free_cell(struct dm_bio_prison *prison,
			     struct dm_bio_prison_cell *cell);
64 
/*
 * Creates, or retrieves a cell that overlaps the given key.
 *
 * Returns 1 if a pre-existing cell was returned, zero if a new cell was
 * created using @cell_prealloc.
 *
 * NOTE(review): when 0 is returned @cell_prealloc appears to become owned
 * by the prison (it is the returned cell); when 1 is returned the caller
 * presumably still owns it and must free it — confirm in the .c file.
 */
int dm_get_cell(struct dm_bio_prison *prison,
		struct dm_cell_key *key,
		struct dm_bio_prison_cell *cell_prealloc,
		struct dm_bio_prison_cell **cell_result);
75 
/*
 * An atomic op that combines retrieving or creating a cell, and adding a
 * bio to it.
 *
 * Returns 1 if the cell was already held, 0 if @inmate is the new holder.
 *
 * NOTE(review): as with dm_get_cell(), @cell_prealloc looks to be consumed
 * only when a new cell is created (return 0) — confirm in the .c file.
 */
int dm_bio_detain(struct dm_bio_prison *prison,
		  struct dm_cell_key *key,
		  struct bio *inmate,
		  struct dm_bio_prison_cell *cell_prealloc,
		  struct dm_bio_prison_cell **cell_result);
87 
/* Unlock @cell, transferring all of its bios (holder included) onto @bios. */
void dm_cell_release(struct dm_bio_prison *prison,
		     struct dm_bio_prison_cell *cell,
		     struct bio_list *bios);
/* As above, but the holder bio is left out of @inmates (name suggests the
 * caller deals with the holder separately — confirm in the .c file). */
void dm_cell_release_no_holder(struct dm_bio_prison *prison,
			       struct dm_bio_prison_cell *cell,
			       struct bio_list *inmates);
/* Unlock @cell, presumably completing its bios with status @error — confirm. */
void dm_cell_error(struct dm_bio_prison *prison,
		   struct dm_bio_prison_cell *cell, blk_status_t error);
96 
/*
 * Visits the cell and then releases.  Guarantees no new inmates are
 * inserted between the visit and release.
 *
 * @visit_fn is called with @context and the cell while the prison lock is
 * presumably held — it should therefore be quick and must not re-enter the
 * prison (NOTE(review): confirm locking in the .c file).
 */
void dm_cell_visit_release(struct dm_bio_prison *prison,
			   void (*visit_fn)(void *, struct dm_bio_prison_cell *),
			   void *context, struct dm_bio_prison_cell *cell);
104 
/*
 * Rather than always releasing the prisoners in a cell, the client may
 * want to promote one of them to be the new holder.  There is a race here
 * though between releasing an empty cell, and other threads adding new
 * inmates.  So this function makes the decision with its lock held.
 *
 * This function can have two outcomes:
 * i) An inmate is promoted to be the holder of the cell (return value of 0).
 * ii) The cell has no inmate for promotion and is released (return value of 1).
 *
 * Either way the caller no longer holds the cell after the call returns 1.
 */
int dm_cell_promote_or_release(struct dm_bio_prison *prison,
			       struct dm_bio_prison_cell *cell);
117 
118 /*----------------------------------------------------------------*/
119 
/*
 * We use the deferred set to keep track of pending reads to shared blocks.
 * We do this to ensure the new mapping caused by a write isn't performed
 * until these prior reads have completed.  Otherwise the insertion of the
 * new mapping could free the old block that the read bios are mapped to.
 */

struct dm_deferred_set;		/* opaque; defined in dm-bio-prison-v1.c */
struct dm_deferred_entry;	/* opaque handle for one in-flight operation */

/* Constructor / destructor (create presumably returns NULL on failure). */
struct dm_deferred_set *dm_deferred_set_create(void);
void dm_deferred_set_destroy(struct dm_deferred_set *ds);

/*
 * inc registers an in-flight operation and returns its entry; dec marks it
 * complete, and appears to move any deferred work that has become runnable
 * onto @head for the caller to process (NOTE(review): confirm in .c file).
 */
struct dm_deferred_entry *dm_deferred_entry_inc(struct dm_deferred_set *ds);
void dm_deferred_entry_dec(struct dm_deferred_entry *entry, struct list_head *head);
/*
 * Queue @work to run once the currently outstanding entries have drained.
 * NOTE(review): return value semantics (e.g. whether non-zero means the
 * work can run immediately) are not visible here — confirm in the .c file.
 */
int dm_deferred_set_add_work(struct dm_deferred_set *ds, struct list_head *work);
136 
137 /*----------------------------------------------------------------*/
138 
139 #endif
140