xref: /openbmc/linux/lib/sg_pool.c (revision 171fa692)
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/mempool.h>
#include <linux/slab.h>

#define SG_MEMPOOL_NR		ARRAY_SIZE(sg_pools)
#define SG_MEMPOOL_SIZE		2

struct sg_pool {
	size_t		size;
	char		*name;
	struct kmem_cache	*slab;
	mempool_t	*pool;
};

#define SP(x) { .size = x, .name = "sgpool-" __stringify(x) }
#if (SG_CHUNK_SIZE < 32)
#error SG_CHUNK_SIZE is too small (must be 32 or greater)
#endif
static struct sg_pool sg_pools[] = {
	SP(8),
	SP(16),
#if (SG_CHUNK_SIZE > 32)
	SP(32),
#if (SG_CHUNK_SIZE > 64)
	SP(64),
#if (SG_CHUNK_SIZE > 128)
	SP(128),
#if (SG_CHUNK_SIZE > 256)
#error SG_CHUNK_SIZE is too large (256 MAX)
#endif
#endif
#endif
#endif
	SP(SG_CHUNK_SIZE)
};
#undef SP
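
/*
 * Illustration (not part of the original source): with SG_CHUNK_SIZE at
 * its usual value of 128, the table above expands to five pools, named
 * sgpool-8, sgpool-16, sgpool-32, sgpool-64 and sgpool-128.  Each
 * allocation is served from the smallest pool whose chunk size covers
 * the requested number of entries.
 */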

static inline unsigned int sg_pool_index(unsigned short nents)
{
	unsigned int index;

	BUG_ON(nents > SG_CHUNK_SIZE);

	if (nents <= 8)
		index = 0;
	else
		index = get_count_order(nents) - 3;

	return index;
}
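
/*
 * Worked example of the index calculation above (illustrative only,
 * assuming SG_CHUNK_SIZE == 128):
 *   nents  1..8   -> index 0 (sgpool-8)
 *   nents  9..16  -> get_count_order(16)  - 3 = 1 (sgpool-16)
 *   nents 17..32  -> get_count_order(32)  - 3 = 2 (sgpool-32)
 *   nents 33..64  -> get_count_order(64)  - 3 = 3 (sgpool-64)
 *   nents 65..128 -> get_count_order(128) - 3 = 4 (sgpool-128)
 */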

/* Return a scatterlist chunk to the pool it was allocated from. */
static void sg_pool_free(struct scatterlist *sgl, unsigned int nents)
{
	struct sg_pool *sgp;

	sgp = sg_pools + sg_pool_index(nents);
	mempool_free(sgl, sgp->pool);
}

/* Allocate a scatterlist chunk from the smallest pool that fits @nents. */
static struct scatterlist *sg_pool_alloc(unsigned int nents, gfp_t gfp_mask)
{
	struct sg_pool *sgp;

	sgp = sg_pools + sg_pool_index(nents);
	return mempool_alloc(sgp->pool, gfp_mask);
}

/**
 * sg_free_table_chained - Free a previously mapped sg table
 * @table:	The sg table header to use
 * @first_chunk: set if a first_chunk was passed to sg_alloc_table_chained()
 *
 *  Description:
 *    Free an sg table previously allocated and set up with
 *    sg_alloc_table_chained().
 *
 **/
void sg_free_table_chained(struct sg_table *table, bool first_chunk)
{
	if (first_chunk && table->orig_nents <= SG_CHUNK_SIZE)
		return;
	__sg_free_table(table, SG_CHUNK_SIZE, first_chunk, sg_pool_free);
}
EXPORT_SYMBOL_GPL(sg_free_table_chained);

/**
 * sg_alloc_table_chained - Allocate and chain SGLs in an sg table
 * @table:	The sg table header to use
 * @nents:	Number of entries in sg list
 * @first_chunk: first SGL, or NULL to allocate the first chunk from a pool
 *
 *  Description:
 *    Allocate and chain SGLs in an sg table. If @nents is larger than
 *    SG_CHUNK_SIZE a chained sg table will be set up.
 *
 **/
int sg_alloc_table_chained(struct sg_table *table, int nents,
		struct scatterlist *first_chunk)
{
	int ret;

	BUG_ON(!nents);

	if (first_chunk) {
		if (nents <= SG_CHUNK_SIZE) {
			table->nents = table->orig_nents = nents;
			sg_init_table(table->sgl, nents);
			return 0;
		}
	}

	ret = __sg_alloc_table(table, nents, SG_CHUNK_SIZE,
			       first_chunk, GFP_ATOMIC, sg_pool_alloc);
	if (unlikely(ret))
		sg_free_table_chained(table, (bool)first_chunk);
	return ret;
}
EXPORT_SYMBOL_GPL(sg_alloc_table_chained);
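
/*
 * Usage sketch (illustrative only, not part of this file): a caller
 * typically embeds a fixed first chunk of SG_CHUNK_SIZE entries and lets
 * sg_alloc_table_chained() chain further chunks from the pools above when
 * more entries are needed.  The names my_cmd, setup_sgl and teardown_sgl
 * below are hypothetical.
 *
 *	struct my_cmd {
 *		struct sg_table table;
 *		struct scatterlist first_chunk[SG_CHUNK_SIZE];
 *	};
 *
 *	static int setup_sgl(struct my_cmd *cmd, int nents)
 *	{
 *		// Uses the embedded chunk when nents <= SG_CHUNK_SIZE,
 *		// otherwise chains extra chunks allocated from sg_pools.
 *		return sg_alloc_table_chained(&cmd->table, nents,
 *					      cmd->first_chunk);
 *	}
 *
 *	static void teardown_sgl(struct my_cmd *cmd)
 *	{
 *		// Pass true because a first chunk was supplied above.
 *		sg_free_table_chained(&cmd->table, true);
 *	}
 */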

static __init int sg_pool_init(void)
{
	int i;

	for (i = 0; i < SG_MEMPOOL_NR; i++) {
		struct sg_pool *sgp = sg_pools + i;
		int size = sgp->size * sizeof(struct scatterlist);

		sgp->slab = kmem_cache_create(sgp->name, size, 0,
				SLAB_HWCACHE_ALIGN, NULL);
		if (!sgp->slab) {
			printk(KERN_ERR "SG_POOL: can't init sg slab %s\n",
					sgp->name);
			goto cleanup_sdb;
		}

		sgp->pool = mempool_create_slab_pool(SG_MEMPOOL_SIZE,
						     sgp->slab);
		if (!sgp->pool) {
			printk(KERN_ERR "SG_POOL: can't init sg mempool %s\n",
					sgp->name);
			goto cleanup_sdb;
		}
	}

	return 0;

cleanup_sdb:
	for (i = 0; i < SG_MEMPOOL_NR; i++) {
		struct sg_pool *sgp = sg_pools + i;
		if (sgp->pool)
			mempool_destroy(sgp->pool);
		if (sgp->slab)
			kmem_cache_destroy(sgp->slab);
	}

	return -ENOMEM;
}

static __exit void sg_pool_exit(void)
{
	int i;

	for (i = 0; i < SG_MEMPOOL_NR; i++) {
		struct sg_pool *sgp = sg_pools + i;
		mempool_destroy(sgp->pool);
		kmem_cache_destroy(sgp->slab);
	}
}

module_init(sg_pool_init);
module_exit(sg_pool_exit);
173