xref: /openbmc/linux/drivers/nvdimm/dimm_devs.c (revision 110e6f26)
/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/vmalloc.h>
#include <linux/device.h>
#include <linux/ndctl.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include "nd-core.h"
#include "label.h"
#include "nd.h"

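/* ida for the "nmem%d" device names and char-dev minors assigned in nvdimm_create() */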
static DEFINE_IDA(dimm_ida);

/*
 * Retrieve the bus and dimm handle and return whether the dimm
 * supports get_config_data commands
 */
static int __validate_dimm(struct nvdimm_drvdata *ndd)
{
	struct nvdimm *nvdimm;

	if (!ndd)
		return -EINVAL;

	nvdimm = to_nvdimm(ndd->dev);

	if (!nvdimm->dsm_mask)
		return -ENXIO;
	if (!test_bit(ND_CMD_GET_CONFIG_DATA, nvdimm->dsm_mask))
		return -ENXIO;

	return 0;
}

static int validate_dimm(struct nvdimm_drvdata *ndd)
{
	int rc = __validate_dimm(ndd);

	if (rc && ndd)
		dev_dbg(ndd->dev, "%pf: %s error: %d\n",
				__builtin_return_address(0), __func__, rc);
	return rc;
}

/**
 * nvdimm_init_nsarea - determine the geometry of a dimm's namespace area
 * @ndd: dimm driver-data to initialize
 */
int nvdimm_init_nsarea(struct nvdimm_drvdata *ndd)
{
	struct nd_cmd_get_config_size *cmd = &ndd->nsarea;
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
	struct nvdimm_bus_descriptor *nd_desc;
	int rc = validate_dimm(ndd);

	if (rc)
		return rc;

	if (cmd->config_size)
		return 0; /* already valid */

	memset(cmd, 0, sizeof(*cmd));
	nd_desc = nvdimm_bus->nd_desc;
	return nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
			ND_CMD_GET_CONFIG_SIZE, cmd, sizeof(*cmd), NULL);
}

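/**
 * nvdimm_init_config_data - read and cache the dimm's label storage area
 * @ndd: dimm to read
 *
 * Reads ndd->nsarea.config_size bytes of config data into ndd->data,
 * issuing one ND_CMD_GET_CONFIG_DATA call per chunk of at most
 * min(PAGE_SIZE, ndd->nsarea.max_xfer) bytes.  Returns immediately if
 * the area has already been cached (ndd->data is set).
 */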
int nvdimm_init_config_data(struct nvdimm_drvdata *ndd)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
	struct nd_cmd_get_config_data_hdr *cmd;
	struct nvdimm_bus_descriptor *nd_desc;
	int rc = validate_dimm(ndd);
	u32 max_cmd_size, config_size;
	size_t offset;

	if (rc)
		return rc;

	if (ndd->data)
		return 0;

	if (ndd->nsarea.status || ndd->nsarea.max_xfer == 0
			|| ndd->nsarea.config_size < ND_LABEL_MIN_SIZE) {
		dev_dbg(ndd->dev, "failed to init config data area: (%d:%d)\n",
				ndd->nsarea.max_xfer, ndd->nsarea.config_size);
		return -ENXIO;
	}

	ndd->data = kmalloc(ndd->nsarea.config_size, GFP_KERNEL);
	if (!ndd->data)
		ndd->data = vmalloc(ndd->nsarea.config_size);

	if (!ndd->data)
		return -ENOMEM;

	max_cmd_size = min_t(u32, PAGE_SIZE, ndd->nsarea.max_xfer);
	cmd = kzalloc(max_cmd_size + sizeof(*cmd), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	nd_desc = nvdimm_bus->nd_desc;
	for (config_size = ndd->nsarea.config_size, offset = 0;
			config_size; config_size -= cmd->in_length,
			offset += cmd->in_length) {
		cmd->in_length = min(config_size, max_cmd_size);
		cmd->in_offset = offset;
		rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
				ND_CMD_GET_CONFIG_DATA, cmd,
				cmd->in_length + sizeof(*cmd), NULL);
		if (rc || cmd->status) {
			rc = rc ? rc : -ENXIO;
			break;
		}
		memcpy(ndd->data + offset, cmd->out_buf, cmd->in_length);
	}
	dev_dbg(ndd->dev, "%s: len: %zu rc: %d\n", __func__, offset, rc);
	kfree(cmd);

	return rc;
}

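/**
 * nvdimm_set_config_data - write back a portion of the label storage area
 * @ndd: dimm to update, ndd->data must already be initialized
 * @offset: offset into the config data area to start writing
 * @buf: data to write
 * @len: number of bytes to write
 *
 * Writes are chunked to at most min(PAGE_SIZE, len, max_xfer) bytes per
 * ND_CMD_SET_CONFIG_DATA call; firmware status for each chunk is
 * returned in a u32 appended after the command payload.
 */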
int nvdimm_set_config_data(struct nvdimm_drvdata *ndd, size_t offset,
		void *buf, size_t len)
{
	int rc = validate_dimm(ndd);
	size_t max_cmd_size, buf_offset;
	struct nd_cmd_set_config_hdr *cmd;
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
	struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;

	if (rc)
		return rc;

	if (!ndd->data)
		return -ENXIO;

	if (offset + len > ndd->nsarea.config_size)
		return -ENXIO;

	max_cmd_size = min_t(u32, PAGE_SIZE, len);
	max_cmd_size = min_t(u32, max_cmd_size, ndd->nsarea.max_xfer);
	cmd = kzalloc(max_cmd_size + sizeof(*cmd) + sizeof(u32), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	for (buf_offset = 0; len; len -= cmd->in_length,
			buf_offset += cmd->in_length) {
		size_t cmd_size;
		u32 *status;

		cmd->in_offset = offset + buf_offset;
		cmd->in_length = min(max_cmd_size, len);
		memcpy(cmd->in_buf, buf + buf_offset, cmd->in_length);

		/* status is output in the last 4 bytes of the command buffer */
		cmd_size = sizeof(*cmd) + cmd->in_length + sizeof(u32);
		status = ((void *) cmd) + cmd_size - sizeof(u32);

		rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
				ND_CMD_SET_CONFIG_DATA, cmd, cmd_size, NULL);
		if (rc || *status) {
			rc = rc ? rc : -ENXIO;
			break;
		}
	}
	kfree(cmd);

	return rc;
}

static void nvdimm_release(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	ida_simple_remove(&dimm_ida, nvdimm->id);
	kfree(nvdimm);
}

static struct device_type nvdimm_device_type = {
	.name = "nvdimm",
	.release = nvdimm_release,
};

bool is_nvdimm(struct device *dev)
{
	return dev->type == &nvdimm_device_type;
}

struct nvdimm *to_nvdimm(struct device *dev)
{
	struct nvdimm *nvdimm = container_of(dev, struct nvdimm, dev);

	WARN_ON(!is_nvdimm(dev));
	return nvdimm;
}
EXPORT_SYMBOL_GPL(to_nvdimm);

struct nvdimm *nd_blk_region_to_dimm(struct nd_blk_region *ndbr)
{
	struct nd_region *nd_region = &ndbr->nd_region;
	struct nd_mapping *nd_mapping = &nd_region->mapping[0];

	return nd_mapping->nvdimm;
}
EXPORT_SYMBOL_GPL(nd_blk_region_to_dimm);

struct nvdimm_drvdata *to_ndd(struct nd_mapping *nd_mapping)
{
	struct nvdimm *nvdimm = nd_mapping->nvdimm;

	WARN_ON_ONCE(!is_nvdimm_bus_locked(&nvdimm->dev));

	return dev_get_drvdata(&nvdimm->dev);
}
EXPORT_SYMBOL(to_ndd);

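/*
 * Final teardown for a dimm's driver-data: release any remaining dpa
 * reservations, free the cached label area, and drop the device
 * reference the drvdata holds.
 */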
void nvdimm_drvdata_release(struct kref *kref)
{
	struct nvdimm_drvdata *ndd = container_of(kref, typeof(*ndd), kref);
	struct device *dev = ndd->dev;
	struct resource *res, *_r;

	dev_dbg(dev, "%s\n", __func__);

	nvdimm_bus_lock(dev);
	for_each_dpa_resource_safe(ndd, res, _r)
		nvdimm_free_dpa(ndd, res);
	nvdimm_bus_unlock(dev);

	kvfree(ndd->data);
	kfree(ndd);
	put_device(dev);
}

void get_ndd(struct nvdimm_drvdata *ndd)
{
	kref_get(&ndd->kref);
}

void put_ndd(struct nvdimm_drvdata *ndd)
{
	if (ndd)
		kref_put(&ndd->kref, nvdimm_drvdata_release);
}

const char *nvdimm_name(struct nvdimm *nvdimm)
{
	return dev_name(&nvdimm->dev);
}
EXPORT_SYMBOL_GPL(nvdimm_name);

void *nvdimm_provider_data(struct nvdimm *nvdimm)
{
	if (nvdimm)
		return nvdimm->provider_data;
	return NULL;
}
EXPORT_SYMBOL_GPL(nvdimm_provider_data);

static ssize_t commands_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	int cmd, len = 0;

	if (!nvdimm->dsm_mask)
		return sprintf(buf, "\n");

	for_each_set_bit(cmd, nvdimm->dsm_mask, BITS_PER_LONG)
		len += sprintf(buf + len, "%s ", nvdimm_cmd_name(cmd));
	len += sprintf(buf + len, "\n");
	return len;
}
static DEVICE_ATTR_RO(commands);

static ssize_t state_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	/*
	 * The state may be in the process of changing, userspace should
	 * quiesce probing if it wants a static answer.  Cycling the bus
	 * lock below at least serializes this read against an in-flight
	 * probe that already holds the lock.
	 */
	nvdimm_bus_lock(dev);
	nvdimm_bus_unlock(dev);
	return sprintf(buf, "%s\n", atomic_read(&nvdimm->busy)
			? "active" : "idle");
}
static DEVICE_ATTR_RO(state);

static ssize_t available_slots_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm_drvdata *ndd = dev_get_drvdata(dev);
	ssize_t rc;
	u32 nfree;

	if (!ndd)
		return -ENXIO;

	nvdimm_bus_lock(dev);
	nfree = nd_label_nfree(ndd);
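	/* "nfree - 1 > nfree" is an unsigned-underflow check for nfree == 0 */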
	if (nfree - 1 > nfree) {
		dev_WARN_ONCE(dev, 1, "we ate our last label?\n");
		nfree = 0;
	} else
		nfree--;
	rc = sprintf(buf, "%d\n", nfree);
	nvdimm_bus_unlock(dev);
	return rc;
}
static DEVICE_ATTR_RO(available_slots);

static struct attribute *nvdimm_attributes[] = {
	&dev_attr_state.attr,
	&dev_attr_commands.attr,
	&dev_attr_available_slots.attr,
	NULL,
};

struct attribute_group nvdimm_attribute_group = {
	.attrs = nvdimm_attributes,
};
EXPORT_SYMBOL_GPL(nvdimm_attribute_group);

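/**
 * nvdimm_create - register a new "nmem%d" dimm device on @nvdimm_bus
 * @nvdimm_bus: parent bus for the new device
 * @provider_data: opaque pointer later retrievable via nvdimm_provider_data()
 * @groups: sysfs attribute groups to attach to the device
 * @flags: NDD_* dimm state flags
 * @dsm_mask: bitmask of the ND_CMD_* commands this dimm supports
 *
 * Returns NULL if allocation or id assignment fails.
 */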
struct nvdimm *nvdimm_create(struct nvdimm_bus *nvdimm_bus, void *provider_data,
		const struct attribute_group **groups, unsigned long flags,
		unsigned long *dsm_mask)
{
	struct nvdimm *nvdimm = kzalloc(sizeof(*nvdimm), GFP_KERNEL);
	struct device *dev;

	if (!nvdimm)
		return NULL;

	nvdimm->id = ida_simple_get(&dimm_ida, 0, 0, GFP_KERNEL);
	if (nvdimm->id < 0) {
		kfree(nvdimm);
		return NULL;
	}
	nvdimm->provider_data = provider_data;
	nvdimm->flags = flags;
	nvdimm->dsm_mask = dsm_mask;
	atomic_set(&nvdimm->busy, 0);
	dev = &nvdimm->dev;
	dev_set_name(dev, "nmem%d", nvdimm->id);
	dev->parent = &nvdimm_bus->dev;
	dev->type = &nvdimm_device_type;
	dev->devt = MKDEV(nvdimm_major, nvdimm->id);
	dev->groups = groups;
	nd_device_register(dev);

	return nvdimm;
}
EXPORT_SYMBOL_GPL(nvdimm_create);

/**
 * nd_blk_available_dpa - account for the unused dpa in a BLK region
 * @nd_mapping: container of dpa-resource-root + labels
 *
 * Unlike PMEM, BLK namespaces can occupy discontiguous DPA ranges.
 */
resource_size_t nd_blk_available_dpa(struct nd_mapping *nd_mapping)
{
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	resource_size_t map_end, busy = 0, available;
	struct resource *res;

	if (!ndd)
		return 0;

	map_end = nd_mapping->start + nd_mapping->size - 1;
	for_each_dpa_resource(ndd, res)
		if (res->start >= nd_mapping->start && res->start < map_end) {
			resource_size_t end = min(map_end, res->end);

			busy += end - res->start + 1;
		} else if (res->end >= nd_mapping->start
				&& res->end <= map_end) {
			/* +1: resource ranges are inclusive of ->end */
			busy += res->end - nd_mapping->start + 1;
		} else if (nd_mapping->start > res->start
				&& nd_mapping->start < res->end) {
			/* total eclipse of the BLK region mapping */
			busy += nd_mapping->size;
		}

	available = map_end - nd_mapping->start + 1;
	if (busy < available)
		return available - busy;
	return 0;
}

/**
 * nd_pmem_available_dpa - for the given dimm+region account unallocated dpa
 * @nd_region: constrain available space check to this reference region
 * @nd_mapping: container of dpa-resource-root + labels
 * @overlap: calculate available space assuming this level of overlap
 *
 * Validate that a PMEM label, if present, aligns with the start of an
 * interleave set and truncate the available size at the lowest BLK
 * overlap point.
 *
 * The expectation is that this routine is called multiple times as it
 * probes for the largest BLK encroachment for any single member DIMM of
 * the interleave set.  Once that value is determined the PMEM-limit for
 * the set can be established.
 */
resource_size_t nd_pmem_available_dpa(struct nd_region *nd_region,
		struct nd_mapping *nd_mapping, resource_size_t *overlap)
{
	resource_size_t map_start, map_end, busy = 0, available, blk_start;
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct resource *res;
	const char *reason;

	if (!ndd)
		return 0;

	map_start = nd_mapping->start;
	map_end = map_start + nd_mapping->size - 1;
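	/* seed blk_start with the worst-case BLK encroachment seen so far */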
	blk_start = max(map_start, map_end + 1 - *overlap);
	for_each_dpa_resource(ndd, res)
		if (res->start >= map_start && res->start < map_end) {
			if (strncmp(res->name, "blk", 3) == 0)
				blk_start = min(blk_start, res->start);
			else if (res->start != map_start) {
				reason = "misaligned to iset";
				goto err;
			} else {
				if (busy) {
					reason = "duplicate overlapping PMEM reservations?";
					goto err;
				}
				busy += resource_size(res);
				continue;
			}
		} else if (res->end >= map_start && res->end <= map_end) {
			if (strncmp(res->name, "blk", 3) == 0) {
				/*
				 * If a BLK allocation overlaps the start of
				 * PMEM the entire interleave set may now only
				 * be used for BLK.
				 */
				blk_start = map_start;
			} else {
				reason = "misaligned to iset";
				goto err;
			}
		} else if (map_start > res->start && map_start < res->end) {
			/* total eclipse of the mapping */
			busy += nd_mapping->size;
			blk_start = map_start;
		}

	*overlap = map_end + 1 - blk_start;
	available = blk_start - map_start;
	if (busy < available)
		return available - busy;
	return 0;

 err:
	/*
	 * Something is wrong, PMEM must align with the start of the
	 * interleave set, and there can only be one allocation per set.
	 */
	nd_dbg_dpa(nd_region, ndd, res, "%s\n", reason);
	return 0;
}

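/*
 * A sketch of the intended calling pattern, per the comment above (the
 * real caller lives in the region sizing code; this is illustrative
 * pseudo-code, not the exact loop):
 *
 *	overlap = 0;
 *	do {
 *		rescan = false;
 *		for each nd_mapping in the interleave set:
 *			avail = nd_pmem_available_dpa(nd_region,
 *					nd_mapping, &overlap);
 *			if (overlap grew)
 *				rescan = true;
 *	} while (rescan);
 */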

void nvdimm_free_dpa(struct nvdimm_drvdata *ndd, struct resource *res)
{
	WARN_ON_ONCE(!is_nvdimm_bus_locked(ndd->dev));
	kfree(res->name);
	__release_region(&ndd->dpa, res->start, resource_size(res));
}

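/*
 * Reserve @n bytes of dpa starting at @start on behalf of @label_id.  On
 * success the duplicated label_id string is owned by the resource and is
 * freed again by nvdimm_free_dpa().
 */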
struct resource *nvdimm_allocate_dpa(struct nvdimm_drvdata *ndd,
		struct nd_label_id *label_id, resource_size_t start,
		resource_size_t n)
{
	char *name = kmemdup(label_id, sizeof(*label_id), GFP_KERNEL);
	struct resource *res;

	if (!name)
		return NULL;

	WARN_ON_ONCE(!is_nvdimm_bus_locked(ndd->dev));
	res = __request_region(&ndd->dpa, start, n, name, 0);
	if (!res)
		kfree(name);
	return res;
}

/**
 * nvdimm_allocated_dpa - sum up the dpa currently allocated to this label_id
 * @ndd: container of dpa-resource-root + labels
 * @label_id: dpa resource name of the form {pmem|blk}-<human readable uuid>
 */
resource_size_t nvdimm_allocated_dpa(struct nvdimm_drvdata *ndd,
		struct nd_label_id *label_id)
{
	resource_size_t allocated = 0;
	struct resource *res;

	for_each_dpa_resource(ndd, res)
		if (strcmp(res->name, label_id->id) == 0)
			allocated += resource_size(res);

	return allocated;
}

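/* device_for_each_child() callback for nvdimm_bus_check_dimm_count() */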
static int count_dimms(struct device *dev, void *c)
{
	int *count = c;

	if (is_nvdimm(dev))
		(*count)++;
	return 0;
}

int nvdimm_bus_check_dimm_count(struct nvdimm_bus *nvdimm_bus, int dimm_count)
{
	int count = 0;

	/* Flush any possible dimm registration failures */
	nd_synchronize();

	device_for_each_child(&nvdimm_bus->dev, &count, count_dimms);
	dev_dbg(&nvdimm_bus->dev, "%s: count: %d\n", __func__, count);
	if (count != dimm_count)
		return -ENXIO;
	return 0;
}
EXPORT_SYMBOL_GPL(nvdimm_bus_check_dimm_count);
549