xref: /openbmc/linux/drivers/dax/dax-private.h (revision 3cf3cdea)
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright(c) 2016 Intel Corporation. All rights reserved.
 */
#ifndef __DAX_PRIVATE_H__
#define __DAX_PRIVATE_H__

#include <linux/device.h>
#include <linux/cdev.h>
#include <linux/idr.h>

/* private routines between core files */
struct dax_device;
struct dax_device *inode_dax(struct inode *inode);
struct inode *dax_inode(struct dax_device *dax_dev);
int dax_bus_init(void);
void dax_bus_exit(void);

/**
 * struct dax_region - mapping infrastructure for dax devices
 * @id: kernel-wide unique region id for a memory range
 * @target_node: effective numa node if this memory range is onlined
 * @kref: pins the region while other agents need to do lookups
 * @dev: parent device backing this region
 * @align: allocation and mapping alignment for child dax devices
 * @ida: instance id allocator
 * @res: resource tree to track instance allocations
 * @seed: allow userspace to find the first unbound seed device
 * @youngest: allow userspace to find the most recently created device
 */
struct dax_region {
	int id;
	int target_node;
	struct kref kref;
	struct device *dev;
	unsigned int align;
	struct ida ida;
	struct resource res;
	struct device *seed;
	struct device *youngest;
};
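
/*
 * Illustrative sketch only (not part of the original header): how a
 * lookup path might pin a region through @kref while it dereferences
 * it.  dax_region_free() is a hypothetical release callback here;
 * kref_get()/kref_put() are the stock <linux/kref.h> helpers.
 */
#if 0
static void dax_region_free(struct kref *kref)
{
	struct dax_region *dax_region =
		container_of(kref, struct dax_region, kref);

	kfree(dax_region);
}

static void dax_region_lookup_example(struct dax_region *dax_region)
{
	kref_get(&dax_region->kref);	/* pin for the duration of the lookup */
	/* ... walk dax_region->res / dax_region->ida ... */
	kref_put(&dax_region->kref, dax_region_free);
}
#endif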

struct dax_mapping {
	struct device dev;
	int range_id;
	int id;
};

/**
 * struct dev_dax - instance data for a subdivision of a dax region, and
 * the per-device state while it is activated in the driver.
 * @region: parent region
 * @dax_dev: core dax functionality
 * @align: allocation and mapping alignment for this instance
 * @target_node: effective numa node if dev_dax memory range is onlined
 * @id: ida allocated id
 * @ida: mapping id allocator
 * @dev: device core
 * @pgmap: pgmap for memmap setup / lifetime (driver owned)
 * @nr_range: size of @ranges
 * @ranges: resource-span + pgoff tuples for the instance
 */
struct dev_dax {
	struct dax_region *region;
	struct dax_device *dax_dev;
	unsigned int align;
	int target_node;
	int id;
	struct ida ida;
	struct device dev;
	struct dev_pagemap *pgmap;
	int nr_range;
	struct dev_dax_range {
		unsigned long pgoff;
		struct range range;
		struct dax_mapping *mapping;
	} *ranges;
};
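
/*
 * Illustrative sketch only: how the @nr_range/@ranges table resolves an
 * instance page offset to a physical address.  This is an assumption of
 * what a helper such as dax_pgoff_to_phys() does, not the in-tree
 * implementation.
 */
#if 0
static phys_addr_t example_pgoff_to_phys(struct dev_dax *dev_dax, pgoff_t pgoff)
{
	int i;

	for (i = 0; i < dev_dax->nr_range; i++) {
		struct dev_dax_range *dax_range = &dev_dax->ranges[i];
		struct range *range = &dax_range->range;
		unsigned long long pgoff_end;

		/* each tuple covers pgoff .. pgoff + PHYS_PFN(range_len()) - 1 */
		pgoff_end = dax_range->pgoff + PHYS_PFN(range_len(range)) - 1;
		if (pgoff < dax_range->pgoff || pgoff > pgoff_end)
			continue;
		return range->start + PFN_PHYS(pgoff - dax_range->pgoff);
	}
	return -1;
}
#endif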

static inline struct dev_dax *to_dev_dax(struct device *dev)
{
	return container_of(dev, struct dev_dax, dev);
}

static inline struct dax_mapping *to_dax_mapping(struct device *dev)
{
	return container_of(dev, struct dax_mapping, dev);
}
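
/*
 * Illustrative sketch only: the container_of() helpers above are what
 * device/sysfs callbacks use to get from the embedded struct device back
 * to the owning object.  nr_range_show() is a hypothetical attribute,
 * not one defined by this header.
 */
#if 0
static ssize_t nr_range_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	struct dev_dax *dev_dax = to_dev_dax(dev);

	return sysfs_emit(buf, "%d\n", dev_dax->nr_range);
}
#endif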

phys_addr_t dax_pgoff_to_phys(struct dev_dax *dev_dax, pgoff_t pgoff, unsigned long size);
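
/*
 * Illustrative sketch only: a fault-handler-style caller of
 * dax_pgoff_to_phys().  Treating -1 as "offset not backed by any range"
 * is an assumption carried over from the lookup sketch above.
 */
#if 0
static int example_translate(struct dev_dax *dev_dax, pgoff_t pgoff)
{
	phys_addr_t phys = dax_pgoff_to_phys(dev_dax, pgoff, PAGE_SIZE);

	if (phys == -1)
		return -EFAULT;

	pr_debug("pgoff %lu -> %pa\n", pgoff, &phys);
	return 0;
}
#endif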

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline bool dax_align_valid(unsigned long align)
{
	if (align == PUD_SIZE && IS_ENABLED(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD))
		return true;
	if (align == PMD_SIZE && has_transparent_hugepage())
		return true;
	if (align == PAGE_SIZE)
		return true;
	return false;
}
#else
static inline bool dax_align_valid(unsigned long align)
{
	return align == PAGE_SIZE;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
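
/*
 * Illustrative sketch only: a configuration path would typically use
 * dax_align_valid() to reject unsupported alignments before storing
 * them.  example_set_align() is hypothetical, not part of this header.
 */
#if 0
static int example_set_align(struct dev_dax *dev_dax, unsigned long align)
{
	if (!dax_align_valid(align))
		return -EINVAL;

	dev_dax->align = align;
	return 0;
}
#endif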
#endif /* __DAX_PRIVATE_H__ */