/*
 * Copyright 2013 Red Hat Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Authors: Jérôme Glisse <jglisse@redhat.com>
 */
/*
 * Heterogeneous Memory Management (HMM)
 *
 * See Documentation/vm/hmm.rst for reasons and overview of what HMM is and
 * what it is for. Here we focus on the HMM API description, with some
 * explanation of the underlying implementation.
 *
 * Short description: HMM provides a set of helpers to share a virtual address
 * space between the CPU and a device, so that the device can access any valid
 * address of the process (while still obeying memory protection). HMM also
 * provides helpers to migrate process memory to device memory, and back. Each
 * set of functionality (address space mirroring, and migration to and from
 * device memory) can be used independently of the other.
 *
 *
 * HMM address space mirroring API:
 *
 * Use HMM address space mirroring if you want to mirror a range of the CPU
 * page table of a process into a device page table. Here, "mirror" means
 * "keep synchronized". Prerequisites: the device must provide the ability to
 * write-protect its page tables (at PAGE_SIZE granularity), and must be able
 * to recover from the resulting potential page faults.
 *
 * HMM guarantees that at any point in time, a given virtual address points to
 * either the same memory in both CPU and device page tables (that is: CPU and
 * device page tables each point to the same pages), or that one page table (CPU
 * or device) points to no entry, while the other still points to the old page
 * for the address. The latter case happens when the CPU page table update
 * happens first, and then the update is mirrored over to the device page table.
 * This does not cause any issue, because the CPU page table cannot start
 * pointing to a new page until the device page table is invalidated.
 *
 * HMM uses mmu_notifiers to monitor the CPU page tables, and forwards any
 * updates to each device driver that has registered a mirror. It also provides
 * some API calls to help with taking a snapshot of the CPU page table, and to
 * synchronize with any updates that might happen concurrently.
 *
 *
 * HMM migration to and from device memory:
 *
 * HMM provides a set of helpers to hotplug device memory as ZONE_DEVICE, with
 * a new MEMORY_DEVICE_PRIVATE type. This provides a struct page for each page
 * of the device memory, and allows the device driver to manage its memory
 * using those struct pages. Having struct pages for device memory makes
 * migration easier. Because that memory is not addressable by the CPU it must
 * never be pinned to the device; in other words, any CPU page fault can always
 * cause the device memory to be migrated (copied/moved) back to regular memory.
 *
 * A new migrate helper (migrate_vma()) has been added (see mm/migrate.c) that
 * allows use of a device DMA engine to perform the copy operation between
 * regular system memory and device memory.
 */
#ifndef LINUX_HMM_H
#define LINUX_HMM_H

#include <linux/kconfig.h>
#include <asm/pgtable.h>

#if IS_ENABLED(CONFIG_HMM)

#include <linux/device.h>
#include <linux/migrate.h>
#include <linux/memremap.h>
#include <linux/completion.h>
#include <linux/mmu_notifier.h>


/*
 * struct hmm - HMM per mm struct
 *
 * @mm: mm struct this HMM struct is bound to
 * @kref: reference count for this HMM struct
 * @lock: lock protecting ranges list
 * @ranges: list of ranges being snapshotted
 * @mirrors: list of mirrors for this mm
 * @mmu_notifier: mmu notifier to track updates to CPU page table
 * @mirrors_sem: read/write semaphore protecting the mirrors list
 * @wq: wait queue for users waiting on a range invalidation
 * @notifiers: count of active mmu notifiers
 * @dead: is the mm dead?
 */
struct hmm {
	struct mm_struct	*mm;
	struct kref		kref;
	struct mutex		lock;
	struct list_head	ranges;
	struct list_head	mirrors;
	struct mmu_notifier	mmu_notifier;
	struct rw_semaphore	mirrors_sem;
	wait_queue_head_t	wq;
	long			notifiers;
	bool			dead;
};

/*
 * hmm_pfn_flag_e - HMM flag enums
 *
 * Flags:
 * HMM_PFN_VALID: pfn is valid. It has, at least, read permission.
 * HMM_PFN_WRITE: CPU page table has write permission set
 * HMM_PFN_DEVICE_PRIVATE: private device memory (ZONE_DEVICE)
 *
 * The driver provides a flags array describing its own entry encoding. For
 * instance, if the driver's valid bit for an entry is bit 3 (i.e.,
 * (entry & (1 << 3)) is true when the entry is valid), then the driver must
 * provide an array in hmm_range.flags with
 * hmm_range.flags[HMM_PFN_VALID] == 1 << 3. The same logic applies to all
 * flags. This is the same idea as vm_page_prot in the vma, except that it is
 * per device driver rather than per architecture.
 */
enum hmm_pfn_flag_e {
	HMM_PFN_VALID = 0,
	HMM_PFN_WRITE,
	HMM_PFN_DEVICE_PRIVATE,
	HMM_PFN_FLAG_MAX
};

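/*
 * For example, a hypothetical driver whose device page table entries use
 * bit 3 as the valid bit, bit 1 as the write bit, and bit 2 to mark device
 * private memory could describe its encoding as follows (a sketch; the bit
 * positions are illustrative, not mandated by HMM):
 *
 *	static const uint64_t my_driver_flags[HMM_PFN_FLAG_MAX] = {
 *		[HMM_PFN_VALID]		 = 1UL << 3,
 *		[HMM_PFN_WRITE]		 = 1UL << 1,
 *		[HMM_PFN_DEVICE_PRIVATE] = 1UL << 2,
 *	};
 */
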
/*
 * hmm_pfn_value_e - HMM pfn special value
 *
 * Flags:
 * HMM_PFN_ERROR: corresponding CPU page table entry points to poisoned memory
 * HMM_PFN_NONE: corresponding CPU page table entry is pte_none()
 * HMM_PFN_SPECIAL: corresponding CPU page table entry is special; i.e., the
 *      result of vmf_insert_pfn() or vm_insert_page(). Therefore, it should not
 *      be mirrored by a device, because the entry will never have HMM_PFN_VALID
 *      set and the pfn value is undefined.
 *
 * The driver provides entry values for the none, error, and special cases.
 * The driver can alias some of them (e.g., use the same value for error and
 * special), but it should not alias none with error or special.
 *
 * The HMM pfn value returned by hmm_vma_get_pfns() or hmm_vma_fault() will be:
 * hmm_range.values[HMM_PFN_ERROR] if the CPU page table entry is poisoned,
 * hmm_range.values[HMM_PFN_NONE] if there is no CPU page table entry,
 * hmm_range.values[HMM_PFN_SPECIAL] if the CPU page table entry is special.
 */
enum hmm_pfn_value_e {
	HMM_PFN_ERROR,
	HMM_PFN_NONE,
	HMM_PFN_SPECIAL,
	HMM_PFN_VALUE_MAX
};

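/*
 * Continuing the hypothetical driver above: any values that cannot collide
 * with a valid entry (here, values without bit 3 set) work, as long as the
 * none value is not aliased with error or special (a sketch):
 *
 *	static const uint64_t my_driver_values[HMM_PFN_VALUE_MAX] = {
 *		[HMM_PFN_ERROR]	  = 0x1UL,
 *		[HMM_PFN_NONE]	  = 0x0UL,
 *		[HMM_PFN_SPECIAL] = 0x2UL,
 *	};
 */
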
/*
 * struct hmm_range - track invalidation lock on virtual address range
 *
 * @hmm: the core HMM structure this range is active against
 * @vma: the vm area struct for the range
 * @list: all active ranges are kept on a list
 * @start: range virtual start address (inclusive)
 * @end: range virtual end address (exclusive)
 * @pfns: array of pfns (big enough for the range)
 * @flags: pfn flags to match device driver page table
 * @values: pfn values for special cases (none, special, error, ...)
 * @default_flags: default flags for the range (write, read, ... see hmm doc)
 * @pfn_flags_mask: allows masking pfn flags so that only default_flags matter
 * @page_shift: device virtual address shift value (should be >= PAGE_SHIFT)
 * @pfn_shift: pfn shift value (should be <= PAGE_SHIFT)
 * @valid: pfns array did not change since it was filled by an HMM function
 */
struct hmm_range {
	struct hmm		*hmm;
	struct vm_area_struct	*vma;
	struct list_head	list;
	unsigned long		start;
	unsigned long		end;
	uint64_t		*pfns;
	const uint64_t		*flags;
	const uint64_t		*values;
	uint64_t		default_flags;
	uint64_t		pfn_flags_mask;
	uint8_t			page_shift;
	uint8_t			pfn_shift;
	bool			valid;
};

/*
 * hmm_range_page_shift() - return the page shift for the range
 * @range: range being queried
 * Returns: page shift (page size = 1 << page shift) for the range
 */
static inline unsigned hmm_range_page_shift(const struct hmm_range *range)
{
	return range->page_shift;
}

/*
 * hmm_range_page_size() - return the page size for the range
 * @range: range being queried
 * Returns: page size for the range in bytes
 */
static inline unsigned long hmm_range_page_size(const struct hmm_range *range)
{
	return 1UL << hmm_range_page_shift(range);
}

/*
 * hmm_range_wait_until_valid() - wait for range to be valid
 * @range: range affected by invalidation to wait on
 * @timeout: timeout for the wait, in ms (i.e., abort the wait after that
 *           period of time)
 * Returns: true if the range is valid, false otherwise.
 */
static inline bool hmm_range_wait_until_valid(struct hmm_range *range,
					      unsigned long timeout)
{
	/* Check if the mm is dead. */
	if (range->hmm == NULL || range->hmm->dead || range->hmm->mm == NULL) {
		range->valid = false;
		return false;
	}
	if (range->valid)
		return true;
	wait_event_timeout(range->hmm->wq, range->valid || range->hmm->dead,
			   msecs_to_jiffies(timeout));
	/* Return current valid status just in case we get lucky */
	return range->valid;
}

/*
 * hmm_range_valid() - test if a range is valid or not
 * @range: range
 * Returns: true if the range is valid, false otherwise.
 */
static inline bool hmm_range_valid(struct hmm_range *range)
{
	return range->valid;
}

/*
 * hmm_device_entry_to_page() - return struct page pointed to by a device entry
 * @range: range used to decode the device entry value
 * @entry: device entry value to get the corresponding struct page from
 * Returns: struct page pointer if the entry is valid, NULL otherwise
 *
 * If the device entry is valid (i.e., the valid flag is set) then return the
 * struct page matching the entry value. Otherwise return NULL.
 */
static inline struct page *hmm_device_entry_to_page(const struct hmm_range *range,
						    uint64_t entry)
{
	if (entry == range->values[HMM_PFN_NONE])
		return NULL;
	if (entry == range->values[HMM_PFN_ERROR])
		return NULL;
	if (entry == range->values[HMM_PFN_SPECIAL])
		return NULL;
	if (!(entry & range->flags[HMM_PFN_VALID]))
		return NULL;
	return pfn_to_page(entry >> range->pfn_shift);
}

/*
 * hmm_device_entry_to_pfn() - return the pfn value stored in a device entry
 * @range: range used to decode the device entry value
 * @pfn: device entry to extract the pfn from
 * Returns: pfn value if the device entry is valid, -1UL otherwise
 */
static inline unsigned long
hmm_device_entry_to_pfn(const struct hmm_range *range, uint64_t pfn)
{
	if (pfn == range->values[HMM_PFN_NONE])
		return -1UL;
	if (pfn == range->values[HMM_PFN_ERROR])
		return -1UL;
	if (pfn == range->values[HMM_PFN_SPECIAL])
		return -1UL;
	if (!(pfn & range->flags[HMM_PFN_VALID]))
		return -1UL;
	return (pfn >> range->pfn_shift);
}

/*
 * hmm_device_entry_from_page() - create a valid device entry for a page
 * @range: range used to encode the HMM pfn value
 * @page: page for which to create the device entry
 * Returns: valid device entry for the page
 */
static inline uint64_t hmm_device_entry_from_page(const struct hmm_range *range,
						  struct page *page)
{
	return (page_to_pfn(page) << range->pfn_shift) |
		range->flags[HMM_PFN_VALID];
}

/*
 * hmm_device_entry_from_pfn() - create a valid device entry value from a pfn
 * @range: range used to encode the HMM pfn value
 * @pfn: pfn value for which to create the device entry
 * Returns: valid device entry for the pfn
 */
static inline uint64_t hmm_device_entry_from_pfn(const struct hmm_range *range,
						 unsigned long pfn)
{
	return (pfn << range->pfn_shift) |
		range->flags[HMM_PFN_VALID];
}

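/*
 * A minimal decode loop over a snapshotted range might look like this,
 * assuming the range uses PAGE_SHIFT sized pages (a sketch;
 * my_device_map_page() is a hypothetical driver helper):
 *
 *	unsigned long i;
 *
 *	for (i = 0; i < (range->end - range->start) >> PAGE_SHIFT; i++) {
 *		struct page *page;
 *
 *		page = hmm_device_entry_to_page(range, range->pfns[i]);
 *		if (!page)
 *			continue; // hole, error or special entry
 *		my_device_map_page(das, range->start + (i << PAGE_SHIFT), page);
 *	}
 */
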
/*
 * Old API:
 * hmm_pfn_to_page()
 * hmm_pfn_to_pfn()
 * hmm_pfn_from_page()
 * hmm_pfn_from_pfn()
 *
 * These are the OLD API; please use the new API instead. They are kept here
 * to avoid cross-tree merge pain, i.e., we convert things to the new API in
 * stages.
 */
static inline struct page *hmm_pfn_to_page(const struct hmm_range *range,
					   uint64_t pfn)
{
	return hmm_device_entry_to_page(range, pfn);
}

static inline unsigned long hmm_pfn_to_pfn(const struct hmm_range *range,
					   uint64_t pfn)
{
	return hmm_device_entry_to_pfn(range, pfn);
}

static inline uint64_t hmm_pfn_from_page(const struct hmm_range *range,
					 struct page *page)
{
	return hmm_device_entry_from_page(range, page);
}

static inline uint64_t hmm_pfn_from_pfn(const struct hmm_range *range,
					unsigned long pfn)
{
	return hmm_device_entry_from_pfn(range, pfn);
}



#if IS_ENABLED(CONFIG_HMM_MIRROR)
/*
 * Mirroring: how to synchronize device page table with CPU page table.
 *
 * A device driver that is participating in HMM mirroring must always
 * synchronize with CPU page table updates. For this, device drivers can either
 * directly use mmu_notifier APIs or they can use the hmm_mirror API. Device
 * drivers can decide to register one mirror per device per process, or just
 * one mirror per process for a group of devices. The pattern is:
 *
 *      int device_bind_address_space(..., struct mm_struct *mm, ...)
 *      {
 *          struct device_address_space *das;
 *
 *          // Device driver specific initialization, and allocation of das
 *          // which contains an hmm_mirror struct as one of its fields.
 *          ...
 *
 *          ret = hmm_mirror_register(&das->mirror, mm, &device_mirror_ops);
 *          if (ret) {
 *              // Cleanup on error
 *              return ret;
 *          }
 *
 *          // Other device driver specific initialization
 *          ...
 *      }
 *
 * Once an hmm_mirror is registered for an address space, the device driver
 * will get callbacks through the sync_cpu_device_pagetables() operation (see
 * the hmm_mirror_ops struct).
 *
 * The device driver must not free the struct containing the hmm_mirror struct
 * before calling hmm_mirror_unregister(). The expected usage is to do that
 * when the device driver is unbinding from an address space.
 *
 *      void device_unbind_address_space(struct device_address_space *das)
 *      {
 *          // Device driver specific cleanup
 *          ...
 *
 *          hmm_mirror_unregister(&das->mirror);
 *
 *          // Other device driver specific cleanup, and now das can be freed
 *          ...
 *      }
 */

struct hmm_mirror;

/*
 * enum hmm_update_event - type of update
 * @HMM_UPDATE_INVALIDATE: invalidate range (no indication as to why)
 */
enum hmm_update_event {
	HMM_UPDATE_INVALIDATE,
};

/*
 * struct hmm_update - HMM update information for callback
 *
 * @start: virtual start address of the range to update
 * @end: virtual end address of the range to update
 * @event: event triggering the update (what is happening)
 * @blockable: can the callback block/sleep?
 */
struct hmm_update {
	unsigned long start;
	unsigned long end;
	enum hmm_update_event event;
	bool blockable;
};

/*
 * struct hmm_mirror_ops - HMM mirror device operations callbacks
 *
 * @release: callback invoked when the mm this mirror tracks is being released
 * @sync_cpu_device_pagetables: callback to update a range in the device page
 *      table
 */
struct hmm_mirror_ops {
	/* release() - release hmm_mirror
	 *
	 * @mirror: pointer to struct hmm_mirror
	 *
	 * This is called when the mm_struct is being released.
	 * The callback should make sure no references to the mirror occur
	 * after the callback returns.
	 */
	void (*release)(struct hmm_mirror *mirror);

	/* sync_cpu_device_pagetables() - synchronize page tables
	 *
	 * @mirror: pointer to struct hmm_mirror
	 * @update: update information (see struct hmm_update)
	 * Returns: -EAGAIN if update.blockable is false and the callback
	 *          needs to block; 0 otherwise.
	 *
	 * This callback ultimately originates from mmu_notifiers when the CPU
	 * page table is updated. The device driver must update its page table
	 * in response to this callback. The update argument tells what action
	 * to perform.
	 *
	 * The device driver must not return from this callback until the device
	 * page tables are completely updated (TLBs flushed, etc); this is a
	 * synchronous call.
	 */
	int (*sync_cpu_device_pagetables)(struct hmm_mirror *mirror,
					  const struct hmm_update *update);
};

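/*
 * A driver might wire these callbacks up as follows (a sketch; the
 * my_mirror_* names and the container_of() layout are illustrative):
 *
 *	static int my_sync_cpu_device_pagetables(struct hmm_mirror *mirror,
 *						 const struct hmm_update *update)
 *	{
 *		struct my_mirror *m;
 *
 *		m = container_of(mirror, struct my_mirror, mirror);
 *		if (!update->blockable)
 *			return -EAGAIN; // we must sleep to take device locks
 *		my_invalidate_device_ptes(m, update->start, update->end);
 *		return 0;
 *	}
 *
 *	static const struct hmm_mirror_ops my_mirror_ops = {
 *		.release = my_mirror_release,
 *		.sync_cpu_device_pagetables = my_sync_cpu_device_pagetables,
 *	};
 */
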
/*
 * struct hmm_mirror - mirror struct for a device driver
 *
 * @hmm: pointer to struct hmm (which is unique per mm_struct)
 * @ops: device driver callback for HMM mirror operations
 * @list: for list of mirrors of a given mm
 *
 * Each address space (mm_struct) being mirrored by a device must register one
 * instance of an hmm_mirror struct with HMM. HMM will track the list of all
 * mirrors for each mm_struct.
 */
struct hmm_mirror {
	struct hmm			*hmm;
	const struct hmm_mirror_ops	*ops;
	struct list_head		list;
};

int hmm_mirror_register(struct hmm_mirror *mirror, struct mm_struct *mm);
void hmm_mirror_unregister(struct hmm_mirror *mirror);

/*
 * hmm_mirror_mm_is_alive() - test if mm is still alive
 * @mirror: the HMM mm mirror for which we want to lock the mmap_sem
 * Returns: false if the mm is dead, true otherwise
 *
 * This is an optimization and will not always accurately report a dead mm,
 * i.e., there can be false negatives (the process is being killed but HMM has
 * not yet been informed of that). It is only intended to be used to optimize
 * out cases where the driver is about to do something time consuming and it
 * would be better to skip it if the mm is dead.
 */
static inline bool hmm_mirror_mm_is_alive(struct hmm_mirror *mirror)
{
	struct mm_struct *mm;

	if (!mirror || !mirror->hmm)
		return false;
	mm = READ_ONCE(mirror->hmm->mm);
	if (mirror->hmm->dead || !mm)
		return false;

	return true;
}


/*
 * Please see Documentation/vm/hmm.rst for how to use the range API.
 */
int hmm_range_register(struct hmm_range *range,
		       struct mm_struct *mm,
		       unsigned long start,
		       unsigned long end,
		       unsigned page_shift);
void hmm_range_unregister(struct hmm_range *range);
long hmm_range_snapshot(struct hmm_range *range);
long hmm_range_fault(struct hmm_range *range, bool block);
long hmm_range_dma_map(struct hmm_range *range,
		       struct device *device,
		       dma_addr_t *daddrs,
		       bool block);
long hmm_range_dma_unmap(struct hmm_range *range,
			 struct vm_area_struct *vma,
			 struct device *device,
			 dma_addr_t *daddrs,
			 bool dirty);

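/*
 * The overall pattern, described in full in Documentation/vm/hmm.rst, looks
 * roughly like this (a sketch; driver->update_lock and
 * my_device_update_page_table() are hypothetical driver pieces, and the
 * update lock must also be taken by the sync_cpu_device_pagetables()
 * callback so the device page table cannot be updated while an
 * invalidation is in flight):
 *
 *	hmm_range_register(&range, mm, start, end, PAGE_SHIFT);
 *	hmm_range_wait_until_valid(&range, HMM_RANGE_DEFAULT_TIMEOUT);
 * again:
 *	down_read(&mm->mmap_sem);
 *	ret = hmm_range_snapshot(&range);
 *	if (ret < 0) {
 *		up_read(&mm->mmap_sem);
 *		if (ret == -EAGAIN) {
 *			// wait for the range to become valid, then retry
 *			hmm_range_wait_until_valid(&range,
 *						   HMM_RANGE_DEFAULT_TIMEOUT);
 *			goto again;
 *		}
 *		hmm_range_unregister(&range);
 *		return ret;
 *	}
 *	take_lock(driver->update_lock);
 *	if (!hmm_range_valid(&range)) {
 *		release_lock(driver->update_lock);
 *		up_read(&mm->mmap_sem);
 *		goto again;
 *	}
 *	my_device_update_page_table(&range);
 *	release_lock(driver->update_lock);
 *	up_read(&mm->mmap_sem);
 *	hmm_range_unregister(&range);
 */
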
/*
 * HMM_RANGE_DEFAULT_TIMEOUT - default timeout (ms) when waiting for a range
 *
 * When waiting for mmu notifiers we need some kind of timeout, otherwise we
 * could potentially wait forever; 1000ms (i.e., 1s) already sounds like a
 * long time to wait.
 */
#define HMM_RANGE_DEFAULT_TIMEOUT 1000

/* This is a temporary helper to avoid merge conflict between trees. */
static inline bool hmm_vma_range_done(struct hmm_range *range)
{
	bool ret = hmm_range_valid(range);

	hmm_range_unregister(range);
	return ret;
}

/* This is a temporary helper to avoid merge conflict between trees. */
static inline int hmm_vma_fault(struct hmm_range *range, bool block)
{
	long ret;

	/*
	 * With the old API the driver must set each individual entry with
	 * the requested flags (valid, write, ...). So here we set the mask
	 * to keep intact the entries provided by the driver and zero out
	 * the default_flags.
	 */
	range->default_flags = 0;
	range->pfn_flags_mask = -1UL;

	ret = hmm_range_register(range, range->vma->vm_mm,
				 range->start, range->end,
				 PAGE_SHIFT);
	if (ret)
		return (int)ret;

	if (!hmm_range_wait_until_valid(range, HMM_RANGE_DEFAULT_TIMEOUT)) {
		/*
		 * The mmap_sem was taken by the driver; we release it here
		 * and return -EAGAIN, which corresponds to the mmap_sem
		 * having been dropped in the old API.
		 */
		up_read(&range->vma->vm_mm->mmap_sem);
		return -EAGAIN;
	}

	ret = hmm_range_fault(range, block);
	if (ret <= 0) {
		if (ret == -EBUSY || !ret) {
			/* Same as above: drop mmap_sem to match the old API. */
			up_read(&range->vma->vm_mm->mmap_sem);
			ret = -EBUSY;
		} else if (ret == -EAGAIN)
			ret = -EBUSY;
		hmm_range_unregister(range);
		return ret;
	}
	return 0;
}

/* Below are for HMM internal use only! Not to be used by device drivers! */
void hmm_mm_destroy(struct mm_struct *mm);

static inline void hmm_mm_init(struct mm_struct *mm)
{
	mm->hmm = NULL;
}
#else /* IS_ENABLED(CONFIG_HMM_MIRROR) */
static inline void hmm_mm_destroy(struct mm_struct *mm) {}
static inline void hmm_mm_init(struct mm_struct *mm) {}
#endif /* IS_ENABLED(CONFIG_HMM_MIRROR) */

#if IS_ENABLED(CONFIG_DEVICE_PRIVATE) || IS_ENABLED(CONFIG_DEVICE_PUBLIC)
struct hmm_devmem;

struct page *hmm_vma_alloc_locked_page(struct vm_area_struct *vma,
				       unsigned long addr);

/*
 * struct hmm_devmem_ops - callbacks for ZONE_DEVICE memory events
 *
 * @free: called when the refcount on a page reaches 1, meaning the page is
 *      no longer in use
 * @fault: called when there is a page fault to unaddressable memory
 *
 * Both callbacks happen from the page_free() and page_fault() callbacks of
 * struct dev_pagemap, respectively. See include/linux/memremap.h for more
 * details on those.
 *
 * The hmm_devmem_ops callbacks are here to provide a coherent and unique API
 * to device drivers; device drivers should not register their own page_free()
 * or page_fault() but should rely on the hmm_devmem_ops callbacks instead.
 */
struct hmm_devmem_ops {
	/*
	 * free() - free a device page
	 * @devmem: device memory structure (see struct hmm_devmem)
	 * @page: pointer to struct page being freed
	 *
	 * The callback occurs whenever a device page refcount reaches 1,
	 * which means that no one is holding any reference on the page
	 * anymore (ZONE_DEVICE pages have an elevated refcount of 1 by
	 * default so that they are not released to the general page
	 * allocator).
	 *
	 * Note that the callback has exclusive ownership of the page (as no
	 * one is holding any reference).
	 */
	void (*free)(struct hmm_devmem *devmem, struct page *page);
	/*
	 * fault() - CPU page fault or get user page (GUP)
	 * @devmem: device memory structure (see struct hmm_devmem)
	 * @vma: virtual memory area containing the virtual address
	 * @addr: virtual address that faulted or for which there is a GUP
	 * @page: pointer to struct page backing virtual address (unreliable)
	 * @flags: FAULT_FLAG_* (see include/linux/mm.h)
	 * @pmdp: page middle directory
	 * Returns: VM_FAULT_MINOR/MAJOR on success or one of VM_FAULT_ERROR
	 *   on error
	 *
	 * The callback occurs whenever there is a CPU page fault or GUP on a
	 * virtual address. This means that the device driver must migrate the
	 * page back to regular memory (CPU accessible).
	 *
	 * The device driver is free to migrate more than one page from the
	 * fault() callback as an optimization. However, if the device decides
	 * to migrate more than one page, it must always prioritize the
	 * faulting address over the others.
	 *
	 * The struct page pointer is only given as a hint to allow quick
	 * lookup of internal device driver data. A concurrent migration
	 * might have already freed that page, and the virtual address might
	 * no longer be backed by it, so it should not be modified by the
	 * callback.
	 *
	 * Note that the mmap semaphore is held in read mode at least when
	 * this callback occurs, hence the vma is valid upon callback entry.
	 */
	vm_fault_t (*fault)(struct hmm_devmem *devmem,
		     struct vm_area_struct *vma,
		     unsigned long addr,
		     const struct page *page,
		     unsigned int flags,
		     pmd_t *pmdp);
};

/*
 * struct hmm_devmem - track device memory
 *
 * @completion: completion object for device memory
 * @pfn_first: first pfn for this resource (set by hmm_devmem_add())
 * @pfn_last: last pfn for this resource (set by hmm_devmem_add())
 * @resource: IO resource reserved for this chunk of memory
 * @pagemap: device page map for that chunk
 * @device: device to bind resource to
 * @ops: memory operations callbacks
 * @ref: per CPU refcount
 * @page_fault: callback when CPU fault on an unaddressable device page
 *
 * This is a helper structure for device drivers that do not wish to implement
 * the gory details related to hotplugging new memory and allocating struct
 * pages.
 *
 * Device drivers can directly use ZONE_DEVICE memory on their own if they
 * wish to do so.
 *
 * The page_fault() callback must migrate the page back, from device memory to
 * system memory, so that the CPU can access it. This might fail for various
 * reasons (device issues, device has been unplugged, ...). When such an error
 * condition happens, the page_fault() callback must return VM_FAULT_SIGBUS and
 * set the CPU page table entry to "poisoned".
 *
 * Note that because memory cgroup charges are transferred to the device memory,
 * this should never fail due to memory restrictions. However, allocation
 * of a regular system page might still fail because we are out of memory. If
 * that happens, the page_fault() callback must return VM_FAULT_OOM.
 *
 * The page_fault() callback can also try to migrate back multiple pages in one
 * chunk, as an optimization. It must, however, prioritize the faulting address
 * over all the others.
 */
typedef vm_fault_t (*dev_page_fault_t)(struct vm_area_struct *vma,
				unsigned long addr,
				const struct page *page,
				unsigned int flags,
				pmd_t *pmdp);

struct hmm_devmem {
	struct completion		completion;
	unsigned long			pfn_first;
	unsigned long			pfn_last;
	struct resource			*resource;
	struct device			*device;
	struct dev_pagemap		pagemap;
	const struct hmm_devmem_ops	*ops;
	struct percpu_ref		ref;
	dev_page_fault_t		page_fault;
};

/*
 * To add (hotplug) device memory, HMM assumes that there is no real resource
 * that reserves a range in the physical address space (this is intended to be
 * used for unaddressable device memory). It will reserve a physical range big
 * enough and allocate struct pages for it.
 *
 * The device driver can wrap the hmm_devmem struct inside a private device
 * driver struct.
 */
struct hmm_devmem *hmm_devmem_add(const struct hmm_devmem_ops *ops,
				  struct device *device,
				  unsigned long size);
struct hmm_devmem *hmm_devmem_add_resource(const struct hmm_devmem_ops *ops,
					   struct device *device,
					   struct resource *res);

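/*
 * A driver would typically tie the pieces together like this (a sketch;
 * my_devmem_free(), my_devmem_fault() and the 1GiB size are illustrative):
 *
 *	static const struct hmm_devmem_ops my_devmem_ops = {
 *		.free = my_devmem_free,
 *		.fault = my_devmem_fault,
 *	};
 *
 *	devmem = hmm_devmem_add(&my_devmem_ops, device, 1UL << 30);
 *	if (IS_ERR(devmem))
 *		return PTR_ERR(devmem);
 *	// device pages now span [devmem->pfn_first, devmem->pfn_last]
 */
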
/*
 * hmm_devmem_page_set_drvdata - set per-page driver data field
 *
 * @page: pointer to struct page
 * @data: driver data value to set
 *
 * Because the page cannot be on an lru list, we have an unsigned long that
 * the driver can use to store a per-page field. This is just a simple helper
 * to do that.
 */
static inline void hmm_devmem_page_set_drvdata(struct page *page,
					       unsigned long data)
{
	page->hmm_data = data;
}

/*
 * hmm_devmem_page_get_drvdata - get per page driver data field
 *
 * @page: pointer to struct page
 * Return: driver data value
 */
static inline unsigned long hmm_devmem_page_get_drvdata(const struct page *page)
{
	return page->hmm_data;
}


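/*
 * For example, a driver could stash a pointer to its own per-page state in
 * this field (a sketch; struct my_page_state is hypothetical):
 *
 *	hmm_devmem_page_set_drvdata(page, (unsigned long)state);
 *	...
 *	state = (struct my_page_state *)hmm_devmem_page_get_drvdata(page);
 */
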
/*
 * struct hmm_device - fake device to hang device memory onto
 *
 * @device: device struct
 * @minor: device minor number
 */
struct hmm_device {
	struct device		device;
	unsigned int		minor;
};

/*
 * A device driver that wants to handle multiple devices' memory through a
 * single fake device can use hmm_device to do so. This is purely a helper;
 * it is not strictly needed in order to make use of any HMM functionality.
 */
struct hmm_device *hmm_device_new(void *drvdata);
void hmm_device_put(struct hmm_device *hmm_device);
#endif /* CONFIG_DEVICE_PRIVATE || CONFIG_DEVICE_PUBLIC */
#else /* IS_ENABLED(CONFIG_HMM) */
static inline void hmm_mm_destroy(struct mm_struct *mm) {}
static inline void hmm_mm_init(struct mm_struct *mm) {}
#endif /* IS_ENABLED(CONFIG_HMM) */

#endif /* LINUX_HMM_H */