/*
 * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/device.h>

#include "qib.h"

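/*
 * Unpin pages previously pinned with pin_user_pages(), marking them
 * dirty first when @dirty is set.
 */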
static void __qib_release_user_pages(struct page **p, size_t num_pages,
				     int dirty)
{
	unpin_user_pages_dirty_lock(p, num_pages, dirty);
}

/*
 * qib_map_page - a safety wrapper around dma_map_page()
 *
 * A dma_addr of all 0's is interpreted by the chip as "disabled".
 * Unfortunately, it can also be a valid dma_addr returned on some
 * architectures.
 *
 * The powerpc iommu assigns dma_addrs in ascending order, so we don't
 * have to bother with retries or mapping a dummy page to ensure we
 * don't just get the same mapping again.
 *
 * I'm sure we won't be so lucky with other iommu's, so FIXME.
 */
int qib_map_page(struct pci_dev *hwdev, struct page *page, dma_addr_t *daddr)
{
	dma_addr_t phys;

	phys = dma_map_page(&hwdev->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
	if (dma_mapping_error(&hwdev->dev, phys))
		return -ENOMEM;

	if (!phys) {
		dma_unmap_page(&hwdev->dev, phys, PAGE_SIZE, DMA_FROM_DEVICE);
		phys = dma_map_page(&hwdev->dev, page, 0, PAGE_SIZE,
				    DMA_FROM_DEVICE);
		if (dma_mapping_error(&hwdev->dev, phys))
			return -ENOMEM;
		/*
		 * FIXME: If we get 0 again, we should keep this page,
		 * map another, then free the 0 page.
		 */
	}
	*daddr = phys;
	return 0;
}
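
/*
 * A minimal usage sketch for qib_map_page() (hypothetical caller, not
 * code from this driver; "pcidev" and "page" stand in for whatever the
 * real caller holds):
 *
 *	dma_addr_t daddr;
 *
 *	if (qib_map_page(pcidev, page, &daddr))
 *		return -ENOMEM;
 *
 * On success the mapping has been retried once if it first came back
 * as 0, so the chip should not see the "disabled" value except in the
 * double-zero case the FIXME above describes.
 */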

/**
 * qib_get_user_pages - lock user pages into memory
 * @start_page: the start page
 * @num_pages: the number of pages
 * @p: the output page structures
 *
 * This function takes a given start page (a page-aligned user virtual
 * address) and pins it and the following specified number of pages.  For
 * now, num_pages is always 1, but that will probably change at some point:
 * the caller is doing expected sends on a single virtually contiguous
 * buffer, so we could pin all of its pages at once.
 */
int qib_get_user_pages(unsigned long start_page, size_t num_pages,
		       struct page **p)
{
	unsigned long locked, lock_limit;
	size_t got;
	int ret;

	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	locked = atomic64_add_return(num_pages, &current->mm->pinned_vm);

	if (locked > lock_limit && !capable(CAP_IPC_LOCK)) {
		ret = -ENOMEM;
		goto bail;
	}

	mmap_read_lock(current->mm);
	for (got = 0; got < num_pages; got += ret) {
		ret = pin_user_pages(start_page + got * PAGE_SIZE,
				     num_pages - got,
				     FOLL_LONGTERM | FOLL_WRITE,
				     p + got);
		if (ret < 0) {
			mmap_read_unlock(current->mm);
			goto bail_release;
		}
	}
	mmap_read_unlock(current->mm);

	return 0;
bail_release:
	__qib_release_user_pages(p, got, 0);
bail:
	atomic64_sub(num_pages, &current->mm->pinned_vm);
	return ret;
}
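
/*
 * A sketch of the expected pin/release pairing (hypothetical caller;
 * "uaddr" and the buffer handling are assumptions, not code from this
 * driver):
 *
 *	struct page *pages[1];
 *
 *	if (!qib_get_user_pages(uaddr & PAGE_MASK, 1, pages)) {
 *		... DMA to or from pages[0] ...
 *		qib_release_user_pages(pages, 1);
 *	}
 */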
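/*
 * Release pages pinned by qib_get_user_pages(): unpin them, mark them
 * dirty, and drop the pinned_vm accounting taken when they were pinned.
 */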
void qib_release_user_pages(struct page **p, size_t num_pages)
{
	__qib_release_user_pages(p, num_pages, 1);

	/* during close after signal, mm can be NULL */
	if (current->mm)
		atomic64_sub(num_pages, &current->mm->pinned_vm);
}