/*
 * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mm.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include "ipath_kernel.h"

static void __ipath_release_user_pages(struct page **p, size_t num_pages,
				       int dirty)
{
	size_t i;

	for (i = 0; i < num_pages; i++) {
		ipath_cdbg(MM, "%lu/%lu put_page %p\n", (unsigned long) i,
			   (unsigned long) num_pages, p[i]);
		if (dirty)
			set_page_dirty_lock(p[i]);
		put_page(p[i]);
	}
}

/* call with current->mm->mmap_sem held for writing */
static int __ipath_get_user_pages(unsigned long start_page, size_t num_pages,
				  struct page **p, struct vm_area_struct **vma)
{
	unsigned long lock_limit;
	size_t got;
	int ret;

	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

	if (num_pages > lock_limit) {
		ret = -ENOMEM;
		goto bail;
	}

	ipath_cdbg(VERBOSE, "pin %lx pages from vaddr %lx\n",
		   (unsigned long) num_pages, start_page);

	for (got = 0; got < num_pages; got += ret) {
		ret = get_user_pages(current, current->mm,
				     start_page + got * PAGE_SIZE,
				     num_pages - got, 1, 1,
				     p + got, vma);
		if (ret < 0)
			goto bail_release;
	}

	current->mm->pinned_vm += num_pages;

	ret = 0;
	goto bail;

bail_release:
	__ipath_release_user_pages(p, got, 0);
bail:
	return ret;
}

/**
 * ipath_map_page - a safety wrapper around pci_map_page()
 *
 * A dma_addr of all 0's is interpreted by the chip as "disabled".
 * Unfortunately, it can also be a valid dma_addr returned on some
 * architectures.
 *
 * The powerpc iommu assigns dma_addrs in ascending order, so we don't
 * have to bother with retries or mapping a dummy page to ensure we
 * don't just get the same mapping again.
 *
 * I'm sure we won't be so lucky with other IOMMUs, so FIXME.
 */
dma_addr_t ipath_map_page(struct pci_dev *hwdev, struct page *page,
			  unsigned long offset, size_t size, int direction)
{
	dma_addr_t phys;

	phys = pci_map_page(hwdev, page, offset, size, direction);

	if (phys == 0) {
		pci_unmap_page(hwdev, phys, size, direction);
		phys = pci_map_page(hwdev, page, offset, size, direction);
		/*
		 * FIXME: If we get 0 again, we should keep this page,
		 * map another, then free the 0 page.
		 */
	}

	return phys;
}
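
/*
 * Illustrative caller sketch (added for this write-up, not part of the
 * original driver): map a pinned page through the safety wrapper and
 * treat a dma_addr of 0 as a failure, since the chip reads 0 as
 * "disabled".  The helper name, "pdev", and the PCI_DMA_FROMDEVICE
 * direction are assumptions for the example.
 */
static int example_map_page_for_dma(struct pci_dev *pdev, struct page *page,
				    dma_addr_t *dma_out)
{
	dma_addr_t addr;

	addr = ipath_map_page(pdev, page, 0, PAGE_SIZE, PCI_DMA_FROMDEVICE);
	if (addr == 0)
		return -ENOMEM;	/* wrapper retried once and still got 0 */
	*dma_out = addr;
	return 0;
}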

/**
 * ipath_map_single - a safety wrapper around pci_map_single()
 *
 * Same idea as ipath_map_page().
 */
dma_addr_t ipath_map_single(struct pci_dev *hwdev, void *ptr, size_t size,
			    int direction)
{
	dma_addr_t phys;

	phys = pci_map_single(hwdev, ptr, size, direction);

	if (phys == 0) {
		pci_unmap_single(hwdev, phys, size, direction);
		phys = pci_map_single(hwdev, ptr, size, direction);
		/*
		 * FIXME: If we get 0 again, we should keep this page,
		 * map another, then free the 0 page.
		 */
	}

	return phys;
}
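
/*
 * Illustrative caller sketch (added commentary, not part of the original
 * driver): the single-buffer wrapper works the same way, so the caller
 * only needs the zero check.  "pdev", "buf", and PCI_DMA_TODEVICE are
 * assumptions for the example.
 */
static int example_map_buffer_for_dma(struct pci_dev *pdev, void *buf,
				      size_t len, dma_addr_t *dma_out)
{
	dma_addr_t addr;

	addr = ipath_map_single(pdev, buf, len, PCI_DMA_TODEVICE);
	if (addr == 0)
		return -ENOMEM;	/* wrapper retried once and still got 0 */
	*dma_out = addr;
	return 0;
}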

/**
 * ipath_get_user_pages - lock user pages into memory
 * @start_page: the start page
 * @num_pages: the number of pages
 * @p: the output page structures
 *
 * This function takes a given start page (a page-aligned user virtual
 * address) and pins it along with the rest of the specified number of
 * pages.  For now, num_pages is always 1, but that will probably change
 * at some point (because the caller is doing expected sends on a single
 * virtually contiguous buffer, so we can do all pages at once).
 */
int ipath_get_user_pages(unsigned long start_page, size_t num_pages,
			 struct page **p)
{
	int ret;

	down_write(&current->mm->mmap_sem);

	ret = __ipath_get_user_pages(start_page, num_pages, p, NULL);

	up_write(&current->mm->mmap_sem);

	return ret;
}
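
/*
 * Illustrative caller sketch (added commentary, not part of the original
 * driver): pin one page at a page-aligned user virtual address, use it,
 * then unpin it with the matching release helper below.  "uaddr" is an
 * assumption for the example.
 */
static int example_pin_single_page(unsigned long uaddr)
{
	struct page *page;
	int ret;

	ret = ipath_get_user_pages(uaddr, 1, &page);
	if (ret)
		return ret;

	/* ... DMA to/from the pinned page would happen here ... */

	ipath_release_user_pages(&page, 1);
	return 0;
}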

void ipath_release_user_pages(struct page **p, size_t num_pages)
{
	down_write(&current->mm->mmap_sem);

	__ipath_release_user_pages(p, num_pages, 1);

	current->mm->pinned_vm -= num_pages;

	up_write(&current->mm->mmap_sem);
}

struct ipath_user_pages_work {
	struct work_struct work;
	struct mm_struct *mm;
	unsigned long num_pages;
};

static void user_pages_account(struct work_struct *_work)
{
	struct ipath_user_pages_work *work =
		container_of(_work, struct ipath_user_pages_work, work);

	down_write(&work->mm->mmap_sem);
	work->mm->pinned_vm -= work->num_pages;
	up_write(&work->mm->mmap_sem);
	mmput(work->mm);
	kfree(work);
}

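/*
 * Note (added commentary, not in the original source): the pinned_vm
 * accounting below is deferred to a workqueue, presumably because this
 * runs on the file-close path where taking mmap_sem directly could
 * deadlock; the mm is held with get_task_mm() so it stays valid until
 * the work item runs.
 */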
void ipath_release_user_pages_on_close(struct page **p, size_t num_pages)
{
	struct ipath_user_pages_work *work;
	struct mm_struct *mm;

	__ipath_release_user_pages(p, num_pages, 1);

	mm = get_task_mm(current);
	if (!mm)
		return;

	work = kmalloc(sizeof(*work), GFP_KERNEL);
	if (!work)
		goto bail_mm;

	INIT_WORK(&work->work, user_pages_account);
	work->mm = mm;
	work->num_pages = num_pages;

	queue_work(ib_wq, &work->work);
	return;

bail_mm:
	mmput(mm);
	return;
}