/*
 * NOTE(scrape): the lines above/below are web-viewer chrome, not source.
 * What follows is drivers/infiniband/sw/rdmavt/mmap.c as of kernel v6.13.7.
 */
  1// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
  2/*
  3 * Copyright(c) 2016 Intel Corporation.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  4 */
  5
  6#include <linux/slab.h>
  7#include <linux/vmalloc.h>
  8#include <linux/mm.h>
  9#include <rdma/uverbs_ioctl.h>
 10#include "mmap.h"
 11
 12/**
 13 * rvt_mmap_init - init link list and lock for mem map
 14 * @rdi: rvt dev struct
 15 */
 16void rvt_mmap_init(struct rvt_dev_info *rdi)
 17{
 18	INIT_LIST_HEAD(&rdi->pending_mmaps);
 19	spin_lock_init(&rdi->pending_lock);
 20	rdi->mmap_offset = PAGE_SIZE;
 21	spin_lock_init(&rdi->mmap_offset_lock);
 22}
 23
 24/**
 25 * rvt_release_mmap_info - free mmap info structure
 26 * @ref: a pointer to the kref within struct rvt_mmap_info
 27 */
/**
 * rvt_release_mmap_info - free mmap info structure
 * @ref: a pointer to the kref within struct rvt_mmap_info
 *
 * kref release callback, invoked once the last reference to the
 * rvt_mmap_info is dropped.  Unlinks the entry from the device's
 * pending-mmap list (harmless if rvt_mmap() already removed it via
 * list_del_init) and frees both the backing object and the info
 * structure itself.
 */
void rvt_release_mmap_info(struct kref *ref)
{
	struct rvt_mmap_info *ip =
		container_of(ref, struct rvt_mmap_info, ref);
	struct rvt_dev_info *rdi = ib_to_rvt(ip->context->device);

	/* Remove from the pending list under the same lock rvt_mmap() uses. */
	spin_lock_irq(&rdi->pending_lock);
	list_del(&ip->pending_mmaps);
	spin_unlock_irq(&rdi->pending_lock);

	/* obj is mapped with remap_vmalloc_range() elsewhere, so vfree here. */
	vfree(ip->obj);
	kfree(ip);
}
 41
 42static void rvt_vma_open(struct vm_area_struct *vma)
 43{
 44	struct rvt_mmap_info *ip = vma->vm_private_data;
 45
 46	kref_get(&ip->ref);
 47}
 48
 49static void rvt_vma_close(struct vm_area_struct *vma)
 50{
 51	struct rvt_mmap_info *ip = vma->vm_private_data;
 52
 53	kref_put(&ip->ref, rvt_release_mmap_info);
 54}
 55
/* Lifetime hooks for VMAs backed by an rvt_mmap_info (refcount open/close). */
static const struct vm_operations_struct rvt_vm_ops = {
	.open = rvt_vma_open,
	.close = rvt_vma_close,
};
 60
 61/**
 62 * rvt_mmap - create a new mmap region
 63 * @context: the IB user context of the process making the mmap() call
 64 * @vma: the VMA to be initialized
 65 *
 66 * Return: zero if the mmap is OK. Otherwise, return an errno.
 67 */
/**
 * rvt_mmap - create a new mmap region
 * @context: the IB user context of the process making the mmap() call
 * @vma: the VMA to be initialized
 *
 * Matches the (context, offset) pair encoded in the mmap() call against
 * the device's list of objects pending an mmap, and on a hit remaps the
 * object's backing pages into @vma.
 *
 * Return: zero if the mmap is OK. Otherwise, return an errno.
 */
int rvt_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
	struct rvt_dev_info *rdi = ib_to_rvt(context->device);
	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
	unsigned long size = vma->vm_end - vma->vm_start;
	struct rvt_mmap_info *ip, *pp;
	int ret = -EINVAL;

	/*
	 * Search the device's list of objects waiting for a mmap call.
	 * Normally, this list is very short since a call to create a
	 * CQ, QP, or SRQ is soon followed by a call to mmap().
	 */
	spin_lock_irq(&rdi->pending_lock);
	list_for_each_entry_safe(ip, pp, &rdi->pending_mmaps,
				 pending_mmaps) {
		/* Only the creator is allowed to mmap the object */
		if (context != ip->context || (__u64)offset != ip->offset)
			continue;
		/* Don't allow a mmap larger than the object. */
		if (size > ip->size)
			break;

		/*
		 * Claim the entry (list_del_init so the later release's
		 * list_del stays safe), then drop the lock before the remap.
		 */
		list_del_init(&ip->pending_mmaps);
		spin_unlock_irq(&rdi->pending_lock);

		ret = remap_vmalloc_range(vma, ip->obj, 0);
		if (ret)
			goto done;
		/* Wire up refcounting hooks and take this VMA's reference. */
		vma->vm_ops = &rvt_vm_ops;
		vma->vm_private_data = ip;
		rvt_vma_open(vma);
		goto done;
	}
	spin_unlock_irq(&rdi->pending_lock);
done:
	return ret;
}
106
107/**
108 * rvt_create_mmap_info - allocate information for hfi1_mmap
109 * @rdi: rvt dev struct
110 * @size: size in bytes to map
111 * @udata: user data (must be valid!)
112 * @obj: opaque pointer to a cq, wq etc
113 *
114 * Return: rvt_mmap struct on success, ERR_PTR on failure
115 */
/**
 * rvt_create_mmap_info - allocate information for hfi1_mmap
 * @rdi: rvt dev struct
 * @size: size in bytes to map
 * @udata: user data (must be valid!)
 * @obj: opaque pointer to a cq, wq etc
 *
 * Reserves a unique device-wide offset that userspace later passes back
 * as the mmap() file offset, and records the creating context so
 * rvt_mmap() can verify ownership.
 *
 * Return: rvt_mmap struct on success, ERR_PTR on failure
 */
struct rvt_mmap_info *rvt_create_mmap_info(struct rvt_dev_info *rdi, u32 size,
					   struct ib_udata *udata, void *obj)
{
	struct rvt_mmap_info *ip;

	if (!udata)
		return ERR_PTR(-EINVAL);

	ip = kmalloc_node(sizeof(*ip), GFP_KERNEL, rdi->dparms.node);
	if (!ip)
		return ERR_PTR(-ENOMEM);

	size = PAGE_ALIGN(size);

	/*
	 * Hand out the next free offset, SHMLBA-aligned (the arch's
	 * shared-mapping alignment).  mmap_offset == 0 means the counter
	 * wrapped; restart past the zero page.
	 */
	spin_lock_irq(&rdi->mmap_offset_lock);
	if (rdi->mmap_offset == 0)
		rdi->mmap_offset = ALIGN(PAGE_SIZE, SHMLBA);
	ip->offset = rdi->mmap_offset;
	rdi->mmap_offset += ALIGN(size, SHMLBA);
	spin_unlock_irq(&rdi->mmap_offset_lock);

	INIT_LIST_HEAD(&ip->pending_mmaps);
	ip->size = size;
	/* The creating ucontext is reached via the enclosing attr bundle. */
	ip->context =
		container_of(udata, struct uverbs_attr_bundle, driver_udata)
			->context;
	ip->obj = obj;
	kref_init(&ip->ref);

	return ip;
}
147
148/**
149 * rvt_update_mmap_info - update a mem map
150 * @rdi: rvt dev struct
151 * @ip: mmap info pointer
152 * @size: size to grow by
153 * @obj: opaque pointer to cq, wq, etc.
154 */
/**
 * rvt_update_mmap_info - update a mem map
 * @rdi: rvt dev struct
 * @ip: mmap info pointer
 * @size: size to grow by
 * @obj: opaque pointer to cq, wq, etc.
 *
 * Re-keys an existing mmap info for a resized object: assigns a fresh
 * device-wide offset and swaps in the new backing object.
 * NOTE(review): the previous ip->obj is not freed here — presumably the
 * caller disposes of it; confirm against callers.
 */
void rvt_update_mmap_info(struct rvt_dev_info *rdi, struct rvt_mmap_info *ip,
			  u32 size, void *obj)
{
	size = PAGE_ALIGN(size);

	/* Take a new offset; 0 means the counter wrapped, restart past page 0. */
	spin_lock_irq(&rdi->mmap_offset_lock);
	if (rdi->mmap_offset == 0)
		rdi->mmap_offset = PAGE_SIZE;
	ip->offset = rdi->mmap_offset;
	rdi->mmap_offset += size;
	spin_unlock_irq(&rdi->mmap_offset_lock);

	ip->size = size;
	ip->obj = obj;
}
/*
 * NOTE(scrape): below is a second copy of the same file as of kernel
 * v4.10.11 (pre-SPDX, full dual-license header; pre-ib_udata API).
 */
  1/*
  2 * Copyright(c) 2016 Intel Corporation.
  3 *
  4 * This file is provided under a dual BSD/GPLv2 license.  When using or
  5 * redistributing this file, you may do so under either license.
  6 *
  7 * GPL LICENSE SUMMARY
  8 *
  9 * This program is free software; you can redistribute it and/or modify
 10 * it under the terms of version 2 of the GNU General Public License as
 11 * published by the Free Software Foundation.
 12 *
 13 * This program is distributed in the hope that it will be useful, but
 14 * WITHOUT ANY WARRANTY; without even the implied warranty of
 15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 16 * General Public License for more details.
 17 *
 18 * BSD LICENSE
 19 *
 20 * Redistribution and use in source and binary forms, with or without
 21 * modification, are permitted provided that the following conditions
 22 * are met:
 23 *
 24 *  - Redistributions of source code must retain the above copyright
 25 *    notice, this list of conditions and the following disclaimer.
 26 *  - Redistributions in binary form must reproduce the above copyright
 27 *    notice, this list of conditions and the following disclaimer in
 28 *    the documentation and/or other materials provided with the
 29 *    distribution.
 30 *  - Neither the name of Intel Corporation nor the names of its
 31 *    contributors may be used to endorse or promote products derived
 32 *    from this software without specific prior written permission.
 33 *
 34 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 35 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 36 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 37 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 38 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 39 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 40 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 41 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 42 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 43 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 44 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 45 *
 46 */
 47
 48#include <linux/slab.h>
 49#include <linux/vmalloc.h>
 50#include <linux/mm.h>
 51#include <asm/pgtable.h>
 52#include "mmap.h"
 53
 54/**
 55 * rvt_mmap_init - init link list and lock for mem map
 56 * @rdi: rvt dev struct
 57 */
 58void rvt_mmap_init(struct rvt_dev_info *rdi)
 59{
 60	INIT_LIST_HEAD(&rdi->pending_mmaps);
 61	spin_lock_init(&rdi->pending_lock);
 62	rdi->mmap_offset = PAGE_SIZE;
 63	spin_lock_init(&rdi->mmap_offset_lock);
 64}
 65
 66/**
 67 * rvt_release_mmap_info - free mmap info structure
 68 * @ref: a pointer to the kref within struct rvt_mmap_info
 69 */
/**
 * rvt_release_mmap_info - free mmap info structure
 * @ref: a pointer to the kref within struct rvt_mmap_info
 *
 * kref release callback, invoked when the last reference is dropped.
 * Unlinks the entry from the device's pending-mmap list (harmless if
 * rvt_mmap() already removed it via list_del_init) and frees both the
 * backing object and the info structure.
 */
void rvt_release_mmap_info(struct kref *ref)
{
	struct rvt_mmap_info *ip =
		container_of(ref, struct rvt_mmap_info, ref);
	struct rvt_dev_info *rdi = ib_to_rvt(ip->context->device);

	/* Remove from the pending list under the same lock rvt_mmap() uses. */
	spin_lock_irq(&rdi->pending_lock);
	list_del(&ip->pending_mmaps);
	spin_unlock_irq(&rdi->pending_lock);

	/* obj is mapped with remap_vmalloc_range() elsewhere, so vfree here. */
	vfree(ip->obj);
	kfree(ip);
}
 83
 84static void rvt_vma_open(struct vm_area_struct *vma)
 85{
 86	struct rvt_mmap_info *ip = vma->vm_private_data;
 87
 88	kref_get(&ip->ref);
 89}
 90
 91static void rvt_vma_close(struct vm_area_struct *vma)
 92{
 93	struct rvt_mmap_info *ip = vma->vm_private_data;
 94
 95	kref_put(&ip->ref, rvt_release_mmap_info);
 96}
 97
/* Lifetime hooks for VMAs backed by an rvt_mmap_info (refcount open/close). */
static const struct vm_operations_struct rvt_vm_ops = {
	.open = rvt_vma_open,
	.close = rvt_vma_close,
};
102
103/**
104 * rvt_mmap - create a new mmap region
105 * @context: the IB user context of the process making the mmap() call
106 * @vma: the VMA to be initialized
107 *
108 * Return: zero if the mmap is OK. Otherwise, return an errno.
109 */
/**
 * rvt_mmap - create a new mmap region
 * @context: the IB user context of the process making the mmap() call
 * @vma: the VMA to be initialized
 *
 * Matches the (context, offset) pair encoded in the mmap() call against
 * the device's list of objects pending an mmap, and on a hit remaps the
 * object's backing pages into @vma.
 *
 * Return: zero if the mmap is OK. Otherwise, return an errno.
 */
int rvt_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
	struct rvt_dev_info *rdi = ib_to_rvt(context->device);
	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
	unsigned long size = vma->vm_end - vma->vm_start;
	struct rvt_mmap_info *ip, *pp;
	int ret = -EINVAL;

	/*
	 * Search the device's list of objects waiting for a mmap call.
	 * Normally, this list is very short since a call to create a
	 * CQ, QP, or SRQ is soon followed by a call to mmap().
	 */
	spin_lock_irq(&rdi->pending_lock);
	list_for_each_entry_safe(ip, pp, &rdi->pending_mmaps,
				 pending_mmaps) {
		/* Only the creator is allowed to mmap the object */
		if (context != ip->context || (__u64)offset != ip->offset)
			continue;
		/* Don't allow a mmap larger than the object. */
		if (size > ip->size)
			break;

		/*
		 * Claim the entry (list_del_init so the later release's
		 * list_del stays safe), then drop the lock before the remap.
		 */
		list_del_init(&ip->pending_mmaps);
		spin_unlock_irq(&rdi->pending_lock);

		ret = remap_vmalloc_range(vma, ip->obj, 0);
		if (ret)
			goto done;
		/* Wire up refcounting hooks and take this VMA's reference. */
		vma->vm_ops = &rvt_vm_ops;
		vma->vm_private_data = ip;
		rvt_vma_open(vma);
		goto done;
	}
	spin_unlock_irq(&rdi->pending_lock);
done:
	return ret;
}
148
149/**
150 * rvt_create_mmap_info - allocate information for hfi1_mmap
151 * @rdi: rvt dev struct
152 * @size: size in bytes to map
153 * @context: user context
154 * @obj: opaque pointer to a cq, wq etc
155 *
156 * Return: rvt_mmap struct on success
157 */
/**
 * rvt_create_mmap_info - allocate information for hfi1_mmap
 * @rdi: rvt dev struct
 * @size: size in bytes to map
 * @context: user context
 * @obj: opaque pointer to a cq, wq etc
 *
 * Reserves a unique device-wide offset that userspace later passes back
 * as the mmap() file offset, and records the creating context so
 * rvt_mmap() can verify ownership.
 *
 * Return: rvt_mmap struct on success, or NULL if allocation fails
 */
struct rvt_mmap_info *rvt_create_mmap_info(struct rvt_dev_info *rdi,
					   u32 size,
					   struct ib_ucontext *context,
					   void *obj)
{
	struct rvt_mmap_info *ip;

	ip = kmalloc_node(sizeof(*ip), GFP_KERNEL, rdi->dparms.node);
	if (!ip)
		/* NULL on allocation failure; callers must check. */
		return ip;

	size = PAGE_ALIGN(size);

	/*
	 * Hand out the next free offset.  mmap_offset == 0 means the
	 * counter wrapped; restart past the zero page.
	 */
	spin_lock_irq(&rdi->mmap_offset_lock);
	if (rdi->mmap_offset == 0)
		rdi->mmap_offset = PAGE_SIZE;
	ip->offset = rdi->mmap_offset;
	rdi->mmap_offset += size;
	spin_unlock_irq(&rdi->mmap_offset_lock);

	INIT_LIST_HEAD(&ip->pending_mmaps);
	ip->size = size;
	ip->context = context;
	ip->obj = obj;
	kref_init(&ip->ref);

	return ip;
}
186
187/**
188 * rvt_update_mmap_info - update a mem map
189 * @rdi: rvt dev struct
190 * @ip: mmap info pointer
191 * @size: size to grow by
192 * @obj: opaque pointer to cq, wq, etc.
193 */
/**
 * rvt_update_mmap_info - update a mem map
 * @rdi: rvt dev struct
 * @ip: mmap info pointer
 * @size: size to grow by
 * @obj: opaque pointer to cq, wq, etc.
 *
 * Re-keys an existing mmap info for a resized object: assigns a fresh
 * device-wide offset and swaps in the new backing object.
 * NOTE(review): the previous ip->obj is not freed here — presumably the
 * caller disposes of it; confirm against callers.
 */
void rvt_update_mmap_info(struct rvt_dev_info *rdi, struct rvt_mmap_info *ip,
			  u32 size, void *obj)
{
	size = PAGE_ALIGN(size);

	/* Take a new offset; 0 means the counter wrapped, restart past page 0. */
	spin_lock_irq(&rdi->mmap_offset_lock);
	if (rdi->mmap_offset == 0)
		rdi->mmap_offset = PAGE_SIZE;
	ip->offset = rdi->mmap_offset;
	rdi->mmap_offset += size;
	spin_unlock_irq(&rdi->mmap_offset_lock);

	ip->size = size;
	ip->obj = obj;
}