Linux Audio

Check our new training course

Loading...
Version shown below: Linux kernel v6.2
  1// SPDX-License-Identifier: MIT
  2/*
  3 * Copyright © 2020 Intel Corporation
  4 */
  5
  6#include <linux/iosys-map.h>
  7#include <linux/mm.h>
  8#include <linux/pagemap.h>
  9#include <linux/shmem_fs.h>
 10
 11#include "gem/i915_gem_object.h"
 12#include "gem/i915_gem_lmem.h"
 13#include "shmem_utils.h"
 14
 15struct file *shmem_create_from_data(const char *name, void *data, size_t len)
 16{
 17	struct file *file;
 18	int err;
 19
 20	file = shmem_file_setup(name, PAGE_ALIGN(len), VM_NORESERVE);
 21	if (IS_ERR(file))
 22		return file;
 23
 24	err = shmem_write(file, 0, data, len);
 25	if (err) {
 26		fput(file);
 27		return ERR_PTR(err);
 28	}
 29
 30	return file;
 31}
 32
 33struct file *shmem_create_from_object(struct drm_i915_gem_object *obj)
 34{
 35	struct file *file;
 36	void *ptr;
 37
 38	if (i915_gem_object_is_shmem(obj)) {
 39		file = obj->base.filp;
 40		atomic_long_inc(&file->f_count);
 41		return file;
 42	}
 43
 44	ptr = i915_gem_object_pin_map_unlocked(obj, i915_gem_object_is_lmem(obj) ?
 45						I915_MAP_WC : I915_MAP_WB);
 46	if (IS_ERR(ptr))
 47		return ERR_CAST(ptr);
 48
 49	file = shmem_create_from_data("", ptr, obj->base.size);
 50	i915_gem_object_unpin_map(obj);
 51
 52	return file;
 53}
 54
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
/*
 * shmem_pin_map - pin every page of a shmem file and map them into a
 * contiguous kernel virtual range.
 * @file: shmem file to map (size taken from its inode)
 *
 * Also marks the mapping unevictable so the pinned pages are skipped by
 * reclaim. Returns the vaddr, or NULL on allocation/mapping failure.
 * Unpin with shmem_unpin_map().
 */
void *shmem_pin_map(struct file *file)
{
	struct page **pages;
	size_t n_pages, i;
	void *vaddr;

	n_pages = file->f_mapping->host->i_size >> PAGE_SHIFT;
	pages = kvmalloc_array(n_pages, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return NULL;

	/* Each successful lookup returns the page with a reference held. */
	for (i = 0; i < n_pages; i++) {
		pages[i] = shmem_read_mapping_page_gfp(file->f_mapping, i,
						       GFP_KERNEL);
		if (IS_ERR(pages[i]))
			goto err_page;
	}

	/*
	 * VM_MAP_PUT_PAGES transfers ownership of both the page references
	 * and the pages[] array to the vmap; a later vfree() releases them,
	 * so on success we must not put the pages or kvfree() here.
	 */
	vaddr = vmap(pages, n_pages, VM_MAP_PUT_PAGES, PAGE_KERNEL);
	if (!vaddr)
		goto err_page;
	mapping_set_unevictable(file->f_mapping);
	return vaddr;
err_page:
	/* i pages were looked up successfully; drop those refs ourselves. */
	while (i--)
		put_page(pages[i]);
	kvfree(pages);
	return NULL;
}
 84
 85void shmem_unpin_map(struct file *file, void *ptr)
 86{
 87	mapping_clear_unevictable(file->f_mapping);
 88	vfree(ptr);
 89}
 90
/*
 * __shmem_rw - copy bytes between a kernel buffer and a shmem file,
 * page by page.
 * @file: shmem file to access
 * @off: byte offset into the file
 * @ptr: kernel buffer to copy from (@write) or to (!@write)
 * @len: number of bytes to copy
 * @write: true to write @ptr into the file, false to read from it
 *
 * Returns 0 on success or a negative errno from the page lookup.
 */
static int __shmem_rw(struct file *file, loff_t off,
		      void *ptr, size_t len,
		      bool write)
{
	unsigned long pfn;

	for (pfn = off >> PAGE_SHIFT; len; pfn++) {
		/* Clamp this chunk to the remainder of the current page. */
		unsigned int this =
			min_t(size_t, PAGE_SIZE - offset_in_page(off), len);
		struct page *page;
		void *vaddr;

		page = shmem_read_mapping_page_gfp(file->f_mapping, pfn,
						   GFP_KERNEL);
		if (IS_ERR(page))
			return PTR_ERR(page);

		vaddr = kmap(page);
		if (write) {
			memcpy(vaddr + offset_in_page(off), ptr, this);
			/* Writes bypass the pagecache ops; mark dirty so
			 * the data survives reclaim/swap. */
			set_page_dirty(page);
		} else {
			memcpy(ptr, vaddr + offset_in_page(off), this);
		}
		mark_page_accessed(page);
		kunmap(page);
		put_page(page);

		len -= this;
		ptr += this;
		/* Only the first page has an intra-page offset. */
		off = 0;
	}

	return 0;
}
126
/*
 * shmem_read_to_iosys_map - read from a shmem file into an iosys_map
 * destination (which may be I/O memory), page by page.
 * @file: shmem file to read from
 * @off: byte offset into the file
 * @map: destination iosys_map
 * @map_off: byte offset within @map
 * @len: number of bytes to copy
 *
 * Returns 0 on success or a negative errno from the page lookup.
 */
int shmem_read_to_iosys_map(struct file *file, loff_t off,
			    struct iosys_map *map, size_t map_off, size_t len)
{
	unsigned long pfn;

	for (pfn = off >> PAGE_SHIFT; len; pfn++) {
		/* Clamp this chunk to the remainder of the current page. */
		unsigned int this =
			min_t(size_t, PAGE_SIZE - offset_in_page(off), len);
		struct page *page;
		void *vaddr;

		page = shmem_read_mapping_page_gfp(file->f_mapping, pfn,
						   GFP_KERNEL);
		if (IS_ERR(page))
			return PTR_ERR(page);

		vaddr = kmap(page);
		/* iosys_map_memcpy_to handles both vaddr and iomem maps. */
		iosys_map_memcpy_to(map, map_off, vaddr + offset_in_page(off),
				    this);
		mark_page_accessed(page);
		kunmap(page);
		put_page(page);

		len -= this;
		map_off += this;
		/* Only the first page has an intra-page offset. */
		off = 0;
	}

	return 0;
}
157
/* Read @len bytes at @off from the shmem file into @dst. Returns 0 or -errno. */
int shmem_read(struct file *file, loff_t off, void *dst, size_t len)
{
	return __shmem_rw(file, off, dst, len, false);
}
162
/* Write @len bytes from @src into the shmem file at @off. Returns 0 or -errno. */
int shmem_write(struct file *file, loff_t off, void *src, size_t len)
{
	return __shmem_rw(file, off, src, len, true);
}
167
168#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
169#include "st_shmem_utils.c"
170#endif
Version shown below: Linux kernel v5.9
  1// SPDX-License-Identifier: MIT
  2/*
  3 * Copyright © 2020 Intel Corporation
  4 */
  5
 
  6#include <linux/mm.h>
  7#include <linux/pagemap.h>
  8#include <linux/shmem_fs.h>
  9
 10#include "gem/i915_gem_object.h"
 
 11#include "shmem_utils.h"
 12
/*
 * shmem_create_from_data - create an anonymous shmem file holding a copy
 * of @data.
 * @name: name for the file (debug only, may be empty)
 * @data: source buffer to copy in
 * @len: number of bytes to copy; the file is sized to PAGE_ALIGN(len)
 *
 * Returns the new file on success, or an ERR_PTR on failure. The caller
 * owns the reference and must fput() it when done.
 */
struct file *shmem_create_from_data(const char *name, void *data, size_t len)
{
	struct file *file;
	int err;

	file = shmem_file_setup(name, PAGE_ALIGN(len), VM_NORESERVE);
	if (IS_ERR(file))
		return file;

	err = shmem_write(file, 0, data, len);
	if (err) {
		/* Copy failed: drop the file and propagate the error. */
		fput(file);
		return ERR_PTR(err);
	}

	return file;
}
 30
 31struct file *shmem_create_from_object(struct drm_i915_gem_object *obj)
 32{
 33	struct file *file;
 34	void *ptr;
 35
 36	if (obj->ops == &i915_gem_shmem_ops) {
 37		file = obj->base.filp;
 38		atomic_long_inc(&file->f_count);
 39		return file;
 40	}
 41
 42	ptr = i915_gem_object_pin_map(obj, I915_MAP_WB);
 
 43	if (IS_ERR(ptr))
 44		return ERR_CAST(ptr);
 45
 46	file = shmem_create_from_data("", ptr, obj->base.size);
 47	i915_gem_object_unpin_map(obj);
 48
 49	return file;
 50}
 51
 52static size_t shmem_npte(struct file *file)
 53{
 54	return file->f_mapping->host->i_size >> PAGE_SHIFT;
 55}
 56
/*
 * __shmem_unpin_map - tear down a (possibly partial) pinned mapping of
 * the first @n_pte pages of the shmem file.
 * @file: the shmem file
 * @ptr: kernel vaddr of the mapping to remove
 * @n_pte: number of pages that were successfully pinned
 */
static void __shmem_unpin_map(struct file *file, void *ptr, size_t n_pte)
{
	unsigned long pfn;

	vunmap(ptr);

	for (pfn = 0; pfn < n_pte; pfn++) {
		struct page *page;

		page = shmem_read_mapping_page_gfp(file->f_mapping, pfn,
						   GFP_KERNEL);
		if (!WARN_ON(IS_ERR(page))) {
			/*
			 * Drop two references: the one this lookup just
			 * took, plus the one held since shmem_pin_map()
			 * (which never put its lookup references).
			 */
			put_page(page);
			put_page(page);
		}
	}
}
 74
/*
 * shmem_pin_map - pin every page of a shmem file and map them into a
 * contiguous kernel virtual range.
 * @file: shmem file to map (size taken from its inode)
 *
 * Uses alloc_vm_area() to reserve the virtual range and obtain pointers
 * to its PTEs, then installs each looked-up page directly. The lookup
 * references are retained (released later via __shmem_unpin_map()). Also
 * marks the mapping unevictable so the pinned pages are skipped by
 * reclaim. Returns the vaddr, or NULL on failure.
 */
void *shmem_pin_map(struct file *file)
{
	const size_t n_pte = shmem_npte(file);
	pte_t *stack[32], **ptes, **mem;
	struct vm_struct *area;
	unsigned long pfn;

	/* Small mappings use the on-stack PTE-pointer array. */
	mem = stack;
	if (n_pte > ARRAY_SIZE(stack)) {
		mem = kvmalloc_array(n_pte, sizeof(*mem), GFP_KERNEL);
		if (!mem)
			return NULL;
	}

	/* alloc_vm_area() fills mem[] with a pointer to each PTE slot. */
	area = alloc_vm_area(n_pte << PAGE_SHIFT, mem);
	if (!area) {
		if (mem != stack)
			kvfree(mem);
		return NULL;
	}

	ptes = mem;
	for (pfn = 0; pfn < n_pte; pfn++) {
		struct page *page;

		page = shmem_read_mapping_page_gfp(file->f_mapping, pfn,
						   GFP_KERNEL);
		if (IS_ERR(page))
			goto err_page;

		/* Install the page; its lookup reference is kept pinned. */
		**ptes++ = mk_pte(page,  PAGE_KERNEL);
	}

	if (mem != stack)
		kvfree(mem);

	mapping_set_unevictable(file->f_mapping);
	return area->addr;

err_page:
	if (mem != stack)
		kvfree(mem);

	/* Unwind only the pfn pages that were successfully installed. */
	__shmem_unpin_map(file, area->addr, pfn);
	return NULL;
}
121
/*
 * shmem_unpin_map - undo shmem_pin_map().
 * @file: the shmem file that was pinned
 * @ptr: the vaddr returned by shmem_pin_map()
 *
 * Makes the mapping evictable again, then unmaps and drops the pinned
 * page references for the whole file.
 */
void shmem_unpin_map(struct file *file, void *ptr)
{
	mapping_clear_unevictable(file->f_mapping);
	__shmem_unpin_map(file, ptr, shmem_npte(file));
}
127
128static int __shmem_rw(struct file *file, loff_t off,
129		      void *ptr, size_t len,
130		      bool write)
131{
132	unsigned long pfn;
133
134	for (pfn = off >> PAGE_SHIFT; len; pfn++) {
135		unsigned int this =
136			min_t(size_t, PAGE_SIZE - offset_in_page(off), len);
137		struct page *page;
138		void *vaddr;
139
140		page = shmem_read_mapping_page_gfp(file->f_mapping, pfn,
141						   GFP_KERNEL);
142		if (IS_ERR(page))
143			return PTR_ERR(page);
144
145		vaddr = kmap(page);
146		if (write)
147			memcpy(vaddr + offset_in_page(off), ptr, this);
148		else
 
149			memcpy(ptr, vaddr + offset_in_page(off), this);
 
 
150		kunmap(page);
151		put_page(page);
152
153		len -= this;
154		ptr += this;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
155		off = 0;
156	}
157
158	return 0;
159}
160
/* Read @len bytes at @off from the shmem file into @dst. Returns 0 or -errno. */
int shmem_read(struct file *file, loff_t off, void *dst, size_t len)
{
	return __shmem_rw(file, off, dst, len, false);
}
165
/* Write @len bytes from @src into the shmem file at @off. Returns 0 or -errno. */
int shmem_write(struct file *file, loff_t off, void *src, size_t len)
{
	return __shmem_rw(file, off, src, len, true);
}
170
171#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
172#include "st_shmem_utils.c"
173#endif