Loading...
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Copyright (c) 2015, Linaro Limited
4 */
5#include <linux/device.h>
6#include <linux/dma-buf.h>
7#include <linux/genalloc.h>
8#include <linux/slab.h>
9#include <linux/tee_drv.h>
10#include "tee_private.h"
11
12static int pool_op_gen_alloc(struct tee_shm_pool_mgr *poolm,
13 struct tee_shm *shm, size_t size)
14{
15 unsigned long va;
16 struct gen_pool *genpool = poolm->private_data;
17 size_t s = roundup(size, 1 << genpool->min_alloc_order);
18
19 va = gen_pool_alloc(genpool, s);
20 if (!va)
21 return -ENOMEM;
22
23 memset((void *)va, 0, s);
24 shm->kaddr = (void *)va;
25 shm->paddr = gen_pool_virt_to_phys(genpool, va);
26 shm->size = s;
27 return 0;
28}
29
30static void pool_op_gen_free(struct tee_shm_pool_mgr *poolm,
31 struct tee_shm *shm)
32{
33 gen_pool_free(poolm->private_data, (unsigned long)shm->kaddr,
34 shm->size);
35 shm->kaddr = NULL;
36}
37
38static void pool_op_gen_destroy_poolmgr(struct tee_shm_pool_mgr *poolm)
39{
40 gen_pool_destroy(poolm->private_data);
41 kfree(poolm);
42}
43
/* Ops table for pool managers backed by a genalloc gen_pool. */
static const struct tee_shm_pool_mgr_ops pool_ops_generic = {
	.alloc = pool_op_gen_alloc,
	.free = pool_op_gen_free,
	.destroy_poolmgr = pool_op_gen_destroy_poolmgr,
};
49
/**
 * tee_shm_pool_alloc_res_mem() - Create a shared memory pool from reserved
 * memory range
 * @priv_info:   Information for driver private shared memory pool
 * @dmabuf_info: Information for dma-buf shared memory pool
 *
 * Start and end of pools must be page aligned.
 *
 * Allocation with the flag TEE_SHM_DMA_BUF set will use the range supplied
 * in @dmabuf_info, others will use the range provided by @priv_info.
 *
 * @returns pointer to a 'struct tee_shm_pool' or an ERR_PTR on failure.
 */
struct tee_shm_pool *
tee_shm_pool_alloc_res_mem(struct tee_shm_pool_mem_info *priv_info,
			   struct tee_shm_pool_mem_info *dmabuf_info)
{
	struct tee_shm_pool_mgr *priv_mgr;
	struct tee_shm_pool_mgr *dmabuf_mgr;
	void *rc;

	/*
	 * Create the pool for driver private shared memory
	 */
	rc = tee_shm_pool_mgr_alloc_res_mem(priv_info->vaddr, priv_info->paddr,
					    priv_info->size,
					    3 /* 8 byte aligned */);
	if (IS_ERR(rc))
		return rc;
	priv_mgr = rc;

	/*
	 * Create the pool for dma_buf shared memory; page granularity since
	 * these buffers are mapped to user space.
	 */
	rc = tee_shm_pool_mgr_alloc_res_mem(dmabuf_info->vaddr,
					    dmabuf_info->paddr,
					    dmabuf_info->size, PAGE_SHIFT);
	if (IS_ERR(rc))
		goto err_free_priv_mgr;
	dmabuf_mgr = rc;

	rc = tee_shm_pool_alloc(priv_mgr, dmabuf_mgr);
	if (IS_ERR(rc))
		goto err_free_dmabuf_mgr;

	return rc;

	/* Unwind in reverse order of creation on failure */
err_free_dmabuf_mgr:
	tee_shm_pool_mgr_destroy(dmabuf_mgr);
err_free_priv_mgr:
	tee_shm_pool_mgr_destroy(priv_mgr);

	return rc;
}
EXPORT_SYMBOL_GPL(tee_shm_pool_alloc_res_mem);
105
106struct tee_shm_pool_mgr *tee_shm_pool_mgr_alloc_res_mem(unsigned long vaddr,
107 phys_addr_t paddr,
108 size_t size,
109 int min_alloc_order)
110{
111 const size_t page_mask = PAGE_SIZE - 1;
112 struct tee_shm_pool_mgr *mgr;
113 int rc;
114
115 /* Start and end must be page aligned */
116 if (vaddr & page_mask || paddr & page_mask || size & page_mask)
117 return ERR_PTR(-EINVAL);
118
119 mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
120 if (!mgr)
121 return ERR_PTR(-ENOMEM);
122
123 mgr->private_data = gen_pool_create(min_alloc_order, -1);
124 if (!mgr->private_data) {
125 rc = -ENOMEM;
126 goto err;
127 }
128
129 gen_pool_set_algo(mgr->private_data, gen_pool_best_fit, NULL);
130 rc = gen_pool_add_virt(mgr->private_data, vaddr, paddr, size, -1);
131 if (rc) {
132 gen_pool_destroy(mgr->private_data);
133 goto err;
134 }
135
136 mgr->ops = &pool_ops_generic;
137
138 return mgr;
139err:
140 kfree(mgr);
141
142 return ERR_PTR(rc);
143}
144EXPORT_SYMBOL_GPL(tee_shm_pool_mgr_alloc_res_mem);
145
146static bool check_mgr_ops(struct tee_shm_pool_mgr *mgr)
147{
148 return mgr && mgr->ops && mgr->ops->alloc && mgr->ops->free &&
149 mgr->ops->destroy_poolmgr;
150}
151
152struct tee_shm_pool *tee_shm_pool_alloc(struct tee_shm_pool_mgr *priv_mgr,
153 struct tee_shm_pool_mgr *dmabuf_mgr)
154{
155 struct tee_shm_pool *pool;
156
157 if (!check_mgr_ops(priv_mgr) || !check_mgr_ops(dmabuf_mgr))
158 return ERR_PTR(-EINVAL);
159
160 pool = kzalloc(sizeof(*pool), GFP_KERNEL);
161 if (!pool)
162 return ERR_PTR(-ENOMEM);
163
164 pool->private_mgr = priv_mgr;
165 pool->dma_buf_mgr = dmabuf_mgr;
166
167 return pool;
168}
169EXPORT_SYMBOL_GPL(tee_shm_pool_alloc);
170
/**
 * tee_shm_pool_free() - Free a shared memory pool
 * @pool:	The shared memory pool to free
 *
 * There must be no remaining shared memory allocated from this pool when
 * this function is called.
 */
void tee_shm_pool_free(struct tee_shm_pool *pool)
{
	/* Destroy each manager (if set) before freeing the pool itself */
	if (pool->private_mgr)
		tee_shm_pool_mgr_destroy(pool->private_mgr);
	if (pool->dma_buf_mgr)
		tee_shm_pool_mgr_destroy(pool->dma_buf_mgr);
	kfree(pool);
}
EXPORT_SYMBOL_GPL(tee_shm_pool_free);
1/*
2 * Copyright (c) 2015, Linaro Limited
3 *
4 * This software is licensed under the terms of the GNU General Public
5 * License version 2, as published by the Free Software Foundation, and
6 * may be copied, distributed, and modified under those terms.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 */
14#include <linux/device.h>
15#include <linux/dma-buf.h>
16#include <linux/genalloc.h>
17#include <linux/slab.h>
18#include <linux/tee_drv.h>
19#include "tee_private.h"
20
21static int pool_op_gen_alloc(struct tee_shm_pool_mgr *poolm,
22 struct tee_shm *shm, size_t size)
23{
24 unsigned long va;
25 struct gen_pool *genpool = poolm->private_data;
26 size_t s = roundup(size, 1 << genpool->min_alloc_order);
27
28 va = gen_pool_alloc(genpool, s);
29 if (!va)
30 return -ENOMEM;
31
32 memset((void *)va, 0, s);
33 shm->kaddr = (void *)va;
34 shm->paddr = gen_pool_virt_to_phys(genpool, va);
35 shm->size = s;
36 return 0;
37}
38
39static void pool_op_gen_free(struct tee_shm_pool_mgr *poolm,
40 struct tee_shm *shm)
41{
42 gen_pool_free(poolm->private_data, (unsigned long)shm->kaddr,
43 shm->size);
44 shm->kaddr = NULL;
45}
46
47static void pool_op_gen_destroy_poolmgr(struct tee_shm_pool_mgr *poolm)
48{
49 gen_pool_destroy(poolm->private_data);
50 kfree(poolm);
51}
52
/* Ops table for pool managers backed by a genalloc gen_pool. */
static const struct tee_shm_pool_mgr_ops pool_ops_generic = {
	.alloc = pool_op_gen_alloc,
	.free = pool_op_gen_free,
	.destroy_poolmgr = pool_op_gen_destroy_poolmgr,
};
58
/**
 * tee_shm_pool_alloc_res_mem() - Create a shared memory pool from reserved
 * memory range
 * @priv_info:   Information for driver private shared memory pool
 * @dmabuf_info: Information for dma-buf shared memory pool
 *
 * Start and end of pools must be page aligned.
 *
 * Allocation with the flag TEE_SHM_DMA_BUF set will use the range supplied
 * in @dmabuf_info, others will use the range provided by @priv_info.
 *
 * @returns pointer to a 'struct tee_shm_pool' or an ERR_PTR on failure.
 */
struct tee_shm_pool *
tee_shm_pool_alloc_res_mem(struct tee_shm_pool_mem_info *priv_info,
			   struct tee_shm_pool_mem_info *dmabuf_info)
{
	struct tee_shm_pool_mgr *priv_mgr;
	struct tee_shm_pool_mgr *dmabuf_mgr;
	void *rc;

	/*
	 * Create the pool for driver private shared memory
	 */
	rc = tee_shm_pool_mgr_alloc_res_mem(priv_info->vaddr, priv_info->paddr,
					    priv_info->size,
					    3 /* 8 byte aligned */);
	if (IS_ERR(rc))
		return rc;
	priv_mgr = rc;

	/*
	 * Create the pool for dma_buf shared memory; page granularity since
	 * these buffers are mapped to user space.
	 */
	rc = tee_shm_pool_mgr_alloc_res_mem(dmabuf_info->vaddr,
					    dmabuf_info->paddr,
					    dmabuf_info->size, PAGE_SHIFT);
	if (IS_ERR(rc))
		goto err_free_priv_mgr;
	dmabuf_mgr = rc;

	rc = tee_shm_pool_alloc(priv_mgr, dmabuf_mgr);
	if (IS_ERR(rc))
		goto err_free_dmabuf_mgr;

	return rc;

	/* Unwind in reverse order of creation on failure */
err_free_dmabuf_mgr:
	tee_shm_pool_mgr_destroy(dmabuf_mgr);
err_free_priv_mgr:
	tee_shm_pool_mgr_destroy(priv_mgr);

	return rc;
}
EXPORT_SYMBOL_GPL(tee_shm_pool_alloc_res_mem);
114
115struct tee_shm_pool_mgr *tee_shm_pool_mgr_alloc_res_mem(unsigned long vaddr,
116 phys_addr_t paddr,
117 size_t size,
118 int min_alloc_order)
119{
120 const size_t page_mask = PAGE_SIZE - 1;
121 struct tee_shm_pool_mgr *mgr;
122 int rc;
123
124 /* Start and end must be page aligned */
125 if (vaddr & page_mask || paddr & page_mask || size & page_mask)
126 return ERR_PTR(-EINVAL);
127
128 mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
129 if (!mgr)
130 return ERR_PTR(-ENOMEM);
131
132 mgr->private_data = gen_pool_create(min_alloc_order, -1);
133 if (!mgr->private_data) {
134 rc = -ENOMEM;
135 goto err;
136 }
137
138 gen_pool_set_algo(mgr->private_data, gen_pool_best_fit, NULL);
139 rc = gen_pool_add_virt(mgr->private_data, vaddr, paddr, size, -1);
140 if (rc) {
141 gen_pool_destroy(mgr->private_data);
142 goto err;
143 }
144
145 mgr->ops = &pool_ops_generic;
146
147 return mgr;
148err:
149 kfree(mgr);
150
151 return ERR_PTR(rc);
152}
153EXPORT_SYMBOL_GPL(tee_shm_pool_mgr_alloc_res_mem);
154
155static bool check_mgr_ops(struct tee_shm_pool_mgr *mgr)
156{
157 return mgr && mgr->ops && mgr->ops->alloc && mgr->ops->free &&
158 mgr->ops->destroy_poolmgr;
159}
160
161struct tee_shm_pool *tee_shm_pool_alloc(struct tee_shm_pool_mgr *priv_mgr,
162 struct tee_shm_pool_mgr *dmabuf_mgr)
163{
164 struct tee_shm_pool *pool;
165
166 if (!check_mgr_ops(priv_mgr) || !check_mgr_ops(dmabuf_mgr))
167 return ERR_PTR(-EINVAL);
168
169 pool = kzalloc(sizeof(*pool), GFP_KERNEL);
170 if (!pool)
171 return ERR_PTR(-ENOMEM);
172
173 pool->private_mgr = priv_mgr;
174 pool->dma_buf_mgr = dmabuf_mgr;
175
176 return pool;
177}
178EXPORT_SYMBOL_GPL(tee_shm_pool_alloc);
179
/**
 * tee_shm_pool_free() - Free a shared memory pool
 * @pool:	The shared memory pool to free
 *
 * There must be no remaining shared memory allocated from this pool when
 * this function is called.
 */
void tee_shm_pool_free(struct tee_shm_pool *pool)
{
	/* Destroy each manager (if set) before freeing the pool itself */
	if (pool->private_mgr)
		tee_shm_pool_mgr_destroy(pool->private_mgr);
	if (pool->dma_buf_mgr)
		tee_shm_pool_mgr_destroy(pool->dma_buf_mgr);
	kfree(pool);
}
EXPORT_SYMBOL_GPL(tee_shm_pool_free);