// SPDX-License-Identifier: GPL-2.0 AND MIT
/*
 * Copyright © 2023 Intel Corporation
 */
#include <linux/mm.h>

#include <drm/ttm/ttm_tt.h>
#include <drm/ttm/ttm_pool.h>

#include "ttm_kunit_helpers.h"

struct ttm_pool_test_case {
	const char *description;
	unsigned int order;
	bool use_dma_alloc;
};

struct ttm_pool_test_priv {
	struct ttm_test_devices *devs;

	/* Used to create mock ttm_tts */
	struct ttm_buffer_object *mock_bo;
};

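/* Operation context shared by all tests: interruptible, may wait for the GPU. */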
static struct ttm_operation_ctx simple_ctx = {
	.interruptible = true,
	.no_wait_gpu = false,
};

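/* Per-test setup and teardown: allocate the private fixture and mock devices. */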
static int ttm_pool_test_init(struct kunit *test)
{
	struct ttm_pool_test_priv *priv;

	priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, priv);

	priv->devs = ttm_test_devices_basic(test);
	test->priv = priv;

	return 0;
}

static void ttm_pool_test_fini(struct kunit *test)
{
	struct ttm_pool_test_priv *priv = test->priv;

	ttm_test_devices_put(test, priv->devs);
}

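/*
 * Create a ttm_tt backed by a mock buffer object of @size bytes. The
 * assertions abort the test on allocation or init failure, so a returned
 * pointer is always valid.
 */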
static struct ttm_tt *ttm_tt_kunit_init(struct kunit *test,
					uint32_t page_flags,
					enum ttm_caching caching,
					size_t size)
{
	struct ttm_pool_test_priv *priv = test->priv;
	struct ttm_buffer_object *bo;
	struct ttm_tt *tt;
	int err;

	bo = ttm_bo_kunit_init(test, priv->devs, size);
	KUNIT_ASSERT_NOT_NULL(test, bo);
	priv->mock_bo = bo;

	tt = kunit_kzalloc(test, sizeof(*tt), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, tt);

	err = ttm_tt_init(tt, priv->mock_bo, page_flags, caching, 0);
	KUNIT_ASSERT_EQ(test, err, 0);

	return tt;
}

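/*
 * Return a pool that already holds pages: run one alloc/free cycle of @size
 * bytes through a freshly initialised pool (with coherent DMA allocations
 * enabled), so the freed pages are recycled into the pool's per-caching,
 * per-order lists instead of being returned to the system.
 */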
static struct ttm_pool *ttm_pool_pre_populated(struct kunit *test,
					       size_t size,
					       enum ttm_caching caching)
{
	struct ttm_pool_test_priv *priv = test->priv;
	struct ttm_test_devices *devs = priv->devs;
	struct ttm_pool *pool;
	struct ttm_tt *tt;
	int err;

	tt = ttm_tt_kunit_init(test, 0, caching, size);
	KUNIT_ASSERT_NOT_NULL(test, tt);

	pool = kunit_kzalloc(test, sizeof(*pool), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, pool);

	ttm_pool_init(pool, devs->dev, NUMA_NO_NODE, true, false);

	err = ttm_pool_alloc(pool, tt, &simple_ctx);
	KUNIT_ASSERT_EQ(test, err, 0);

	ttm_pool_free(pool, tt);
	ttm_tt_fini(tt);

	return pool;
}

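/*
 * Parameters for the basic allocation tests: a single page, multiple pages
 * and an order above the allocation limit, with the single-page and
 * above-limit cases repeated with coherent DMA mappings enabled.
 */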
static const struct ttm_pool_test_case ttm_pool_basic_cases[] = {
	{
		.description = "One page",
		.order = 0,
	},
	{
		.description = "More than one page",
		.order = 2,
	},
	{
		.description = "Above the allocation limit",
		.order = MAX_PAGE_ORDER + 1,
	},
	{
		.description = "One page, with coherent DMA mappings enabled",
		.order = 0,
		.use_dma_alloc = true,
	},
	{
		.description = "Above the allocation limit, with coherent DMA mappings enabled",
		.order = MAX_PAGE_ORDER + 1,
		.use_dma_alloc = true,
	},
};

static void ttm_pool_alloc_case_desc(const struct ttm_pool_test_case *t,
				     char *desc)
{
	strscpy(desc, t->description, KUNIT_PARAM_DESC_SIZE);
}

KUNIT_ARRAY_PARAM(ttm_pool_alloc_basic, ttm_pool_basic_cases,
		  ttm_pool_alloc_case_desc);

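/*
 * Allocate a ttm_tt of the parametrised order and check how the pages are
 * tagged: without DMA mappings, page->private holds the allocation order;
 * with them, it is expected to point at the pool's DMA bookkeeping. Orders
 * above MAX_PAGE_ORDER must be split into one maximum-order block followed
 * by order-0 blocks.
 */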
static void ttm_pool_alloc_basic(struct kunit *test)
{
	struct ttm_pool_test_priv *priv = test->priv;
	struct ttm_test_devices *devs = priv->devs;
	const struct ttm_pool_test_case *params = test->param_value;
	struct ttm_tt *tt;
	struct ttm_pool *pool;
	struct page *fst_page, *last_page;
	enum ttm_caching caching = ttm_uncached;
	unsigned int expected_num_pages = 1 << params->order;
	size_t size = expected_num_pages * PAGE_SIZE;
	int err;

	tt = ttm_tt_kunit_init(test, 0, caching, size);
	KUNIT_ASSERT_NOT_NULL(test, tt);

	pool = kunit_kzalloc(test, sizeof(*pool), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, pool);

	ttm_pool_init(pool, devs->dev, NUMA_NO_NODE, params->use_dma_alloc,
		      false);

	KUNIT_ASSERT_PTR_EQ(test, pool->dev, devs->dev);
	KUNIT_ASSERT_EQ(test, pool->nid, NUMA_NO_NODE);
	KUNIT_ASSERT_EQ(test, pool->use_dma_alloc, params->use_dma_alloc);

	err = ttm_pool_alloc(pool, tt, &simple_ctx);
	KUNIT_ASSERT_EQ(test, err, 0);
	KUNIT_ASSERT_EQ(test, tt->num_pages, expected_num_pages);

	fst_page = tt->pages[0];
	last_page = tt->pages[tt->num_pages - 1];

	if (params->order <= MAX_PAGE_ORDER) {
		if (params->use_dma_alloc) {
			KUNIT_ASSERT_NOT_NULL(test, (void *)fst_page->private);
			KUNIT_ASSERT_NOT_NULL(test, (void *)last_page->private);
		} else {
			KUNIT_ASSERT_EQ(test, fst_page->private, params->order);
		}
	} else {
		if (params->use_dma_alloc) {
			KUNIT_ASSERT_NOT_NULL(test, (void *)fst_page->private);
			KUNIT_ASSERT_NULL(test, (void *)last_page->private);
		} else {
			/*
			 * We expect to alloc one big block, followed by
			 * order 0 blocks
			 */
			KUNIT_ASSERT_EQ(test, fst_page->private,
					min_t(unsigned int, MAX_PAGE_ORDER,
					      params->order));
			KUNIT_ASSERT_EQ(test, last_page->private, 0);
		}
	}

	ttm_pool_free(pool, tt);
	ttm_tt_fini(tt);
	ttm_pool_fini(pool);
}

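/*
 * Same allocation paths, but through ttm_sg_tt_init() so the ttm_tt carries
 * a dma_address array; verify that the first and last entries are populated
 * after ttm_pool_alloc().
 */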
static void ttm_pool_alloc_basic_dma_addr(struct kunit *test)
{
	struct ttm_pool_test_priv *priv = test->priv;
	struct ttm_test_devices *devs = priv->devs;
	const struct ttm_pool_test_case *params = test->param_value;
	struct ttm_tt *tt;
	struct ttm_pool *pool;
	struct ttm_buffer_object *bo;
	dma_addr_t dma1, dma2;
	enum ttm_caching caching = ttm_uncached;
	unsigned int expected_num_pages = 1 << params->order;
	size_t size = expected_num_pages * PAGE_SIZE;
	int err;

	tt = kunit_kzalloc(test, sizeof(*tt), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, tt);

	bo = ttm_bo_kunit_init(test, devs, size);
	KUNIT_ASSERT_NOT_NULL(test, bo);

	err = ttm_sg_tt_init(tt, bo, 0, caching);
	KUNIT_ASSERT_EQ(test, err, 0);

	pool = kunit_kzalloc(test, sizeof(*pool), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, pool);

	ttm_pool_init(pool, devs->dev, NUMA_NO_NODE, true, false);

	err = ttm_pool_alloc(pool, tt, &simple_ctx);
	KUNIT_ASSERT_EQ(test, err, 0);
	KUNIT_ASSERT_EQ(test, tt->num_pages, expected_num_pages);

	dma1 = tt->dma_address[0];
	dma2 = tt->dma_address[tt->num_pages - 1];

	KUNIT_ASSERT_NOT_NULL(test, (void *)(uintptr_t)dma1);
	KUNIT_ASSERT_NOT_NULL(test, (void *)(uintptr_t)dma2);

	ttm_pool_free(pool, tt);
	ttm_tt_fini(tt);
	ttm_pool_fini(pool);
}

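/*
 * A page of matching order and caching already sits in the pre-populated
 * pool; ttm_pool_alloc() should reuse it, leaving the pool list empty.
 */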
static void ttm_pool_alloc_order_caching_match(struct kunit *test)
{
	struct ttm_tt *tt;
	struct ttm_pool *pool;
	struct ttm_pool_type *pt;
	enum ttm_caching caching = ttm_uncached;
	unsigned int order = 0;
	size_t size = PAGE_SIZE;
	int err;

	pool = ttm_pool_pre_populated(test, size, caching);

	pt = &pool->caching[caching].orders[order];
	KUNIT_ASSERT_FALSE(test, list_empty(&pt->pages));

	tt = ttm_tt_kunit_init(test, 0, caching, size);
	KUNIT_ASSERT_NOT_NULL(test, tt);

	err = ttm_pool_alloc(pool, tt, &simple_ctx);
	KUNIT_ASSERT_EQ(test, err, 0);

	KUNIT_ASSERT_TRUE(test, list_empty(&pt->pages));

	ttm_pool_free(pool, tt);
	ttm_tt_fini(tt);
	ttm_pool_fini(pool);
}

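/*
 * The pre-populated pool only holds a cached page, but the ttm_tt asks for
 * an uncached one: the cached page must not be reused, and after the free
 * both caching types should hold a page.
 */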
static void ttm_pool_alloc_caching_mismatch(struct kunit *test)
{
	struct ttm_tt *tt;
	struct ttm_pool *pool;
	struct ttm_pool_type *pt_pool, *pt_tt;
	enum ttm_caching tt_caching = ttm_uncached;
	enum ttm_caching pool_caching = ttm_cached;
	size_t size = PAGE_SIZE;
	unsigned int order = 0;
	int err;

	pool = ttm_pool_pre_populated(test, size, pool_caching);

	pt_pool = &pool->caching[pool_caching].orders[order];
	pt_tt = &pool->caching[tt_caching].orders[order];

	tt = ttm_tt_kunit_init(test, 0, tt_caching, size);
	KUNIT_ASSERT_NOT_NULL(test, tt);

	KUNIT_ASSERT_FALSE(test, list_empty(&pt_pool->pages));
	KUNIT_ASSERT_TRUE(test, list_empty(&pt_tt->pages));

	err = ttm_pool_alloc(pool, tt, &simple_ctx);
	KUNIT_ASSERT_EQ(test, err, 0);

	ttm_pool_free(pool, tt);
	ttm_tt_fini(tt);

	KUNIT_ASSERT_FALSE(test, list_empty(&pt_pool->pages));
	KUNIT_ASSERT_FALSE(test, list_empty(&pt_tt->pages));

	ttm_pool_fini(pool);
}

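/*
 * Same idea with mismatched orders: the order-2 page in the pool cannot
 * satisfy an order-0 request, so both order lists end up populated.
 */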
static void ttm_pool_alloc_order_mismatch(struct kunit *test)
{
	struct ttm_tt *tt;
	struct ttm_pool *pool;
	struct ttm_pool_type *pt_pool, *pt_tt;
	enum ttm_caching caching = ttm_uncached;
	unsigned int order = 2;
	size_t fst_size = (1 << order) * PAGE_SIZE;
	size_t snd_size = PAGE_SIZE;
	int err;

	pool = ttm_pool_pre_populated(test, fst_size, caching);

	pt_pool = &pool->caching[caching].orders[order];
	pt_tt = &pool->caching[caching].orders[0];

	tt = ttm_tt_kunit_init(test, 0, caching, snd_size);
	KUNIT_ASSERT_NOT_NULL(test, tt);

	KUNIT_ASSERT_FALSE(test, list_empty(&pt_pool->pages));
	KUNIT_ASSERT_TRUE(test, list_empty(&pt_tt->pages));

	err = ttm_pool_alloc(pool, tt, &simple_ctx);
	KUNIT_ASSERT_EQ(test, err, 0);

	ttm_pool_free(pool, tt);
	ttm_tt_fini(tt);

	KUNIT_ASSERT_FALSE(test, list_empty(&pt_pool->pages));
	KUNIT_ASSERT_FALSE(test, list_empty(&pt_tt->pages));

	ttm_pool_fini(pool);
}

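/*
 * With coherent DMA allocations enabled the pool owns its page lists:
 * freeing the ttm_tt should recycle its pages into the matching per-order
 * list instead of returning them to the system.
 */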
static void ttm_pool_free_dma_alloc(struct kunit *test)
{
	struct ttm_pool_test_priv *priv = test->priv;
	struct ttm_test_devices *devs = priv->devs;
	struct ttm_tt *tt;
	struct ttm_pool *pool;
	struct ttm_pool_type *pt;
	enum ttm_caching caching = ttm_uncached;
	unsigned int order = 2;
	size_t size = (1 << order) * PAGE_SIZE;
	int err;

	tt = ttm_tt_kunit_init(test, 0, caching, size);
	KUNIT_ASSERT_NOT_NULL(test, tt);

	pool = kunit_kzalloc(test, sizeof(*pool), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, pool);

	ttm_pool_init(pool, devs->dev, NUMA_NO_NODE, true, false);

	err = ttm_pool_alloc(pool, tt, &simple_ctx);
	KUNIT_ASSERT_EQ(test, err, 0);

	pt = &pool->caching[caching].orders[order];
	KUNIT_ASSERT_TRUE(test, list_empty(&pt->pages));

	ttm_pool_free(pool, tt);
	ttm_tt_fini(tt);

	KUNIT_ASSERT_FALSE(test, list_empty(&pt->pages));

	ttm_pool_fini(pool);
}

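/*
 * Without coherent DMA allocations, freed pages are expected to land in the
 * shared global pools rather than in this pool's own lists, so the
 * pool-local list looks unchanged across the free.
 */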
static void ttm_pool_free_no_dma_alloc(struct kunit *test)
{
	struct ttm_pool_test_priv *priv = test->priv;
	struct ttm_test_devices *devs = priv->devs;
	struct ttm_tt *tt;
	struct ttm_pool *pool;
	struct ttm_pool_type *pt;
	enum ttm_caching caching = ttm_uncached;
	unsigned int order = 2;
	size_t size = (1 << order) * PAGE_SIZE;
	int err;

	tt = ttm_tt_kunit_init(test, 0, caching, size);
	KUNIT_ASSERT_NOT_NULL(test, tt);

	pool = kunit_kzalloc(test, sizeof(*pool), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, pool);

	ttm_pool_init(pool, devs->dev, NUMA_NO_NODE, false, false);

	err = ttm_pool_alloc(pool, tt, &simple_ctx);
	KUNIT_ASSERT_EQ(test, err, 0);

	pt = &pool->caching[caching].orders[order];
	KUNIT_ASSERT_TRUE(test, list_is_singular(&pt->pages));

	ttm_pool_free(pool, tt);
	ttm_tt_fini(tt);

	KUNIT_ASSERT_TRUE(test, list_is_singular(&pt->pages));

	ttm_pool_fini(pool);
}

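/*
 * ttm_pool_fini() must give every page still held in the pool back to the
 * system, leaving the per-order lists empty.
 */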
static void ttm_pool_fini_basic(struct kunit *test)
{
	struct ttm_pool *pool;
	struct ttm_pool_type *pt;
	enum ttm_caching caching = ttm_uncached;
	unsigned int order = 0;
	size_t size = PAGE_SIZE;

	pool = ttm_pool_pre_populated(test, size, caching);
	pt = &pool->caching[caching].orders[order];

	KUNIT_ASSERT_FALSE(test, list_empty(&pt->pages));

	ttm_pool_fini(pool);

	KUNIT_ASSERT_TRUE(test, list_empty(&pt->pages));
}

static struct kunit_case ttm_pool_test_cases[] = {
	KUNIT_CASE_PARAM(ttm_pool_alloc_basic, ttm_pool_alloc_basic_gen_params),
	KUNIT_CASE_PARAM(ttm_pool_alloc_basic_dma_addr,
			 ttm_pool_alloc_basic_gen_params),
	KUNIT_CASE(ttm_pool_alloc_order_caching_match),
	KUNIT_CASE(ttm_pool_alloc_caching_mismatch),
	KUNIT_CASE(ttm_pool_alloc_order_mismatch),
	KUNIT_CASE(ttm_pool_free_dma_alloc),
	KUNIT_CASE(ttm_pool_free_no_dma_alloc),
	KUNIT_CASE(ttm_pool_fini_basic),
	{}
};

static struct kunit_suite ttm_pool_test_suite = {
	.name = "ttm_pool",
	.init = ttm_pool_test_init,
	.exit = ttm_pool_test_fini,
	.test_cases = ttm_pool_test_cases,
};

kunit_test_suites(&ttm_pool_test_suite);

MODULE_LICENSE("GPL");