// SPDX-License-Identifier: GPL-2.0 AND MIT
/*
 * Copyright © 2023 Intel Corporation
 */
#include <linux/delay.h>
#include <linux/kthread.h>

#include <drm/ttm/ttm_resource.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_tt.h>

#include "ttm_kunit_helpers.h"
#include "ttm_mock_manager.h"

#define BO_SIZE		SZ_4K
#define MANAGER_SIZE	SZ_1M

static struct spinlock fence_lock;

struct ttm_bo_validate_test_case {
	const char *description;
	enum ttm_bo_type bo_type;
	u32 mem_type;
	bool with_ttm;
	bool no_gpu_wait;
};

static struct ttm_placement *ttm_placement_kunit_init(struct kunit *test,
						      struct ttm_place *places,
						      unsigned int num_places)
{
	struct ttm_placement *placement;

	placement = kunit_kzalloc(test, sizeof(*placement), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, placement);

	placement->num_placement = num_places;
	placement->placement = places;

	return placement;
}

static const char *fence_name(struct dma_fence *f)
{
	return "ttm-bo-validate-fence";
}

static const struct dma_fence_ops fence_ops = {
	.get_driver_name = fence_name,
	.get_timeline_name = fence_name,
};

static struct dma_fence *alloc_mock_fence(struct kunit *test)
{
	struct dma_fence *fence;

	fence = kunit_kzalloc(test, sizeof(*fence), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, fence);

	dma_fence_init(fence, &fence_ops, &fence_lock, 0, 0);

	return fence;
}

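/*
 * Add a single, not yet signaled fence to @resv for the given @usage so
 * that the reservation object appears busy until the fence is signaled.
 */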
static void dma_resv_kunit_active_fence_init(struct kunit *test,
					     struct dma_resv *resv,
					     enum dma_resv_usage usage)
{
	struct dma_fence *fence;

	fence = alloc_mock_fence(test);
	dma_fence_enable_sw_signaling(fence);

	dma_resv_lock(resv, NULL);
	dma_resv_reserve_fences(resv, 1);
	dma_resv_add_fence(resv, fence, usage);
	dma_resv_unlock(resv);
}

static void ttm_bo_validate_case_desc(const struct ttm_bo_validate_test_case *t,
				      char *desc)
{
	strscpy(desc, t->description, KUNIT_PARAM_DESC_SIZE);
}

static const struct ttm_bo_validate_test_case ttm_bo_type_cases[] = {
	{
		.description = "Buffer object for userspace",
		.bo_type = ttm_bo_type_device,
	},
	{
		.description = "Kernel buffer object",
		.bo_type = ttm_bo_type_kernel,
	},
	{
		.description = "Shared buffer object",
		.bo_type = ttm_bo_type_sg,
	},
};

KUNIT_ARRAY_PARAM(ttm_bo_types, ttm_bo_type_cases,
		  ttm_bo_validate_case_desc);

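/*
 * Init a reserved BO in the system manager and check the defaults set up
 * by ttm_bo_init_reserved(): refcount, alignment, destroy callback,
 * unpopulated ttm and, for userspace objects, an allocated vma node.
 */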
static void ttm_bo_init_reserved_sys_man(struct kunit *test)
{
	const struct ttm_bo_validate_test_case *params = test->param_value;
	struct ttm_test_devices *priv = test->priv;
	enum ttm_bo_type bo_type = params->bo_type;
	u32 size = ALIGN(BO_SIZE, PAGE_SIZE);
	struct ttm_operation_ctx ctx = { };
	struct ttm_placement *placement;
	struct ttm_buffer_object *bo;
	struct ttm_place *place;
	int err;

	bo = kunit_kzalloc(test, sizeof(*bo), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, bo);

	place = ttm_place_kunit_init(test, TTM_PL_SYSTEM, 0);
	placement = ttm_placement_kunit_init(test, place, 1);

	drm_gem_private_object_init(priv->drm, &bo->base, size);

	err = ttm_bo_init_reserved(priv->ttm_dev, bo, bo_type, placement,
				   PAGE_SIZE, &ctx, NULL, NULL,
				   &dummy_ttm_bo_destroy);
	dma_resv_unlock(bo->base.resv);

	KUNIT_EXPECT_EQ(test, err, 0);
	KUNIT_EXPECT_EQ(test, kref_read(&bo->kref), 1);
	KUNIT_EXPECT_PTR_EQ(test, bo->bdev, priv->ttm_dev);
	KUNIT_EXPECT_EQ(test, bo->type, bo_type);
	KUNIT_EXPECT_EQ(test, bo->page_alignment, PAGE_SIZE);
	KUNIT_EXPECT_PTR_EQ(test, bo->destroy, &dummy_ttm_bo_destroy);
	KUNIT_EXPECT_EQ(test, bo->pin_count, 0);
	KUNIT_EXPECT_NULL(test, bo->bulk_move);
	KUNIT_EXPECT_NOT_NULL(test, bo->ttm);
	KUNIT_EXPECT_FALSE(test, ttm_tt_is_populated(bo->ttm));
	KUNIT_EXPECT_NOT_NULL(test, (void *)bo->base.resv->fences);
	KUNIT_EXPECT_EQ(test, ctx.bytes_moved, size);

	if (bo_type != ttm_bo_type_kernel)
		KUNIT_EXPECT_TRUE(test,
				  drm_mm_node_allocated(&bo->base.vma_node.vm_node));

	ttm_resource_free(bo, &bo->resource);
	ttm_bo_put(bo);
}

static void ttm_bo_init_reserved_mock_man(struct kunit *test)
{
	const struct ttm_bo_validate_test_case *params = test->param_value;
	enum ttm_bo_type bo_type = params->bo_type;
	struct ttm_test_devices *priv = test->priv;
	u32 size = ALIGN(BO_SIZE, PAGE_SIZE);
	struct ttm_operation_ctx ctx = { };
	struct ttm_placement *placement;
	u32 mem_type = TTM_PL_VRAM;
	struct ttm_buffer_object *bo;
	struct ttm_place *place;
	int err;

	ttm_mock_manager_init(priv->ttm_dev, mem_type, MANAGER_SIZE);

	bo = kunit_kzalloc(test, sizeof(*bo), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, bo);

	place = ttm_place_kunit_init(test, mem_type, 0);
	placement = ttm_placement_kunit_init(test, place, 1);

	drm_gem_private_object_init(priv->drm, &bo->base, size);

	err = ttm_bo_init_reserved(priv->ttm_dev, bo, bo_type, placement,
				   PAGE_SIZE, &ctx, NULL, NULL,
				   &dummy_ttm_bo_destroy);
	dma_resv_unlock(bo->base.resv);

	KUNIT_EXPECT_EQ(test, err, 0);
	KUNIT_EXPECT_EQ(test, kref_read(&bo->kref), 1);
	KUNIT_EXPECT_PTR_EQ(test, bo->bdev, priv->ttm_dev);
	KUNIT_EXPECT_EQ(test, bo->type, bo_type);
	KUNIT_EXPECT_EQ(test, ctx.bytes_moved, size);

	if (bo_type != ttm_bo_type_kernel)
		KUNIT_EXPECT_TRUE(test,
				  drm_mm_node_allocated(&bo->base.vma_node.vm_node));

	ttm_resource_free(bo, &bo->resource);
	ttm_bo_put(bo);
	ttm_mock_manager_fini(priv->ttm_dev, mem_type);
}

static void ttm_bo_init_reserved_resv(struct kunit *test)
{
	enum ttm_bo_type bo_type = ttm_bo_type_device;
	struct ttm_test_devices *priv = test->priv;
	u32 size = ALIGN(BO_SIZE, PAGE_SIZE);
	struct ttm_operation_ctx ctx = { };
	struct ttm_placement *placement;
	struct ttm_buffer_object *bo;
	struct ttm_place *place;
	struct dma_resv resv;
	int err;

	bo = kunit_kzalloc(test, sizeof(*bo), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, bo);

	place = ttm_place_kunit_init(test, TTM_PL_SYSTEM, 0);
	placement = ttm_placement_kunit_init(test, place, 1);

	drm_gem_private_object_init(priv->drm, &bo->base, size);
	dma_resv_init(&resv);
	dma_resv_lock(&resv, NULL);

	err = ttm_bo_init_reserved(priv->ttm_dev, bo, bo_type, placement,
				   PAGE_SIZE, &ctx, NULL, &resv,
				   &dummy_ttm_bo_destroy);
	dma_resv_unlock(bo->base.resv);

	KUNIT_EXPECT_EQ(test, err, 0);
	KUNIT_EXPECT_PTR_EQ(test, bo->base.resv, &resv);

	ttm_resource_free(bo, &bo->resource);
	ttm_bo_put(bo);
}

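/*
 * Create a BO in the system domain and validate it into a mock VRAM
 * manager, checking that the move is accounted and the placement flags
 * end up in the new resource.
 */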
static void ttm_bo_validate_basic(struct kunit *test)
{
	const struct ttm_bo_validate_test_case *params = test->param_value;
	u32 fst_mem = TTM_PL_SYSTEM, snd_mem = TTM_PL_VRAM;
	struct ttm_operation_ctx ctx_init = { }, ctx_val = { };
	struct ttm_placement *fst_placement, *snd_placement;
	struct ttm_test_devices *priv = test->priv;
	struct ttm_place *fst_place, *snd_place;
	u32 size = ALIGN(SZ_8K, PAGE_SIZE);
	struct ttm_buffer_object *bo;
	int err;

	ttm_mock_manager_init(priv->ttm_dev, snd_mem, MANAGER_SIZE);

	fst_place = ttm_place_kunit_init(test, fst_mem, 0);
	fst_placement = ttm_placement_kunit_init(test, fst_place, 1);

	bo = kunit_kzalloc(test, sizeof(*bo), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, bo);

	drm_gem_private_object_init(priv->drm, &bo->base, size);

	err = ttm_bo_init_reserved(priv->ttm_dev, bo, params->bo_type,
				   fst_placement, PAGE_SIZE, &ctx_init, NULL,
				   NULL, &dummy_ttm_bo_destroy);
	KUNIT_EXPECT_EQ(test, err, 0);

	snd_place = ttm_place_kunit_init(test, snd_mem, DRM_BUDDY_TOPDOWN_ALLOCATION);
	snd_placement = ttm_placement_kunit_init(test, snd_place, 1);

	err = ttm_bo_validate(bo, snd_placement, &ctx_val);
	dma_resv_unlock(bo->base.resv);

	KUNIT_EXPECT_EQ(test, err, 0);
	KUNIT_EXPECT_EQ(test, ctx_val.bytes_moved, bo->base.size);
	KUNIT_EXPECT_NOT_NULL(test, bo->ttm);
	KUNIT_EXPECT_TRUE(test, ttm_tt_is_populated(bo->ttm));
	KUNIT_EXPECT_EQ(test, bo->resource->mem_type, snd_mem);
	KUNIT_EXPECT_EQ(test, bo->resource->placement,
			DRM_BUDDY_TOPDOWN_ALLOCATION);

	ttm_bo_put(bo);
	ttm_mock_manager_fini(priv->ttm_dev, snd_mem);
}

static void ttm_bo_validate_invalid_placement(struct kunit *test)
{
	enum ttm_bo_type bo_type = ttm_bo_type_device;
	u32 unknown_mem_type = TTM_PL_PRIV + 1;
	u32 size = ALIGN(BO_SIZE, PAGE_SIZE);
	struct ttm_operation_ctx ctx = { };
	struct ttm_placement *placement;
	struct ttm_buffer_object *bo;
	struct ttm_place *place;
	int err;

	place = ttm_place_kunit_init(test, unknown_mem_type, 0);
	placement = ttm_placement_kunit_init(test, place, 1);

	bo = ttm_bo_kunit_init(test, test->priv, size, NULL);
	bo->type = bo_type;

	ttm_bo_reserve(bo, false, false, NULL);
	err = ttm_bo_validate(bo, placement, &ctx);
	dma_resv_unlock(bo->base.resv);

	KUNIT_EXPECT_EQ(test, err, -ENOMEM);

	ttm_bo_put(bo);
}

static void ttm_bo_validate_failed_alloc(struct kunit *test)
{
	enum ttm_bo_type bo_type = ttm_bo_type_device;
	struct ttm_test_devices *priv = test->priv;
	u32 size = ALIGN(BO_SIZE, PAGE_SIZE);
	struct ttm_operation_ctx ctx = { };
	struct ttm_placement *placement;
	u32 mem_type = TTM_PL_VRAM;
	struct ttm_buffer_object *bo;
	struct ttm_place *place;
	int err;

	bo = ttm_bo_kunit_init(test, test->priv, size, NULL);
	bo->type = bo_type;

	ttm_bad_manager_init(priv->ttm_dev, mem_type, MANAGER_SIZE);

	place = ttm_place_kunit_init(test, mem_type, 0);
	placement = ttm_placement_kunit_init(test, place, 1);

	ttm_bo_reserve(bo, false, false, NULL);
	err = ttm_bo_validate(bo, placement, &ctx);
	dma_resv_unlock(bo->base.resv);

	KUNIT_EXPECT_EQ(test, err, -ENOMEM);

	ttm_bo_put(bo);
	ttm_bad_manager_fini(priv->ttm_dev, mem_type);
}

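/* Validating a pinned BO is not allowed and must fail with -EINVAL. */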
static void ttm_bo_validate_pinned(struct kunit *test)
{
	enum ttm_bo_type bo_type = ttm_bo_type_device;
	u32 size = ALIGN(BO_SIZE, PAGE_SIZE);
	struct ttm_operation_ctx ctx = { };
	u32 mem_type = TTM_PL_SYSTEM;
	struct ttm_placement *placement;
	struct ttm_buffer_object *bo;
	struct ttm_place *place;
	int err;

	place = ttm_place_kunit_init(test, mem_type, 0);
	placement = ttm_placement_kunit_init(test, place, 1);

	bo = ttm_bo_kunit_init(test, test->priv, size, NULL);
	bo->type = bo_type;

	ttm_bo_reserve(bo, false, false, NULL);
	ttm_bo_pin(bo);
	err = ttm_bo_validate(bo, placement, &ctx);
	dma_resv_unlock(bo->base.resv);

	KUNIT_EXPECT_EQ(test, err, -EINVAL);

	ttm_bo_reserve(bo, false, false, NULL);
	ttm_bo_unpin(bo);
	dma_resv_unlock(bo->base.resv);

	ttm_bo_put(bo);
}

static const struct ttm_bo_validate_test_case ttm_mem_type_cases[] = {
	{
		.description = "System manager",
		.mem_type = TTM_PL_SYSTEM,
	},
	{
		.description = "VRAM manager",
		.mem_type = TTM_PL_VRAM,
	},
};

KUNIT_ARRAY_PARAM(ttm_bo_validate_mem, ttm_mem_type_cases,
		  ttm_bo_validate_case_desc);

static void ttm_bo_validate_same_placement(struct kunit *test)
{
	const struct ttm_bo_validate_test_case *params = test->param_value;
	struct ttm_operation_ctx ctx_init = { }, ctx_val = { };
	struct ttm_test_devices *priv = test->priv;
	u32 size = ALIGN(BO_SIZE, PAGE_SIZE);
	struct ttm_placement *placement;
	struct ttm_buffer_object *bo;
	struct ttm_place *place;
	int err;

	place = ttm_place_kunit_init(test, params->mem_type, 0);
	placement = ttm_placement_kunit_init(test, place, 1);

	if (params->mem_type != TTM_PL_SYSTEM)
		ttm_mock_manager_init(priv->ttm_dev, params->mem_type, MANAGER_SIZE);

	bo = kunit_kzalloc(test, sizeof(*bo), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, bo);

	drm_gem_private_object_init(priv->drm, &bo->base, size);

	err = ttm_bo_init_reserved(priv->ttm_dev, bo, params->bo_type,
				   placement, PAGE_SIZE, &ctx_init, NULL,
				   NULL, &dummy_ttm_bo_destroy);
	KUNIT_EXPECT_EQ(test, err, 0);

	err = ttm_bo_validate(bo, placement, &ctx_val);
	dma_resv_unlock(bo->base.resv);

	KUNIT_EXPECT_EQ(test, err, 0);
	KUNIT_EXPECT_EQ(test, ctx_val.bytes_moved, 0);

	ttm_bo_put(bo);

	if (params->mem_type != TTM_PL_SYSTEM)
		ttm_mock_manager_fini(priv->ttm_dev, params->mem_type);
}

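/*
 * The desired placement is backed by a manager that cannot allocate, so
 * validation is expected to fall back to the second, working manager.
 */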
static void ttm_bo_validate_busy_placement(struct kunit *test)
{
	u32 fst_mem = TTM_PL_VRAM, snd_mem = TTM_PL_VRAM + 1;
	struct ttm_operation_ctx ctx_init = { }, ctx_val = { };
	struct ttm_placement *placement_init, *placement_val;
	enum ttm_bo_type bo_type = ttm_bo_type_device;
	struct ttm_test_devices *priv = test->priv;
	u32 size = ALIGN(BO_SIZE, PAGE_SIZE);
	struct ttm_place *init_place, places[2];
	struct ttm_resource_manager *man;
	struct ttm_buffer_object *bo;
	int err;

	ttm_bad_manager_init(priv->ttm_dev, fst_mem, MANAGER_SIZE);
	ttm_mock_manager_init(priv->ttm_dev, snd_mem, MANAGER_SIZE);

	init_place = ttm_place_kunit_init(test, TTM_PL_SYSTEM, 0);
	placement_init = ttm_placement_kunit_init(test, init_place, 1);

	bo = kunit_kzalloc(test, sizeof(*bo), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, bo);

	drm_gem_private_object_init(priv->drm, &bo->base, size);

	err = ttm_bo_init_reserved(priv->ttm_dev, bo, bo_type, placement_init,
				   PAGE_SIZE, &ctx_init, NULL, NULL,
				   &dummy_ttm_bo_destroy);
	KUNIT_EXPECT_EQ(test, err, 0);

	places[0] = (struct ttm_place){ .mem_type = fst_mem, .flags = TTM_PL_FLAG_DESIRED };
	places[1] = (struct ttm_place){ .mem_type = snd_mem, .flags = TTM_PL_FLAG_FALLBACK };
	placement_val = ttm_placement_kunit_init(test, places, 2);

	err = ttm_bo_validate(bo, placement_val, &ctx_val);
	dma_resv_unlock(bo->base.resv);

	man = ttm_manager_type(priv->ttm_dev, snd_mem);

	KUNIT_EXPECT_EQ(test, err, 0);
	KUNIT_EXPECT_EQ(test, ctx_val.bytes_moved, bo->base.size);
	KUNIT_EXPECT_EQ(test, bo->resource->mem_type, snd_mem);
	KUNIT_ASSERT_TRUE(test, list_is_singular(&man->lru[bo->priority]));

	ttm_bo_put(bo);
	ttm_bad_manager_fini(priv->ttm_dev, fst_mem);
	ttm_mock_manager_fini(priv->ttm_dev, snd_mem);
}

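/*
 * Moving from VRAM to the system domain requires a temporary hop through
 * the TT manager, so the BO is expected to be moved twice.
 */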
static void ttm_bo_validate_multihop(struct kunit *test)
{
	const struct ttm_bo_validate_test_case *params = test->param_value;
	struct ttm_operation_ctx ctx_init = { }, ctx_val = { };
	struct ttm_placement *placement_init, *placement_val;
	u32 fst_mem = TTM_PL_VRAM, tmp_mem = TTM_PL_TT, final_mem = TTM_PL_SYSTEM;
	struct ttm_test_devices *priv = test->priv;
	struct ttm_place *fst_place, *final_place;
	u32 size = ALIGN(BO_SIZE, PAGE_SIZE);
	struct ttm_buffer_object *bo;
	int err;

	ttm_mock_manager_init(priv->ttm_dev, fst_mem, MANAGER_SIZE);
	ttm_mock_manager_init(priv->ttm_dev, tmp_mem, MANAGER_SIZE);

	fst_place = ttm_place_kunit_init(test, fst_mem, 0);
	placement_init = ttm_placement_kunit_init(test, fst_place, 1);

	bo = kunit_kzalloc(test, sizeof(*bo), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, bo);

	drm_gem_private_object_init(priv->drm, &bo->base, size);

	err = ttm_bo_init_reserved(priv->ttm_dev, bo, params->bo_type,
				   placement_init, PAGE_SIZE, &ctx_init, NULL,
				   NULL, &dummy_ttm_bo_destroy);
	KUNIT_EXPECT_EQ(test, err, 0);

	final_place = ttm_place_kunit_init(test, final_mem, 0);
	placement_val = ttm_placement_kunit_init(test, final_place, 1);

	err = ttm_bo_validate(bo, placement_val, &ctx_val);
	dma_resv_unlock(bo->base.resv);

	KUNIT_EXPECT_EQ(test, err, 0);
	KUNIT_EXPECT_EQ(test, ctx_val.bytes_moved, size * 2);
	KUNIT_EXPECT_EQ(test, bo->resource->mem_type, final_mem);

	ttm_bo_put(bo);

	ttm_mock_manager_fini(priv->ttm_dev, fst_mem);
	ttm_mock_manager_fini(priv->ttm_dev, tmp_mem);
}

static const struct ttm_bo_validate_test_case ttm_bo_no_placement_cases[] = {
	{
		.description = "Buffer object in system domain, no page vector",
	},
	{
		.description = "Buffer object in system domain with an existing page vector",
		.with_ttm = true,
	},
};

KUNIT_ARRAY_PARAM(ttm_bo_no_placement, ttm_bo_no_placement_cases,
		  ttm_bo_validate_case_desc);

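/*
 * Validate with an empty placement while the BO is idle: the resource is
 * released, the system manager usage drops back to zero and an existing
 * page vector is kept, unpopulated and flagged for zero allocation.
 */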
static void ttm_bo_validate_no_placement_signaled(struct kunit *test)
{
	const struct ttm_bo_validate_test_case *params = test->param_value;
	enum ttm_bo_type bo_type = ttm_bo_type_device;
	struct ttm_test_devices *priv = test->priv;
	u32 size = ALIGN(BO_SIZE, PAGE_SIZE);
	struct ttm_operation_ctx ctx = { };
	u32 mem_type = TTM_PL_SYSTEM;
	struct ttm_resource_manager *man;
	struct ttm_placement *placement;
	struct ttm_buffer_object *bo;
	struct ttm_place *place;
	struct ttm_tt *old_tt;
	u32 flags;
	int err;

	place = ttm_place_kunit_init(test, mem_type, 0);
	man = ttm_manager_type(priv->ttm_dev, mem_type);

	bo = ttm_bo_kunit_init(test, test->priv, size, NULL);
	bo->type = bo_type;

	if (params->with_ttm) {
		old_tt = priv->ttm_dev->funcs->ttm_tt_create(bo, 0);
		ttm_pool_alloc(&priv->ttm_dev->pool, old_tt, &ctx);
		bo->ttm = old_tt;
	}

	err = ttm_resource_alloc(bo, place, &bo->resource);
	KUNIT_EXPECT_EQ(test, err, 0);
	KUNIT_ASSERT_EQ(test, man->usage, size);

	placement = kunit_kzalloc(test, sizeof(*placement), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, placement);

	ttm_bo_reserve(bo, false, false, NULL);
	err = ttm_bo_validate(bo, placement, &ctx);
	ttm_bo_unreserve(bo);

	KUNIT_EXPECT_EQ(test, err, 0);
	KUNIT_ASSERT_EQ(test, man->usage, 0);
	KUNIT_ASSERT_NOT_NULL(test, bo->ttm);
	KUNIT_EXPECT_EQ(test, ctx.bytes_moved, 0);

	if (params->with_ttm) {
		flags = bo->ttm->page_flags;

		KUNIT_ASSERT_PTR_EQ(test, bo->ttm, old_tt);
		KUNIT_ASSERT_FALSE(test, flags & TTM_TT_FLAG_PRIV_POPULATED);
		KUNIT_ASSERT_TRUE(test, flags & TTM_TT_FLAG_ZERO_ALLOC);
	}

	ttm_bo_put(bo);
}

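/* Helper thread: signal all bookkeep fences attached to the BO's resv. */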
static int threaded_dma_resv_signal(void *arg)
{
	struct ttm_buffer_object *bo = arg;
	struct dma_resv *resv = bo->base.resv;
	struct dma_resv_iter cursor;
	struct dma_fence *fence;

	dma_resv_iter_begin(&cursor, resv, DMA_RESV_USAGE_BOOKKEEP);
	dma_resv_for_each_fence_unlocked(&cursor, fence) {
		dma_fence_signal(fence);
	}
	dma_resv_iter_end(&cursor);

	return 0;
}

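/*
 * Validate with an empty placement while the reservation object is still
 * busy: the resource is dropped and the resv is individualized so the
 * final release can be deferred until the fences signal.
 */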
static void ttm_bo_validate_no_placement_not_signaled(struct kunit *test)
{
	const struct ttm_bo_validate_test_case *params = test->param_value;
	enum dma_resv_usage usage = DMA_RESV_USAGE_BOOKKEEP;
	u32 size = ALIGN(BO_SIZE, PAGE_SIZE);
	struct ttm_operation_ctx ctx = { };
	u32 mem_type = TTM_PL_SYSTEM;
	struct ttm_placement *placement;
	struct ttm_buffer_object *bo;
	struct task_struct *task;
	struct ttm_place *place;
	int err;

	place = ttm_place_kunit_init(test, mem_type, 0);

	bo = ttm_bo_kunit_init(test, test->priv, size, NULL);
	bo->type = params->bo_type;

	err = ttm_resource_alloc(bo, place, &bo->resource);
	KUNIT_EXPECT_EQ(test, err, 0);

	placement = kunit_kzalloc(test, sizeof(*placement), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, placement);

	/* Create an active fence to simulate a non-idle resv object */
	spin_lock_init(&fence_lock);
	dma_resv_kunit_active_fence_init(test, bo->base.resv, usage);

	task = kthread_create(threaded_dma_resv_signal, bo, "dma-resv-signal");
	if (IS_ERR(task))
		KUNIT_FAIL(test, "Couldn't create dma resv signal task\n");

	wake_up_process(task);
	ttm_bo_reserve(bo, false, false, NULL);
	err = ttm_bo_validate(bo, placement, &ctx);
	ttm_bo_unreserve(bo);

	KUNIT_EXPECT_EQ(test, err, 0);
	KUNIT_ASSERT_NOT_NULL(test, bo->ttm);
	KUNIT_ASSERT_NULL(test, bo->resource);
	KUNIT_ASSERT_NULL(test, bo->bulk_move);
	KUNIT_EXPECT_EQ(test, ctx.bytes_moved, 0);

	if (bo->type != ttm_bo_type_sg)
		KUNIT_ASSERT_PTR_EQ(test, bo->base.resv, &bo->base._resv);

	/* Make sure we have an idle object at this point */
	dma_resv_wait_timeout(bo->base.resv, usage, false, MAX_SCHEDULE_TIMEOUT);

	ttm_bo_put(bo);
}

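/*
 * An already signaled move fence on the target manager must not get in
 * the way of a successful validation.
 */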
static void ttm_bo_validate_move_fence_signaled(struct kunit *test)
{
	enum ttm_bo_type bo_type = ttm_bo_type_device;
	struct ttm_test_devices *priv = test->priv;
	u32 size = ALIGN(BO_SIZE, PAGE_SIZE);
	struct ttm_operation_ctx ctx = { };
	u32 mem_type = TTM_PL_SYSTEM;
	struct ttm_resource_manager *man;
	struct ttm_placement *placement;
	struct ttm_buffer_object *bo;
	struct ttm_place *place;
	int err;

	man = ttm_manager_type(priv->ttm_dev, mem_type);
	man->move = dma_fence_get_stub();

	bo = ttm_bo_kunit_init(test, test->priv, size, NULL);
	bo->type = bo_type;

	place = ttm_place_kunit_init(test, mem_type, 0);
	placement = ttm_placement_kunit_init(test, place, 1);

	ttm_bo_reserve(bo, false, false, NULL);
	err = ttm_bo_validate(bo, placement, &ctx);
	ttm_bo_unreserve(bo);

	KUNIT_EXPECT_EQ(test, err, 0);
	KUNIT_EXPECT_EQ(test, bo->resource->mem_type, mem_type);
	KUNIT_EXPECT_EQ(test, ctx.bytes_moved, size);

	ttm_bo_put(bo);
	dma_fence_put(man->move);
}

static const struct ttm_bo_validate_test_case ttm_bo_validate_wait_cases[] = {
	{
		.description = "Waits for GPU",
		.no_gpu_wait = false,
	},
	{
		.description = "Tries to lock straight away",
		.no_gpu_wait = true,
	},
};

KUNIT_ARRAY_PARAM(ttm_bo_validate_wait, ttm_bo_validate_wait_cases,
		  ttm_bo_validate_case_desc);

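/* Helper thread: signal the given fence after a short delay. */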
static int threaded_fence_signal(void *arg)
{
	struct dma_fence *fence = arg;

	msleep(20);

	return dma_fence_signal(fence);
}

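/*
 * Validate while the desired manager's move fence is still unsignaled:
 * with no_wait_gpu set the allocation falls back to the second manager,
 * otherwise validation waits for the fence and uses the desired one.
 */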
static void ttm_bo_validate_move_fence_not_signaled(struct kunit *test)
{
	const struct ttm_bo_validate_test_case *params = test->param_value;
	struct ttm_operation_ctx ctx_init = { },
				 ctx_val  = { .no_wait_gpu = params->no_gpu_wait };
	u32 fst_mem = TTM_PL_VRAM, snd_mem = TTM_PL_VRAM + 1;
	struct ttm_placement *placement_init, *placement_val;
	enum ttm_bo_type bo_type = ttm_bo_type_device;
	struct ttm_test_devices *priv = test->priv;
	u32 size = ALIGN(BO_SIZE, PAGE_SIZE);
	struct ttm_place *init_place, places[2];
	struct ttm_resource_manager *man;
	struct ttm_buffer_object *bo;
	struct task_struct *task;
	int err;

	init_place = ttm_place_kunit_init(test, TTM_PL_SYSTEM, 0);
	placement_init = ttm_placement_kunit_init(test, init_place, 1);

	bo = kunit_kzalloc(test, sizeof(*bo), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, bo);

	drm_gem_private_object_init(priv->drm, &bo->base, size);

	err = ttm_bo_init_reserved(priv->ttm_dev, bo, bo_type, placement_init,
				   PAGE_SIZE, &ctx_init, NULL, NULL,
				   &dummy_ttm_bo_destroy);
	KUNIT_EXPECT_EQ(test, err, 0);

	ttm_mock_manager_init(priv->ttm_dev, fst_mem, MANAGER_SIZE);
	ttm_mock_manager_init(priv->ttm_dev, snd_mem, MANAGER_SIZE);

	places[0] = (struct ttm_place){ .mem_type = fst_mem, .flags = TTM_PL_FLAG_DESIRED };
	places[1] = (struct ttm_place){ .mem_type = snd_mem, .flags = TTM_PL_FLAG_FALLBACK };
	placement_val = ttm_placement_kunit_init(test, places, 2);

	spin_lock_init(&fence_lock);
	man = ttm_manager_type(priv->ttm_dev, fst_mem);
	man->move = alloc_mock_fence(test);

	task = kthread_create(threaded_fence_signal, man->move, "move-fence-signal");
	if (IS_ERR(task))
		KUNIT_FAIL(test, "Couldn't create move fence signal task\n");

	wake_up_process(task);
	err = ttm_bo_validate(bo, placement_val, &ctx_val);
	dma_resv_unlock(bo->base.resv);

	dma_fence_wait_timeout(man->move, false, MAX_SCHEDULE_TIMEOUT);

	KUNIT_EXPECT_EQ(test, err, 0);
	KUNIT_EXPECT_EQ(test, ctx_val.bytes_moved, size);

	if (params->no_gpu_wait)
		KUNIT_EXPECT_EQ(test, bo->resource->mem_type, snd_mem);
	else
		KUNIT_EXPECT_EQ(test, bo->resource->mem_type, fst_mem);

	ttm_bo_put(bo);
	ttm_mock_manager_fini(priv->ttm_dev, fst_mem);
	ttm_mock_manager_fini(priv->ttm_dev, snd_mem);
}

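/*
 * Validate a BO of roughly half the system RAM into the TT manager; the
 * resulting memory pressure should push the small BO back to the system
 * domain with its pages swapped out.
 */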
static void ttm_bo_validate_swapout(struct kunit *test)
{
	unsigned long size_big, size = ALIGN(BO_SIZE, PAGE_SIZE);
	enum ttm_bo_type bo_type = ttm_bo_type_device;
	struct ttm_buffer_object *bo_small, *bo_big;
	struct ttm_test_devices *priv = test->priv;
	struct ttm_operation_ctx ctx = { };
	struct ttm_placement *placement;
	u32 mem_type = TTM_PL_TT;
	struct ttm_place *place;
	struct sysinfo si;
	int err;

	si_meminfo(&si);
	size_big = ALIGN(((u64)si.totalram * si.mem_unit / 2), PAGE_SIZE);

	ttm_mock_manager_init(priv->ttm_dev, mem_type, size_big + size);

	place = ttm_place_kunit_init(test, mem_type, 0);
	placement = ttm_placement_kunit_init(test, place, 1);

	bo_small = kunit_kzalloc(test, sizeof(*bo_small), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, bo_small);

	drm_gem_private_object_init(priv->drm, &bo_small->base, size);

	err = ttm_bo_init_reserved(priv->ttm_dev, bo_small, bo_type, placement,
				   PAGE_SIZE, &ctx, NULL, NULL,
				   &dummy_ttm_bo_destroy);
	KUNIT_EXPECT_EQ(test, err, 0);
	dma_resv_unlock(bo_small->base.resv);

	bo_big = ttm_bo_kunit_init(test, priv, size_big, NULL);

	dma_resv_lock(bo_big->base.resv, NULL);
	err = ttm_bo_validate(bo_big, placement, &ctx);
	dma_resv_unlock(bo_big->base.resv);

	KUNIT_EXPECT_EQ(test, err, 0);
	KUNIT_EXPECT_NOT_NULL(test, bo_big->resource);
	KUNIT_EXPECT_EQ(test, bo_big->resource->mem_type, mem_type);
	KUNIT_EXPECT_EQ(test, bo_small->resource->mem_type, TTM_PL_SYSTEM);
	KUNIT_EXPECT_TRUE(test, bo_small->ttm->page_flags & TTM_TT_FLAG_SWAPPED);

	ttm_bo_put(bo_big);
	ttm_bo_put(bo_small);

	ttm_mock_manager_fini(priv->ttm_dev, mem_type);
}

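/*
 * Three BOs fill the mock VRAM manager completely; validating one more
 * BO should evict only the first (smallest) of them, which ends up in
 * the system domain after a hop through the TT manager.
 */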
static void ttm_bo_validate_happy_evict(struct kunit *test)
{
	u32 mem_type = TTM_PL_VRAM, mem_multihop = TTM_PL_TT,
	    mem_type_evict = TTM_PL_SYSTEM;
	struct ttm_operation_ctx ctx_init = { }, ctx_val  = { };
	enum ttm_bo_type bo_type = ttm_bo_type_device;
	u32 small = SZ_8K, medium = SZ_512K,
	    big = MANAGER_SIZE - (small + medium);
	u32 bo_sizes[] = { small, medium, big };
	struct ttm_test_devices *priv = test->priv;
	struct ttm_buffer_object *bos, *bo_val;
	struct ttm_placement *placement;
	struct ttm_place *place;
	u32 bo_no = 3;
	int i, err;

	ttm_mock_manager_init(priv->ttm_dev, mem_type, MANAGER_SIZE);
	ttm_mock_manager_init(priv->ttm_dev, mem_multihop, MANAGER_SIZE);

	place = ttm_place_kunit_init(test, mem_type, 0);
	placement = ttm_placement_kunit_init(test, place, 1);

	bos = kunit_kmalloc_array(test, bo_no, sizeof(*bos), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, bos);

	memset(bos, 0, sizeof(*bos) * bo_no);
	for (i = 0; i < bo_no; i++) {
		drm_gem_private_object_init(priv->drm, &bos[i].base, bo_sizes[i]);
		err = ttm_bo_init_reserved(priv->ttm_dev, &bos[i], bo_type, placement,
					   PAGE_SIZE, &ctx_init, NULL, NULL,
					   &dummy_ttm_bo_destroy);
		dma_resv_unlock(bos[i].base.resv);
	}

	bo_val = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL);
	bo_val->type = bo_type;

	ttm_bo_reserve(bo_val, false, false, NULL);
	err = ttm_bo_validate(bo_val, placement, &ctx_val);
	ttm_bo_unreserve(bo_val);

	KUNIT_EXPECT_EQ(test, err, 0);
	KUNIT_EXPECT_EQ(test, bos[0].resource->mem_type, mem_type_evict);
	KUNIT_EXPECT_TRUE(test, bos[0].ttm->page_flags & TTM_TT_FLAG_ZERO_ALLOC);
	KUNIT_EXPECT_TRUE(test, bos[0].ttm->page_flags & TTM_TT_FLAG_PRIV_POPULATED);
	KUNIT_EXPECT_EQ(test, ctx_val.bytes_moved, small * 2 + BO_SIZE);
	KUNIT_EXPECT_EQ(test, bos[1].resource->mem_type, mem_type);

	for (i = 0; i < bo_no; i++)
		ttm_bo_put(&bos[i]);
	ttm_bo_put(bo_val);

	ttm_mock_manager_fini(priv->ttm_dev, mem_type);
	ttm_mock_manager_fini(priv->ttm_dev, mem_multihop);
}

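/*
 * The only BO occupying the manager is pinned and can't be evicted, so
 * validating another BO into it must fail with -ENOMEM.
 */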
static void ttm_bo_validate_all_pinned_evict(struct kunit *test)
{
	struct ttm_operation_ctx ctx_init = { }, ctx_val  = { };
	enum ttm_bo_type bo_type = ttm_bo_type_device;
	struct ttm_buffer_object *bo_big, *bo_small;
	struct ttm_test_devices *priv = test->priv;
	struct ttm_placement *placement;
	u32 mem_type = TTM_PL_VRAM, mem_multihop = TTM_PL_TT;
	struct ttm_place *place;
	int err;

	ttm_mock_manager_init(priv->ttm_dev, mem_type, MANAGER_SIZE);
	ttm_mock_manager_init(priv->ttm_dev, mem_multihop, MANAGER_SIZE);

	place = ttm_place_kunit_init(test, mem_type, 0);
	placement = ttm_placement_kunit_init(test, place, 1);

	bo_big = kunit_kzalloc(test, sizeof(*bo_big), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, bo_big);

	drm_gem_private_object_init(priv->drm, &bo_big->base, MANAGER_SIZE);
	err = ttm_bo_init_reserved(priv->ttm_dev, bo_big, bo_type, placement,
				   PAGE_SIZE, &ctx_init, NULL, NULL,
				   &dummy_ttm_bo_destroy);
	KUNIT_EXPECT_EQ(test, err, 0);

	ttm_bo_pin(bo_big);
	dma_resv_unlock(bo_big->base.resv);

	bo_small = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL);
	bo_small->type = bo_type;

	ttm_bo_reserve(bo_small, false, false, NULL);
	err = ttm_bo_validate(bo_small, placement, &ctx_val);
	ttm_bo_unreserve(bo_small);

	KUNIT_EXPECT_EQ(test, err, -ENOMEM);

	ttm_bo_put(bo_small);

	ttm_bo_reserve(bo_big, false, false, NULL);
	ttm_bo_unpin(bo_big);
	dma_resv_unlock(bo_big->base.resv);
	ttm_bo_put(bo_big);

	ttm_mock_manager_fini(priv->ttm_dev, mem_type);
	ttm_mock_manager_fini(priv->ttm_dev, mem_multihop);
}

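/*
 * Two BOs fill the VRAM manager, but only the unpinned one may be
 * evicted to make room; the pinned BO has to stay where it is.
 */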
static void ttm_bo_validate_allowed_only_evict(struct kunit *test)
{
	u32 mem_type = TTM_PL_VRAM, mem_multihop = TTM_PL_TT,
	    mem_type_evict = TTM_PL_SYSTEM;
	struct ttm_buffer_object *bo, *bo_evictable, *bo_pinned;
	struct ttm_operation_ctx ctx_init = { }, ctx_val  = { };
	enum ttm_bo_type bo_type = ttm_bo_type_device;
	struct ttm_test_devices *priv = test->priv;
	struct ttm_placement *placement;
	struct ttm_place *place;
	u32 size = SZ_512K;
	int err;

	ttm_mock_manager_init(priv->ttm_dev, mem_type, MANAGER_SIZE);
	ttm_mock_manager_init(priv->ttm_dev, mem_multihop, MANAGER_SIZE);

	place = ttm_place_kunit_init(test, mem_type, 0);
	placement = ttm_placement_kunit_init(test, place, 1);

	bo_pinned = kunit_kzalloc(test, sizeof(*bo_pinned), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, bo_pinned);

	drm_gem_private_object_init(priv->drm, &bo_pinned->base, size);
	err = ttm_bo_init_reserved(priv->ttm_dev, bo_pinned, bo_type, placement,
				   PAGE_SIZE, &ctx_init, NULL, NULL,
				   &dummy_ttm_bo_destroy);
	KUNIT_EXPECT_EQ(test, err, 0);
	ttm_bo_pin(bo_pinned);
	dma_resv_unlock(bo_pinned->base.resv);

	bo_evictable = kunit_kzalloc(test, sizeof(*bo_evictable), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, bo_evictable);

	drm_gem_private_object_init(priv->drm, &bo_evictable->base, size);
	err = ttm_bo_init_reserved(priv->ttm_dev, bo_evictable, bo_type, placement,
				   PAGE_SIZE, &ctx_init, NULL, NULL,
				   &dummy_ttm_bo_destroy);
	KUNIT_EXPECT_EQ(test, err, 0);
	dma_resv_unlock(bo_evictable->base.resv);

	bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL);
	bo->type = bo_type;

	ttm_bo_reserve(bo, false, false, NULL);
	err = ttm_bo_validate(bo, placement, &ctx_val);
	ttm_bo_unreserve(bo);

	KUNIT_EXPECT_EQ(test, err, 0);
	KUNIT_EXPECT_EQ(test, bo->resource->mem_type, mem_type);
	KUNIT_EXPECT_EQ(test, bo_pinned->resource->mem_type, mem_type);
	KUNIT_EXPECT_EQ(test, bo_evictable->resource->mem_type, mem_type_evict);
	KUNIT_EXPECT_EQ(test, ctx_val.bytes_moved, size * 2 + BO_SIZE);

	ttm_bo_put(bo);
	ttm_bo_put(bo_evictable);

	ttm_bo_reserve(bo_pinned, false, false, NULL);
	ttm_bo_unpin(bo_pinned);
	dma_resv_unlock(bo_pinned->base.resv);
	ttm_bo_put(bo_pinned);

	ttm_mock_manager_fini(priv->ttm_dev, mem_type);
	ttm_mock_manager_fini(priv->ttm_dev, mem_multihop);
}

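/*
 * Evicting a BO that is already marked deleted should simply release its
 * resource rather than move it, making room for the new BO.
 */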
static void ttm_bo_validate_deleted_evict(struct kunit *test)
{
	struct ttm_operation_ctx ctx_init = { }, ctx_val  = { };
	u32 small = SZ_8K, big = MANAGER_SIZE - BO_SIZE;
	enum ttm_bo_type bo_type = ttm_bo_type_device;
	struct ttm_buffer_object *bo_big, *bo_small;
	struct ttm_test_devices *priv = test->priv;
	struct ttm_resource_manager *man;
	u32 mem_type = TTM_PL_VRAM;
	struct ttm_placement *placement;
	struct ttm_place *place;
	int err;

	ttm_mock_manager_init(priv->ttm_dev, mem_type, MANAGER_SIZE);
	man = ttm_manager_type(priv->ttm_dev, mem_type);

	place = ttm_place_kunit_init(test, mem_type, 0);
	placement = ttm_placement_kunit_init(test, place, 1);

	bo_big = kunit_kzalloc(test, sizeof(*bo_big), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, bo_big);

	drm_gem_private_object_init(priv->drm, &bo_big->base, big);
	err = ttm_bo_init_reserved(priv->ttm_dev, bo_big, bo_type, placement,
				   PAGE_SIZE, &ctx_init, NULL, NULL,
				   &dummy_ttm_bo_destroy);
	KUNIT_EXPECT_EQ(test, err, 0);
	KUNIT_EXPECT_EQ(test, ttm_resource_manager_usage(man), big);

	dma_resv_unlock(bo_big->base.resv);
	bo_big->deleted = true;

	bo_small = ttm_bo_kunit_init(test, test->priv, small, NULL);
	bo_small->type = bo_type;

	ttm_bo_reserve(bo_small, false, false, NULL);
	err = ttm_bo_validate(bo_small, placement, &ctx_val);
	ttm_bo_unreserve(bo_small);

	KUNIT_EXPECT_EQ(test, err, 0);
	KUNIT_EXPECT_EQ(test, bo_small->resource->mem_type, mem_type);
	KUNIT_EXPECT_EQ(test, ttm_resource_manager_usage(man), small);
	KUNIT_EXPECT_NULL(test, bo_big->ttm);
	KUNIT_EXPECT_NULL(test, bo_big->resource);

	ttm_bo_put(bo_small);
	ttm_bo_put(bo_big);
	ttm_mock_manager_fini(priv->ttm_dev, mem_type);
}

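/*
 * The only possible eviction domain is permanently busy, so making room
 * in VRAM fails and the new BO ends up with no resource at all.
 */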
static void ttm_bo_validate_busy_domain_evict(struct kunit *test)
{
	u32 mem_type = TTM_PL_VRAM, mem_type_evict = TTM_PL_MOCK1;
	struct ttm_operation_ctx ctx_init = { }, ctx_val  = { };
	enum ttm_bo_type bo_type = ttm_bo_type_device;
	struct ttm_test_devices *priv = test->priv;
	struct ttm_buffer_object *bo_init, *bo_val;
	struct ttm_placement *placement;
	struct ttm_place *place;
	int err;

	/*
	 * Drop the default device and set up a new one whose eviction
	 * domain is busy and therefore unsuitable.
	 */
	ttm_device_fini(priv->ttm_dev);

	err = ttm_device_kunit_init_bad_evict(test->priv, priv->ttm_dev, false, false);
	KUNIT_ASSERT_EQ(test, err, 0);

	ttm_mock_manager_init(priv->ttm_dev, mem_type, MANAGER_SIZE);
	ttm_busy_manager_init(priv->ttm_dev, mem_type_evict, MANAGER_SIZE);

	place = ttm_place_kunit_init(test, mem_type, 0);
	placement = ttm_placement_kunit_init(test, place, 1);

	bo_init = kunit_kzalloc(test, sizeof(*bo_init), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, bo_init);

	drm_gem_private_object_init(priv->drm, &bo_init->base, MANAGER_SIZE);
	err = ttm_bo_init_reserved(priv->ttm_dev, bo_init, bo_type, placement,
				   PAGE_SIZE, &ctx_init, NULL, NULL,
				   &dummy_ttm_bo_destroy);
	KUNIT_EXPECT_EQ(test, err, 0);
	dma_resv_unlock(bo_init->base.resv);

	bo_val = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL);
	bo_val->type = bo_type;

	ttm_bo_reserve(bo_val, false, false, NULL);
	err = ttm_bo_validate(bo_val, placement, &ctx_val);
	ttm_bo_unreserve(bo_val);

	KUNIT_EXPECT_EQ(test, err, -ENOMEM);
	KUNIT_EXPECT_EQ(test, bo_init->resource->mem_type, mem_type);
	KUNIT_EXPECT_NULL(test, bo_val->resource);

	ttm_bo_put(bo_init);
	ttm_bo_put(bo_val);

	ttm_mock_manager_fini(priv->ttm_dev, mem_type);
	ttm_bad_manager_fini(priv->ttm_dev, mem_type_evict);
}

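/*
 * Eviction from the mock domain ends up gutting the victim BO: it loses
 * its resource and its page vector is flagged for zero allocation.
 */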
static void ttm_bo_validate_evict_gutting(struct kunit *test)
{
	struct ttm_operation_ctx ctx_init = { }, ctx_val  = { };
	enum ttm_bo_type bo_type = ttm_bo_type_device;
	struct ttm_test_devices *priv = test->priv;
	struct ttm_buffer_object *bo, *bo_evict;
	u32 mem_type = TTM_PL_MOCK1;
	struct ttm_placement *placement;
	struct ttm_place *place;
	int err;

	ttm_mock_manager_init(priv->ttm_dev, mem_type, MANAGER_SIZE);

	place = ttm_place_kunit_init(test, mem_type, 0);
	placement = ttm_placement_kunit_init(test, place, 1);

	bo_evict = kunit_kzalloc(test, sizeof(*bo_evict), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, bo_evict);

	drm_gem_private_object_init(priv->drm, &bo_evict->base, MANAGER_SIZE);
	err = ttm_bo_init_reserved(priv->ttm_dev, bo_evict, bo_type, placement,
				   PAGE_SIZE, &ctx_init, NULL, NULL,
				   &dummy_ttm_bo_destroy);
	KUNIT_EXPECT_EQ(test, err, 0);
	dma_resv_unlock(bo_evict->base.resv);

	bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL);
	bo->type = bo_type;

	ttm_bo_reserve(bo, false, false, NULL);
	err = ttm_bo_validate(bo, placement, &ctx_val);
	ttm_bo_unreserve(bo);

	KUNIT_EXPECT_EQ(test, err, 0);
	KUNIT_EXPECT_EQ(test, bo->resource->mem_type, mem_type);
	KUNIT_ASSERT_NULL(test, bo_evict->resource);
	KUNIT_ASSERT_TRUE(test, bo_evict->ttm->page_flags & TTM_TT_FLAG_ZERO_ALLOC);

	ttm_bo_put(bo_evict);
	ttm_bo_put(bo);

	ttm_mock_manager_fini(priv->ttm_dev, mem_type);
}

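/*
 * Both managers start out full, so validating into TT triggers an
 * eviction that in turn has to evict from the mock domain: eviction
 * recurses one level deep.
 */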
static void ttm_bo_validate_recursive_evict(struct kunit *test)
{
	u32 mem_type = TTM_PL_TT, mem_type_evict = TTM_PL_MOCK2;
	struct ttm_operation_ctx ctx_init = { }, ctx_val  = { };
	struct ttm_placement *placement_tt, *placement_mock;
	struct ttm_buffer_object *bo_tt, *bo_mock, *bo_val;
	enum ttm_bo_type bo_type = ttm_bo_type_device;
	struct ttm_test_devices *priv = test->priv;
	struct ttm_place *place_tt, *place_mock;
	int err;

	ttm_mock_manager_init(priv->ttm_dev, mem_type, MANAGER_SIZE);
	ttm_mock_manager_init(priv->ttm_dev, mem_type_evict, MANAGER_SIZE);

	place_tt = ttm_place_kunit_init(test, mem_type, 0);
	place_mock = ttm_place_kunit_init(test, mem_type_evict, 0);

	placement_tt = ttm_placement_kunit_init(test, place_tt, 1);
	placement_mock = ttm_placement_kunit_init(test, place_mock, 1);

	bo_tt = kunit_kzalloc(test, sizeof(*bo_tt), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, bo_tt);

	bo_mock = kunit_kzalloc(test, sizeof(*bo_mock), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, bo_mock);

	drm_gem_private_object_init(priv->drm, &bo_tt->base, MANAGER_SIZE);
	err = ttm_bo_init_reserved(priv->ttm_dev, bo_tt, bo_type, placement_tt,
				   PAGE_SIZE, &ctx_init, NULL, NULL,
				   &dummy_ttm_bo_destroy);
	KUNIT_EXPECT_EQ(test, err, 0);
	dma_resv_unlock(bo_tt->base.resv);

	drm_gem_private_object_init(priv->drm, &bo_mock->base, MANAGER_SIZE);
	err = ttm_bo_init_reserved(priv->ttm_dev, bo_mock, bo_type, placement_mock,
				   PAGE_SIZE, &ctx_init, NULL, NULL,
				   &dummy_ttm_bo_destroy);
	KUNIT_EXPECT_EQ(test, err, 0);
	dma_resv_unlock(bo_mock->base.resv);

	bo_val = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL);
	bo_val->type = bo_type;

	ttm_bo_reserve(bo_val, false, false, NULL);
	err = ttm_bo_validate(bo_val, placement_tt, &ctx_val);
	ttm_bo_unreserve(bo_val);

	KUNIT_EXPECT_EQ(test, err, 0);

	ttm_mock_manager_fini(priv->ttm_dev, mem_type);
	ttm_mock_manager_fini(priv->ttm_dev, mem_type_evict);

	ttm_bo_put(bo_val);
	ttm_bo_put(bo_tt);
	ttm_bo_put(bo_mock);
}

static struct kunit_case ttm_bo_validate_test_cases[] = {
	KUNIT_CASE_PARAM(ttm_bo_init_reserved_sys_man, ttm_bo_types_gen_params),
	KUNIT_CASE_PARAM(ttm_bo_init_reserved_mock_man, ttm_bo_types_gen_params),
	KUNIT_CASE(ttm_bo_init_reserved_resv),
	KUNIT_CASE_PARAM(ttm_bo_validate_basic, ttm_bo_types_gen_params),
	KUNIT_CASE(ttm_bo_validate_invalid_placement),
	KUNIT_CASE_PARAM(ttm_bo_validate_same_placement,
			 ttm_bo_validate_mem_gen_params),
	KUNIT_CASE(ttm_bo_validate_failed_alloc),
	KUNIT_CASE(ttm_bo_validate_pinned),
	KUNIT_CASE(ttm_bo_validate_busy_placement),
	KUNIT_CASE_PARAM(ttm_bo_validate_multihop, ttm_bo_types_gen_params),
	KUNIT_CASE_PARAM(ttm_bo_validate_no_placement_signaled,
			 ttm_bo_no_placement_gen_params),
	KUNIT_CASE_PARAM(ttm_bo_validate_no_placement_not_signaled,
			 ttm_bo_types_gen_params),
	KUNIT_CASE(ttm_bo_validate_move_fence_signaled),
	KUNIT_CASE_PARAM(ttm_bo_validate_move_fence_not_signaled,
			 ttm_bo_validate_wait_gen_params),
	KUNIT_CASE(ttm_bo_validate_swapout),
	KUNIT_CASE(ttm_bo_validate_happy_evict),
	KUNIT_CASE(ttm_bo_validate_all_pinned_evict),
	KUNIT_CASE(ttm_bo_validate_allowed_only_evict),
	KUNIT_CASE(ttm_bo_validate_deleted_evict),
	KUNIT_CASE(ttm_bo_validate_busy_domain_evict),
	KUNIT_CASE(ttm_bo_validate_evict_gutting),
	KUNIT_CASE(ttm_bo_validate_recursive_evict),
	{}
};

static struct kunit_suite ttm_bo_validate_test_suite = {
	.name = "ttm_bo_validate",
	.init = ttm_test_devices_all_init,
	.exit = ttm_test_devices_fini,
	.test_cases = ttm_bo_validate_test_cases,
};

kunit_test_suites(&ttm_bo_validate_test_suite);

MODULE_DESCRIPTION("KUnit tests for ttm_bo APIs");
MODULE_LICENSE("GPL and additional rights");