// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "xe_bo_evict.h"

#include "xe_bo.h"
#include "xe_device.h"
#include "xe_ggtt.h"
#include "xe_tile.h"

/**
 * xe_bo_evict_all - evict all BOs from VRAM
 *
 * @xe: xe device
 *
 * Evict non-pinned user BOs first (via GPU), evict pinned external BOs next
 * (via GPU), wait for those evictions to complete, and finally evict pinned
 * kernel BOs via CPU. All of the eviction work is done through TTM calls.
 *
 * Evict here means moving VRAM BOs to temporary (typically system) memory.
 *
 * This function should be called before the device goes into a suspend state
 * where the VRAM loses power.
 */
int xe_bo_evict_all(struct xe_device *xe)
{
	struct ttm_device *bdev = &xe->ttm;
	struct xe_bo *bo;
	struct xe_tile *tile;
	struct list_head still_in_list;
	u32 mem_type;
	u8 id;
	int ret;

	/* User memory */
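	/*
	 * Note (added for clarity): this walks every placement that can hold
	 * user memory, from TT through both possible VRAM regions
	 * (XE_PL_TT .. XE_PL_VRAM1).
	 */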
	for (mem_type = XE_PL_TT; mem_type <= XE_PL_VRAM1; ++mem_type) {
		struct ttm_resource_manager *man =
			ttm_manager_type(bdev, mem_type);

		/*
		 * On iGPU platforms with flat CCS we need to ensure we save
		 * and restore any CCS state, since this state lives inside
		 * graphics stolen memory, which doesn't survive hibernation.
		 *
		 * This can be further improved by only evicting objects that
		 * we know have actually used a compression-enabled PAT index.
		 */
		if (mem_type == XE_PL_TT && (IS_DGFX(xe) || !xe_device_has_flat_ccs(xe)))
			continue;

		if (man) {
			ret = ttm_resource_manager_evict_all(bdev, man);
			if (ret)
				return ret;
		}
	}

	/* Pinned user memory in VRAM */
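	/*
	 * Note (added for clarity): BOs are popped off the pinned list one
	 * at a time and parked on a local still_in_list, so the loop
	 * terminates and the lock can be dropped around the actual eviction;
	 * the list is spliced back afterwards (and on error).
	 */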
	INIT_LIST_HEAD(&still_in_list);
	spin_lock(&xe->pinned.lock);
	for (;;) {
		bo = list_first_entry_or_null(&xe->pinned.external_vram,
					      typeof(*bo), pinned_link);
		if (!bo)
			break;
		xe_bo_get(bo);
		list_move_tail(&bo->pinned_link, &still_in_list);
		spin_unlock(&xe->pinned.lock);

		xe_bo_lock(bo, false);
		ret = xe_bo_evict_pinned(bo);
		xe_bo_unlock(bo);
		xe_bo_put(bo);
		if (ret) {
			spin_lock(&xe->pinned.lock);
			list_splice_tail(&still_in_list,
					 &xe->pinned.external_vram);
			spin_unlock(&xe->pinned.lock);
			return ret;
		}

		spin_lock(&xe->pinned.lock);
	}
	list_splice_tail(&still_in_list, &xe->pinned.external_vram);
	spin_unlock(&xe->pinned.lock);

	/*
	 * Wait for all user BOs to be evicted, as those evictions depend on
	 * the memory moved below.
	 */
	for_each_tile(tile, xe, id)
		xe_tile_migrate_wait(tile);

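	/*
	 * Note (added for clarity): pinned kernel memory is evicted via CPU
	 * copy and tracked on the evicted list so xe_bo_restore_kernel() can
	 * move it back on resume.
	 */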
	spin_lock(&xe->pinned.lock);
	for (;;) {
		bo = list_first_entry_or_null(&xe->pinned.kernel_bo_present,
					      typeof(*bo), pinned_link);
		if (!bo)
			break;
		xe_bo_get(bo);
		list_move_tail(&bo->pinned_link, &xe->pinned.evicted);
		spin_unlock(&xe->pinned.lock);

		xe_bo_lock(bo, false);
		ret = xe_bo_evict_pinned(bo);
		xe_bo_unlock(bo);
		xe_bo_put(bo);
		if (ret)
			return ret;

		spin_lock(&xe->pinned.lock);
	}
	spin_unlock(&xe->pinned.lock);

	return 0;
}

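/*
 * Usage sketch (illustrative, not part of the original file): a suspend
 * handler, for instance one along the lines of the driver's xe_pm_suspend(),
 * would evict VRAM before cutting its power:
 *
 *	err = xe_bo_evict_all(xe);
 *	if (err)
 *		return err;
 *	... power down the device ...
 */
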
/**
 * xe_bo_restore_kernel - restore kernel BOs to VRAM
 *
 * @xe: xe device
 *
 * Move kernel BOs from temporary (typically system) memory to VRAM via CPU.
 * All moves are done via TTM calls.
 *
 * This function should be called early, before trying to init the GT, on
 * device resume.
 */
int xe_bo_restore_kernel(struct xe_device *xe)
{
	struct xe_bo *bo;
	int ret;

	spin_lock(&xe->pinned.lock);
	for (;;) {
		bo = list_first_entry_or_null(&xe->pinned.evicted,
					      typeof(*bo), pinned_link);
		if (!bo)
			break;
		xe_bo_get(bo);
		list_move_tail(&bo->pinned_link, &xe->pinned.kernel_bo_present);
		spin_unlock(&xe->pinned.lock);

		xe_bo_lock(bo, false);
		ret = xe_bo_restore_pinned(bo);
		xe_bo_unlock(bo);
		if (ret) {
			xe_bo_put(bo);
			return ret;
		}

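		/*
		 * Note (added for clarity): re-insert the BO's GGTT mapping;
		 * the PTEs written before suspend are assumed not to survive
		 * the VRAM power-down.
		 */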
		if (bo->flags & XE_BO_FLAG_GGTT) {
			struct xe_tile *tile = bo->tile;

			mutex_lock(&tile->mem.ggtt->lock);
			xe_ggtt_map_bo(tile->mem.ggtt, bo);
			mutex_unlock(&tile->mem.ggtt->lock);
		}

		/*
		 * We expect validate to trigger a move to VRAM and our move
		 * code should set up the iosys map.
		 */
		xe_assert(xe, !iosys_map_is_null(&bo->vmap));

		xe_bo_put(bo);

		spin_lock(&xe->pinned.lock);
	}
	spin_unlock(&xe->pinned.lock);

	return 0;
}

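/*
 * Usage sketch (illustrative, not part of the original file): assuming a
 * resume path similar to the driver's xe_pm_resume(), pinned kernel BOs are
 * restored before the GTs come back up, since later GPU-based moves depend
 * on them:
 *
 *	err = xe_bo_restore_kernel(xe);
 *	if (err)
 *		return err;
 *	... bring up GTs ...
 */
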
/**
 * xe_bo_restore_user - restore pinned user BOs to VRAM
 *
 * @xe: xe device
 *
 * Move pinned user BOs from temporary (typically system) memory to VRAM via
 * CPU. All moves are done via TTM calls.
 *
 * This function should be called late, after GT init, on device resume.
 */
int xe_bo_restore_user(struct xe_device *xe)
{
	struct xe_bo *bo;
	struct xe_tile *tile;
	struct list_head still_in_list;
	u8 id;
	int ret;

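	/*
	 * Note (added for clarity): integrated parts have no VRAM, so there
	 * is nothing to restore.
	 */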
	if (!IS_DGFX(xe))
		return 0;

	/* Pinned user memory in VRAM should be validated on resume */
	INIT_LIST_HEAD(&still_in_list);
	spin_lock(&xe->pinned.lock);
	for (;;) {
		bo = list_first_entry_or_null(&xe->pinned.external_vram,
					      typeof(*bo), pinned_link);
		if (!bo)
			break;
		list_move_tail(&bo->pinned_link, &still_in_list);
		xe_bo_get(bo);
		spin_unlock(&xe->pinned.lock);

		xe_bo_lock(bo, false);
		ret = xe_bo_restore_pinned(bo);
		xe_bo_unlock(bo);
		xe_bo_put(bo);
		if (ret) {
			spin_lock(&xe->pinned.lock);
			list_splice_tail(&still_in_list,
					 &xe->pinned.external_vram);
			spin_unlock(&xe->pinned.lock);
			return ret;
		}

		spin_lock(&xe->pinned.lock);
	}
	list_splice_tail(&still_in_list, &xe->pinned.external_vram);
	spin_unlock(&xe->pinned.lock);

	/* Wait for restore to complete */
	for_each_tile(tile, xe, id)
		xe_tile_migrate_wait(tile);

	return 0;
}
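
/*
 * Usage sketch (illustrative, not part of the original file): once the GTs
 * are initialized on resume, pinned user VRAM contents can be migrated back,
 * e.g. at the tail of a resume handler:
 *
 *	... GT init done ...
 *	err = xe_bo_restore_user(xe);
 *	if (err)
 *		return err;
 */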