include/drm/ttm/ttm_device.h (Linux v6.2)
/*
 * Copyright 2020 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König
 */

#ifndef _TTM_DEVICE_H_
#define _TTM_DEVICE_H_

#include <linux/types.h>
#include <linux/workqueue.h>
#include <drm/ttm/ttm_resource.h>
#include <drm/ttm/ttm_pool.h>

struct ttm_device;
struct ttm_placement;
struct ttm_buffer_object;
struct ttm_operation_ctx;

/**
 * struct ttm_global - Buffer object driver global data.
 */
extern struct ttm_global {

	/**
	 * @dummy_read_page: Pointer to a dummy page used for mapping requests
	 * of unpopulated pages. Constant after init.
	 */
	struct page *dummy_read_page;

	/**
	 * @device_list: List of buffer object devices. Protected by
	 * ttm_global_mutex.
	 */
	struct list_head device_list;

	/**
	 * @bo_count: Number of buffer objects allocated by devices.
	 */
	atomic_t bo_count;
} ttm_glob;

struct ttm_device_funcs {
	/**
	 * ttm_tt_create
	 *
	 * @bo: The buffer object to create the ttm for.
	 * @page_flags: Page flags as identified by TTM_TT_FLAG_XX flags.
	 *
	 * Create a struct ttm_tt to back data with system memory pages.
	 * No pages are actually allocated.
	 * Returns:
	 * NULL: Out of memory.
	 */
	struct ttm_tt *(*ttm_tt_create)(struct ttm_buffer_object *bo,
					uint32_t page_flags);

	/**
	 * ttm_tt_populate
	 *
	 * @ttm: The struct ttm_tt to contain the backing pages.
	 *
	 * Allocate all backing pages.
	 * Returns:
	 * -ENOMEM: Out of memory.
	 */
	int (*ttm_tt_populate)(struct ttm_device *bdev,
			       struct ttm_tt *ttm,
			       struct ttm_operation_ctx *ctx);
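
	/*
	 * Illustrative sketch (not part of the kernel header): drivers
	 * without special allocation needs commonly forward populate to
	 * the device page pool; mydrv_ttm_tt_populate is a hypothetical
	 * name, and the matching unpopulate would call
	 * ttm_pool_free(&bdev->pool, ttm).
	 *
	 *	static int mydrv_ttm_tt_populate(struct ttm_device *bdev,
	 *					 struct ttm_tt *ttm,
	 *					 struct ttm_operation_ctx *ctx)
	 *	{
	 *		return ttm_pool_alloc(&bdev->pool, ttm, ctx);
	 *	}
	 */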

	/**
	 * ttm_tt_unpopulate
	 *
	 * @ttm: The struct ttm_tt to contain the backing pages.
	 *
	 * Free all backing pages.
	 */
	void (*ttm_tt_unpopulate)(struct ttm_device *bdev,
				  struct ttm_tt *ttm);

	/**
	 * ttm_tt_destroy
	 *
	 * @bdev: Pointer to a ttm device
	 * @ttm: Pointer to a struct ttm_tt.
	 *
	 * Destroy the backend. This will be called back from ttm_tt_destroy,
	 * so do not call ttm_tt_destroy from the callback or you will get an
	 * infinite loop.
	 */
	void (*ttm_tt_destroy)(struct ttm_device *bdev, struct ttm_tt *ttm);

	/**
	 * struct ttm_bo_driver member eviction_valuable
	 *
	 * @bo: the buffer object to be evicted
	 * @place: placement we need room for
	 *
	 * Check with the driver if it is valuable to evict a BO to make room
	 * for a certain placement.
	 */
	bool (*eviction_valuable)(struct ttm_buffer_object *bo,
				  const struct ttm_place *place);

	/**
	 * struct ttm_bo_driver member evict_flags:
	 *
	 * @bo: the buffer object to be evicted
	 *
	 * Return the bo flags for a buffer which is not mapped to the hardware.
	 * These will be placed in proposed_flags so that when the move is
	 * finished, they'll end up in bo->mem.flags.
	 * This should not cause multihop evictions, and the core will warn
	 * if one is proposed.
	 */
	void (*evict_flags)(struct ttm_buffer_object *bo,
			    struct ttm_placement *placement);

	/**
	 * struct ttm_bo_driver member move:
	 *
	 * @bo: the buffer to move
	 * @evict: whether this motion is evicting the buffer from
	 * the graphics address space
	 * @ctx: context for this move with parameters
	 * @new_mem: the new memory region receiving the buffer
	 * @hop: placement for driver-directed intermediate hop
	 *
	 * Move a buffer between two memory regions.
	 * Returns errno -EMULTIHOP if the driver requests a hop.
	 */
	int (*move)(struct ttm_buffer_object *bo, bool evict,
		    struct ttm_operation_ctx *ctx,
		    struct ttm_resource *new_mem,
		    struct ttm_place *hop);
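
	/*
	 * Illustrative sketch (not part of the kernel header): a move
	 * callback can request a bounce through another domain by filling
	 * in @hop and returning -EMULTIHOP, after which TTM retries the
	 * move via the hop placement. mydrv_bo_move and the
	 * no_direct_path() test are hypothetical, and ttm_bo_move_null()
	 * stands in for a real hardware copy.
	 *
	 *	static int mydrv_bo_move(struct ttm_buffer_object *bo, bool evict,
	 *				 struct ttm_operation_ctx *ctx,
	 *				 struct ttm_resource *new_mem,
	 *				 struct ttm_place *hop)
	 *	{
	 *		if (no_direct_path(bo->resource, new_mem)) {
	 *			hop->fpfn = 0;
	 *			hop->lpfn = 0;
	 *			hop->mem_type = TTM_PL_TT;
	 *			hop->flags = 0;
	 *			return -EMULTIHOP;
	 *		}
	 *		ttm_bo_move_null(bo, new_mem);
	 *		return 0;
	 *	}
	 */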

	/**
	 * Hook to notify the driver about a resource delete.
	 */
	void (*delete_mem_notify)(struct ttm_buffer_object *bo);

	/**
	 * Notify the driver that we're about to swap out this bo.
	 */
	void (*swap_notify)(struct ttm_buffer_object *bo);

	/**
	 * Driver callback for mapping io memory (for bo_move_memcpy,
	 * for instance). TTM will take care to call io_mem_free whenever
	 * the mapping is no longer in use. io_mem_reserve and io_mem_free
	 * calls are balanced.
	 */
	int (*io_mem_reserve)(struct ttm_device *bdev,
			      struct ttm_resource *mem);
	void (*io_mem_free)(struct ttm_device *bdev,
			    struct ttm_resource *mem);

	/**
	 * Return the pfn for a given page_offset inside the BO.
	 *
	 * @bo: the BO to look up the pfn for
	 * @page_offset: the offset to look up
	 */
	unsigned long (*io_mem_pfn)(struct ttm_buffer_object *bo,
				    unsigned long page_offset);

	/**
	 * Read/write memory buffers for ptrace access
	 *
	 * @bo: the BO to access
	 * @offset: the offset from the start of the BO
	 * @buf: pointer to source/destination buffer
	 * @len: number of bytes to copy
	 * @write: whether to read from (0) or write to (non-0) the BO
	 *
	 * If successful, this function should return the number of
	 * bytes copied, -EIO otherwise. If the number of bytes
	 * returned is < len, the function may be called again with
	 * the remainder of the buffer to copy.
	 */
	int (*access_memory)(struct ttm_buffer_object *bo, unsigned long offset,
			     void *buf, int len, int write);

	/**
	 * Notify the driver that we're about to release a BO
	 *
	 * @bo: BO that is about to be released
	 *
	 * Gives the driver a chance to do any cleanup, including
	 * adding fences that may force a delayed delete
	 */
	void (*release_notify)(struct ttm_buffer_object *bo);
};
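
/*
 * Illustrative sketch (not part of the kernel header): a minimal driver
 * function table. The mydrv_* symbols are hypothetical; which hooks a
 * driver fills in beyond ttm_tt_create and move depends on its needs,
 * and ttm_bo_eviction_valuable is the common default helper.
 *
 *	static struct ttm_device_funcs mydrv_ttm_funcs = {
 *		.ttm_tt_create = mydrv_ttm_tt_create,
 *		.ttm_tt_populate = mydrv_ttm_tt_populate,
 *		.ttm_tt_unpopulate = mydrv_ttm_tt_unpopulate,
 *		.eviction_valuable = ttm_bo_eviction_valuable,
 *		.evict_flags = mydrv_evict_flags,
 *		.move = mydrv_bo_move,
 *	};
 */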

/**
 * struct ttm_device - Buffer object driver device-specific data.
 */
struct ttm_device {
	/**
	 * @device_list: Our entry in the global device list.
	 * Constant after bo device init
	 */
	struct list_head device_list;

	/**
	 * @funcs: Function table for the device.
	 * Constant after bo device init
	 */
	struct ttm_device_funcs *funcs;

	/**
	 * @sysman: Resource manager for the system domain.
	 * Access via ttm_manager_type.
	 */
	struct ttm_resource_manager sysman;

	/**
	 * @man_drv: An array of resource_managers, one per resource type.
	 */
	struct ttm_resource_manager *man_drv[TTM_NUM_MEM_TYPES];

	/**
	 * @vma_manager: Address space manager for finding BOs to mmap.
	 */
	struct drm_vma_offset_manager *vma_manager;

	/**
	 * @pool: page pool for the device.
	 */
	struct ttm_pool pool;

	/**
	 * @lru_lock: Protection for the per manager LRU and ddestroy lists.
	 */
	spinlock_t lru_lock;

	/**
	 * @ddestroy: Destroyed but not yet cleaned up buffer objects.
	 */
	struct list_head ddestroy;

	/**
	 * @pinned: Buffer objects which are pinned and so not on any LRU list.
	 */
	struct list_head pinned;

	/**
	 * @dev_mapping: A pointer to the struct address_space for invalidating
	 * CPU mappings on buffer move. Protected by load/unload sync.
	 */
	struct address_space *dev_mapping;

	/**
	 * @wq: Work queue structure for the delayed delete workqueue.
	 */
	struct delayed_work wq;
};
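
/*
 * Illustrative sketch (not part of the kernel header): drivers embed
 * struct ttm_device in their own device structure and recover it in
 * the callbacks with container_of(); struct mydrv_device is a
 * hypothetical name.
 *
 *	struct mydrv_device {
 *		struct drm_device drm;
 *		struct ttm_device bdev;
 *	};
 *
 *	static inline struct mydrv_device *mydrv_dev(struct ttm_device *bdev)
 *	{
 *		return container_of(bdev, struct mydrv_device, bdev);
 *	}
 */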

int ttm_global_swapout(struct ttm_operation_ctx *ctx, gfp_t gfp_flags);
int ttm_device_swapout(struct ttm_device *bdev, struct ttm_operation_ctx *ctx,
		       gfp_t gfp_flags);

static inline struct ttm_resource_manager *
ttm_manager_type(struct ttm_device *bdev, int mem_type)
{
	BUILD_BUG_ON(__builtin_constant_p(mem_type)
		     && mem_type >= TTM_NUM_MEM_TYPES);
	return bdev->man_drv[mem_type];
}

static inline void ttm_set_driver_manager(struct ttm_device *bdev, int type,
					  struct ttm_resource_manager *manager)
{
	BUILD_BUG_ON(__builtin_constant_p(type) && type >= TTM_NUM_MEM_TYPES);
	bdev->man_drv[type] = manager;
}
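
/*
 * Illustrative sketch (not part of the kernel header): a driver
 * typically initializes its own resource manager for a domain such as
 * TTM_PL_VRAM and publishes it via ttm_set_driver_manager(); vram_mgr
 * and vram_pages are hypothetical, with the size given in whatever
 * units the manager uses (pages, for the range manager).
 *
 *	ttm_resource_manager_init(vram_mgr, bdev, vram_pages);
 *	ttm_set_driver_manager(bdev, TTM_PL_VRAM, vram_mgr);
 *	ttm_resource_manager_set_used(vram_mgr, true);
 *
 * Lookups then go through ttm_manager_type(bdev, TTM_PL_VRAM).
 */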

int ttm_device_init(struct ttm_device *bdev, struct ttm_device_funcs *funcs,
		    struct device *dev, struct address_space *mapping,
		    struct drm_vma_offset_manager *vma_manager,
		    bool use_dma_alloc, bool use_dma32);
void ttm_device_fini(struct ttm_device *bdev);
void ttm_device_clear_dma_mappings(struct ttm_device *bdev);

#endif
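
A minimal bring-up and tear-down sequence for this API, sketched under the
assumptions above: struct mydrv_device and mydrv_ttm_funcs are the
hypothetical names from the comment sketches, not code from any real driver,
and error handling is trimmed for brevity.

#include <drm/drm_device.h>
#include <drm/ttm/ttm_device.h>

static int mydrv_ttm_init(struct mydrv_device *mdev)
{
	struct drm_device *drm = &mdev->drm;

	/*
	 * Register the device with TTM. CPU mappings live in the DRM
	 * anonymous inode's address_space and mmap offsets come from the
	 * DRM VMA offset manager, so TTM can invalidate userspace
	 * mappings on buffer moves.
	 */
	return ttm_device_init(&mdev->bdev, &mydrv_ttm_funcs, drm->dev,
			       drm->anon_inode->i_mapping,
			       drm->vma_offset_manager,
			       true /* use_dma_alloc */,
			       false /* use_dma32 */);
}

static void mydrv_ttm_fini(struct mydrv_device *mdev)
{
	/* Drains the delayed-destroy list, then unregisters the device. */
	ttm_device_fini(&mdev->bdev);
}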