v5.9
 
  1/*
  2 * Copyright 2014 Advanced Micro Devices, Inc.
  3 *
  4 * Permission is hereby granted, free of charge, to any person obtaining a
  5 * copy of this software and associated documentation files (the "Software"),
  6 * to deal in the Software without restriction, including without limitation
  7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8 * and/or sell copies of the Software, and to permit persons to whom the
  9 * Software is furnished to do so, subject to the following conditions:
 10 *
 11 * The above copyright notice and this permission notice shall be included in
 12 * all copies or substantial portions of the Software.
 13 *
 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 20 * OTHER DEALINGS IN THE SOFTWARE.
 21 *
 22 */
 23
 24#include <linux/slab.h>
 25#include <linux/mutex.h>
 26#include "kfd_device_queue_manager.h"
 27#include "kfd_kernel_queue.h"
 28#include "kfd_priv.h"
 29
 30static inline void inc_wptr(unsigned int *wptr, unsigned int increment_bytes,
 31				unsigned int buffer_size_bytes)
 32{
 33	unsigned int temp = *wptr + increment_bytes / sizeof(uint32_t);
 34
 35	WARN((temp * sizeof(uint32_t)) > buffer_size_bytes,
 36	     "Runlist IB overflow");
 37	*wptr = temp;
 38}
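/*
 * Worked example (illustrative numbers only): with *wptr == 0, a packet of
 * increment_bytes == 80 and buffer_size_bytes == 4096, the write pointer
 * advances by 80 / sizeof(uint32_t) = 20 dwords.  The WARN only fires if the
 * advanced pointer, converted back to bytes, would run past the IB.
 */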
 39
 40static void pm_calc_rlib_size(struct packet_manager *pm,
 41				unsigned int *rlib_size,
 42				bool *over_subscription)
 43{
 44	unsigned int process_count, queue_count, compute_queue_count, gws_queue_count;
 45	unsigned int map_queue_size;
 46	unsigned int max_proc_per_quantum = 1;
  47	struct kfd_dev *dev = pm->dqm->dev;
 48
 49	process_count = pm->dqm->processes_count;
 50	queue_count = pm->dqm->active_queue_count;
 51	compute_queue_count = pm->dqm->active_cp_queue_count;
 52	gws_queue_count = pm->dqm->gws_queue_count;
 53
 54	/* check if there is over subscription
 55	 * Note: the arbitration between the number of VMIDs and
 56	 * hws_max_conc_proc has been done in
 57	 * kgd2kfd_device_init().
 58	 */
 59	*over_subscription = false;
 60
 61	if (dev->max_proc_per_quantum > 1)
 62		max_proc_per_quantum = dev->max_proc_per_quantum;
 63
 64	if ((process_count > max_proc_per_quantum) ||
 65	    compute_queue_count > get_cp_queues_num(pm->dqm) ||
 66	    gws_queue_count > 1) {
 67		*over_subscription = true;
 68		pr_debug("Over subscribed runlist\n");
 69	}
 70
 71	map_queue_size = pm->pmf->map_queues_size;
 72	/* calculate run list ib allocation size */
 73	*rlib_size = process_count * pm->pmf->map_process_size +
 74		     queue_count * map_queue_size;
 75
 76	/*
 77	 * Increase the allocation size in case we need a chained run list
 78	 * when over subscription
 79	 */
 80	if (*over_subscription)
 81		*rlib_size += pm->pmf->runlist_size;
 82
 83	pr_debug("runlist ib size %d\n", *rlib_size);
 84}
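/*
 * Sizing sketch for the calculation above (illustrative packet sizes; the
 * real ones come from pm->pmf and are per-ASIC):
 *
 *   rlib_size = process_count * map_process_size
 *             + queue_count   * map_queues_size
 *             [+ runlist_size when over-subscribed, for the chained packet]
 *
 * e.g. 2 processes with 6 active queues, map_process_size = 80 and
 * map_queues_size = 64, need 2 * 80 + 6 * 64 = 544 bytes before chaining.
 */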
 85
 86static int pm_allocate_runlist_ib(struct packet_manager *pm,
 87				unsigned int **rl_buffer,
 88				uint64_t *rl_gpu_buffer,
 89				unsigned int *rl_buffer_size,
 90				bool *is_over_subscription)
  91{
 92	int retval;
 93
 94	if (WARN_ON(pm->allocated))
 95		return -EINVAL;
 96
 97	pm_calc_rlib_size(pm, rl_buffer_size, is_over_subscription);
 98
 99	mutex_lock(&pm->lock);
100
101	retval = kfd_gtt_sa_allocate(pm->dqm->dev, *rl_buffer_size,
102					&pm->ib_buffer_obj);
103
104	if (retval) {
105		pr_err("Failed to allocate runlist IB\n");
106		goto out;
107	}
108
109	*(void **)rl_buffer = pm->ib_buffer_obj->cpu_ptr;
110	*rl_gpu_buffer = pm->ib_buffer_obj->gpu_addr;
111
112	memset(*rl_buffer, 0, *rl_buffer_size);
113	pm->allocated = true;
114
115out:
116	mutex_unlock(&pm->lock);
117	return retval;
118}
119
120static int pm_create_runlist_ib(struct packet_manager *pm,
121				struct list_head *queues,
122				uint64_t *rl_gpu_addr,
123				size_t *rl_size_bytes)
124{
125	unsigned int alloc_size_bytes;
126	unsigned int *rl_buffer, rl_wptr, i;
 127	int retval, processes_mapped;
128	struct device_process_node *cur;
129	struct qcm_process_device *qpd;
130	struct queue *q;
131	struct kernel_queue *kq;
132	bool is_over_subscription;
133
 134	rl_wptr = retval = processes_mapped = 0;
135
136	retval = pm_allocate_runlist_ib(pm, &rl_buffer, rl_gpu_addr,
137				&alloc_size_bytes, &is_over_subscription);
138	if (retval)
139		return retval;
140
141	*rl_size_bytes = alloc_size_bytes;
142	pm->ib_size_bytes = alloc_size_bytes;
143
144	pr_debug("Building runlist ib process count: %d queues count %d\n",
145		pm->dqm->processes_count, pm->dqm->active_queue_count);
146
147	/* build the run list ib packet */
148	list_for_each_entry(cur, queues, list) {
149		qpd = cur->qpd;
150		/* build map process packet */
 151		if (processes_mapped >= pm->dqm->processes_count) {
152			pr_debug("Not enough space left in runlist IB\n");
153			pm_release_ib(pm);
154			return -ENOMEM;
155		}
156
157		retval = pm->pmf->map_process(pm, &rl_buffer[rl_wptr], qpd);
158		if (retval)
159			return retval;
160
 161		processes_mapped++;
162		inc_wptr(&rl_wptr, pm->pmf->map_process_size,
163				alloc_size_bytes);
164
165		list_for_each_entry(kq, &qpd->priv_queue_list, list) {
166			if (!kq->queue->properties.is_active)
167				continue;
168
 169			pr_debug("static_queue, mapping kernel q %d, is debug status %d\n",
170				kq->queue->queue, qpd->is_debug);
171
172			retval = pm->pmf->map_queues(pm,
173						&rl_buffer[rl_wptr],
174						kq->queue,
175						qpd->is_debug);
176			if (retval)
177				return retval;
178
179			inc_wptr(&rl_wptr,
180				pm->pmf->map_queues_size,
181				alloc_size_bytes);
182		}
183
184		list_for_each_entry(q, &qpd->queues_list, list) {
185			if (!q->properties.is_active)
186				continue;
187
 188			pr_debug("static_queue, mapping user queue %d, is debug status %d\n",
189				q->queue, qpd->is_debug);
190
191			retval = pm->pmf->map_queues(pm,
192						&rl_buffer[rl_wptr],
193						q,
194						qpd->is_debug);
195
196			if (retval)
197				return retval;
198
199			inc_wptr(&rl_wptr,
200				pm->pmf->map_queues_size,
201				alloc_size_bytes);
202		}
203	}
204
205	pr_debug("Finished map process and queues to runlist\n");
206
207	if (is_over_subscription) {
208		if (!pm->is_over_subscription)
 209			pr_warn("Runlist is getting oversubscribed. Expect reduced ROCm performance.\n");
210		retval = pm->pmf->runlist(pm, &rl_buffer[rl_wptr],
211					*rl_gpu_addr,
212					alloc_size_bytes / sizeof(uint32_t),
213					true);
214	}
215	pm->is_over_subscription = is_over_subscription;
216
217	for (i = 0; i < alloc_size_bytes / sizeof(uint32_t); i++)
218		pr_debug("0x%2X ", rl_buffer[i]);
219	pr_debug("\n");
220
221	return retval;
222}
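/*
 * Layout of the finished IB, as built by the loops above: a map-process
 * packet for each process, each followed by map-queues packets for that
 * process's active kernel and user queues.  When the runlist is
 * over-subscribed, a final chained runlist packet points back at the start
 * of this IB (*rl_gpu_addr) so the HWS keeps iterating over it.
 */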
223
224int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm)
225{
226	switch (dqm->dev->device_info->asic_family) {
227	case CHIP_KAVERI:
228	case CHIP_HAWAII:
229		/* PM4 packet structures on CIK are the same as on VI */
230	case CHIP_CARRIZO:
231	case CHIP_TONGA:
232	case CHIP_FIJI:
233	case CHIP_POLARIS10:
234	case CHIP_POLARIS11:
235	case CHIP_POLARIS12:
236	case CHIP_VEGAM:
237		pm->pmf = &kfd_vi_pm_funcs;
238		break;
239	case CHIP_VEGA10:
240	case CHIP_VEGA12:
241	case CHIP_VEGA20:
242	case CHIP_RAVEN:
243	case CHIP_RENOIR:
244	case CHIP_ARCTURUS:
245	case CHIP_NAVI10:
246	case CHIP_NAVI12:
247	case CHIP_NAVI14:
248	case CHIP_SIENNA_CICHLID:
249	case CHIP_NAVY_FLOUNDER:
250		pm->pmf = &kfd_v9_pm_funcs;
251		break;
252	default:
253		WARN(1, "Unexpected ASIC family %u",
254		     dqm->dev->device_info->asic_family);
 255		return -EINVAL;
256	}
257
258	pm->dqm = dqm;
259	mutex_init(&pm->lock);
260	pm->priv_queue = kernel_queue_init(dqm->dev, KFD_QUEUE_TYPE_HIQ);
261	if (!pm->priv_queue) {
262		mutex_destroy(&pm->lock);
263		return -ENOMEM;
264	}
265	pm->allocated = false;
266
267	return 0;
268}
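/*
 * pm_init() picks the per-ASIC packet writer table (VI-style packets for the
 * CIK/VI/Polaris parts, v9 packets for the Vega/Raven/Arcturus/Navi parts
 * listed above) and creates the HIQ kernel queue that all pm_send_*()
 * helpers below submit through.
 */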
269
270void pm_uninit(struct packet_manager *pm, bool hanging)
271{
272	mutex_destroy(&pm->lock);
 273	kernel_queue_uninit(pm->priv_queue, hanging);
274}
275
276int pm_send_set_resources(struct packet_manager *pm,
277				struct scheduling_resources *res)
 278{
279	uint32_t *buffer, size;
280	int retval = 0;
281
282	size = pm->pmf->set_resources_size;
283	mutex_lock(&pm->lock);
284	kq_acquire_packet_buffer(pm->priv_queue,
285					size / sizeof(uint32_t),
286					(unsigned int **)&buffer);
287	if (!buffer) {
288		pr_err("Failed to allocate buffer on kernel queue\n");
289		retval = -ENOMEM;
290		goto out;
291	}
292
293	retval = pm->pmf->set_resources(pm, buffer, res);
294	if (!retval)
295		kq_submit_packet(pm->priv_queue);
296	else
297		kq_rollback_packet(pm->priv_queue);
298
299out:
300	mutex_unlock(&pm->lock);
301
302	return retval;
303}
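/*
 * pm_send_set_resources() shows the submission pattern shared by the
 * pm_send_*() helpers below: under pm->lock, reserve space on the HIQ with
 * kq_acquire_packet_buffer(), let the per-ASIC pm->pmf callback fill in the
 * packet, then either kq_submit_packet() on success or kq_rollback_packet()
 * if building the packet failed.
 */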
304
305int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues)
306{
307	uint64_t rl_gpu_ib_addr;
308	uint32_t *rl_buffer;
309	size_t rl_ib_size, packet_size_dwords;
310	int retval;
311
312	retval = pm_create_runlist_ib(pm, dqm_queues, &rl_gpu_ib_addr,
313					&rl_ib_size);
314	if (retval)
315		goto fail_create_runlist_ib;
316
317	pr_debug("runlist IB address: 0x%llX\n", rl_gpu_ib_addr);
318
319	packet_size_dwords = pm->pmf->runlist_size / sizeof(uint32_t);
320	mutex_lock(&pm->lock);
321
322	retval = kq_acquire_packet_buffer(pm->priv_queue,
323					packet_size_dwords, &rl_buffer);
324	if (retval)
325		goto fail_acquire_packet_buffer;
326
327	retval = pm->pmf->runlist(pm, rl_buffer, rl_gpu_ib_addr,
328					rl_ib_size / sizeof(uint32_t), false);
329	if (retval)
330		goto fail_create_runlist;
331
332	kq_submit_packet(pm->priv_queue);
333
334	mutex_unlock(&pm->lock);
335
336	return retval;
337
338fail_create_runlist:
339	kq_rollback_packet(pm->priv_queue);
340fail_acquire_packet_buffer:
341	mutex_unlock(&pm->lock);
342fail_create_runlist_ib:
343	pm_release_ib(pm);
344	return retval;
345}
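/*
 * pm_send_runlist() first materializes the runlist IB in GTT memory via
 * pm_create_runlist_ib(), then writes a single, unchained runlist packet to
 * the HIQ that points the HWS at that IB.  On failure it rolls back the HIQ
 * packet and/or releases the IB, depending on how far it got.
 */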
346
347int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address,
348			uint32_t fence_value)
 349{
350	uint32_t *buffer, size;
351	int retval = 0;
352
353	if (WARN_ON(!fence_address))
354		return -EFAULT;
355
356	size = pm->pmf->query_status_size;
357	mutex_lock(&pm->lock);
358	kq_acquire_packet_buffer(pm->priv_queue,
359			size / sizeof(uint32_t), (unsigned int **)&buffer);
360	if (!buffer) {
361		pr_err("Failed to allocate buffer on kernel queue\n");
362		retval = -ENOMEM;
363		goto out;
364	}
365
366	retval = pm->pmf->query_status(pm, buffer, fence_address, fence_value);
367	if (!retval)
368		kq_submit_packet(pm->priv_queue);
369	else
370		kq_rollback_packet(pm->priv_queue);
371
372out:
373	mutex_unlock(&pm->lock);
374	return retval;
375}
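/*
 * The query-status packet acts as a fence: the scheduler is expected to
 * write fence_value to fence_address once it has processed the packet, so
 * callers can poll that address to know the HWS has caught up with
 * everything submitted before it.
 */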
376
 377int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type,
378			enum kfd_unmap_queues_filter filter,
379			uint32_t filter_param, bool reset,
380			unsigned int sdma_engine)
 381{
382	uint32_t *buffer, size;
383	int retval = 0;
384
385	size = pm->pmf->unmap_queues_size;
386	mutex_lock(&pm->lock);
387	kq_acquire_packet_buffer(pm->priv_queue,
388			size / sizeof(uint32_t), (unsigned int **)&buffer);
389	if (!buffer) {
390		pr_err("Failed to allocate buffer on kernel queue\n");
391		retval = -ENOMEM;
392		goto out;
393	}
394
395	retval = pm->pmf->unmap_queues(pm, buffer, type, filter, filter_param,
396				       reset, sdma_engine);
397	if (!retval)
398		kq_submit_packet(pm->priv_queue);
399	else
400		kq_rollback_packet(pm->priv_queue);
401
402out:
403	mutex_unlock(&pm->lock);
404	return retval;
405}
406
407void pm_release_ib(struct packet_manager *pm)
408{
409	mutex_lock(&pm->lock);
410	if (pm->allocated) {
411		kfd_gtt_sa_free(pm->dqm->dev, pm->ib_buffer_obj);
412		pm->allocated = false;
413	}
414	mutex_unlock(&pm->lock);
415}
416
417#if defined(CONFIG_DEBUG_FS)
418
419int pm_debugfs_runlist(struct seq_file *m, void *data)
420{
421	struct packet_manager *pm = data;
422
423	mutex_lock(&pm->lock);
424
425	if (!pm->allocated) {
426		seq_puts(m, "  No active runlist\n");
427		goto out;
428	}
429
430	seq_hex_dump(m, "  ", DUMP_PREFIX_OFFSET, 32, 4,
431		     pm->ib_buffer_obj->cpu_ptr, pm->ib_size_bytes, false);
432
433out:
434	mutex_unlock(&pm->lock);
435	return 0;
436}
437
438int pm_debugfs_hang_hws(struct packet_manager *pm)
 439{
440	uint32_t *buffer, size;
441	int r = 0;
 442	int r = 0;
443	size = pm->pmf->query_status_size;
444	mutex_lock(&pm->lock);
445	kq_acquire_packet_buffer(pm->priv_queue,
446			size / sizeof(uint32_t), (unsigned int **)&buffer);
447	if (!buffer) {
448		pr_err("Failed to allocate buffer on kernel queue\n");
449		r = -ENOMEM;
450		goto out;
451	}
452	memset(buffer, 0x55, size);
453	kq_submit_packet(pm->priv_queue);
454
455	pr_info("Submitting %x %x %x %x %x %x %x to HIQ to hang the HWS.",
456		buffer[0], buffer[1], buffer[2], buffer[3],
457		buffer[4], buffer[5], buffer[6]);
458out:
459	mutex_unlock(&pm->lock);
460	return r;
461}
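/*
 * Debugfs-only helper: the buffer is deliberately filled with 0x55 garbage
 * and submitted to the HIQ, which is expected to hang the HWS, presumably so
 * the driver's reset/recovery handling can be exercised from user space.
 */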
462
463
464#endif
v6.13.7
  1// SPDX-License-Identifier: GPL-2.0 OR MIT
  2/*
  3 * Copyright 2014-2022 Advanced Micro Devices, Inc.
  4 *
  5 * Permission is hereby granted, free of charge, to any person obtaining a
  6 * copy of this software and associated documentation files (the "Software"),
  7 * to deal in the Software without restriction, including without limitation
  8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  9 * and/or sell copies of the Software, and to permit persons to whom the
 10 * Software is furnished to do so, subject to the following conditions:
 11 *
 12 * The above copyright notice and this permission notice shall be included in
 13 * all copies or substantial portions of the Software.
 14 *
 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 18 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 21 * OTHER DEALINGS IN THE SOFTWARE.
 22 *
 23 */
 24
 25#include <linux/slab.h>
 26#include <linux/mutex.h>
 27#include "kfd_device_queue_manager.h"
 28#include "kfd_kernel_queue.h"
 29#include "kfd_priv.h"
 30
 31static inline void inc_wptr(unsigned int *wptr, unsigned int increment_bytes,
 32				unsigned int buffer_size_bytes)
 33{
 34	unsigned int temp = *wptr + increment_bytes / sizeof(uint32_t);
 35
 36	WARN((temp * sizeof(uint32_t)) > buffer_size_bytes,
 37	     "Runlist IB overflow");
 38	*wptr = temp;
 39}
 40
 41static void pm_calc_rlib_size(struct packet_manager *pm,
 42				unsigned int *rlib_size,
 43				bool *over_subscription)
 44{
 45	unsigned int process_count, queue_count, compute_queue_count, gws_queue_count;
 46	unsigned int map_queue_size;
 47	unsigned int max_proc_per_quantum = 1;
 48	struct kfd_node *node = pm->dqm->dev;
 49	struct device *dev = node->adev->dev;
 50
 51	process_count = pm->dqm->processes_count;
 52	queue_count = pm->dqm->active_queue_count;
 53	compute_queue_count = pm->dqm->active_cp_queue_count;
 54	gws_queue_count = pm->dqm->gws_queue_count;
 55
 56	/* check if there is over subscription
 57	 * Note: the arbitration between the number of VMIDs and
 58	 * hws_max_conc_proc has been done in
 59	 * kgd2kfd_device_init().
 60	 */
 61	*over_subscription = false;
 62
 63	if (node->max_proc_per_quantum > 1)
 64		max_proc_per_quantum = node->max_proc_per_quantum;
 65
 66	if ((process_count > max_proc_per_quantum) ||
 67	    compute_queue_count > get_cp_queues_num(pm->dqm) ||
 68	    gws_queue_count > 1) {
 69		*over_subscription = true;
 70		dev_dbg(dev, "Over subscribed runlist\n");
 71	}
 72
 73	map_queue_size = pm->pmf->map_queues_size;
 74	/* calculate run list ib allocation size */
 75	*rlib_size = process_count * pm->pmf->map_process_size +
 76		     queue_count * map_queue_size;
 77
 78	/*
 79	 * Increase the allocation size in case we need a chained run list
 80	 * when over subscription
 81	 */
 82	if (*over_subscription)
 83		*rlib_size += pm->pmf->runlist_size;
 84
 85	dev_dbg(dev, "runlist ib size %d\n", *rlib_size);
 86}
 87
 88static int pm_allocate_runlist_ib(struct packet_manager *pm,
 89				unsigned int **rl_buffer,
 90				uint64_t *rl_gpu_buffer,
 91				unsigned int *rl_buffer_size,
 92				bool *is_over_subscription)
 93{
 94	struct kfd_node *node = pm->dqm->dev;
 95	struct device *dev = node->adev->dev;
 96	int retval;
 97
 98	if (WARN_ON(pm->allocated))
 99		return -EINVAL;
100
101	pm_calc_rlib_size(pm, rl_buffer_size, is_over_subscription);
102
103	mutex_lock(&pm->lock);
104
 105	retval = kfd_gtt_sa_allocate(node, *rl_buffer_size, &pm->ib_buffer_obj);
106
107	if (retval) {
108		dev_err(dev, "Failed to allocate runlist IB\n");
109		goto out;
110	}
111
112	*(void **)rl_buffer = pm->ib_buffer_obj->cpu_ptr;
113	*rl_gpu_buffer = pm->ib_buffer_obj->gpu_addr;
114
115	memset(*rl_buffer, 0, *rl_buffer_size);
116	pm->allocated = true;
117
118out:
119	mutex_unlock(&pm->lock);
120	return retval;
121}
122
123static int pm_create_runlist_ib(struct packet_manager *pm,
124				struct list_head *queues,
125				uint64_t *rl_gpu_addr,
126				size_t *rl_size_bytes)
127{
128	unsigned int alloc_size_bytes;
129	unsigned int *rl_buffer, rl_wptr, i;
130	struct kfd_node *node = pm->dqm->dev;
131	struct device *dev = node->adev->dev;
132	int retval, processes_mapped;
133	struct device_process_node *cur;
134	struct qcm_process_device *qpd;
135	struct queue *q;
136	struct kernel_queue *kq;
137	bool is_over_subscription;
138
139	rl_wptr = retval = processes_mapped = 0;
140
141	retval = pm_allocate_runlist_ib(pm, &rl_buffer, rl_gpu_addr,
142				&alloc_size_bytes, &is_over_subscription);
143	if (retval)
144		return retval;
145
146	*rl_size_bytes = alloc_size_bytes;
147	pm->ib_size_bytes = alloc_size_bytes;
148
149	dev_dbg(dev, "Building runlist ib process count: %d queues count %d\n",
150		pm->dqm->processes_count, pm->dqm->active_queue_count);
151
152	/* build the run list ib packet */
153	list_for_each_entry(cur, queues, list) {
154		qpd = cur->qpd;
155		/* build map process packet */
156		if (processes_mapped >= pm->dqm->processes_count) {
157			dev_dbg(dev, "Not enough space left in runlist IB\n");
158			pm_release_ib(pm);
159			return -ENOMEM;
160		}
161
162		retval = pm->pmf->map_process(pm, &rl_buffer[rl_wptr], qpd);
163		if (retval)
164			return retval;
165
166		processes_mapped++;
167		inc_wptr(&rl_wptr, pm->pmf->map_process_size,
168				alloc_size_bytes);
169
170		list_for_each_entry(kq, &qpd->priv_queue_list, list) {
171			if (!kq->queue->properties.is_active)
172				continue;
173
174			dev_dbg(dev,
175				"static_queue, mapping kernel q %d, is debug status %d\n",
176				kq->queue->queue, qpd->is_debug);
177
178			retval = pm->pmf->map_queues(pm,
179						&rl_buffer[rl_wptr],
180						kq->queue,
181						qpd->is_debug);
182			if (retval)
183				return retval;
184
185			inc_wptr(&rl_wptr,
186				pm->pmf->map_queues_size,
187				alloc_size_bytes);
188		}
189
190		list_for_each_entry(q, &qpd->queues_list, list) {
191			if (!q->properties.is_active)
192				continue;
193
194			dev_dbg(dev,
195				"static_queue, mapping user queue %d, is debug status %d\n",
196				q->queue, qpd->is_debug);
197
198			retval = pm->pmf->map_queues(pm,
199						&rl_buffer[rl_wptr],
200						q,
201						qpd->is_debug);
202
203			if (retval)
204				return retval;
205
206			inc_wptr(&rl_wptr,
207				pm->pmf->map_queues_size,
208				alloc_size_bytes);
209		}
210	}
211
212	dev_dbg(dev, "Finished map process and queues to runlist\n");
213
214	if (is_over_subscription) {
215		if (!pm->is_over_subscription)
216			dev_warn(
217				dev,
218				"Runlist is getting oversubscribed. Expect reduced ROCm performance.\n");
219		retval = pm->pmf->runlist(pm, &rl_buffer[rl_wptr],
220					*rl_gpu_addr,
221					alloc_size_bytes / sizeof(uint32_t),
222					true);
223	}
224	pm->is_over_subscription = is_over_subscription;
225
226	for (i = 0; i < alloc_size_bytes / sizeof(uint32_t); i++)
227		pr_debug("0x%2X ", rl_buffer[i]);
228	pr_debug("\n");
229
230	return retval;
231}
232
233int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm)
234{
235	switch (dqm->dev->adev->asic_type) {
236	case CHIP_KAVERI:
237	case CHIP_HAWAII:
238		/* PM4 packet structures on CIK are the same as on VI */
239	case CHIP_CARRIZO:
240	case CHIP_TONGA:
241	case CHIP_FIJI:
242	case CHIP_POLARIS10:
243	case CHIP_POLARIS11:
244	case CHIP_POLARIS12:
245	case CHIP_VEGAM:
246		pm->pmf = &kfd_vi_pm_funcs;
 247		break;
248	default:
249		if (KFD_GC_VERSION(dqm->dev) == IP_VERSION(9, 4, 2) ||
250		    KFD_GC_VERSION(dqm->dev) == IP_VERSION(9, 4, 3) ||
251		    KFD_GC_VERSION(dqm->dev) == IP_VERSION(9, 4, 4))
252			pm->pmf = &kfd_aldebaran_pm_funcs;
253		else if (KFD_GC_VERSION(dqm->dev) >= IP_VERSION(9, 0, 1))
254			pm->pmf = &kfd_v9_pm_funcs;
255		else {
256			WARN(1, "Unexpected ASIC family %u",
257			     dqm->dev->adev->asic_type);
258			return -EINVAL;
259		}
260	}
261
262	pm->dqm = dqm;
263	mutex_init(&pm->lock);
264	pm->priv_queue = kernel_queue_init(dqm->dev, KFD_QUEUE_TYPE_HIQ);
265	if (!pm->priv_queue) {
266		mutex_destroy(&pm->lock);
267		return -ENOMEM;
268	}
269	pm->allocated = false;
270
271	return 0;
272}
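/*
 * Compared with older kernels, the dispatch above keys on the GC IP version
 * instead of an exhaustive chip list: GC 9.4.2/9.4.3/9.4.4 get the Aldebaran
 * packet writers, anything else at GC 9.0.1 or newer gets the v9 writers,
 * and only the remaining pre-GFX9 parts still go through the explicit
 * CHIP_* cases.
 */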
273
274void pm_uninit(struct packet_manager *pm)
275{
276	mutex_destroy(&pm->lock);
277	kernel_queue_uninit(pm->priv_queue);
278	pm->priv_queue = NULL;
279}
280
281int pm_send_set_resources(struct packet_manager *pm,
282				struct scheduling_resources *res)
283{
284	struct kfd_node *node = pm->dqm->dev;
285	struct device *dev = node->adev->dev;
286	uint32_t *buffer, size;
287	int retval = 0;
288
289	size = pm->pmf->set_resources_size;
290	mutex_lock(&pm->lock);
291	kq_acquire_packet_buffer(pm->priv_queue,
292					size / sizeof(uint32_t),
293					(unsigned int **)&buffer);
294	if (!buffer) {
295		dev_err(dev, "Failed to allocate buffer on kernel queue\n");
296		retval = -ENOMEM;
297		goto out;
298	}
299
300	retval = pm->pmf->set_resources(pm, buffer, res);
301	if (!retval)
302		retval = kq_submit_packet(pm->priv_queue);
303	else
304		kq_rollback_packet(pm->priv_queue);
305
306out:
307	mutex_unlock(&pm->lock);
308
309	return retval;
310}
311
312int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues)
313{
314	uint64_t rl_gpu_ib_addr;
315	uint32_t *rl_buffer;
316	size_t rl_ib_size, packet_size_dwords;
317	int retval;
318
319	retval = pm_create_runlist_ib(pm, dqm_queues, &rl_gpu_ib_addr,
320					&rl_ib_size);
321	if (retval)
322		goto fail_create_runlist_ib;
323
324	pr_debug("runlist IB address: 0x%llX\n", rl_gpu_ib_addr);
325
326	packet_size_dwords = pm->pmf->runlist_size / sizeof(uint32_t);
327	mutex_lock(&pm->lock);
328
329	retval = kq_acquire_packet_buffer(pm->priv_queue,
330					packet_size_dwords, &rl_buffer);
331	if (retval)
332		goto fail_acquire_packet_buffer;
333
334	retval = pm->pmf->runlist(pm, rl_buffer, rl_gpu_ib_addr,
335					rl_ib_size / sizeof(uint32_t), false);
336	if (retval)
337		goto fail_create_runlist;
338
339	retval = kq_submit_packet(pm->priv_queue);
340
341	mutex_unlock(&pm->lock);
342
343	return retval;
344
345fail_create_runlist:
346	kq_rollback_packet(pm->priv_queue);
347fail_acquire_packet_buffer:
348	mutex_unlock(&pm->lock);
349fail_create_runlist_ib:
350	pm_release_ib(pm);
351	return retval;
352}
353
354int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address,
355			uint64_t fence_value)
356{
357	struct kfd_node *node = pm->dqm->dev;
358	struct device *dev = node->adev->dev;
359	uint32_t *buffer, size;
360	int retval = 0;
361
362	if (WARN_ON(!fence_address))
363		return -EFAULT;
364
365	size = pm->pmf->query_status_size;
366	mutex_lock(&pm->lock);
367	kq_acquire_packet_buffer(pm->priv_queue,
368			size / sizeof(uint32_t), (unsigned int **)&buffer);
369	if (!buffer) {
370		dev_err(dev, "Failed to allocate buffer on kernel queue\n");
371		retval = -ENOMEM;
372		goto out;
373	}
374
375	retval = pm->pmf->query_status(pm, buffer, fence_address, fence_value);
376	if (!retval)
377		retval = kq_submit_packet(pm->priv_queue);
378	else
379		kq_rollback_packet(pm->priv_queue);
380
381out:
382	mutex_unlock(&pm->lock);
383	return retval;
384}
385
386int pm_update_grace_period(struct packet_manager *pm, uint32_t grace_period)
387{
388	struct kfd_node *node = pm->dqm->dev;
389	struct device *dev = node->adev->dev;
390	int retval = 0;
391	uint32_t *buffer, size;
392
393	size = pm->pmf->set_grace_period_size;
394
395	mutex_lock(&pm->lock);
396
397	if (size) {
398		kq_acquire_packet_buffer(pm->priv_queue,
399			size / sizeof(uint32_t),
400			(unsigned int **)&buffer);
401
402		if (!buffer) {
403			dev_err(dev,
404				"Failed to allocate buffer on kernel queue\n");
405			retval = -ENOMEM;
406			goto out;
407		}
408
409		retval = pm->pmf->set_grace_period(pm, buffer, grace_period);
410		if (!retval)
411			retval = kq_submit_packet(pm->priv_queue);
412		else
413			kq_rollback_packet(pm->priv_queue);
414	}
415
416out:
417	mutex_unlock(&pm->lock);
418	return retval;
419}
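/*
 * pm_update_grace_period() follows the same acquire/fill/submit-or-rollback
 * pattern as the other pm_send_*() helpers, but is a no-op (returning 0)
 * when the per-ASIC packet writer provides no set_grace_period packet, i.e.
 * when pm->pmf->set_grace_period_size is zero.
 */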
420
421int pm_send_unmap_queue(struct packet_manager *pm,
422			enum kfd_unmap_queues_filter filter,
 423			uint32_t filter_param, bool reset)
424{
425	struct kfd_node *node = pm->dqm->dev;
426	struct device *dev = node->adev->dev;
427	uint32_t *buffer, size;
428	int retval = 0;
429
430	size = pm->pmf->unmap_queues_size;
431	mutex_lock(&pm->lock);
432	kq_acquire_packet_buffer(pm->priv_queue,
433			size / sizeof(uint32_t), (unsigned int **)&buffer);
434	if (!buffer) {
435		dev_err(dev, "Failed to allocate buffer on kernel queue\n");
436		retval = -ENOMEM;
437		goto out;
438	}
439
 440	retval = pm->pmf->unmap_queues(pm, buffer, filter, filter_param, reset);
441	if (!retval)
442		retval = kq_submit_packet(pm->priv_queue);
443	else
444		kq_rollback_packet(pm->priv_queue);
445
446out:
447	mutex_unlock(&pm->lock);
448	return retval;
449}
450
451void pm_release_ib(struct packet_manager *pm)
452{
453	mutex_lock(&pm->lock);
454	if (pm->allocated) {
455		kfd_gtt_sa_free(pm->dqm->dev, pm->ib_buffer_obj);
456		pm->allocated = false;
457	}
458	mutex_unlock(&pm->lock);
459}
460
461#if defined(CONFIG_DEBUG_FS)
462
463int pm_debugfs_runlist(struct seq_file *m, void *data)
464{
465	struct packet_manager *pm = data;
466
467	mutex_lock(&pm->lock);
468
469	if (!pm->allocated) {
470		seq_puts(m, "  No active runlist\n");
471		goto out;
472	}
473
474	seq_hex_dump(m, "  ", DUMP_PREFIX_OFFSET, 32, 4,
475		     pm->ib_buffer_obj->cpu_ptr, pm->ib_size_bytes, false);
476
477out:
478	mutex_unlock(&pm->lock);
479	return 0;
480}
481
482int pm_debugfs_hang_hws(struct packet_manager *pm)
483{
484	struct kfd_node *node = pm->dqm->dev;
485	struct device *dev = node->adev->dev;
486	uint32_t *buffer, size;
487	int r = 0;
488
489	if (!pm->priv_queue)
490		return -EAGAIN;
491
492	size = pm->pmf->query_status_size;
493	mutex_lock(&pm->lock);
494	kq_acquire_packet_buffer(pm->priv_queue,
495			size / sizeof(uint32_t), (unsigned int **)&buffer);
496	if (!buffer) {
497		dev_err(dev, "Failed to allocate buffer on kernel queue\n");
498		r = -ENOMEM;
499		goto out;
500	}
501	memset(buffer, 0x55, size);
502	kq_submit_packet(pm->priv_queue);
503
504	dev_info(dev, "Submitting %x %x %x %x %x %x %x to HIQ to hang the HWS.",
505		 buffer[0], buffer[1], buffer[2], buffer[3], buffer[4],
506		 buffer[5], buffer[6]);
507out:
508	mutex_unlock(&pm->lock);
509	return r;
510}
511
512
513#endif