/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/slab.h>
#include <linux/mutex.h>
#include "kfd_device_queue_manager.h"
#include "kfd_kernel_queue.h"
#include "kfd_priv.h"

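/*
 * Advance the runlist IB write pointer (kept in dwords) by increment_bytes,
 * warning if the new offset would run past the end of the IB buffer.
 */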
static inline void inc_wptr(unsigned int *wptr, unsigned int increment_bytes,
				unsigned int buffer_size_bytes)
{
	unsigned int temp = *wptr + increment_bytes / sizeof(uint32_t);

	WARN((temp * sizeof(uint32_t)) > buffer_size_bytes,
	     "Runlist IB overflow");
	*wptr = temp;
}

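/*
 * Compute the runlist IB size required for the current set of processes and
 * queues, and report whether the runlist is oversubscribed (more processes,
 * compute queues or GWS queues than the hardware scheduler can run at once).
 */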
static void pm_calc_rlib_size(struct packet_manager *pm,
				unsigned int *rlib_size,
				bool *over_subscription)
{
	unsigned int process_count, queue_count, compute_queue_count, gws_queue_count;
	unsigned int map_queue_size;
	unsigned int max_proc_per_quantum = 1;
	struct kfd_dev *dev = pm->dqm->dev;

	process_count = pm->dqm->processes_count;
	queue_count = pm->dqm->active_queue_count;
	compute_queue_count = pm->dqm->active_cp_queue_count;
	gws_queue_count = pm->dqm->gws_queue_count;

	/* check if there is over subscription
	 * Note: the arbitration between the number of VMIDs and
	 * hws_max_conc_proc has been done in
	 * kgd2kfd_device_init().
	 */
	*over_subscription = false;

	if (dev->max_proc_per_quantum > 1)
		max_proc_per_quantum = dev->max_proc_per_quantum;

	if ((process_count > max_proc_per_quantum) ||
	    compute_queue_count > get_cp_queues_num(pm->dqm) ||
	    gws_queue_count > 1) {
		*over_subscription = true;
		pr_debug("Over subscribed runlist\n");
	}

	map_queue_size = pm->pmf->map_queues_size;
	/* calculate run list ib allocation size */
	*rlib_size = process_count * pm->pmf->map_process_size +
		     queue_count * map_queue_size;

	/*
	 * Increase the allocation size in case we need a chained run list
	 * when over subscription
	 */
	if (*over_subscription)
		*rlib_size += pm->pmf->runlist_size;

	pr_debug("runlist ib size %d\n", *rlib_size);
}

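/*
 * Allocate and zero the runlist IB in GTT memory. On success the CPU and GPU
 * addresses of the buffer are returned through rl_buffer and rl_gpu_buffer,
 * and pm->allocated is set.
 */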
static int pm_allocate_runlist_ib(struct packet_manager *pm,
				unsigned int **rl_buffer,
				uint64_t *rl_gpu_buffer,
				unsigned int *rl_buffer_size,
				bool *is_over_subscription)
{
	int retval;

	if (WARN_ON(pm->allocated))
		return -EINVAL;

	pm_calc_rlib_size(pm, rl_buffer_size, is_over_subscription);

	mutex_lock(&pm->lock);

	retval = kfd_gtt_sa_allocate(pm->dqm->dev, *rl_buffer_size,
					&pm->ib_buffer_obj);

	if (retval) {
		pr_err("Failed to allocate runlist IB\n");
		goto out;
	}

	*(void **)rl_buffer = pm->ib_buffer_obj->cpu_ptr;
	*rl_gpu_buffer = pm->ib_buffer_obj->gpu_addr;

	memset(*rl_buffer, 0, *rl_buffer_size);
	pm->allocated = true;

out:
	mutex_unlock(&pm->lock);
	return retval;
}

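/*
 * Build the runlist IB: one map-process packet per process, followed by a
 * map-queues packet for each of its active kernel and user queues. If the
 * runlist is oversubscribed, a chained runlist packet is appended so the
 * hardware scheduler loops back to the start of the IB.
 */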
static int pm_create_runlist_ib(struct packet_manager *pm,
				struct list_head *queues,
				uint64_t *rl_gpu_addr,
				size_t *rl_size_bytes)
{
	unsigned int alloc_size_bytes;
	unsigned int *rl_buffer, rl_wptr, i;
	int retval, processes_mapped;
	struct device_process_node *cur;
	struct qcm_process_device *qpd;
	struct queue *q;
	struct kernel_queue *kq;
	bool is_over_subscription;

	rl_wptr = retval = processes_mapped = 0;

	retval = pm_allocate_runlist_ib(pm, &rl_buffer, rl_gpu_addr,
				&alloc_size_bytes, &is_over_subscription);
	if (retval)
		return retval;

	*rl_size_bytes = alloc_size_bytes;
	pm->ib_size_bytes = alloc_size_bytes;

	pr_debug("Building runlist ib process count: %d queues count %d\n",
		pm->dqm->processes_count, pm->dqm->active_queue_count);

	/* build the run list ib packet */
	list_for_each_entry(cur, queues, list) {
		qpd = cur->qpd;
		/* build map process packet */
		if (processes_mapped >= pm->dqm->processes_count) {
			pr_debug("Not enough space left in runlist IB\n");
			pm_release_ib(pm);
			return -ENOMEM;
		}

		retval = pm->pmf->map_process(pm, &rl_buffer[rl_wptr], qpd);
		if (retval)
			return retval;

		processes_mapped++;
		inc_wptr(&rl_wptr, pm->pmf->map_process_size,
				alloc_size_bytes);

		list_for_each_entry(kq, &qpd->priv_queue_list, list) {
			if (!kq->queue->properties.is_active)
				continue;

			pr_debug("static_queue, mapping kernel q %d, is debug status %d\n",
				kq->queue->queue, qpd->is_debug);

			retval = pm->pmf->map_queues(pm,
						&rl_buffer[rl_wptr],
						kq->queue,
						qpd->is_debug);
			if (retval)
				return retval;

			inc_wptr(&rl_wptr,
				pm->pmf->map_queues_size,
				alloc_size_bytes);
		}

		list_for_each_entry(q, &qpd->queues_list, list) {
			if (!q->properties.is_active)
				continue;

			pr_debug("static_queue, mapping user queue %d, is debug status %d\n",
				q->queue, qpd->is_debug);

			retval = pm->pmf->map_queues(pm,
						&rl_buffer[rl_wptr],
						q,
						qpd->is_debug);

			if (retval)
				return retval;

			inc_wptr(&rl_wptr,
				pm->pmf->map_queues_size,
				alloc_size_bytes);
		}
	}

	pr_debug("Finished map process and queues to runlist\n");

	if (is_over_subscription) {
		if (!pm->is_over_subscription)
			pr_warn("Runlist is getting oversubscribed. Expect reduced ROCm performance.\n");
		retval = pm->pmf->runlist(pm, &rl_buffer[rl_wptr],
					*rl_gpu_addr,
					alloc_size_bytes / sizeof(uint32_t),
					true);
	}
	pm->is_over_subscription = is_over_subscription;

	for (i = 0; i < alloc_size_bytes / sizeof(uint32_t); i++)
		pr_debug("0x%2X ", rl_buffer[i]);
	pr_debug("\n");

	return retval;
}

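/*
 * Initialize the packet manager for a device queue manager: pick the PM4
 * packet-writer functions for the ASIC family and create the HIQ kernel
 * queue used to submit packets to the hardware scheduler.
 */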
int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm)
{
	switch (dqm->dev->device_info->asic_family) {
	case CHIP_KAVERI:
	case CHIP_HAWAII:
		/* PM4 packet structures on CIK are the same as on VI */
	case CHIP_CARRIZO:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
		pm->pmf = &kfd_vi_pm_funcs;
		break;
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
	case CHIP_RAVEN:
	case CHIP_RENOIR:
	case CHIP_ARCTURUS:
	case CHIP_NAVI10:
	case CHIP_NAVI12:
	case CHIP_NAVI14:
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
		pm->pmf = &kfd_v9_pm_funcs;
		break;
	default:
		WARN(1, "Unexpected ASIC family %u",
		     dqm->dev->device_info->asic_family);
		return -EINVAL;
	}

	pm->dqm = dqm;
	mutex_init(&pm->lock);
	pm->priv_queue = kernel_queue_init(dqm->dev, KFD_QUEUE_TYPE_HIQ);
	if (!pm->priv_queue) {
		mutex_destroy(&pm->lock);
		return -ENOMEM;
	}
	pm->allocated = false;

	return 0;
}

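/*
 * Tear down the packet manager and its HIQ kernel queue. The hanging flag
 * is forwarded to kernel_queue_uninit(), indicating that the hardware
 * scheduler is hung and queue teardown should avoid touching the hardware.
 */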
void pm_uninit(struct packet_manager *pm, bool hanging)
{
	mutex_destroy(&pm->lock);
	kernel_queue_uninit(pm->priv_queue, hanging);
}

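/*
 * Submit a SET_RESOURCES packet on the HIQ, handing the scheduling
 * resources described by res over to the hardware scheduler.
 */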
int pm_send_set_resources(struct packet_manager *pm,
				struct scheduling_resources *res)
{
	uint32_t *buffer, size;
	int retval = 0;

	size = pm->pmf->set_resources_size;
	mutex_lock(&pm->lock);
	kq_acquire_packet_buffer(pm->priv_queue,
					size / sizeof(uint32_t),
					(unsigned int **)&buffer);
	if (!buffer) {
		pr_err("Failed to allocate buffer on kernel queue\n");
		retval = -ENOMEM;
		goto out;
	}

	retval = pm->pmf->set_resources(pm, buffer, res);
	if (!retval)
		kq_submit_packet(pm->priv_queue);
	else
		kq_rollback_packet(pm->priv_queue);

out:
	mutex_unlock(&pm->lock);

	return retval;
}

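/*
 * Build a runlist IB for the given queue list and submit a runlist packet
 * on the HIQ that points the hardware scheduler at it.
 */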
int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues)
{
	uint64_t rl_gpu_ib_addr;
	uint32_t *rl_buffer;
	size_t rl_ib_size, packet_size_dwords;
	int retval;

	retval = pm_create_runlist_ib(pm, dqm_queues, &rl_gpu_ib_addr,
					&rl_ib_size);
	if (retval)
		goto fail_create_runlist_ib;

	pr_debug("runlist IB address: 0x%llX\n", rl_gpu_ib_addr);

	packet_size_dwords = pm->pmf->runlist_size / sizeof(uint32_t);
	mutex_lock(&pm->lock);

	retval = kq_acquire_packet_buffer(pm->priv_queue,
					packet_size_dwords, &rl_buffer);
	if (retval)
		goto fail_acquire_packet_buffer;

	retval = pm->pmf->runlist(pm, rl_buffer, rl_gpu_ib_addr,
					rl_ib_size / sizeof(uint32_t), false);
	if (retval)
		goto fail_create_runlist;

	kq_submit_packet(pm->priv_queue);

	mutex_unlock(&pm->lock);

	return retval;

fail_create_runlist:
	kq_rollback_packet(pm->priv_queue);
fail_acquire_packet_buffer:
	mutex_unlock(&pm->lock);
fail_create_runlist_ib:
	pm_release_ib(pm);
	return retval;
}

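/*
 * Submit a QUERY_STATUS packet on the HIQ. The hardware scheduler writes
 * fence_value to fence_address once it has processed the preceding packets,
 * which callers use to synchronize with the scheduler.
 */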
int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address,
			uint32_t fence_value)
{
	uint32_t *buffer, size;
	int retval = 0;

	if (WARN_ON(!fence_address))
		return -EFAULT;

	size = pm->pmf->query_status_size;
	mutex_lock(&pm->lock);
	kq_acquire_packet_buffer(pm->priv_queue,
			size / sizeof(uint32_t), (unsigned int **)&buffer);
	if (!buffer) {
		pr_err("Failed to allocate buffer on kernel queue\n");
		retval = -ENOMEM;
		goto out;
	}

	retval = pm->pmf->query_status(pm, buffer, fence_address, fence_value);
	if (!retval)
		kq_submit_packet(pm->priv_queue);
	else
		kq_rollback_packet(pm->priv_queue);

out:
	mutex_unlock(&pm->lock);
	return retval;
}

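/*
 * Submit an UNMAP_QUEUES packet on the HIQ, asking the hardware scheduler
 * to preempt (or, when reset is set, forcibly reset) the queues selected by
 * filter and filter_param.
 */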
int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type,
			enum kfd_unmap_queues_filter filter,
			uint32_t filter_param, bool reset,
			unsigned int sdma_engine)
{
	uint32_t *buffer, size;
	int retval = 0;

	size = pm->pmf->unmap_queues_size;
	mutex_lock(&pm->lock);
	kq_acquire_packet_buffer(pm->priv_queue,
			size / sizeof(uint32_t), (unsigned int **)&buffer);
	if (!buffer) {
		pr_err("Failed to allocate buffer on kernel queue\n");
		retval = -ENOMEM;
		goto out;
	}

	retval = pm->pmf->unmap_queues(pm, buffer, type, filter, filter_param,
				       reset, sdma_engine);
	if (!retval)
		kq_submit_packet(pm->priv_queue);
	else
		kq_rollback_packet(pm->priv_queue);

out:
	mutex_unlock(&pm->lock);
	return retval;
}

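/*
 * Free the runlist IB allocated by pm_allocate_runlist_ib(), if any.
 */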
void pm_release_ib(struct packet_manager *pm)
{
	mutex_lock(&pm->lock);
	if (pm->allocated) {
		kfd_gtt_sa_free(pm->dqm->dev, pm->ib_buffer_obj);
		pm->allocated = false;
	}
	mutex_unlock(&pm->lock);
}

#if defined(CONFIG_DEBUG_FS)

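/*
 * debugfs: hex-dump the currently allocated runlist IB, or report that no
 * runlist is active.
 */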
int pm_debugfs_runlist(struct seq_file *m, void *data)
{
	struct packet_manager *pm = data;

	mutex_lock(&pm->lock);

	if (!pm->allocated) {
		seq_puts(m, "  No active runlist\n");
		goto out;
	}

	seq_hex_dump(m, "  ", DUMP_PREFIX_OFFSET, 32, 4,
		     pm->ib_buffer_obj->cpu_ptr, pm->ib_size_bytes, false);

out:
	mutex_unlock(&pm->lock);
	return 0;
}

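/*
 * debugfs: deliberately hang the hardware scheduler by submitting a
 * garbage (0x55-filled) packet on the HIQ.
 */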
int pm_debugfs_hang_hws(struct packet_manager *pm)
{
	uint32_t *buffer, size;
	int r = 0;

	size = pm->pmf->query_status_size;
	mutex_lock(&pm->lock);
	kq_acquire_packet_buffer(pm->priv_queue,
			size / sizeof(uint32_t), (unsigned int **)&buffer);
	if (!buffer) {
		pr_err("Failed to allocate buffer on kernel queue\n");
		r = -ENOMEM;
		goto out;
	}
	memset(buffer, 0x55, size);
	kq_submit_packet(pm->priv_queue);

	pr_info("Submitting %x %x %x %x %x %x %x to HIQ to hang the HWS.",
		buffer[0], buffer[1], buffer[2], buffer[3],
		buffer[4], buffer[5], buffer[6]);
out:
	mutex_unlock(&pm->lock);
	return r;
}

#endif