/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/slab.h>
#include <linux/mutex.h>
#include "kfd_device_queue_manager.h"
#include "kfd_kernel_queue.h"
#include "kfd_priv.h"

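/*
 * inc_wptr() - Advance a runlist IB write pointer.
 *
 * @wptr counts dwords, so @increment_bytes is converted before being
 * added. Warns (but still advances) if the new offset would exceed
 * @buffer_size_bytes; pm_calc_rlib_size() is expected to have sized
 * the buffer so that this cannot happen.
 */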
static inline void inc_wptr(unsigned int *wptr, unsigned int increment_bytes,
				unsigned int buffer_size_bytes)
{
	unsigned int temp = *wptr + increment_bytes / sizeof(uint32_t);

	WARN((temp * sizeof(uint32_t)) > buffer_size_bytes,
	     "Runlist IB overflow");
	*wptr = temp;
}

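/*
 * pm_calc_rlib_size() - Compute the allocation size of the runlist IB.
 *
 * The IB needs one map-process packet per process and one map-queues
 * packet per active queue. The runlist is considered over-subscribed
 * when there are more processes than the per-quantum process limit,
 * more active compute queues than the device provides, or more than
 * one queue using GWS; in that case extra room is reserved for a
 * chaining runlist packet.
 */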
static void pm_calc_rlib_size(struct packet_manager *pm,
				unsigned int *rlib_size,
				bool *over_subscription)
{
	unsigned int process_count, queue_count, compute_queue_count, gws_queue_count;
	unsigned int map_queue_size;
	unsigned int max_proc_per_quantum = 1;
	struct kfd_dev *dev = pm->dqm->dev;

	process_count = pm->dqm->processes_count;
	queue_count = pm->dqm->active_queue_count;
	compute_queue_count = pm->dqm->active_cp_queue_count;
	gws_queue_count = pm->dqm->gws_queue_count;

	/* Check if there is over-subscription.
	 * Note: the arbitration between the number of VMIDs and
	 * hws_max_conc_proc has been done in
	 * kgd2kfd_device_init().
	 */
	*over_subscription = false;

	if (dev->max_proc_per_quantum > 1)
		max_proc_per_quantum = dev->max_proc_per_quantum;

	if ((process_count > max_proc_per_quantum) ||
	    compute_queue_count > get_cp_queues_num(pm->dqm) ||
	    gws_queue_count > 1) {
		*over_subscription = true;
		pr_debug("Over subscribed runlist\n");
	}

	map_queue_size = pm->pmf->map_queues_size;
	/* calculate run list ib allocation size */
	*rlib_size = process_count * pm->pmf->map_process_size +
		     queue_count * map_queue_size;

	/*
	 * Increase the allocation size in case we need a chained run list
	 * when over-subscribed
	 */
	if (*over_subscription)
		*rlib_size += pm->pmf->runlist_size;

	pr_debug("runlist ib size %d\n", *rlib_size);
}

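/*
 * pm_allocate_runlist_ib() - Allocate and zero a runlist IB from the
 * GTT sub-allocator, returning its CPU and GPU addresses. Only one IB
 * may be outstanding at a time; pm->allocated tracks ownership.
 */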
static int pm_allocate_runlist_ib(struct packet_manager *pm,
				unsigned int **rl_buffer,
				uint64_t *rl_gpu_buffer,
				unsigned int *rl_buffer_size,
				bool *is_over_subscription)
{
	int retval;

	if (WARN_ON(pm->allocated))
		return -EINVAL;

	pm_calc_rlib_size(pm, rl_buffer_size, is_over_subscription);

	mutex_lock(&pm->lock);

	retval = kfd_gtt_sa_allocate(pm->dqm->dev, *rl_buffer_size,
					&pm->ib_buffer_obj);

	if (retval) {
		pr_err("Failed to allocate runlist IB\n");
		goto out;
	}

	*(void **)rl_buffer = pm->ib_buffer_obj->cpu_ptr;
	*rl_gpu_buffer = pm->ib_buffer_obj->gpu_addr;

	memset(*rl_buffer, 0, *rl_buffer_size);
	pm->allocated = true;

out:
	mutex_unlock(&pm->lock);
	return retval;
}

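/*
 * pm_create_runlist_ib() - Build the runlist IB for the hardware
 * scheduler: for each process, emit a map-process packet followed by
 * map-queues packets for its active kernel and user queues. If the
 * runlist is over-subscribed, a final chaining packet loops the
 * runlist back on itself so the HWS time-slices the processes.
 */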
static int pm_create_runlist_ib(struct packet_manager *pm,
				struct list_head *queues,
				uint64_t *rl_gpu_addr,
				size_t *rl_size_bytes)
{
	unsigned int alloc_size_bytes;
	unsigned int *rl_buffer, rl_wptr, i;
	int retval, processes_mapped;
	struct device_process_node *cur;
	struct qcm_process_device *qpd;
	struct queue *q;
	struct kernel_queue *kq;
	bool is_over_subscription;

	rl_wptr = retval = processes_mapped = 0;

	retval = pm_allocate_runlist_ib(pm, &rl_buffer, rl_gpu_addr,
				&alloc_size_bytes, &is_over_subscription);
	if (retval)
		return retval;

	*rl_size_bytes = alloc_size_bytes;
	pm->ib_size_bytes = alloc_size_bytes;

	pr_debug("Building runlist ib process count: %d queues count %d\n",
		pm->dqm->processes_count, pm->dqm->active_queue_count);

	/* build the run list ib packet */
	list_for_each_entry(cur, queues, list) {
		qpd = cur->qpd;
		/* build map process packet */
		if (processes_mapped >= pm->dqm->processes_count) {
			pr_debug("Not enough space left in runlist IB\n");
			pm_release_ib(pm);
			return -ENOMEM;
		}

		retval = pm->pmf->map_process(pm, &rl_buffer[rl_wptr], qpd);
		if (retval)
			return retval;

		processes_mapped++;
		inc_wptr(&rl_wptr, pm->pmf->map_process_size,
				alloc_size_bytes);

		list_for_each_entry(kq, &qpd->priv_queue_list, list) {
			if (!kq->queue->properties.is_active)
				continue;

			pr_debug("static_queue, mapping kernel q %d, is debug status %d\n",
				kq->queue->queue, qpd->is_debug);

			retval = pm->pmf->map_queues(pm,
						&rl_buffer[rl_wptr],
						kq->queue,
						qpd->is_debug);
			if (retval)
				return retval;

			inc_wptr(&rl_wptr,
				pm->pmf->map_queues_size,
				alloc_size_bytes);
		}

		list_for_each_entry(q, &qpd->queues_list, list) {
			if (!q->properties.is_active)
				continue;

			pr_debug("static_queue, mapping user queue %d, is debug status %d\n",
				q->queue, qpd->is_debug);

			retval = pm->pmf->map_queues(pm,
						&rl_buffer[rl_wptr],
						q,
						qpd->is_debug);

			if (retval)
				return retval;

			inc_wptr(&rl_wptr,
				pm->pmf->map_queues_size,
				alloc_size_bytes);
		}
	}

	pr_debug("Finished map process and queues to runlist\n");

	if (is_over_subscription) {
		if (!pm->is_over_subscription)
			pr_warn("Runlist is getting oversubscribed. Expect reduced ROCm performance.\n");
		retval = pm->pmf->runlist(pm, &rl_buffer[rl_wptr],
					*rl_gpu_addr,
					alloc_size_bytes / sizeof(uint32_t),
					true);
	}
	pm->is_over_subscription = is_over_subscription;

	for (i = 0; i < alloc_size_bytes / sizeof(uint32_t); i++)
		pr_debug("0x%2X ", rl_buffer[i]);
	pr_debug("\n");

	return retval;
}

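/*
 * pm_init() - Initialize a packet manager: select the ASIC-specific
 * packet formats (VI-style for CIK/VI parts, v9-style for GFX9 and
 * newer) and create the HIQ kernel queue used to talk to the hardware
 * scheduler.
 */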
int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm)
{
	switch (dqm->dev->device_info->asic_family) {
	case CHIP_KAVERI:
	case CHIP_HAWAII:
		/* PM4 packet structures on CIK are the same as on VI */
	case CHIP_CARRIZO:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
		pm->pmf = &kfd_vi_pm_funcs;
		break;
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
	case CHIP_RAVEN:
	case CHIP_RENOIR:
	case CHIP_ARCTURUS:
	case CHIP_NAVI10:
	case CHIP_NAVI12:
	case CHIP_NAVI14:
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
		pm->pmf = &kfd_v9_pm_funcs;
		break;
	default:
		WARN(1, "Unexpected ASIC family %u",
		     dqm->dev->device_info->asic_family);
		return -EINVAL;
	}

	pm->dqm = dqm;
	mutex_init(&pm->lock);
	pm->priv_queue = kernel_queue_init(dqm->dev, KFD_QUEUE_TYPE_HIQ);
	if (!pm->priv_queue) {
		mutex_destroy(&pm->lock);
		return -ENOMEM;
	}
	pm->allocated = false;

	return 0;
}

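/*
 * pm_uninit() - Tear down the packet manager. @hanging tells the
 * kernel queue teardown to skip submissions to hardware that is
 * already known to be unresponsive.
 */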
void pm_uninit(struct packet_manager *pm, bool hanging)
{
	mutex_destroy(&pm->lock);
	kernel_queue_uninit(pm->priv_queue, hanging);
}

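/*
 * pm_send_set_resources() - Submit a set-resources packet on the HIQ,
 * describing the scheduling resources the hardware scheduler may hand
 * out to processes.
 */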
int pm_send_set_resources(struct packet_manager *pm,
				struct scheduling_resources *res)
{
	uint32_t *buffer, size;
	int retval = 0;

	size = pm->pmf->set_resources_size;
	mutex_lock(&pm->lock);
	kq_acquire_packet_buffer(pm->priv_queue,
					size / sizeof(uint32_t),
					(unsigned int **)&buffer);
	if (!buffer) {
		pr_err("Failed to allocate buffer on kernel queue\n");
		retval = -ENOMEM;
		goto out;
	}

	retval = pm->pmf->set_resources(pm, buffer, res);
	if (!retval)
		kq_submit_packet(pm->priv_queue);
	else
		kq_rollback_packet(pm->priv_queue);

out:
	mutex_unlock(&pm->lock);

	return retval;
}

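/*
 * pm_send_runlist() - Build a runlist IB for @dqm_queues and submit a
 * runlist packet on the HIQ that points the hardware scheduler at it.
 * On failure, any partially built IB is released.
 */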
int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues)
{
	uint64_t rl_gpu_ib_addr;
	uint32_t *rl_buffer;
	size_t rl_ib_size, packet_size_dwords;
	int retval;

	retval = pm_create_runlist_ib(pm, dqm_queues, &rl_gpu_ib_addr,
					&rl_ib_size);
	if (retval)
		goto fail_create_runlist_ib;

	pr_debug("runlist IB address: 0x%llX\n", rl_gpu_ib_addr);

	packet_size_dwords = pm->pmf->runlist_size / sizeof(uint32_t);
	mutex_lock(&pm->lock);

	retval = kq_acquire_packet_buffer(pm->priv_queue,
					packet_size_dwords, &rl_buffer);
	if (retval)
		goto fail_acquire_packet_buffer;

	retval = pm->pmf->runlist(pm, rl_buffer, rl_gpu_ib_addr,
					rl_ib_size / sizeof(uint32_t), false);
	if (retval)
		goto fail_create_runlist;

	kq_submit_packet(pm->priv_queue);

	mutex_unlock(&pm->lock);

	return retval;

fail_create_runlist:
	kq_rollback_packet(pm->priv_queue);
fail_acquire_packet_buffer:
	mutex_unlock(&pm->lock);
fail_create_runlist_ib:
	pm_release_ib(pm);
	return retval;
}

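/*
 * pm_send_query_status() - Submit a query-status packet asking the
 * hardware scheduler to write @fence_value to @fence_address once it
 * has processed the preceding packets; callers poll the fence to
 * synchronize with the HWS.
 */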
int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address,
			uint32_t fence_value)
{
	uint32_t *buffer, size;
	int retval = 0;

	if (WARN_ON(!fence_address))
		return -EFAULT;

	size = pm->pmf->query_status_size;
	mutex_lock(&pm->lock);
	kq_acquire_packet_buffer(pm->priv_queue,
			size / sizeof(uint32_t), (unsigned int **)&buffer);
	if (!buffer) {
		pr_err("Failed to allocate buffer on kernel queue\n");
		retval = -ENOMEM;
		goto out;
	}

	retval = pm->pmf->query_status(pm, buffer, fence_address, fence_value);
	if (!retval)
		kq_submit_packet(pm->priv_queue);
	else
		kq_rollback_packet(pm->priv_queue);

out:
	mutex_unlock(&pm->lock);
	return retval;
}

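/*
 * pm_send_unmap_queue() - Submit an unmap-queues packet. @filter and
 * @filter_param select which queues to remove (e.g. a single queue, by
 * PASID, or all queues); @reset requests a queue reset instead of a
 * graceful preemption.
 */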
int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type,
			enum kfd_unmap_queues_filter filter,
			uint32_t filter_param, bool reset,
			unsigned int sdma_engine)
{
	uint32_t *buffer, size;
	int retval = 0;

	size = pm->pmf->unmap_queues_size;
	mutex_lock(&pm->lock);
	kq_acquire_packet_buffer(pm->priv_queue,
			size / sizeof(uint32_t), (unsigned int **)&buffer);
	if (!buffer) {
		pr_err("Failed to allocate buffer on kernel queue\n");
		retval = -ENOMEM;
		goto out;
	}

	retval = pm->pmf->unmap_queues(pm, buffer, type, filter, filter_param,
				       reset, sdma_engine);
	if (!retval)
		kq_submit_packet(pm->priv_queue);
	else
		kq_rollback_packet(pm->priv_queue);

out:
	mutex_unlock(&pm->lock);
	return retval;
}

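/*
 * pm_release_ib() - Free the runlist IB, if one is allocated, and
 * clear pm->allocated so a new runlist can be built.
 */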
void pm_release_ib(struct packet_manager *pm)
{
	mutex_lock(&pm->lock);
	if (pm->allocated) {
		kfd_gtt_sa_free(pm->dqm->dev, pm->ib_buffer_obj);
		pm->allocated = false;
	}
	mutex_unlock(&pm->lock);
}

#if defined(CONFIG_DEBUG_FS)

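/*
 * pm_debugfs_runlist() - Hex-dump the active runlist IB to the debugfs
 * seq_file, or note that no runlist is currently active.
 */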
int pm_debugfs_runlist(struct seq_file *m, void *data)
{
	struct packet_manager *pm = data;

	mutex_lock(&pm->lock);

	if (!pm->allocated) {
		seq_puts(m, "  No active runlist\n");
		goto out;
	}

	seq_hex_dump(m, "  ", DUMP_PREFIX_OFFSET, 32, 4,
		     pm->ib_buffer_obj->cpu_ptr, pm->ib_size_bytes, false);

out:
	mutex_unlock(&pm->lock);
	return 0;
}

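/*
 * pm_debugfs_hang_hws() - Deliberately hang the hardware scheduler by
 * submitting a packet filled with the invalid pattern 0x55 on the HIQ,
 * for exercising GPU-reset and recovery paths.
 */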
int pm_debugfs_hang_hws(struct packet_manager *pm)
{
	uint32_t *buffer, size;
	int r = 0;

	size = pm->pmf->query_status_size;
	mutex_lock(&pm->lock);
	kq_acquire_packet_buffer(pm->priv_queue,
			size / sizeof(uint32_t), (unsigned int **)&buffer);
	if (!buffer) {
		pr_err("Failed to allocate buffer on kernel queue\n");
		r = -ENOMEM;
		goto out;
	}
	memset(buffer, 0x55, size);

	/* Log the packet before submitting it; the HWS may consume it
	 * as soon as the write pointer is updated.
	 */
	pr_info("Submitting %x %x %x %x %x %x %x to HIQ to hang the HWS.",
		buffer[0], buffer[1], buffer[2], buffer[3],
		buffer[4], buffer[5], buffer[6]);

	kq_submit_packet(pm->priv_queue);
out:
	mutex_unlock(&pm->lock);
	return r;
}

#endif