/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/slab.h>
#include <linux/mutex.h>
#include "kfd_device_queue_manager.h"
#include "kfd_kernel_queue.h"
#include "kfd_priv.h"

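/*
 * Advance the runlist IB write pointer (a dword index) by increment_bytes,
 * warning if the new offset would run past the end of the IB buffer.
 */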
static inline void inc_wptr(unsigned int *wptr, unsigned int increment_bytes,
				unsigned int buffer_size_bytes)
{
	unsigned int temp = *wptr + increment_bytes / sizeof(uint32_t);

	WARN((temp * sizeof(uint32_t)) > buffer_size_bytes,
	     "Runlist IB overflow");
	*wptr = temp;
}

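/*
 * Compute the runlist IB allocation size from the number of processes and
 * queues managed by the DQM, and report whether the runlist is
 * oversubscribed (process_count exceeds max_proc_per_quantum or the compute
 * queue count exceeds get_queues_num()).  When oversubscribed, extra space
 * is reserved for a chained runlist packet.
 */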
static void pm_calc_rlib_size(struct packet_manager *pm,
				unsigned int *rlib_size,
				bool *over_subscription)
{
	unsigned int process_count, queue_count, compute_queue_count;
	unsigned int map_queue_size;
	unsigned int max_proc_per_quantum = 1;
	struct kfd_dev *dev = pm->dqm->dev;

	process_count = pm->dqm->processes_count;
	queue_count = pm->dqm->queue_count;
	compute_queue_count = queue_count - pm->dqm->sdma_queue_count -
				pm->dqm->xgmi_sdma_queue_count;

	/* check if there is over subscription
	 * Note: the arbitration between the number of VMIDs and
	 * hws_max_conc_proc has been done in
	 * kgd2kfd_device_init().
	 */
	*over_subscription = false;

	if (dev->max_proc_per_quantum > 1)
		max_proc_per_quantum = dev->max_proc_per_quantum;

	if ((process_count > max_proc_per_quantum) ||
	    compute_queue_count > get_queues_num(pm->dqm)) {
		*over_subscription = true;
		pr_debug("Over subscribed runlist\n");
	}

	map_queue_size = pm->pmf->map_queues_size;
	/* calculate run list ib allocation size */
	*rlib_size = process_count * pm->pmf->map_process_size +
		     queue_count * map_queue_size;

	/*
	 * Increase the allocation size in case we need a chained run list
	 * when over subscription
	 */
	if (*over_subscription)
		*rlib_size += pm->pmf->runlist_size;

	pr_debug("runlist ib size %d\n", *rlib_size);
}

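/*
 * Allocate the runlist IB from GTT sub-allocator memory and zero it.
 * Returns the CPU and GPU addresses of the buffer plus its size, and
 * reports whether oversubscription handling will be needed.
 */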
static int pm_allocate_runlist_ib(struct packet_manager *pm,
				unsigned int **rl_buffer,
				uint64_t *rl_gpu_buffer,
				unsigned int *rl_buffer_size,
				bool *is_over_subscription)
{
	int retval;

	if (WARN_ON(pm->allocated))
		return -EINVAL;

	pm_calc_rlib_size(pm, rl_buffer_size, is_over_subscription);

	mutex_lock(&pm->lock);

	retval = kfd_gtt_sa_allocate(pm->dqm->dev, *rl_buffer_size,
					&pm->ib_buffer_obj);

	if (retval) {
		pr_err("Failed to allocate runlist IB\n");
		goto out;
	}

	*(void **)rl_buffer = pm->ib_buffer_obj->cpu_ptr;
	*rl_gpu_buffer = pm->ib_buffer_obj->gpu_addr;

	memset(*rl_buffer, 0, *rl_buffer_size);
	pm->allocated = true;

out:
	mutex_unlock(&pm->lock);
	return retval;
}

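/*
 * Build the runlist IB: one map-process packet per process, followed by
 * map-queues packets for each active kernel and user queue, and a chained
 * runlist packet at the end when the runlist is oversubscribed.
 */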
static int pm_create_runlist_ib(struct packet_manager *pm,
				struct list_head *queues,
				uint64_t *rl_gpu_addr,
				size_t *rl_size_bytes)
{
	unsigned int alloc_size_bytes;
	unsigned int *rl_buffer, rl_wptr, i;
	int retval, processes_mapped;
	struct device_process_node *cur;
	struct qcm_process_device *qpd;
	struct queue *q;
	struct kernel_queue *kq;
	bool is_over_subscription;

	rl_wptr = retval = processes_mapped = 0;

	retval = pm_allocate_runlist_ib(pm, &rl_buffer, rl_gpu_addr,
				&alloc_size_bytes, &is_over_subscription);
	if (retval)
		return retval;

	*rl_size_bytes = alloc_size_bytes;
	pm->ib_size_bytes = alloc_size_bytes;

	pr_debug("Building runlist ib process count: %d queues count %d\n",
		pm->dqm->processes_count, pm->dqm->queue_count);

	/* build the run list ib packet */
	list_for_each_entry(cur, queues, list) {
		qpd = cur->qpd;
		/* build map process packet */
		if (processes_mapped >= pm->dqm->processes_count) {
			pr_debug("Not enough space left in runlist IB\n");
			pm_release_ib(pm);
			return -ENOMEM;
		}

		retval = pm->pmf->map_process(pm, &rl_buffer[rl_wptr], qpd);
		if (retval)
			return retval;

		processes_mapped++;
		inc_wptr(&rl_wptr, pm->pmf->map_process_size,
				alloc_size_bytes);

		list_for_each_entry(kq, &qpd->priv_queue_list, list) {
			if (!kq->queue->properties.is_active)
				continue;

			pr_debug("static_queue, mapping kernel q %d, is debug status %d\n",
				kq->queue->queue, qpd->is_debug);

			retval = pm->pmf->map_queues(pm,
						&rl_buffer[rl_wptr],
						kq->queue,
						qpd->is_debug);
			if (retval)
				return retval;

			inc_wptr(&rl_wptr,
				pm->pmf->map_queues_size,
				alloc_size_bytes);
		}

		list_for_each_entry(q, &qpd->queues_list, list) {
			if (!q->properties.is_active)
				continue;

			pr_debug("static_queue, mapping user queue %d, is debug status %d\n",
				q->queue, qpd->is_debug);

			retval = pm->pmf->map_queues(pm,
						&rl_buffer[rl_wptr],
						q,
						qpd->is_debug);

			if (retval)
				return retval;

			inc_wptr(&rl_wptr,
				pm->pmf->map_queues_size,
				alloc_size_bytes);
		}
	}

	pr_debug("Finished map process and queues to runlist\n");

	if (is_over_subscription) {
		if (!pm->is_over_subscription)
			pr_warn("Runlist is getting oversubscribed. Expect reduced ROCm performance.\n");
		retval = pm->pmf->runlist(pm, &rl_buffer[rl_wptr],
					*rl_gpu_addr,
					alloc_size_bytes / sizeof(uint32_t),
					true);
	}
	pm->is_over_subscription = is_over_subscription;

	for (i = 0; i < alloc_size_bytes / sizeof(uint32_t); i++)
		pr_debug("0x%2X ", rl_buffer[i]);
	pr_debug("\n");

	return retval;
}

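/*
 * Select the ASIC-specific packet-writing functions, initialize the packet
 * manager lock and create the HIQ kernel queue used to talk to the HWS.
 */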
int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm)
{
	switch (dqm->dev->device_info->asic_family) {
	case CHIP_KAVERI:
	case CHIP_HAWAII:
		/* PM4 packet structures on CIK are the same as on VI */
	case CHIP_CARRIZO:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
		pm->pmf = &kfd_vi_pm_funcs;
		break;
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
	case CHIP_RAVEN:
	case CHIP_ARCTURUS:
		pm->pmf = &kfd_v9_pm_funcs;
		break;
	case CHIP_NAVI10:
		pm->pmf = &kfd_v10_pm_funcs;
		break;
	default:
		WARN(1, "Unexpected ASIC family %u",
		     dqm->dev->device_info->asic_family);
		return -EINVAL;
	}

	pm->dqm = dqm;
	mutex_init(&pm->lock);
	pm->priv_queue = kernel_queue_init(dqm->dev, KFD_QUEUE_TYPE_HIQ);
	if (!pm->priv_queue) {
		mutex_destroy(&pm->lock);
		return -ENOMEM;
	}
	pm->allocated = false;

	return 0;
}

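/* Tear down the packet manager: destroy the lock and the HIQ kernel queue. */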
void pm_uninit(struct packet_manager *pm)
{
	mutex_destroy(&pm->lock);
	kernel_queue_uninit(pm->priv_queue);
}

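/*
 * Acquire space on the HIQ, build a set-resources packet from *res and
 * submit it to the HWS, rolling the packet back on failure.
 */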
int pm_send_set_resources(struct packet_manager *pm,
				struct scheduling_resources *res)
{
	uint32_t *buffer, size;
	int retval = 0;

	size = pm->pmf->set_resources_size;
	mutex_lock(&pm->lock);
	pm->priv_queue->ops.acquire_packet_buffer(pm->priv_queue,
					size / sizeof(uint32_t),
					(unsigned int **)&buffer);
	if (!buffer) {
		pr_err("Failed to allocate buffer on kernel queue\n");
		retval = -ENOMEM;
		goto out;
	}

	retval = pm->pmf->set_resources(pm, buffer, res);
	if (!retval)
		pm->priv_queue->ops.submit_packet(pm->priv_queue);
	else
		pm->priv_queue->ops.rollback_packet(pm->priv_queue);

out:
	mutex_unlock(&pm->lock);

	return retval;
}

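/*
 * Build a runlist IB for the given queue list and submit a runlist packet
 * pointing at it through the HIQ.
 */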
int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues)
{
	uint64_t rl_gpu_ib_addr;
	uint32_t *rl_buffer;
	size_t rl_ib_size, packet_size_dwords;
	int retval;

	retval = pm_create_runlist_ib(pm, dqm_queues, &rl_gpu_ib_addr,
					&rl_ib_size);
	if (retval)
		goto fail_create_runlist_ib;

	pr_debug("runlist IB address: 0x%llX\n", rl_gpu_ib_addr);

	packet_size_dwords = pm->pmf->runlist_size / sizeof(uint32_t);
	mutex_lock(&pm->lock);

	retval = pm->priv_queue->ops.acquire_packet_buffer(pm->priv_queue,
					packet_size_dwords, &rl_buffer);
	if (retval)
		goto fail_acquire_packet_buffer;

	retval = pm->pmf->runlist(pm, rl_buffer, rl_gpu_ib_addr,
					rl_ib_size / sizeof(uint32_t), false);
	if (retval)
		goto fail_create_runlist;

	pm->priv_queue->ops.submit_packet(pm->priv_queue);

	mutex_unlock(&pm->lock);

	return retval;

fail_create_runlist:
	pm->priv_queue->ops.rollback_packet(pm->priv_queue);
fail_acquire_packet_buffer:
	mutex_unlock(&pm->lock);
fail_create_runlist_ib:
	pm_release_ib(pm);
	return retval;
}

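/*
 * Submit a query-status packet asking the scheduler firmware to write
 * fence_value to fence_address once the packet is processed.
 */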
int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address,
			uint32_t fence_value)
{
	uint32_t *buffer, size;
	int retval = 0;

	if (WARN_ON(!fence_address))
		return -EFAULT;

	size = pm->pmf->query_status_size;
	mutex_lock(&pm->lock);
	pm->priv_queue->ops.acquire_packet_buffer(pm->priv_queue,
			size / sizeof(uint32_t), (unsigned int **)&buffer);
	if (!buffer) {
		pr_err("Failed to allocate buffer on kernel queue\n");
		retval = -ENOMEM;
		goto out;
	}

	retval = pm->pmf->query_status(pm, buffer, fence_address, fence_value);
	if (!retval)
		pm->priv_queue->ops.submit_packet(pm->priv_queue);
	else
		pm->priv_queue->ops.rollback_packet(pm->priv_queue);

out:
	mutex_unlock(&pm->lock);
	return retval;
}

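/*
 * Submit an unmap-queues packet that preempts (or resets, if requested)
 * the queues selected by type, filter and filter_param.
 */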
int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type,
			enum kfd_unmap_queues_filter filter,
			uint32_t filter_param, bool reset,
			unsigned int sdma_engine)
{
	uint32_t *buffer, size;
	int retval = 0;

	size = pm->pmf->unmap_queues_size;
	mutex_lock(&pm->lock);
	pm->priv_queue->ops.acquire_packet_buffer(pm->priv_queue,
			size / sizeof(uint32_t), (unsigned int **)&buffer);
	if (!buffer) {
		pr_err("Failed to allocate buffer on kernel queue\n");
		retval = -ENOMEM;
		goto out;
	}

	retval = pm->pmf->unmap_queues(pm, buffer, type, filter, filter_param,
				       reset, sdma_engine);
	if (!retval)
		pm->priv_queue->ops.submit_packet(pm->priv_queue);
	else
		pm->priv_queue->ops.rollback_packet(pm->priv_queue);

out:
	mutex_unlock(&pm->lock);
	return retval;
}

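/* Free the runlist IB allocation, if any, under the packet manager lock. */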
void pm_release_ib(struct packet_manager *pm)
{
	mutex_lock(&pm->lock);
	if (pm->allocated) {
		kfd_gtt_sa_free(pm->dqm->dev, pm->ib_buffer_obj);
		pm->allocated = false;
	}
	mutex_unlock(&pm->lock);
}

#if defined(CONFIG_DEBUG_FS)

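/* Hex-dump the currently allocated runlist IB to a debugfs seq_file. */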
int pm_debugfs_runlist(struct seq_file *m, void *data)
{
	struct packet_manager *pm = data;

	mutex_lock(&pm->lock);

	if (!pm->allocated) {
		seq_puts(m, "  No active runlist\n");
		goto out;
	}

	seq_hex_dump(m, "  ", DUMP_PREFIX_OFFSET, 32, 4,
		     pm->ib_buffer_obj->cpu_ptr, pm->ib_size_bytes, false);

out:
	mutex_unlock(&pm->lock);
	return 0;
}

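/*
 * Debugfs helper that deliberately hangs the hardware scheduler by
 * submitting a packet of 0x55 filler dwords on the HIQ.
 */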
int pm_debugfs_hang_hws(struct packet_manager *pm)
{
	uint32_t *buffer, size;
	int r = 0;

	size = pm->pmf->query_status_size;
	mutex_lock(&pm->lock);
	pm->priv_queue->ops.acquire_packet_buffer(pm->priv_queue,
			size / sizeof(uint32_t), (unsigned int **)&buffer);
	if (!buffer) {
		pr_err("Failed to allocate buffer on kernel queue\n");
		r = -ENOMEM;
		goto out;
	}
	memset(buffer, 0x55, size);
	pm->priv_queue->ops.submit_packet(pm->priv_queue);

	pr_info("Submitting %x %x %x %x %x %x %x to HIQ to hang the HWS.",
		buffer[0], buffer[1], buffer[2], buffer[3],
		buffer[4], buffer[5], buffer[6]);
out:
	mutex_unlock(&pm->lock);
	return r;
}

#endif