1/*
2 * Copyright 2014 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#include <linux/ratelimit.h>
25#include <linux/printk.h>
26#include <linux/slab.h>
27#include <linux/list.h>
28#include <linux/types.h>
29#include <linux/bitops.h>
30#include <linux/sched.h>
31#include "kfd_priv.h"
32#include "kfd_device_queue_manager.h"
33#include "kfd_mqd_manager.h"
34#include "cik_regs.h"
35#include "kfd_kernel_queue.h"
36#include "amdgpu_amdkfd.h"
37
38/* Size of the per-pipe EOP queue */
39#define CIK_HPD_EOP_BYTES_LOG2 11
40#define CIK_HPD_EOP_BYTES (1U << CIK_HPD_EOP_BYTES_LOG2)
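/* i.e. 2 KiB (1 << 11 bytes) of EOP buffer per pipe */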
41
42static int set_pasid_vmid_mapping(struct device_queue_manager *dqm,
43 u32 pasid, unsigned int vmid);
44
45static int execute_queues_cpsch(struct device_queue_manager *dqm,
46 enum kfd_unmap_queues_filter filter,
47 uint32_t filter_param);
48static int unmap_queues_cpsch(struct device_queue_manager *dqm,
49 enum kfd_unmap_queues_filter filter,
50 uint32_t filter_param);
51
52static int map_queues_cpsch(struct device_queue_manager *dqm);
53
54static void deallocate_sdma_queue(struct device_queue_manager *dqm,
55 struct queue *q);
56
57static inline void deallocate_hqd(struct device_queue_manager *dqm,
58 struct queue *q);
59static int allocate_hqd(struct device_queue_manager *dqm, struct queue *q);
60static int allocate_sdma_queue(struct device_queue_manager *dqm,
61 struct queue *q);
62static void kfd_process_hw_exception(struct work_struct *work);
63
64static inline
65enum KFD_MQD_TYPE get_mqd_type_from_queue_type(enum kfd_queue_type type)
66{
67 if (type == KFD_QUEUE_TYPE_SDMA || type == KFD_QUEUE_TYPE_SDMA_XGMI)
68 return KFD_MQD_TYPE_SDMA;
69 return KFD_MQD_TYPE_CP;
70}
71
72static bool is_pipe_enabled(struct device_queue_manager *dqm, int mec, int pipe)
73{
74 int i;
75 int pipe_offset = (mec * dqm->dev->shared_resources.num_pipe_per_mec
76 + pipe) * dqm->dev->shared_resources.num_queue_per_pipe;
77
78 /* queue is available for KFD usage if bit is 1 */
79 for (i = 0; i < dqm->dev->shared_resources.num_queue_per_pipe; ++i)
80 if (test_bit(pipe_offset + i,
81 dqm->dev->shared_resources.cp_queue_bitmap))
82 return true;
83 return false;
84}
85
86unsigned int get_cp_queues_num(struct device_queue_manager *dqm)
87{
88 return bitmap_weight(dqm->dev->shared_resources.cp_queue_bitmap,
89 KGD_MAX_QUEUES);
90}
91
92unsigned int get_queues_per_pipe(struct device_queue_manager *dqm)
93{
94 return dqm->dev->shared_resources.num_queue_per_pipe;
95}
96
97unsigned int get_pipes_per_mec(struct device_queue_manager *dqm)
98{
99 return dqm->dev->shared_resources.num_pipe_per_mec;
100}
101
102static unsigned int get_num_sdma_engines(struct device_queue_manager *dqm)
103{
104 return dqm->dev->device_info->num_sdma_engines;
105}
106
107static unsigned int get_num_xgmi_sdma_engines(struct device_queue_manager *dqm)
108{
109 return dqm->dev->device_info->num_xgmi_sdma_engines;
110}
111
112static unsigned int get_num_all_sdma_engines(struct device_queue_manager *dqm)
113{
114 return get_num_sdma_engines(dqm) + get_num_xgmi_sdma_engines(dqm);
115}
116
117unsigned int get_num_sdma_queues(struct device_queue_manager *dqm)
118{
119 return dqm->dev->device_info->num_sdma_engines
120 * dqm->dev->device_info->num_sdma_queues_per_engine;
121}
122
123unsigned int get_num_xgmi_sdma_queues(struct device_queue_manager *dqm)
124{
125 return dqm->dev->device_info->num_xgmi_sdma_engines
126 * dqm->dev->device_info->num_sdma_queues_per_engine;
127}
128
129void program_sh_mem_settings(struct device_queue_manager *dqm,
130 struct qcm_process_device *qpd)
131{
132 return dqm->dev->kfd2kgd->program_sh_mem_settings(
133 dqm->dev->kgd, qpd->vmid,
134 qpd->sh_mem_config,
135 qpd->sh_mem_ape1_base,
136 qpd->sh_mem_ape1_limit,
137 qpd->sh_mem_bases);
138}
139
140static void increment_queue_count(struct device_queue_manager *dqm,
141 enum kfd_queue_type type)
142{
143 dqm->active_queue_count++;
144 if (type == KFD_QUEUE_TYPE_COMPUTE || type == KFD_QUEUE_TYPE_DIQ)
145 dqm->active_cp_queue_count++;
146}
147
148static void decrement_queue_count(struct device_queue_manager *dqm,
149 enum kfd_queue_type type)
150{
151 dqm->active_queue_count--;
152 if (type == KFD_QUEUE_TYPE_COMPUTE || type == KFD_QUEUE_TYPE_DIQ)
153 dqm->active_cp_queue_count--;
154}
155
156static int allocate_doorbell(struct qcm_process_device *qpd, struct queue *q)
157{
158 struct kfd_dev *dev = qpd->dqm->dev;
159
160 if (!KFD_IS_SOC15(dev->device_info->asic_family)) {
161 /* On pre-SOC15 chips we need to use the queue ID to
162 * preserve the user mode ABI.
163 */
164 q->doorbell_id = q->properties.queue_id;
165 } else if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
166 q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
167 /* For SDMA queues on SOC15 with 8-byte doorbell, use static
168 * doorbell assignments based on the engine and queue id.
169 * The doorbell index distance between RLC queues (2*i) and (2*i+1)
170 * of an SDMA engine is 512.
171 */
172 uint32_t *idx_offset =
173 dev->shared_resources.sdma_doorbell_idx;
174
175 q->doorbell_id = idx_offset[q->properties.sdma_engine_id]
176 + (q->properties.sdma_queue_id & 1)
177 * KFD_QUEUE_DOORBELL_MIRROR_OFFSET
178 + (q->properties.sdma_queue_id >> 1);
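 /* Worked example (assuming KFD_QUEUE_DOORBELL_MIRROR_OFFSET == 512,
 * per the comment above): engine 0, sdma_queue_id 3 yields
 * doorbell_id = idx_offset[0] + 1 * 512 + 1.
 */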
179 } else {
180 /* For CP queues on SOC15 reserve a free doorbell ID */
181 unsigned int found;
182
183 found = find_first_zero_bit(qpd->doorbell_bitmap,
184 KFD_MAX_NUM_OF_QUEUES_PER_PROCESS);
185 if (found >= KFD_MAX_NUM_OF_QUEUES_PER_PROCESS) {
186 pr_debug("No doorbells available");
187 return -EBUSY;
188 }
189 set_bit(found, qpd->doorbell_bitmap);
190 q->doorbell_id = found;
191 }
192
193 q->properties.doorbell_off =
194 kfd_get_doorbell_dw_offset_in_bar(dev, qpd_to_pdd(qpd),
195 q->doorbell_id);
196 return 0;
197}
198
199static void deallocate_doorbell(struct qcm_process_device *qpd,
200 struct queue *q)
201{
202 unsigned int old;
203 struct kfd_dev *dev = qpd->dqm->dev;
204
205 if (!KFD_IS_SOC15(dev->device_info->asic_family) ||
206 q->properties.type == KFD_QUEUE_TYPE_SDMA ||
207 q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
208 return;
209
210 old = test_and_clear_bit(q->doorbell_id, qpd->doorbell_bitmap);
211 WARN_ON(!old);
212}
213
214static int allocate_vmid(struct device_queue_manager *dqm,
215 struct qcm_process_device *qpd,
216 struct queue *q)
217{
218 int allocated_vmid = -1, i;
219
220 for (i = dqm->dev->vm_info.first_vmid_kfd;
221 i <= dqm->dev->vm_info.last_vmid_kfd; i++) {
222 if (!dqm->vmid_pasid[i]) {
223 allocated_vmid = i;
224 break;
225 }
226 }
227
228 if (allocated_vmid < 0) {
229 pr_err("no more vmid to allocate\n");
230 return -ENOSPC;
231 }
232
233 pr_debug("vmid allocated: %d\n", allocated_vmid);
234
235 dqm->vmid_pasid[allocated_vmid] = q->process->pasid;
236
237 set_pasid_vmid_mapping(dqm, q->process->pasid, allocated_vmid);
238
239 qpd->vmid = allocated_vmid;
240 q->properties.vmid = allocated_vmid;
241
242 program_sh_mem_settings(dqm, qpd);
243
244 /* qpd->page_table_base is set earlier when register_process()
245 * is called, i.e. when the first queue is created.
246 */
247 dqm->dev->kfd2kgd->set_vm_context_page_table_base(dqm->dev->kgd,
248 qpd->vmid,
249 qpd->page_table_base);
250 /* invalidate the VM context after pasid and vmid mapping is set up */
251 kfd_flush_tlb(qpd_to_pdd(qpd), TLB_FLUSH_LEGACY);
252
253 if (dqm->dev->kfd2kgd->set_scratch_backing_va)
254 dqm->dev->kfd2kgd->set_scratch_backing_va(dqm->dev->kgd,
255 qpd->sh_hidden_private_base, qpd->vmid);
256
257 return 0;
258}
259
260static int flush_texture_cache_nocpsch(struct kfd_dev *kdev,
261 struct qcm_process_device *qpd)
262{
263 const struct packet_manager_funcs *pmf = qpd->dqm->packets.pmf;
264 int ret;
265
266 if (!qpd->ib_kaddr)
267 return -ENOMEM;
268
269 ret = pmf->release_mem(qpd->ib_base, (uint32_t *)qpd->ib_kaddr);
270 if (ret)
271 return ret;
272
273 return amdgpu_amdkfd_submit_ib(kdev->kgd, KGD_ENGINE_MEC1, qpd->vmid,
274 qpd->ib_base, (uint32_t *)qpd->ib_kaddr,
275 pmf->release_mem_size / sizeof(uint32_t));
276}
277
278static void deallocate_vmid(struct device_queue_manager *dqm,
279 struct qcm_process_device *qpd,
280 struct queue *q)
281{
282 /* On GFX v7, CP doesn't flush TC at dequeue */
283 if (q->device->device_info->asic_family == CHIP_HAWAII)
284 if (flush_texture_cache_nocpsch(q->device, qpd))
285 pr_err("Failed to flush TC\n");
286
287 kfd_flush_tlb(qpd_to_pdd(qpd), TLB_FLUSH_LEGACY);
288
289 /* Release the vmid mapping */
290 set_pasid_vmid_mapping(dqm, 0, qpd->vmid);
291 dqm->vmid_pasid[qpd->vmid] = 0;
292
293 qpd->vmid = 0;
294 q->properties.vmid = 0;
295}
296
297static int create_queue_nocpsch(struct device_queue_manager *dqm,
298 struct queue *q,
299 struct qcm_process_device *qpd)
300{
301 struct mqd_manager *mqd_mgr;
302 int retval;
303
304 dqm_lock(dqm);
305
306 if (dqm->total_queue_count >= max_num_of_queues_per_device) {
307 pr_warn("Can't create new usermode queue because %d queues were already created\n",
308 dqm->total_queue_count);
309 retval = -EPERM;
310 goto out_unlock;
311 }
312
313 if (list_empty(&qpd->queues_list)) {
314 retval = allocate_vmid(dqm, qpd, q);
315 if (retval)
316 goto out_unlock;
317 }
318 q->properties.vmid = qpd->vmid;
319 /*
320 * Eviction state logic: mark all queues as evicted, even ones
321 * not currently active. Restoring inactive queues later only
322 * updates the is_evicted flag but is a no-op otherwise.
323 */
324 q->properties.is_evicted = !!qpd->evicted;
325
326 q->properties.tba_addr = qpd->tba_addr;
327 q->properties.tma_addr = qpd->tma_addr;
328
329 mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
330 q->properties.type)];
331 if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE) {
332 retval = allocate_hqd(dqm, q);
333 if (retval)
334 goto deallocate_vmid;
335 pr_debug("Loading mqd to hqd on pipe %d, queue %d\n",
336 q->pipe, q->queue);
337 } else if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
338 q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
339 retval = allocate_sdma_queue(dqm, q);
340 if (retval)
341 goto deallocate_vmid;
342 dqm->asic_ops.init_sdma_vm(dqm, q, qpd);
343 }
344
345 retval = allocate_doorbell(qpd, q);
346 if (retval)
347 goto out_deallocate_hqd;
348
349 /* Temporarily release dqm lock to avoid a circular lock dependency */
350 dqm_unlock(dqm);
351 q->mqd_mem_obj = mqd_mgr->allocate_mqd(mqd_mgr->dev, &q->properties);
352 dqm_lock(dqm);
353
354 if (!q->mqd_mem_obj) {
355 retval = -ENOMEM;
356 goto out_deallocate_doorbell;
357 }
358 mqd_mgr->init_mqd(mqd_mgr, &q->mqd, q->mqd_mem_obj,
359 &q->gart_mqd_addr, &q->properties);
360 if (q->properties.is_active) {
361 if (!dqm->sched_running) {
362 WARN_ONCE(1, "Load non-HWS mqd while stopped\n");
363 goto add_queue_to_list;
364 }
365
366 if (WARN(q->process->mm != current->mm,
367 "should only run in user thread"))
368 retval = -EFAULT;
369 else
370 retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe,
371 q->queue, &q->properties, current->mm);
372 if (retval)
373 goto out_free_mqd;
374 }
375
376add_queue_to_list:
377 list_add(&q->list, &qpd->queues_list);
378 qpd->queue_count++;
379 if (q->properties.is_active)
380 increment_queue_count(dqm, q->properties.type);
381
382 /*
383 * Unconditionally increment this counter, regardless of the queue's
384 * type or whether the queue is active.
385 */
386 dqm->total_queue_count++;
387 pr_debug("Total of %d queues are accountable so far\n",
388 dqm->total_queue_count);
389 goto out_unlock;
390
391out_free_mqd:
392 mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
393out_deallocate_doorbell:
394 deallocate_doorbell(qpd, q);
395out_deallocate_hqd:
396 if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE)
397 deallocate_hqd(dqm, q);
398 else if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
399 q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
400 deallocate_sdma_queue(dqm, q);
401deallocate_vmid:
402 if (list_empty(&qpd->queues_list))
403 deallocate_vmid(dqm, qpd, q);
404out_unlock:
405 dqm_unlock(dqm);
406 return retval;
407}
408
409static int allocate_hqd(struct device_queue_manager *dqm, struct queue *q)
410{
411 bool set;
412 int pipe, bit, i;
413
414 set = false;
415
416 for (pipe = dqm->next_pipe_to_allocate, i = 0;
417 i < get_pipes_per_mec(dqm);
418 pipe = ((pipe + 1) % get_pipes_per_mec(dqm)), ++i) {
419
420 if (!is_pipe_enabled(dqm, 0, pipe))
421 continue;
422
423 if (dqm->allocated_queues[pipe] != 0) {
424 bit = ffs(dqm->allocated_queues[pipe]) - 1;
425 dqm->allocated_queues[pipe] &= ~(1 << bit);
426 q->pipe = pipe;
427 q->queue = bit;
428 set = true;
429 break;
430 }
431 }
432
433 if (!set)
434 return -EBUSY;
435
436 pr_debug("hqd slot - pipe %d, queue %d\n", q->pipe, q->queue);
437 /* horizontal hqd allocation */
438 dqm->next_pipe_to_allocate = (pipe + 1) % get_pipes_per_mec(dqm);
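 /* i.e. spread queues round-robin across pipes before reusing a pipe */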
439
440 return 0;
441}
442
443static inline void deallocate_hqd(struct device_queue_manager *dqm,
444 struct queue *q)
445{
446 dqm->allocated_queues[q->pipe] |= (1 << q->queue);
447}
448
449/* Access to DQM has to be locked before calling destroy_queue_nocpsch_locked
450 * to avoid unsynchronized access
451 */
452static int destroy_queue_nocpsch_locked(struct device_queue_manager *dqm,
453 struct qcm_process_device *qpd,
454 struct queue *q)
455{
456 int retval;
457 struct mqd_manager *mqd_mgr;
458
459 mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
460 q->properties.type)];
461
462 if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE)
463 deallocate_hqd(dqm, q);
464 else if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
465 deallocate_sdma_queue(dqm, q);
466 else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
467 deallocate_sdma_queue(dqm, q);
468 else {
469 pr_debug("q->properties.type %d is invalid\n",
470 q->properties.type);
471 return -EINVAL;
472 }
473 dqm->total_queue_count--;
474
475 deallocate_doorbell(qpd, q);
476
477 if (!dqm->sched_running) {
478 WARN_ONCE(1, "Destroy non-HWS queue while stopped\n");
479 return 0;
480 }
481
482 retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd,
483 KFD_PREEMPT_TYPE_WAVEFRONT_RESET,
484 KFD_UNMAP_LATENCY_MS,
485 q->pipe, q->queue);
486 if (retval == -ETIME)
487 qpd->reset_wavefronts = true;
488
489 list_del(&q->list);
490 if (list_empty(&qpd->queues_list)) {
491 if (qpd->reset_wavefronts) {
492 pr_warn("Resetting wave fronts (nocpsch) on dev %p\n",
493 dqm->dev);
494 /* dbgdev_wave_reset_wavefronts has to be called before
495 * deallocate_vmid(), i.e. when vmid is still in use.
496 */
497 dbgdev_wave_reset_wavefronts(dqm->dev,
498 qpd->pqm->process);
499 qpd->reset_wavefronts = false;
500 }
501
502 deallocate_vmid(dqm, qpd, q);
503 }
504 qpd->queue_count--;
505 if (q->properties.is_active) {
506 decrement_queue_count(dqm, q->properties.type);
507 if (q->properties.is_gws) {
508 dqm->gws_queue_count--;
509 qpd->mapped_gws_queue = false;
510 }
511 }
512
513 return retval;
514}
515
516static int destroy_queue_nocpsch(struct device_queue_manager *dqm,
517 struct qcm_process_device *qpd,
518 struct queue *q)
519{
520 int retval;
521 uint64_t sdma_val = 0;
522 struct kfd_process_device *pdd = qpd_to_pdd(qpd);
523 struct mqd_manager *mqd_mgr =
524 dqm->mqd_mgrs[get_mqd_type_from_queue_type(q->properties.type)];
525
526 /* Get the SDMA queue stats */
527 if ((q->properties.type == KFD_QUEUE_TYPE_SDMA) ||
528 (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)) {
529 retval = read_sdma_queue_counter((uint64_t __user *)q->properties.read_ptr,
530 &sdma_val);
531 if (retval)
532 pr_err("Failed to read SDMA queue counter for queue: %d\n",
533 q->properties.queue_id);
534 }
535
536 dqm_lock(dqm);
537 retval = destroy_queue_nocpsch_locked(dqm, qpd, q);
538 if (!retval)
539 pdd->sdma_past_activity_counter += sdma_val;
540 dqm_unlock(dqm);
541
542 mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
543
544 return retval;
545}
546
547static int update_queue(struct device_queue_manager *dqm, struct queue *q)
548{
549 int retval = 0;
550 struct mqd_manager *mqd_mgr;
551 struct kfd_process_device *pdd;
552 bool prev_active = false;
553
554 dqm_lock(dqm);
555 pdd = kfd_get_process_device_data(q->device, q->process);
556 if (!pdd) {
557 retval = -ENODEV;
558 goto out_unlock;
559 }
560 mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
561 q->properties.type)];
562
563 /* Save previous activity state for counters */
564 prev_active = q->properties.is_active;
565
566 /* Make sure the queue is unmapped before updating the MQD */
567 if (dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS) {
568 retval = unmap_queues_cpsch(dqm,
569 KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
570 if (retval) {
571 pr_err("unmap queue failed\n");
572 goto out_unlock;
573 }
574 } else if (prev_active &&
575 (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
576 q->properties.type == KFD_QUEUE_TYPE_SDMA ||
577 q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)) {
578
579 if (!dqm->sched_running) {
580 WARN_ONCE(1, "Update non-HWS queue while stopped\n");
581 goto out_unlock;
582 }
583
584 retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd,
585 KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN,
586 KFD_UNMAP_LATENCY_MS, q->pipe, q->queue);
587 if (retval) {
588 pr_err("destroy mqd failed\n");
589 goto out_unlock;
590 }
591 }
592
593 mqd_mgr->update_mqd(mqd_mgr, q->mqd, &q->properties);
594
595 /*
596 * check active state vs. the previous state and modify
597 * counter accordingly. map_queues_cpsch uses the
598 * dqm->active_queue_count to determine whether a new runlist must be
599 * uploaded.
600 */
601 if (q->properties.is_active && !prev_active)
602 increment_queue_count(dqm, q->properties.type);
603 else if (!q->properties.is_active && prev_active)
604 decrement_queue_count(dqm, q->properties.type);
605
606 if (q->gws && !q->properties.is_gws) {
607 if (q->properties.is_active) {
608 dqm->gws_queue_count++;
609 pdd->qpd.mapped_gws_queue = true;
610 }
611 q->properties.is_gws = true;
612 } else if (!q->gws && q->properties.is_gws) {
613 if (q->properties.is_active) {
614 dqm->gws_queue_count--;
615 pdd->qpd.mapped_gws_queue = false;
616 }
617 q->properties.is_gws = false;
618 }
619
620 if (dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS)
621 retval = map_queues_cpsch(dqm);
622 else if (q->properties.is_active &&
623 (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
624 q->properties.type == KFD_QUEUE_TYPE_SDMA ||
625 q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)) {
626 if (WARN(q->process->mm != current->mm,
627 "should only run in user thread"))
628 retval = -EFAULT;
629 else
630 retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd,
631 q->pipe, q->queue,
632 &q->properties, current->mm);
633 }
634
635out_unlock:
636 dqm_unlock(dqm);
637 return retval;
638}
639
640static int evict_process_queues_nocpsch(struct device_queue_manager *dqm,
641 struct qcm_process_device *qpd)
642{
643 struct queue *q;
644 struct mqd_manager *mqd_mgr;
645 struct kfd_process_device *pdd;
646 int retval, ret = 0;
647
648 dqm_lock(dqm);
649 if (qpd->evicted++ > 0) /* already evicted, do nothing */
650 goto out;
651
652 pdd = qpd_to_pdd(qpd);
653 pr_debug_ratelimited("Evicting PASID 0x%x queues\n",
654 pdd->process->pasid);
655
656 pdd->last_evict_timestamp = get_jiffies_64();
657 /* Mark all queues as evicted. Deactivate all active queues on
658 * the qpd.
659 */
660 list_for_each_entry(q, &qpd->queues_list, list) {
661 q->properties.is_evicted = true;
662 if (!q->properties.is_active)
663 continue;
664
665 mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
666 q->properties.type)];
667 q->properties.is_active = false;
668 decrement_queue_count(dqm, q->properties.type);
669 if (q->properties.is_gws) {
670 dqm->gws_queue_count--;
671 qpd->mapped_gws_queue = false;
672 }
673
674 if (WARN_ONCE(!dqm->sched_running, "Evict when stopped\n"))
675 continue;
676
677 retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd,
678 KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN,
679 KFD_UNMAP_LATENCY_MS, q->pipe, q->queue);
680 if (retval && !ret)
681 /* Return the first error, but keep going to
682 * maintain a consistent eviction state
683 */
684 ret = retval;
685 }
686
687out:
688 dqm_unlock(dqm);
689 return ret;
690}
691
692static int evict_process_queues_cpsch(struct device_queue_manager *dqm,
693 struct qcm_process_device *qpd)
694{
695 struct queue *q;
696 struct kfd_process_device *pdd;
697 int retval = 0;
698
699 dqm_lock(dqm);
700 if (qpd->evicted++ > 0) /* already evicted, do nothing */
701 goto out;
702
703 pdd = qpd_to_pdd(qpd);
704 pr_debug_ratelimited("Evicting PASID 0x%x queues\n",
705 pdd->process->pasid);
706
707 /* Mark all queues as evicted. Deactivate all active queues on
708 * the qpd.
709 */
710 list_for_each_entry(q, &qpd->queues_list, list) {
711 q->properties.is_evicted = true;
712 if (!q->properties.is_active)
713 continue;
714
715 q->properties.is_active = false;
716 decrement_queue_count(dqm, q->properties.type);
717 }
718 pdd->last_evict_timestamp = get_jiffies_64();
719 retval = execute_queues_cpsch(dqm,
720 qpd->is_debug ?
721 KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES :
722 KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
723
724out:
725 dqm_unlock(dqm);
726 return retval;
727}
728
729static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
730 struct qcm_process_device *qpd)
731{
732 struct mm_struct *mm = NULL;
733 struct queue *q;
734 struct mqd_manager *mqd_mgr;
735 struct kfd_process_device *pdd;
736 uint64_t pd_base;
737 uint64_t eviction_duration;
738 int retval, ret = 0;
739
740 pdd = qpd_to_pdd(qpd);
741 /* Retrieve PD base */
742 pd_base = amdgpu_amdkfd_gpuvm_get_process_page_dir(pdd->drm_priv);
743
744 dqm_lock(dqm);
745 if (WARN_ON_ONCE(!qpd->evicted)) /* already restored, do nothing */
746 goto out;
747 if (qpd->evicted > 1) { /* ref count still > 0, decrement & quit */
748 qpd->evicted--;
749 goto out;
750 }
751
752 pr_debug_ratelimited("Restoring PASID 0x%x queues\n",
753 pdd->process->pasid);
754
755 /* Update PD Base in QPD */
756 qpd->page_table_base = pd_base;
757 pr_debug("Updated PD address to 0x%llx\n", pd_base);
758
759 if (!list_empty(&qpd->queues_list)) {
760 dqm->dev->kfd2kgd->set_vm_context_page_table_base(
761 dqm->dev->kgd,
762 qpd->vmid,
763 qpd->page_table_base);
764 kfd_flush_tlb(pdd, TLB_FLUSH_LEGACY);
765 }
766
767 /* Take a safe reference to the mm_struct, which may otherwise
768 * disappear even while the kfd_process is still referenced.
769 */
770 mm = get_task_mm(pdd->process->lead_thread);
771 if (!mm) {
772 ret = -EFAULT;
773 goto out;
774 }
775
776 /* Remove the eviction flags. Activate queues that are not
777 * inactive for other reasons.
778 */
779 list_for_each_entry(q, &qpd->queues_list, list) {
780 q->properties.is_evicted = false;
781 if (!QUEUE_IS_ACTIVE(q->properties))
782 continue;
783
784 mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
785 q->properties.type)];
786 q->properties.is_active = true;
787 increment_queue_count(dqm, q->properties.type);
788 if (q->properties.is_gws) {
789 dqm->gws_queue_count++;
790 qpd->mapped_gws_queue = true;
791 }
792
793 if (WARN_ONCE(!dqm->sched_running, "Restore when stopped\n"))
794 continue;
795
796 retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe,
797 q->queue, &q->properties, mm);
798 if (retval && !ret)
799 /* Return the first error, but keep going to
800 * maintain a consistent eviction state
801 */
802 ret = retval;
803 }
804 qpd->evicted = 0;
805 eviction_duration = get_jiffies_64() - pdd->last_evict_timestamp;
806 atomic64_add(eviction_duration, &pdd->evict_duration_counter);
807out:
808 if (mm)
809 mmput(mm);
810 dqm_unlock(dqm);
811 return ret;
812}
813
814static int restore_process_queues_cpsch(struct device_queue_manager *dqm,
815 struct qcm_process_device *qpd)
816{
817 struct queue *q;
818 struct kfd_process_device *pdd;
819 uint64_t pd_base;
820 uint64_t eviction_duration;
821 int retval = 0;
822
823 pdd = qpd_to_pdd(qpd);
824 /* Retrieve PD base */
825 pd_base = amdgpu_amdkfd_gpuvm_get_process_page_dir(pdd->drm_priv);
826
827 dqm_lock(dqm);
828 if (WARN_ON_ONCE(!qpd->evicted)) /* already restored, do nothing */
829 goto out;
830 if (qpd->evicted > 1) { /* ref count still > 0, decrement & quit */
831 qpd->evicted--;
832 goto out;
833 }
834
835 pr_debug_ratelimited("Restoring PASID 0x%x queues\n",
836 pdd->process->pasid);
837
838 /* Update PD Base in QPD */
839 qpd->page_table_base = pd_base;
840 pr_debug("Updated PD address to 0x%llx\n", pd_base);
841
842 /* activate all active queues on the qpd */
843 list_for_each_entry(q, &qpd->queues_list, list) {
844 q->properties.is_evicted = false;
845 if (!QUEUE_IS_ACTIVE(q->properties))
846 continue;
847
848 q->properties.is_active = true;
849 increment_queue_count(dqm, q->properties.type);
850 }
851 retval = execute_queues_cpsch(dqm,
852 KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
853 qpd->evicted = 0;
854 eviction_duration = get_jiffies_64() - pdd->last_evict_timestamp;
855 atomic64_add(eviction_duration, &pdd->evict_duration_counter);
856out:
857 dqm_unlock(dqm);
858 return retval;
859}
860
861static int register_process(struct device_queue_manager *dqm,
862 struct qcm_process_device *qpd)
863{
864 struct device_process_node *n;
865 struct kfd_process_device *pdd;
866 uint64_t pd_base;
867 int retval;
868
869 n = kzalloc(sizeof(*n), GFP_KERNEL);
870 if (!n)
871 return -ENOMEM;
872
873 n->qpd = qpd;
874
875 pdd = qpd_to_pdd(qpd);
876 /* Retrieve PD base */
877 pd_base = amdgpu_amdkfd_gpuvm_get_process_page_dir(pdd->drm_priv);
878
879 dqm_lock(dqm);
880 list_add(&n->list, &dqm->queues);
881
882 /* Update PD Base in QPD */
883 qpd->page_table_base = pd_base;
884 pr_debug("Updated PD address to 0x%llx\n", pd_base);
885
886 retval = dqm->asic_ops.update_qpd(dqm, qpd);
887
888 dqm->processes_count++;
889
890 dqm_unlock(dqm);
891
892 /* Outside the DQM lock because under the DQM lock we can't do
893 * reclaim or take other locks that others hold while reclaiming.
894 */
895 kfd_inc_compute_active(dqm->dev);
896
897 return retval;
898}
899
900static int unregister_process(struct device_queue_manager *dqm,
901 struct qcm_process_device *qpd)
902{
903 int retval;
904 struct device_process_node *cur, *next;
905
906 pr_debug("qpd->queues_list is %s\n",
907 list_empty(&qpd->queues_list) ? "empty" : "not empty");
908
909 retval = 0;
910 dqm_lock(dqm);
911
912 list_for_each_entry_safe(cur, next, &dqm->queues, list) {
913 if (qpd == cur->qpd) {
914 list_del(&cur->list);
915 kfree(cur);
916 dqm->processes_count--;
917 goto out;
918 }
919 }
920 /* qpd not found in dqm list */
921 retval = 1;
922out:
923 dqm_unlock(dqm);
924
925 /* Outside the DQM lock because under the DQM lock we can't do
926 * reclaim or take other locks that others hold while reclaiming.
927 */
928 if (!retval)
929 kfd_dec_compute_active(dqm->dev);
930
931 return retval;
932}
933
934static int
935set_pasid_vmid_mapping(struct device_queue_manager *dqm, u32 pasid,
936 unsigned int vmid)
937{
938 return dqm->dev->kfd2kgd->set_pasid_vmid_mapping(
939 dqm->dev->kgd, pasid, vmid);
940}
941
942static void init_interrupts(struct device_queue_manager *dqm)
943{
944 unsigned int i;
945
946 for (i = 0 ; i < get_pipes_per_mec(dqm) ; i++)
947 if (is_pipe_enabled(dqm, 0, i))
948 dqm->dev->kfd2kgd->init_interrupts(dqm->dev->kgd, i);
949}
950
951static int initialize_nocpsch(struct device_queue_manager *dqm)
952{
953 int pipe, queue;
954
955 pr_debug("num of pipes: %d\n", get_pipes_per_mec(dqm));
956
957 dqm->allocated_queues = kcalloc(get_pipes_per_mec(dqm),
958 sizeof(unsigned int), GFP_KERNEL);
959 if (!dqm->allocated_queues)
960 return -ENOMEM;
961
962 mutex_init(&dqm->lock_hidden);
963 INIT_LIST_HEAD(&dqm->queues);
964 dqm->active_queue_count = dqm->next_pipe_to_allocate = 0;
965 dqm->active_cp_queue_count = 0;
966 dqm->gws_queue_count = 0;
967
968 for (pipe = 0; pipe < get_pipes_per_mec(dqm); pipe++) {
969 int pipe_offset = pipe * get_queues_per_pipe(dqm);
970
971 for (queue = 0; queue < get_queues_per_pipe(dqm); queue++)
972 if (test_bit(pipe_offset + queue,
973 dqm->dev->shared_resources.cp_queue_bitmap))
974 dqm->allocated_queues[pipe] |= 1 << queue;
975 }
976
977 memset(dqm->vmid_pasid, 0, sizeof(dqm->vmid_pasid));
978
979 dqm->sdma_bitmap = ~0ULL >> (64 - get_num_sdma_queues(dqm));
980 dqm->xgmi_sdma_bitmap = ~0ULL >> (64 - get_num_xgmi_sdma_queues(dqm));
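 /* e.g. 8 SDMA queues -> sdma_bitmap = 0xff (one free bit per queue) */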
981
982 return 0;
983}
984
985static void uninitialize(struct device_queue_manager *dqm)
986{
987 int i;
988
989 WARN_ON(dqm->active_queue_count > 0 || dqm->processes_count > 0);
990
991 kfree(dqm->allocated_queues);
992 for (i = 0 ; i < KFD_MQD_TYPE_MAX ; i++)
993 kfree(dqm->mqd_mgrs[i]);
994 mutex_destroy(&dqm->lock_hidden);
995}
996
997static int start_nocpsch(struct device_queue_manager *dqm)
998{
999 pr_info("SW scheduler is used");
1000 init_interrupts(dqm);
1001
1002 if (dqm->dev->device_info->asic_family == CHIP_HAWAII)
1003 return pm_init(&dqm->packets, dqm);
1004 dqm->sched_running = true;
1005
1006 return 0;
1007}
1008
1009static int stop_nocpsch(struct device_queue_manager *dqm)
1010{
1011 if (dqm->dev->device_info->asic_family == CHIP_HAWAII)
1012 pm_uninit(&dqm->packets, false);
1013 dqm->sched_running = false;
1014
1015 return 0;
1016}
1017
1018static void pre_reset(struct device_queue_manager *dqm)
1019{
1020 dqm_lock(dqm);
1021 dqm->is_resetting = true;
1022 dqm_unlock(dqm);
1023}
1024
1025static int allocate_sdma_queue(struct device_queue_manager *dqm,
1026 struct queue *q)
1027{
1028 int bit;
1029
1030 if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
1031 if (dqm->sdma_bitmap == 0) {
1032 pr_err("No more SDMA queue to allocate\n");
1033 return -ENOMEM;
1034 }
1035
1036 bit = __ffs64(dqm->sdma_bitmap);
1037 dqm->sdma_bitmap &= ~(1ULL << bit);
1038 q->sdma_id = bit;
1039 q->properties.sdma_engine_id = q->sdma_id %
1040 get_num_sdma_engines(dqm);
1041 q->properties.sdma_queue_id = q->sdma_id /
1042 get_num_sdma_engines(dqm);
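 /* e.g. with 2 SDMA engines, sdma_id 5 -> engine 1, queue 2
 * (queues are striped across the engines)
 */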
1043 } else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
1044 if (dqm->xgmi_sdma_bitmap == 0) {
1045 pr_err("No more XGMI SDMA queue to allocate\n");
1046 return -ENOMEM;
1047 }
1048 bit = __ffs64(dqm->xgmi_sdma_bitmap);
1049 dqm->xgmi_sdma_bitmap &= ~(1ULL << bit);
1050 q->sdma_id = bit;
1051 /* sdma_engine_id is a global SDMA ID that covers
1052 * both PCIe-optimized SDMAs and XGMI-
1053 * optimized SDMAs. The calculation below
1054 * assumes the first N engines are always the
1055 * PCIe-optimized ones.
1056 */
1057 q->properties.sdma_engine_id = get_num_sdma_engines(dqm) +
1058 q->sdma_id % get_num_xgmi_sdma_engines(dqm);
1059 q->properties.sdma_queue_id = q->sdma_id /
1060 get_num_xgmi_sdma_engines(dqm);
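 /* e.g. with 2 PCIe and 6 XGMI engines, XGMI sdma_id 7 ->
 * engine 2 + 1 = 3, queue 1
 */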
1061 }
1062
1063 pr_debug("SDMA engine id: %d\n", q->properties.sdma_engine_id);
1064 pr_debug("SDMA queue id: %d\n", q->properties.sdma_queue_id);
1065
1066 return 0;
1067}
1068
1069static void deallocate_sdma_queue(struct device_queue_manager *dqm,
1070 struct queue *q)
1071{
1072 if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
1073 if (q->sdma_id >= get_num_sdma_queues(dqm))
1074 return;
1075 dqm->sdma_bitmap |= (1ULL << q->sdma_id);
1076 } else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
1077 if (q->sdma_id >= get_num_xgmi_sdma_queues(dqm))
1078 return;
1079 dqm->xgmi_sdma_bitmap |= (1ULL << q->sdma_id);
1080 }
1081}
1082
1083/*
1084 * Device Queue Manager implementation for cp scheduler
1085 */
1086
1087static int set_sched_resources(struct device_queue_manager *dqm)
1088{
1089 int i, mec;
1090 struct scheduling_resources res;
1091
1092 res.vmid_mask = dqm->dev->shared_resources.compute_vmid_bitmap;
1093
1094 res.queue_mask = 0;
1095 for (i = 0; i < KGD_MAX_QUEUES; ++i) {
1096 mec = (i / dqm->dev->shared_resources.num_queue_per_pipe)
1097 / dqm->dev->shared_resources.num_pipe_per_mec;
1098
1099 if (!test_bit(i, dqm->dev->shared_resources.cp_queue_bitmap))
1100 continue;
1101
1102 /* only acquire queues from the first MEC */
1103 if (mec > 0)
1104 continue;
1105
1106 /* This situation may be hit in the future if a new HW
1107 * generation exposes more than 64 queues. If so, the
1108 * definition of res.queue_mask needs updating
1109 */
1110 if (WARN_ON(i >= (sizeof(res.queue_mask)*8))) {
1111 pr_err("Invalid queue enabled by amdgpu: %d\n", i);
1112 break;
1113 }
1114
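 /* amdgpu_queue_mask_bit_to_set_resource_bit() translates the KFD
 * queue index into the bit layout expected by the SET_RESOURCES packet
 */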
1115 res.queue_mask |= 1ull
1116 << amdgpu_queue_mask_bit_to_set_resource_bit(
1117 (struct amdgpu_device *)dqm->dev->kgd, i);
1118 }
1119 res.gws_mask = ~0ull;
1120 res.oac_mask = res.gds_heap_base = res.gds_heap_size = 0;
1121
1122 pr_debug("Scheduling resources:\n"
1123 "vmid mask: 0x%8X\n"
1124 "queue mask: 0x%8llX\n",
1125 res.vmid_mask, res.queue_mask);
1126
1127 return pm_send_set_resources(&dqm->packets, &res);
1128}
1129
1130static int initialize_cpsch(struct device_queue_manager *dqm)
1131{
1132 uint64_t num_sdma_queues;
1133 uint64_t num_xgmi_sdma_queues;
1134
1135 pr_debug("num of pipes: %d\n", get_pipes_per_mec(dqm));
1136
1137 mutex_init(&dqm->lock_hidden);
1138 INIT_LIST_HEAD(&dqm->queues);
1139 dqm->active_queue_count = dqm->processes_count = 0;
1140 dqm->active_cp_queue_count = 0;
1141 dqm->gws_queue_count = 0;
1142 dqm->active_runlist = false;
1143
1144 num_sdma_queues = get_num_sdma_queues(dqm);
1145 if (num_sdma_queues >= BITS_PER_TYPE(dqm->sdma_bitmap))
1146 dqm->sdma_bitmap = ULLONG_MAX;
1147 else
1148 dqm->sdma_bitmap = (BIT_ULL(num_sdma_queues) - 1);
1149
1150 num_xgmi_sdma_queues = get_num_xgmi_sdma_queues(dqm);
1151 if (num_xgmi_sdma_queues >= BITS_PER_TYPE(dqm->xgmi_sdma_bitmap))
1152 dqm->xgmi_sdma_bitmap = ULLONG_MAX;
1153 else
1154 dqm->xgmi_sdma_bitmap = (BIT_ULL(num_xgmi_sdma_queues) - 1);
1155
1156 INIT_WORK(&dqm->hw_exception_work, kfd_process_hw_exception);
1157
1158 return 0;
1159}
1160
1161static int start_cpsch(struct device_queue_manager *dqm)
1162{
1163 int retval;
1164
1165 retval = 0;
1166
1167 retval = pm_init(&dqm->packets, dqm);
1168 if (retval)
1169 goto fail_packet_manager_init;
1170
1171 retval = set_sched_resources(dqm);
1172 if (retval)
1173 goto fail_set_sched_resources;
1174
1175 pr_debug("Allocating fence memory\n");
1176
1177 /* allocate fence memory on the gart */
1178 retval = kfd_gtt_sa_allocate(dqm->dev, sizeof(*dqm->fence_addr),
1179 &dqm->fence_mem);
1180
1181 if (retval)
1182 goto fail_allocate_vidmem;
1183
1184 dqm->fence_addr = (uint64_t *)dqm->fence_mem->cpu_ptr;
1185 dqm->fence_gpu_addr = dqm->fence_mem->gpu_addr;
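 /* This fence is polled by unmap_queues_cpsch() to detect when queue preemption has completed */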
1186
1187 init_interrupts(dqm);
1188
1189 dqm_lock(dqm);
1190 /* clear hang status when the driver tries to start the HW scheduler */
1191 dqm->is_hws_hang = false;
1192 dqm->is_resetting = false;
1193 dqm->sched_running = true;
1194 execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
1195 dqm_unlock(dqm);
1196
1197 return 0;
1198fail_allocate_vidmem:
1199fail_set_sched_resources:
1200 pm_uninit(&dqm->packets, false);
1201fail_packet_manager_init:
1202 return retval;
1203}
1204
1205static int stop_cpsch(struct device_queue_manager *dqm)
1206{
1207 bool hanging;
1208
1209 dqm_lock(dqm);
1210 if (!dqm->is_hws_hang)
1211 unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
1212 hanging = dqm->is_hws_hang || dqm->is_resetting;
1213 dqm->sched_running = false;
1214 dqm_unlock(dqm);
1215
1216 pm_release_ib(&dqm->packets);
1217
1218 kfd_gtt_sa_free(dqm->dev, dqm->fence_mem);
1219 pm_uninit(&dqm->packets, hanging);
1220
1221 return 0;
1222}
1223
1224static int create_kernel_queue_cpsch(struct device_queue_manager *dqm,
1225 struct kernel_queue *kq,
1226 struct qcm_process_device *qpd)
1227{
1228 dqm_lock(dqm);
1229 if (dqm->total_queue_count >= max_num_of_queues_per_device) {
1230 pr_warn("Can't create new kernel queue because %d queues were already created\n",
1231 dqm->total_queue_count);
1232 dqm_unlock(dqm);
1233 return -EPERM;
1234 }
1235
1236 /*
1237 * Unconditionally increment this counter, regardless of the queue's
1238 * type or whether the queue is active.
1239 */
1240 dqm->total_queue_count++;
1241 pr_debug("Total of %d queues are accountable so far\n",
1242 dqm->total_queue_count);
1243
1244 list_add(&kq->list, &qpd->priv_queue_list);
1245 increment_queue_count(dqm, kq->queue->properties.type);
1246 qpd->is_debug = true;
1247 execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
1248 dqm_unlock(dqm);
1249
1250 return 0;
1251}
1252
1253static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm,
1254 struct kernel_queue *kq,
1255 struct qcm_process_device *qpd)
1256{
1257 dqm_lock(dqm);
1258 list_del(&kq->list);
1259 decrement_queue_count(dqm, kq->queue->properties.type);
1260 qpd->is_debug = false;
1261 execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
1262 /*
1263 * Unconditionally decrement this counter, regardless of the queue's
1264 * type.
1265 */
1266 dqm->total_queue_count--;
1267 pr_debug("Total of %d queues are accountable so far\n",
1268 dqm->total_queue_count);
1269 dqm_unlock(dqm);
1270}
1271
1272static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
1273 struct qcm_process_device *qpd)
1274{
1275 int retval;
1276 struct mqd_manager *mqd_mgr;
1277
1278 if (dqm->total_queue_count >= max_num_of_queues_per_device) {
1279 pr_warn("Can't create new usermode queue because %d queues were already created\n",
1280 dqm->total_queue_count);
1281 retval = -EPERM;
1282 goto out;
1283 }
1284
1285 if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
1286 q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
1287 dqm_lock(dqm);
1288 retval = allocate_sdma_queue(dqm, q);
1289 dqm_unlock(dqm);
1290 if (retval)
1291 goto out;
1292 }
1293
1294 retval = allocate_doorbell(qpd, q);
1295 if (retval)
1296 goto out_deallocate_sdma_queue;
1297
1298 mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
1299 q->properties.type)];
1300
1301 if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
1302 q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
1303 dqm->asic_ops.init_sdma_vm(dqm, q, qpd);
1304 q->properties.tba_addr = qpd->tba_addr;
1305 q->properties.tma_addr = qpd->tma_addr;
1306 q->mqd_mem_obj = mqd_mgr->allocate_mqd(mqd_mgr->dev, &q->properties);
1307 if (!q->mqd_mem_obj) {
1308 retval = -ENOMEM;
1309 goto out_deallocate_doorbell;
1310 }
1311
1312 dqm_lock(dqm);
1313 /*
1314 * Eviction state logic: mark all queues as evicted, even ones
1315 * not currently active. Restoring inactive queues later only
1316 * updates the is_evicted flag but is a no-op otherwise.
1317 */
1318 q->properties.is_evicted = !!qpd->evicted;
1319 mqd_mgr->init_mqd(mqd_mgr, &q->mqd, q->mqd_mem_obj,
1320 &q->gart_mqd_addr, &q->properties);
1321
1322 list_add(&q->list, &qpd->queues_list);
1323 qpd->queue_count++;
1324
1325 if (q->properties.is_active) {
1326 increment_queue_count(dqm, q->properties.type);
1327
1328 execute_queues_cpsch(dqm,
1329 KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
1330 }
1331
1332 /*
1333 * Unconditionally increment this counter, regardless of the queue's
1334 * type or whether the queue is active.
1335 */
1336 dqm->total_queue_count++;
1337
1338 pr_debug("Total of %d queues are accountable so far\n",
1339 dqm->total_queue_count);
1340
1341 dqm_unlock(dqm);
1342 return retval;
1343
1344out_deallocate_doorbell:
1345 deallocate_doorbell(qpd, q);
1346out_deallocate_sdma_queue:
1347 if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
1348 q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
1349 dqm_lock(dqm);
1350 deallocate_sdma_queue(dqm, q);
1351 dqm_unlock(dqm);
1352 }
1353out:
1354 return retval;
1355}
1356
1357int amdkfd_fence_wait_timeout(uint64_t *fence_addr,
1358 uint64_t fence_value,
1359 unsigned int timeout_ms)
1360{
1361 unsigned long end_jiffies = msecs_to_jiffies(timeout_ms) + jiffies;
1362
1363 while (*fence_addr != fence_value) {
1364 if (time_after(jiffies, end_jiffies)) {
1365 pr_err("qcm fence wait loop timeout expired\n");
1366 /* In the HWS case, this is used to halt the driver thread
1367 * in order not to mess up CP states before doing
1368 * scandumps for FW debugging.
1369 */
1370 while (halt_if_hws_hang)
1371 schedule();
1372
1373 return -ETIME;
1374 }
1375 schedule();
1376 }
1377
1378 return 0;
1379}
1380
1381/* dqm->lock mutex has to be locked before calling this function */
1382static int map_queues_cpsch(struct device_queue_manager *dqm)
1383{
1384 int retval;
1385
1386 if (!dqm->sched_running)
1387 return 0;
1388 if (dqm->active_queue_count <= 0 || dqm->processes_count <= 0)
1389 return 0;
1390 if (dqm->active_runlist)
1391 return 0;
1392
1393 retval = pm_send_runlist(&dqm->packets, &dqm->queues);
1394 pr_debug("%s sent runlist\n", __func__);
1395 if (retval) {
1396 pr_err("failed to execute runlist\n");
1397 return retval;
1398 }
1399 dqm->active_runlist = true;
1400
1401 return retval;
1402}
1403
1404/* dqm->lock mutex has to be locked before calling this function */
1405static int unmap_queues_cpsch(struct device_queue_manager *dqm,
1406 enum kfd_unmap_queues_filter filter,
1407 uint32_t filter_param)
1408{
1409 int retval = 0;
1410 struct mqd_manager *mqd_mgr;
1411
1412 if (!dqm->sched_running)
1413 return 0;
1414 if (dqm->is_hws_hang)
1415 return -EIO;
1416 if (!dqm->active_runlist)
1417 return retval;
1418
1419 retval = pm_send_unmap_queue(&dqm->packets, KFD_QUEUE_TYPE_COMPUTE,
1420 filter, filter_param, false, 0);
1421 if (retval)
1422 return retval;
1423
1424 *dqm->fence_addr = KFD_FENCE_INIT;
1425 pm_send_query_status(&dqm->packets, dqm->fence_gpu_addr,
1426 KFD_FENCE_COMPLETED);
1427 /* Wait for the fence write from CP; this should time out if the preemption did not complete */
1428 retval = amdkfd_fence_wait_timeout(dqm->fence_addr, KFD_FENCE_COMPLETED,
1429 queue_preemption_timeout_ms);
1430 if (retval) {
1431 pr_err("The cp might be in an unrecoverable state due to an unsuccessful queues preemption\n");
1432 dqm->is_hws_hang = true;
1433 /* It's possible we're detecting a HWS hang in the
1434 * middle of a GPU reset. No need to schedule another
1435 * reset in this case.
1436 */
1437 if (!dqm->is_resetting)
1438 schedule_work(&dqm->hw_exception_work);
1439 return retval;
1440 }
1441
1442 /* In the current MEC firmware implementation, if a compute queue
1443 * doesn't respond to the preemption request in time, the HIQ will
1444 * abandon the unmap request without returning any timeout error
1445 * to the driver. Instead, MEC firmware logs the doorbell of the
1446 * unresponsive compute queue in the HIQ.MQD.queue_doorbell_id fields.
1447 * To make sure the queue unmap was successful, the driver needs to
1448 * check those fields.
1449 */
1450 mqd_mgr = dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ];
1451 if (mqd_mgr->read_doorbell_id(dqm->packets.priv_queue->queue->mqd)) {
1452 pr_err("HIQ MQD's queue_doorbell_id0 is not 0, Queue preemption time out\n");
1453 while (halt_if_hws_hang)
1454 schedule();
1455 return -ETIME;
1456 }
1457
1458 pm_release_ib(&dqm->packets);
1459 dqm->active_runlist = false;
1460
1461 return retval;
1462}
1463
1464/* dqm->lock mutex has to be locked before calling this function */
1465static int execute_queues_cpsch(struct device_queue_manager *dqm,
1466 enum kfd_unmap_queues_filter filter,
1467 uint32_t filter_param)
1468{
1469 int retval;
1470
1471 if (dqm->is_hws_hang)
1472 return -EIO;
1473 retval = unmap_queues_cpsch(dqm, filter, filter_param);
1474 if (retval)
1475 return retval;
1476
1477 return map_queues_cpsch(dqm);
1478}
1479
1480static int destroy_queue_cpsch(struct device_queue_manager *dqm,
1481 struct qcm_process_device *qpd,
1482 struct queue *q)
1483{
1484 int retval;
1485 struct mqd_manager *mqd_mgr;
1486 uint64_t sdma_val = 0;
1487 struct kfd_process_device *pdd = qpd_to_pdd(qpd);
1488
1489 /* Get the SDMA queue stats */
1490 if ((q->properties.type == KFD_QUEUE_TYPE_SDMA) ||
1491 (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)) {
1492 retval = read_sdma_queue_counter((uint64_t __user *)q->properties.read_ptr,
1493 &sdma_val);
1494 if (retval)
1495 pr_err("Failed to read SDMA queue counter for queue: %d\n",
1496 q->properties.queue_id);
1497 }
1498
1499 retval = 0;
1500
1501 /* remove queue from list to prevent rescheduling after preemption */
1502 dqm_lock(dqm);
1503
1504 if (qpd->is_debug) {
1505 /*
1506 * error, currently we do not allow destroying a queue
1507 * of a process that is being debugged
1508 */
1509 retval = -EBUSY;
1510 goto failed_try_destroy_debugged_queue;
1511
1512 }
1513
1514 mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
1515 q->properties.type)];
1516
1517 deallocate_doorbell(qpd, q);
1518
1519 if ((q->properties.type == KFD_QUEUE_TYPE_SDMA) ||
1520 (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)) {
1521 deallocate_sdma_queue(dqm, q);
1522 pdd->sdma_past_activity_counter += sdma_val;
1523 }
1524
1525 list_del(&q->list);
1526 qpd->queue_count--;
1527 if (q->properties.is_active) {
1528 decrement_queue_count(dqm, q->properties.type);
1529 retval = execute_queues_cpsch(dqm,
1530 KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
1531 if (retval == -ETIME)
1532 qpd->reset_wavefronts = true;
1533 if (q->properties.is_gws) {
1534 dqm->gws_queue_count--;
1535 qpd->mapped_gws_queue = false;
1536 }
1537 }
1538
1539 /*
1540 * Unconditionally decrement this counter, regardless of the queue's
1541 * type
1542 */
1543 dqm->total_queue_count--;
1544 pr_debug("Total of %d queues are accountable so far\n",
1545 dqm->total_queue_count);
1546
1547 dqm_unlock(dqm);
1548
1549 /* Do free_mqd after dqm_unlock(dqm) to avoid circular locking */
1550 mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
1551
1552 return retval;
1553
1554failed_try_destroy_debugged_queue:
1555
1556 dqm_unlock(dqm);
1557 return retval;
1558}
1559
1560/*
1561 * Low bits must be 0000/FFFF as required by HW, high bits must be 0 to
1562 * stay in user mode.
1563 */
1564#define APE1_FIXED_BITS_MASK 0xFFFF80000000FFFFULL
1565/* APE1 limit is inclusive and 64K aligned. */
1566#define APE1_LIMIT_ALIGNMENT 0xFFFF
1567
1568static bool set_cache_memory_policy(struct device_queue_manager *dqm,
1569 struct qcm_process_device *qpd,
1570 enum cache_policy default_policy,
1571 enum cache_policy alternate_policy,
1572 void __user *alternate_aperture_base,
1573 uint64_t alternate_aperture_size)
1574{
1575 bool retval = true;
1576
1577 if (!dqm->asic_ops.set_cache_memory_policy)
1578 return retval;
1579
1580 dqm_lock(dqm);
1581
1582 if (alternate_aperture_size == 0) {
1583 /* base > limit disables APE1 */
1584 qpd->sh_mem_ape1_base = 1;
1585 qpd->sh_mem_ape1_limit = 0;
1586 } else {
1587 /*
1588 * In FSA64, APE1_Base[63:0] = { 16{SH_MEM_APE1_BASE[31]},
1589 * SH_MEM_APE1_BASE[31:0], 0x0000 }
1590 * APE1_Limit[63:0] = { 16{SH_MEM_APE1_LIMIT[31]},
1591 * SH_MEM_APE1_LIMIT[31:0], 0xFFFF }
1592 * Verify that the base and size parameters can be
1593 * represented in this format and convert them.
1594 * Additionally restrict APE1 to user-mode addresses.
1595 */
1596
1597 uint64_t base = (uintptr_t)alternate_aperture_base;
1598 uint64_t limit = base + alternate_aperture_size - 1;
1599
1600 if (limit <= base || (base & APE1_FIXED_BITS_MASK) != 0 ||
1601 (limit & APE1_FIXED_BITS_MASK) != APE1_LIMIT_ALIGNMENT) {
1602 retval = false;
1603 goto out;
1604 }
1605
1606 qpd->sh_mem_ape1_base = base >> 16;
1607 qpd->sh_mem_ape1_limit = limit >> 16;
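 /* Worked example (hypothetical values): base 0x200000000, size
 * 0x100000 -> sh_mem_ape1_base = 0x20000, sh_mem_ape1_limit = 0x2000f
 * (the limit register encodes the last 64K page, inclusive).
 */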
1608 }
1609
1610 retval = dqm->asic_ops.set_cache_memory_policy(
1611 dqm,
1612 qpd,
1613 default_policy,
1614 alternate_policy,
1615 alternate_aperture_base,
1616 alternate_aperture_size);
1617
1618 if ((dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) && (qpd->vmid != 0))
1619 program_sh_mem_settings(dqm, qpd);
1620
1621 pr_debug("sh_mem_config: 0x%x, ape1_base: 0x%x, ape1_limit: 0x%x\n",
1622 qpd->sh_mem_config, qpd->sh_mem_ape1_base,
1623 qpd->sh_mem_ape1_limit);
1624
1625out:
1626 dqm_unlock(dqm);
1627 return retval;
1628}
1629
1630static int process_termination_nocpsch(struct device_queue_manager *dqm,
1631 struct qcm_process_device *qpd)
1632{
1633 struct queue *q;
1634 struct device_process_node *cur, *next_dpn;
1635 int retval = 0;
1636 bool found = false;
1637
1638 dqm_lock(dqm);
1639
1640 /* Clear all user mode queues */
1641 while (!list_empty(&qpd->queues_list)) {
1642 struct mqd_manager *mqd_mgr;
1643 int ret;
1644
1645 q = list_first_entry(&qpd->queues_list, struct queue, list);
1646 mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
1647 q->properties.type)];
1648 ret = destroy_queue_nocpsch_locked(dqm, qpd, q);
1649 if (ret)
1650 retval = ret;
1651 dqm_unlock(dqm);
1652 mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
1653 dqm_lock(dqm);
1654 }
1655
1656 /* Unregister process */
1657 list_for_each_entry_safe(cur, next_dpn, &dqm->queues, list) {
1658 if (qpd == cur->qpd) {
1659 list_del(&cur->list);
1660 kfree(cur);
1661 dqm->processes_count--;
1662 found = true;
1663 break;
1664 }
1665 }
1666
1667 dqm_unlock(dqm);
1668
1669 /* Outside the DQM lock because under the DQM lock we can't do
1670 * reclaim or take other locks that others hold while reclaiming.
1671 */
1672 if (found)
1673 kfd_dec_compute_active(dqm->dev);
1674
1675 return retval;
1676}
1677
1678static int get_wave_state(struct device_queue_manager *dqm,
1679 struct queue *q,
1680 void __user *ctl_stack,
1681 u32 *ctl_stack_used_size,
1682 u32 *save_area_used_size)
1683{
1684 struct mqd_manager *mqd_mgr;
1685
1686 dqm_lock(dqm);
1687
1688 mqd_mgr = dqm->mqd_mgrs[KFD_MQD_TYPE_CP];
1689
1690 if (q->properties.type != KFD_QUEUE_TYPE_COMPUTE ||
1691 q->properties.is_active || !q->device->cwsr_enabled ||
1692 !mqd_mgr->get_wave_state) {
1693 dqm_unlock(dqm);
1694 return -EINVAL;
1695 }
1696
1697 dqm_unlock(dqm);
1698
1699 /*
1700 * get_wave_state is outside the dqm lock to prevent circular locking
1701 * and the queue should be protected against destruction by the process
1702 * lock.
1703 */
1704 return mqd_mgr->get_wave_state(mqd_mgr, q->mqd, ctl_stack,
1705 ctl_stack_used_size, save_area_used_size);
1706}
1707
1708static int process_termination_cpsch(struct device_queue_manager *dqm,
1709 struct qcm_process_device *qpd)
1710{
1711 int retval;
1712 struct queue *q;
1713 struct kernel_queue *kq, *kq_next;
1714 struct mqd_manager *mqd_mgr;
1715 struct device_process_node *cur, *next_dpn;
1716 enum kfd_unmap_queues_filter filter =
1717 KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES;
1718 bool found = false;
1719
1720 retval = 0;
1721
1722 dqm_lock(dqm);
1723
1724 /* Clean all kernel queues */
1725 list_for_each_entry_safe(kq, kq_next, &qpd->priv_queue_list, list) {
1726 list_del(&kq->list);
1727 decrement_queue_count(dqm, kq->queue->properties.type);
1728 qpd->is_debug = false;
1729 dqm->total_queue_count--;
1730 filter = KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES;
1731 }
1732
1733 /* Clear all user mode queues */
1734 list_for_each_entry(q, &qpd->queues_list, list) {
1735 if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
1736 deallocate_sdma_queue(dqm, q);
1737 else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
1738 deallocate_sdma_queue(dqm, q);
1739
1740 if (q->properties.is_active) {
1741 decrement_queue_count(dqm, q->properties.type);
1742 if (q->properties.is_gws) {
1743 dqm->gws_queue_count--;
1744 qpd->mapped_gws_queue = false;
1745 }
1746 }
1747
1748 dqm->total_queue_count--;
1749 }
1750
1751 /* Unregister process */
1752 list_for_each_entry_safe(cur, next_dpn, &dqm->queues, list) {
1753 if (qpd == cur->qpd) {
1754 list_del(&cur->list);
1755 kfree(cur);
1756 dqm->processes_count--;
1757 found = true;
1758 break;
1759 }
1760 }
1761
1762 retval = execute_queues_cpsch(dqm, filter, 0);
1763 if ((!dqm->is_hws_hang) && (retval || qpd->reset_wavefronts)) {
1764 pr_warn("Resetting wave fronts (cpsch) on dev %p\n", dqm->dev);
1765 dbgdev_wave_reset_wavefronts(dqm->dev, qpd->pqm->process);
1766 qpd->reset_wavefronts = false;
1767 }
1768
1769 /* Lastly, free mqd resources.
1770 * Do free_mqd() after dqm_unlock to avoid circular locking.
1771 */
1772 while (!list_empty(&qpd->queues_list)) {
1773 q = list_first_entry(&qpd->queues_list, struct queue, list);
1774 mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
1775 q->properties.type)];
1776 list_del(&q->list);
1777 qpd->queue_count--;
1778 dqm_unlock(dqm);
1779 mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
1780 dqm_lock(dqm);
1781 }
1782 dqm_unlock(dqm);
1783
1784 /* Outside the DQM lock because under the DQM lock we can't do
1785 * reclaim or take other locks that others hold while reclaiming.
1786 */
1787 if (found)
1788 kfd_dec_compute_active(dqm->dev);
1789
1790 return retval;
1791}
1792
1793static int init_mqd_managers(struct device_queue_manager *dqm)
1794{
1795 int i, j;
1796 struct mqd_manager *mqd_mgr;
1797
1798 for (i = 0; i < KFD_MQD_TYPE_MAX; i++) {
1799 mqd_mgr = dqm->asic_ops.mqd_manager_init(i, dqm->dev);
1800 if (!mqd_mgr) {
1801 pr_err("mqd manager [%d] initialization failed\n", i);
1802 goto out_free;
1803 }
1804 dqm->mqd_mgrs[i] = mqd_mgr;
1805 }
1806
1807 return 0;
1808
1809out_free:
1810 for (j = 0; j < i; j++) {
1811 kfree(dqm->mqd_mgrs[j]);
1812 dqm->mqd_mgrs[j] = NULL;
1813 }
1814
1815 return -ENOMEM;
1816}
1817
1818/* Allocate one HIQ MQD (HWS) and all SDMA MQDs in one contiguous chunk */
1819static int allocate_hiq_sdma_mqd(struct device_queue_manager *dqm)
1820{
1821 int retval;
1822 struct kfd_dev *dev = dqm->dev;
1823 struct kfd_mem_obj *mem_obj = &dqm->hiq_sdma_mqd;
1824 uint32_t size = dqm->mqd_mgrs[KFD_MQD_TYPE_SDMA]->mqd_size *
1825 get_num_all_sdma_engines(dqm) *
1826 dev->device_info->num_sdma_queues_per_engine +
1827 dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ]->mqd_size;
1828
1829 retval = amdgpu_amdkfd_alloc_gtt_mem(dev->kgd, size,
1830 &(mem_obj->gtt_mem), &(mem_obj->gpu_addr),
1831 (void *)&(mem_obj->cpu_ptr), false);
1832
1833 return retval;
1834}
1835
1836struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
1837{
1838 struct device_queue_manager *dqm;
1839
1840 pr_debug("Loading device queue manager\n");
1841
1842 dqm = kzalloc(sizeof(*dqm), GFP_KERNEL);
1843 if (!dqm)
1844 return NULL;
1845
1846 switch (dev->device_info->asic_family) {
1847 /* HWS is not available on Hawaii. */
1848 case CHIP_HAWAII:
1849 /* HWS depends on CWSR for timely dequeue. CWSR is not
1850 * available on Tonga.
1851 *
1852 * FIXME: This argument also applies to Kaveri.
1853 */
1854 case CHIP_TONGA:
1855 dqm->sched_policy = KFD_SCHED_POLICY_NO_HWS;
1856 break;
1857 default:
1858 dqm->sched_policy = sched_policy;
1859 break;
1860 }
1861
1862 dqm->dev = dev;
1863 switch (dqm->sched_policy) {
1864 case KFD_SCHED_POLICY_HWS:
1865 case KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION:
1866 /* initialize dqm for cp scheduling */
1867 dqm->ops.create_queue = create_queue_cpsch;
1868 dqm->ops.initialize = initialize_cpsch;
1869 dqm->ops.start = start_cpsch;
1870 dqm->ops.stop = stop_cpsch;
1871 dqm->ops.pre_reset = pre_reset;
1872 dqm->ops.destroy_queue = destroy_queue_cpsch;
1873 dqm->ops.update_queue = update_queue;
1874 dqm->ops.register_process = register_process;
1875 dqm->ops.unregister_process = unregister_process;
1876 dqm->ops.uninitialize = uninitialize;
1877 dqm->ops.create_kernel_queue = create_kernel_queue_cpsch;
1878 dqm->ops.destroy_kernel_queue = destroy_kernel_queue_cpsch;
1879 dqm->ops.set_cache_memory_policy = set_cache_memory_policy;
1880 dqm->ops.process_termination = process_termination_cpsch;
1881 dqm->ops.evict_process_queues = evict_process_queues_cpsch;
1882 dqm->ops.restore_process_queues = restore_process_queues_cpsch;
1883 dqm->ops.get_wave_state = get_wave_state;
1884 break;
1885 case KFD_SCHED_POLICY_NO_HWS:
1886 /* initialize dqm for no cp scheduling */
1887 dqm->ops.start = start_nocpsch;
1888 dqm->ops.stop = stop_nocpsch;
1889 dqm->ops.pre_reset = pre_reset;
1890 dqm->ops.create_queue = create_queue_nocpsch;
1891 dqm->ops.destroy_queue = destroy_queue_nocpsch;
1892 dqm->ops.update_queue = update_queue;
1893 dqm->ops.register_process = register_process;
1894 dqm->ops.unregister_process = unregister_process;
1895 dqm->ops.initialize = initialize_nocpsch;
1896 dqm->ops.uninitialize = uninitialize;
1897 dqm->ops.set_cache_memory_policy = set_cache_memory_policy;
1898 dqm->ops.process_termination = process_termination_nocpsch;
1899 dqm->ops.evict_process_queues = evict_process_queues_nocpsch;
1900 dqm->ops.restore_process_queues =
1901 restore_process_queues_nocpsch;
1902 dqm->ops.get_wave_state = get_wave_state;
1903 break;
1904 default:
1905 pr_err("Invalid scheduling policy %d\n", dqm->sched_policy);
1906 goto out_free;
1907 }
1908
1909 switch (dev->device_info->asic_family) {
1910 case CHIP_CARRIZO:
1911 device_queue_manager_init_vi(&dqm->asic_ops);
1912 break;
1913
1914 case CHIP_KAVERI:
1915 device_queue_manager_init_cik(&dqm->asic_ops);
1916 break;
1917
1918 case CHIP_HAWAII:
1919 device_queue_manager_init_cik_hawaii(&dqm->asic_ops);
1920 break;
1921
1922 case CHIP_TONGA:
1923 case CHIP_FIJI:
1924 case CHIP_POLARIS10:
1925 case CHIP_POLARIS11:
1926 case CHIP_POLARIS12:
1927 case CHIP_VEGAM:
1928 device_queue_manager_init_vi_tonga(&dqm->asic_ops);
1929 break;
1930
1931 case CHIP_VEGA10:
1932 case CHIP_VEGA12:
1933 case CHIP_VEGA20:
1934 case CHIP_RAVEN:
1935 case CHIP_RENOIR:
1936 case CHIP_ARCTURUS:
1937 case CHIP_ALDEBARAN:
1938 device_queue_manager_init_v9(&dqm->asic_ops);
1939 break;
1940 case CHIP_NAVI10:
1941 case CHIP_NAVI12:
1942 case CHIP_NAVI14:
1943 case CHIP_SIENNA_CICHLID:
1944 case CHIP_NAVY_FLOUNDER:
1945 case CHIP_VANGOGH:
1946 case CHIP_DIMGREY_CAVEFISH:
1947 case CHIP_BEIGE_GOBY:
1948 case CHIP_YELLOW_CARP:
1949 device_queue_manager_init_v10_navi10(&dqm->asic_ops);
1950 break;
1951 default:
1952 WARN(1, "Unexpected ASIC family %u",
1953 dev->device_info->asic_family);
1954 goto out_free;
1955 }
1956
1957 if (init_mqd_managers(dqm))
1958 goto out_free;
1959
1960 if (allocate_hiq_sdma_mqd(dqm)) {
1961 pr_err("Failed to allocate hiq sdma mqd trunk buffer\n");
1962 goto out_free;
1963 }
1964
1965 if (!dqm->ops.initialize(dqm))
1966 return dqm;
1967
1968out_free:
1969 kfree(dqm);
1970 return NULL;
1971}
1972
1973static void deallocate_hiq_sdma_mqd(struct kfd_dev *dev,
1974 struct kfd_mem_obj *mqd)
1975{
1976 WARN(!mqd, "No hiq sdma mqd trunk to free");
1977
1978 amdgpu_amdkfd_free_gtt_mem(dev->kgd, mqd->gtt_mem);
1979}
1980
1981void device_queue_manager_uninit(struct device_queue_manager *dqm)
1982{
1983 dqm->ops.uninitialize(dqm);
1984 deallocate_hiq_sdma_mqd(dqm->dev, &dqm->hiq_sdma_mqd);
1985 kfree(dqm);
1986}
1987
1988int kfd_process_vm_fault(struct device_queue_manager *dqm, u32 pasid)
1989{
1990 struct kfd_process_device *pdd;
1991 struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);
1992 int ret = 0;
1993
1994 if (!p)
1995 return -EINVAL;
1996 WARN(debug_evictions, "Evicting pid %d", p->lead_thread->pid);
1997 pdd = kfd_get_process_device_data(dqm->dev, p);
1998 if (pdd)
1999 ret = dqm->ops.evict_process_queues(dqm, &pdd->qpd);
2000 kfd_unref_process(p);
2001
2002 return ret;
2003}
2004
2005static void kfd_process_hw_exception(struct work_struct *work)
2006{
2007 struct device_queue_manager *dqm = container_of(work,
2008 struct device_queue_manager, hw_exception_work);
2009 amdgpu_amdkfd_gpu_reset(dqm->dev->kgd);
2010}
2011
2012#if defined(CONFIG_DEBUG_FS)
2013
2014static void seq_reg_dump(struct seq_file *m,
2015 uint32_t (*dump)[2], uint32_t n_regs)
2016{
2017 uint32_t i, count;
2018
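 /* Print up to 8 register values per line; start a new line whenever
 * the dumped register offsets are not contiguous.
 */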
2019 for (i = 0, count = 0; i < n_regs; i++) {
2020 if (count == 0 ||
2021 dump[i-1][0] + sizeof(uint32_t) != dump[i][0]) {
2022 seq_printf(m, "%s %08x: %08x",
2023 i ? "\n" : "",
2024 dump[i][0], dump[i][1]);
2025 count = 7;
2026 } else {
2027 seq_printf(m, " %08x", dump[i][1]);
2028 count--;
2029 }
2030 }
2031
2032 seq_puts(m, "\n");
2033}
2034
2035int dqm_debugfs_hqds(struct seq_file *m, void *data)
2036{
2037 struct device_queue_manager *dqm = data;
2038 uint32_t (*dump)[2], n_regs;
2039 int pipe, queue;
2040 int r = 0;
2041
2042 if (!dqm->sched_running) {
2043 seq_printf(m, " Device is stopped\n");
2044
2045 return 0;
2046 }
2047
2048 r = dqm->dev->kfd2kgd->hqd_dump(dqm->dev->kgd,
2049 KFD_CIK_HIQ_PIPE, KFD_CIK_HIQ_QUEUE,
2050 &dump, &n_regs);
2051 if (!r) {
2052 seq_printf(m, " HIQ on MEC %d Pipe %d Queue %d\n",
2053 KFD_CIK_HIQ_PIPE/get_pipes_per_mec(dqm)+1,
2054 KFD_CIK_HIQ_PIPE%get_pipes_per_mec(dqm),
2055 KFD_CIK_HIQ_QUEUE);
2056 seq_reg_dump(m, dump, n_regs);
2057
2058 kfree(dump);
2059 }
2060
2061 for (pipe = 0; pipe < get_pipes_per_mec(dqm); pipe++) {
2062 int pipe_offset = pipe * get_queues_per_pipe(dqm);
2063
2064 for (queue = 0; queue < get_queues_per_pipe(dqm); queue++) {
2065 if (!test_bit(pipe_offset + queue,
2066 dqm->dev->shared_resources.cp_queue_bitmap))
2067 continue;
2068
2069 r = dqm->dev->kfd2kgd->hqd_dump(
2070 dqm->dev->kgd, pipe, queue, &dump, &n_regs);
2071 if (r)
2072 break;
2073
2074 seq_printf(m, " CP Pipe %d, Queue %d\n",
2075 pipe, queue);
2076 seq_reg_dump(m, dump, n_regs);
2077
2078 kfree(dump);
2079 }
2080 }
2081
2082 for (pipe = 0; pipe < get_num_all_sdma_engines(dqm); pipe++) {
2083 for (queue = 0;
2084 queue < dqm->dev->device_info->num_sdma_queues_per_engine;
2085 queue++) {
2086 r = dqm->dev->kfd2kgd->hqd_sdma_dump(
2087 dqm->dev->kgd, pipe, queue, &dump, &n_regs);
2088 if (r)
2089 break;
2090
2091 seq_printf(m, " SDMA Engine %d, RLC %d\n",
2092 pipe, queue);
2093 seq_reg_dump(m, dump, n_regs);
2094
2095 kfree(dump);
2096 }
2097 }
2098
2099 return r;
2100}
2101
2102int dqm_debugfs_execute_queues(struct device_queue_manager *dqm)
2103{
2104 int r = 0;
2105
2106 dqm_lock(dqm);
2107 dqm->active_runlist = true;
2108 r = execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
2109 dqm_unlock(dqm);
2110
2111 return r;
2112}
2113
2114#endif