drivers/accel/ivpu/ivpu_drv.c (v6.8)
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * Copyright (C) 2020-2023 Intel Corporation
  4 */
  5
  6#include <linux/firmware.h>
  7#include <linux/module.h>
  8#include <linux/pci.h>
  9#include <linux/pm_runtime.h>
 10
 11#include <drm/drm_accel.h>
 12#include <drm/drm_file.h>
 13#include <drm/drm_gem.h>
 14#include <drm/drm_ioctl.h>
 15#include <drm/drm_prime.h>
 16
 17#include "vpu_boot_api.h"
 18#include "ivpu_debugfs.h"
 19#include "ivpu_drv.h"
 20#include "ivpu_fw.h"
 21#include "ivpu_fw_log.h"
 22#include "ivpu_gem.h"
 23#include "ivpu_hw.h"
 24#include "ivpu_ipc.h"
 25#include "ivpu_job.h"
 26#include "ivpu_jsm_msg.h"
 27#include "ivpu_mmu.h"
 28#include "ivpu_mmu_context.h"
 29#include "ivpu_pm.h"
 30
 31#ifndef DRIVER_VERSION_STR
 32#define DRIVER_VERSION_STR __stringify(DRM_IVPU_DRIVER_MAJOR) "." \
 33			   __stringify(DRM_IVPU_DRIVER_MINOR) "."
 34#endif
 35
 36static struct lock_class_key submitted_jobs_xa_lock_class_key;
 37
 38int ivpu_dbg_mask;
 39module_param_named(dbg_mask, ivpu_dbg_mask, int, 0644);
 40MODULE_PARM_DESC(dbg_mask, "Driver debug mask. See IVPU_DBG_* macros.");
 41
 42int ivpu_test_mode;
 43module_param_named_unsafe(test_mode, ivpu_test_mode, int, 0644);
 44MODULE_PARM_DESC(test_mode, "Test mode mask. See IVPU_TEST_MODE_* macros.");
 45
 46u8 ivpu_pll_min_ratio;
 47module_param_named(pll_min_ratio, ivpu_pll_min_ratio, byte, 0644);
 48MODULE_PARM_DESC(pll_min_ratio, "Minimum PLL ratio used to set VPU frequency");
 49
 50u8 ivpu_pll_max_ratio = U8_MAX;
 51module_param_named(pll_max_ratio, ivpu_pll_max_ratio, byte, 0644);
 52MODULE_PARM_DESC(pll_max_ratio, "Maximum PLL ratio used to set VPU frequency");
 53
 54bool ivpu_disable_mmu_cont_pages;
 55module_param_named(disable_mmu_cont_pages, ivpu_disable_mmu_cont_pages, bool, 0644);
 56MODULE_PARM_DESC(disable_mmu_cont_pages, "Disable MMU contiguous pages optimization");
 57
 58struct ivpu_file_priv *ivpu_file_priv_get(struct ivpu_file_priv *file_priv)
 59{
 60	struct ivpu_device *vdev = file_priv->vdev;
 61
 62	kref_get(&file_priv->ref);
 63
 64	ivpu_dbg(vdev, KREF, "file_priv get: ctx %u refcount %u\n",
 65		 file_priv->ctx.id, kref_read(&file_priv->ref));
 66
 67	return file_priv;
 68}
 69
 70static void file_priv_unbind(struct ivpu_device *vdev, struct ivpu_file_priv *file_priv)
 71{
 72	mutex_lock(&file_priv->lock);
 73	if (file_priv->bound) {
 74		ivpu_dbg(vdev, FILE, "file_priv unbind: ctx %u\n", file_priv->ctx.id);
 75
 76		ivpu_cmdq_release_all_locked(file_priv);
 77		ivpu_jsm_context_release(vdev, file_priv->ctx.id);
 78		ivpu_bo_unbind_all_bos_from_context(vdev, &file_priv->ctx);
 79		ivpu_mmu_user_context_fini(vdev, &file_priv->ctx);
 80		file_priv->bound = false;
 81		drm_WARN_ON(&vdev->drm, !xa_erase_irq(&vdev->context_xa, file_priv->ctx.id));
 82	}
 83	mutex_unlock(&file_priv->lock);
 84}
 85
 86static void file_priv_release(struct kref *ref)
 87{
 88	struct ivpu_file_priv *file_priv = container_of(ref, struct ivpu_file_priv, ref);
 89	struct ivpu_device *vdev = file_priv->vdev;
 90
 91	ivpu_dbg(vdev, FILE, "file_priv release: ctx %u bound %d\n",
 92		 file_priv->ctx.id, (bool)file_priv->bound);
 93
 94	pm_runtime_get_sync(vdev->drm.dev);
 95	mutex_lock(&vdev->context_list_lock);
 96	file_priv_unbind(vdev, file_priv);
 97	mutex_unlock(&vdev->context_list_lock);
 98	pm_runtime_put_autosuspend(vdev->drm.dev);
 99
100	mutex_destroy(&file_priv->lock);
101	kfree(file_priv);
102}
103
104void ivpu_file_priv_put(struct ivpu_file_priv **link)
105{
106	struct ivpu_file_priv *file_priv = *link;
107	struct ivpu_device *vdev = file_priv->vdev;
108
109	drm_WARN_ON(&vdev->drm, !file_priv);
110
111	ivpu_dbg(vdev, KREF, "file_priv put: ctx %u refcount %u\n",
112		 file_priv->ctx.id, kref_read(&file_priv->ref));
113
114	*link = NULL;
115	kref_put(&file_priv->ref, file_priv_release);
116}
117
118static int ivpu_get_capabilities(struct ivpu_device *vdev, struct drm_ivpu_param *args)
119{
120	switch (args->index) {
121	case DRM_IVPU_CAP_METRIC_STREAMER:
122		args->value = 0;
123		break;
124	case DRM_IVPU_CAP_DMA_MEMORY_RANGE:
125		args->value = 1;
126		break;
127	default:
128		return -EINVAL;
129	}
130
131	return 0;
132}
133
134static int ivpu_get_core_clock_rate(struct ivpu_device *vdev, u64 *clk_rate)
135{
136	int ret;
137
138	ret = ivpu_rpm_get_if_active(vdev);
139	if (ret < 0)
140		return ret;
141
142	*clk_rate = ret ? ivpu_hw_reg_pll_freq_get(vdev) : 0;
143
144	if (ret)
145		ivpu_rpm_put(vdev);
146
147	return 0;
148}
149
150static int ivpu_get_param_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
151{
152	struct ivpu_file_priv *file_priv = file->driver_priv;
153	struct ivpu_device *vdev = file_priv->vdev;
154	struct pci_dev *pdev = to_pci_dev(vdev->drm.dev);
155	struct drm_ivpu_param *args = data;
156	int ret = 0;
157	int idx;
158
159	if (!drm_dev_enter(dev, &idx))
160		return -ENODEV;
161
162	switch (args->param) {
163	case DRM_IVPU_PARAM_DEVICE_ID:
164		args->value = pdev->device;
165		break;
166	case DRM_IVPU_PARAM_DEVICE_REVISION:
167		args->value = pdev->revision;
168		break;
169	case DRM_IVPU_PARAM_PLATFORM_TYPE:
170		args->value = vdev->platform;
171		break;
172	case DRM_IVPU_PARAM_CORE_CLOCK_RATE:
173		ret = ivpu_get_core_clock_rate(vdev, &args->value);
174		break;
175	case DRM_IVPU_PARAM_NUM_CONTEXTS:
176		args->value = ivpu_get_context_count(vdev);
177		break;
178	case DRM_IVPU_PARAM_CONTEXT_BASE_ADDRESS:
179		args->value = vdev->hw->ranges.user.start;
180		break;
181	case DRM_IVPU_PARAM_CONTEXT_ID:
182		args->value = file_priv->ctx.id;
183		break;
184	case DRM_IVPU_PARAM_FW_API_VERSION:
185		if (args->index < VPU_FW_API_VER_NUM) {
186			struct vpu_firmware_header *fw_hdr;
187
188			fw_hdr = (struct vpu_firmware_header *)vdev->fw->file->data;
189			args->value = fw_hdr->api_version[args->index];
190		} else {
191			ret = -EINVAL;
192		}
193		break;
194	case DRM_IVPU_PARAM_ENGINE_HEARTBEAT:
195		ret = ivpu_jsm_get_heartbeat(vdev, args->index, &args->value);
196		break;
197	case DRM_IVPU_PARAM_UNIQUE_INFERENCE_ID:
198		args->value = (u64)atomic64_inc_return(&vdev->unique_id_counter);
199		break;
200	case DRM_IVPU_PARAM_TILE_CONFIG:
201		args->value = vdev->hw->tile_fuse;
202		break;
203	case DRM_IVPU_PARAM_SKU:
204		args->value = vdev->hw->sku;
205		break;
206	case DRM_IVPU_PARAM_CAPABILITIES:
207		ret = ivpu_get_capabilities(vdev, args);
208		break;
209	default:
210		ret = -EINVAL;
211		break;
212	}
213
214	drm_dev_exit(idx);
215	return ret;
216}
217
218static int ivpu_set_param_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
219{
220	struct drm_ivpu_param *args = data;
221	int ret = 0;
222
223	switch (args->param) {
224	default:
225		ret = -EINVAL;
226	}
227
228	return ret;
229}
230
231static int ivpu_open(struct drm_device *dev, struct drm_file *file)
232{
233	struct ivpu_device *vdev = to_ivpu_device(dev);
234	struct ivpu_file_priv *file_priv;
235	u32 ctx_id;
236	int idx, ret;
237
238	if (!drm_dev_enter(dev, &idx))
239		return -ENODEV;
240
241	file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
242	if (!file_priv) {
243		ret = -ENOMEM;
244		goto err_dev_exit;
245	}
246
247	file_priv->vdev = vdev;
248	file_priv->bound = true;
249	kref_init(&file_priv->ref);
250	mutex_init(&file_priv->lock);
251
252	mutex_lock(&vdev->context_list_lock);
253
254	ret = xa_alloc_irq(&vdev->context_xa, &ctx_id, file_priv,
255			   vdev->context_xa_limit, GFP_KERNEL);
256	if (ret) {
257		ivpu_err(vdev, "Failed to allocate context id: %d\n", ret);
258		goto err_unlock;
259	}
260
261	ret = ivpu_mmu_user_context_init(vdev, &file_priv->ctx, ctx_id);
262	if (ret)
263		goto err_xa_erase;
264
265	mutex_unlock(&vdev->context_list_lock);
266	drm_dev_exit(idx);
267
268	file->driver_priv = file_priv;
269
270	ivpu_dbg(vdev, FILE, "file_priv create: ctx %u process %s pid %d\n",
271		 ctx_id, current->comm, task_pid_nr(current));
272
273	return 0;
274
275err_xa_erase:
276	xa_erase_irq(&vdev->context_xa, ctx_id);
277err_unlock:
278	mutex_unlock(&vdev->context_list_lock);
279	mutex_destroy(&file_priv->lock);
280	kfree(file_priv);
281err_dev_exit:
282	drm_dev_exit(idx);
283	return ret;
284}
285
286static void ivpu_postclose(struct drm_device *dev, struct drm_file *file)
287{
288	struct ivpu_file_priv *file_priv = file->driver_priv;
289	struct ivpu_device *vdev = to_ivpu_device(dev);
290
291	ivpu_dbg(vdev, FILE, "file_priv close: ctx %u process %s pid %d\n",
292		 file_priv->ctx.id, current->comm, task_pid_nr(current));
293
294	ivpu_file_priv_put(&file_priv);
295}
296
297static const struct drm_ioctl_desc ivpu_drm_ioctls[] = {
298	DRM_IOCTL_DEF_DRV(IVPU_GET_PARAM, ivpu_get_param_ioctl, 0),
299	DRM_IOCTL_DEF_DRV(IVPU_SET_PARAM, ivpu_set_param_ioctl, 0),
300	DRM_IOCTL_DEF_DRV(IVPU_BO_CREATE, ivpu_bo_create_ioctl, 0),
301	DRM_IOCTL_DEF_DRV(IVPU_BO_INFO, ivpu_bo_info_ioctl, 0),
302	DRM_IOCTL_DEF_DRV(IVPU_SUBMIT, ivpu_submit_ioctl, 0),
303	DRM_IOCTL_DEF_DRV(IVPU_BO_WAIT, ivpu_bo_wait_ioctl, 0),
304};
305
306static int ivpu_wait_for_ready(struct ivpu_device *vdev)
307{
308	struct ivpu_ipc_consumer cons;
309	struct ivpu_ipc_hdr ipc_hdr;
310	unsigned long timeout;
311	int ret;
312
313	if (ivpu_test_mode & IVPU_TEST_MODE_FW_TEST)
314		return 0;
315
316	ivpu_ipc_consumer_add(vdev, &cons, IVPU_IPC_CHAN_BOOT_MSG, NULL);
317
318	timeout = jiffies + msecs_to_jiffies(vdev->timeout.boot);
319	while (1) {
320		ivpu_ipc_irq_handler(vdev, NULL);
321		ret = ivpu_ipc_receive(vdev, &cons, &ipc_hdr, NULL, 0);
322		if (ret != -ETIMEDOUT || time_after_eq(jiffies, timeout))
323			break;
324
325		cond_resched();
326	}
327
328	ivpu_ipc_consumer_del(vdev, &cons);
329
330	if (!ret && ipc_hdr.data_addr != IVPU_IPC_BOOT_MSG_DATA_ADDR) {
331		ivpu_err(vdev, "Invalid VPU ready message: 0x%x\n",
332			 ipc_hdr.data_addr);
333		return -EIO;
334	}
335
336	if (!ret)
337		ivpu_dbg(vdev, PM, "VPU ready message received successfully\n");
338
339	return ret;
340}
341
342/**
343 * ivpu_boot() - Start VPU firmware
344 * @vdev: VPU device
345 *
346 * This function is paired with ivpu_shutdown() but it doesn't power up the
347 * VPU because power up has to be called very early in ivpu_probe().
348 */
349int ivpu_boot(struct ivpu_device *vdev)
350{
351	int ret;
352
353	/* Update boot params located at first 4KB of FW memory */
354	ivpu_fw_boot_params_setup(vdev, ivpu_bo_vaddr(vdev->fw->mem));
355
356	ret = ivpu_hw_boot_fw(vdev);
357	if (ret) {
358		ivpu_err(vdev, "Failed to start the firmware: %d\n", ret);
359		return ret;
360	}
361
362	ret = ivpu_wait_for_ready(vdev);
363	if (ret) {
364		ivpu_err(vdev, "Failed to boot the firmware: %d\n", ret);
365		ivpu_hw_diagnose_failure(vdev);
366		ivpu_mmu_evtq_dump(vdev);
367		ivpu_fw_log_dump(vdev);
368		return ret;
369	}
370
371	ivpu_hw_irq_clear(vdev);
372	enable_irq(vdev->irq);
373	ivpu_hw_irq_enable(vdev);
374	ivpu_ipc_enable(vdev);
375	return 0;
376}
377
378void ivpu_prepare_for_reset(struct ivpu_device *vdev)
379{
380	ivpu_hw_irq_disable(vdev);
381	disable_irq(vdev->irq);
382	ivpu_ipc_disable(vdev);
383	ivpu_mmu_disable(vdev);
384}
385
386int ivpu_shutdown(struct ivpu_device *vdev)
387{
388	int ret;
389
390	ivpu_prepare_for_reset(vdev);
391
392	ret = ivpu_hw_power_down(vdev);
393	if (ret)
394		ivpu_warn(vdev, "Failed to power down HW: %d\n", ret);
395
396	return ret;
397}
398
399static const struct file_operations ivpu_fops = {
400	.owner		= THIS_MODULE,
401	DRM_ACCEL_FOPS,
402};
403
404static const struct drm_driver driver = {
405	.driver_features = DRIVER_GEM | DRIVER_COMPUTE_ACCEL,
406
407	.open = ivpu_open,
408	.postclose = ivpu_postclose,
409
410	.gem_create_object = ivpu_gem_create_object,
411	.gem_prime_import_sg_table = drm_gem_shmem_prime_import_sg_table,
412
413	.ioctls = ivpu_drm_ioctls,
414	.num_ioctls = ARRAY_SIZE(ivpu_drm_ioctls),
415	.fops = &ivpu_fops,
416
417	.name = DRIVER_NAME,
418	.desc = DRIVER_DESC,
419	.date = DRIVER_DATE,
420	.major = DRM_IVPU_DRIVER_MAJOR,
421	.minor = DRM_IVPU_DRIVER_MINOR,
422};
423
424static irqreturn_t ivpu_irq_thread_handler(int irq, void *arg)
425{
426	struct ivpu_device *vdev = arg;
427
428	return ivpu_ipc_irq_thread_handler(vdev);
429}
430
431static int ivpu_irq_init(struct ivpu_device *vdev)
432{
433	struct pci_dev *pdev = to_pci_dev(vdev->drm.dev);
434	int ret;
435
436	ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI | PCI_IRQ_MSIX);
437	if (ret < 0) {
438		ivpu_err(vdev, "Failed to allocate a MSI IRQ: %d\n", ret);
439		return ret;
440	}
441
442	vdev->irq = pci_irq_vector(pdev, 0);
443
444	ret = devm_request_threaded_irq(vdev->drm.dev, vdev->irq, vdev->hw->ops->irq_handler,
445					ivpu_irq_thread_handler, IRQF_NO_AUTOEN, DRIVER_NAME, vdev);
446	if (ret)
447		ivpu_err(vdev, "Failed to request an IRQ %d\n", ret);
448
449	return ret;
450}
451
452static int ivpu_pci_init(struct ivpu_device *vdev)
453{
454	struct pci_dev *pdev = to_pci_dev(vdev->drm.dev);
455	struct resource *bar0 = &pdev->resource[0];
456	struct resource *bar4 = &pdev->resource[4];
457	int ret;
458
459	ivpu_dbg(vdev, MISC, "Mapping BAR0 (RegV) %pR\n", bar0);
460	vdev->regv = devm_ioremap_resource(vdev->drm.dev, bar0);
461	if (IS_ERR(vdev->regv)) {
462		ivpu_err(vdev, "Failed to map bar 0: %pe\n", vdev->regv);
463		return PTR_ERR(vdev->regv);
464	}
465
466	ivpu_dbg(vdev, MISC, "Mapping BAR4 (RegB) %pR\n", bar4);
467	vdev->regb = devm_ioremap_resource(vdev->drm.dev, bar4);
468	if (IS_ERR(vdev->regb)) {
469		ivpu_err(vdev, "Failed to map bar 4: %pe\n", vdev->regb);
470		return PTR_ERR(vdev->regb);
471	}
472
473	ret = dma_set_mask_and_coherent(vdev->drm.dev, DMA_BIT_MASK(vdev->hw->dma_bits));
474	if (ret) {
475		ivpu_err(vdev, "Failed to set DMA mask: %d\n", ret);
476		return ret;
477	}
478	dma_set_max_seg_size(vdev->drm.dev, UINT_MAX);
479
480	/* Clear any pending errors */
481	pcie_capability_clear_word(pdev, PCI_EXP_DEVSTA, 0x3f);
482
483	/* NPU does not require 10m D3hot delay */
484	pdev->d3hot_delay = 0;
485
486	ret = pcim_enable_device(pdev);
487	if (ret) {
488		ivpu_err(vdev, "Failed to enable PCI device: %d\n", ret);
489		return ret;
490	}
491
492	pci_set_master(pdev);
493
494	return 0;
495}
496
497static int ivpu_dev_init(struct ivpu_device *vdev)
498{
499	int ret;
500
501	vdev->hw = drmm_kzalloc(&vdev->drm, sizeof(*vdev->hw), GFP_KERNEL);
502	if (!vdev->hw)
503		return -ENOMEM;
504
505	vdev->mmu = drmm_kzalloc(&vdev->drm, sizeof(*vdev->mmu), GFP_KERNEL);
506	if (!vdev->mmu)
507		return -ENOMEM;
508
509	vdev->fw = drmm_kzalloc(&vdev->drm, sizeof(*vdev->fw), GFP_KERNEL);
510	if (!vdev->fw)
511		return -ENOMEM;
512
513	vdev->ipc = drmm_kzalloc(&vdev->drm, sizeof(*vdev->ipc), GFP_KERNEL);
514	if (!vdev->ipc)
515		return -ENOMEM;
516
517	vdev->pm = drmm_kzalloc(&vdev->drm, sizeof(*vdev->pm), GFP_KERNEL);
518	if (!vdev->pm)
519		return -ENOMEM;
520
521	if (ivpu_hw_gen(vdev) >= IVPU_HW_40XX) {
522		vdev->hw->ops = &ivpu_hw_40xx_ops;
523		vdev->hw->dma_bits = 48;
524	} else {
525		vdev->hw->ops = &ivpu_hw_37xx_ops;
526		vdev->hw->dma_bits = 38;
527	}
528
529	vdev->platform = IVPU_PLATFORM_INVALID;
530	vdev->context_xa_limit.min = IVPU_USER_CONTEXT_MIN_SSID;
531	vdev->context_xa_limit.max = IVPU_USER_CONTEXT_MAX_SSID;
532	atomic64_set(&vdev->unique_id_counter, 0);
533	xa_init_flags(&vdev->context_xa, XA_FLAGS_ALLOC);
534	xa_init_flags(&vdev->submitted_jobs_xa, XA_FLAGS_ALLOC1);
535	lockdep_set_class(&vdev->submitted_jobs_xa.xa_lock, &submitted_jobs_xa_lock_class_key);
536	INIT_LIST_HEAD(&vdev->bo_list);
537
538	ret = drmm_mutex_init(&vdev->drm, &vdev->context_list_lock);
539	if (ret)
540		goto err_xa_destroy;
541
542	ret = drmm_mutex_init(&vdev->drm, &vdev->bo_list_lock);
543	if (ret)
544		goto err_xa_destroy;
545
546	ret = ivpu_pci_init(vdev);
547	if (ret)
548		goto err_xa_destroy;
549
550	ret = ivpu_irq_init(vdev);
551	if (ret)
552		goto err_xa_destroy;
553
554	/* Init basic HW info based on buttress registers which are accessible before power up */
555	ret = ivpu_hw_info_init(vdev);
556	if (ret)
557		goto err_xa_destroy;
558
559	/* Power up early so the rest of init code can access VPU registers */
560	ret = ivpu_hw_power_up(vdev);
561	if (ret)
562		goto err_power_down;
563
564	ret = ivpu_mmu_global_context_init(vdev);
565	if (ret)
566		goto err_power_down;
567
568	ret = ivpu_mmu_init(vdev);
569	if (ret)
570		goto err_mmu_gctx_fini;
571
572	ret = ivpu_mmu_reserved_context_init(vdev);
573	if (ret)
574		goto err_mmu_gctx_fini;
575
576	ret = ivpu_fw_init(vdev);
577	if (ret)
578		goto err_mmu_rctx_fini;
579
580	ret = ivpu_ipc_init(vdev);
581	if (ret)
582		goto err_fw_fini;
583
584	ivpu_pm_init(vdev);
585
586	ret = ivpu_boot(vdev);
587	if (ret)
588		goto err_ipc_fini;
589
590	ivpu_job_done_consumer_init(vdev);
591	ivpu_pm_enable(vdev);
592
593	return 0;
594
595err_ipc_fini:
596	ivpu_ipc_fini(vdev);
597err_fw_fini:
598	ivpu_fw_fini(vdev);
599err_mmu_rctx_fini:
600	ivpu_mmu_reserved_context_fini(vdev);
601err_mmu_gctx_fini:
602	ivpu_mmu_global_context_fini(vdev);
603err_power_down:
604	ivpu_hw_power_down(vdev);
605	if (IVPU_WA(d3hot_after_power_off))
606		pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D3hot);
607err_xa_destroy:
608	xa_destroy(&vdev->submitted_jobs_xa);
609	xa_destroy(&vdev->context_xa);
610	return ret;
611}
612
613static void ivpu_bo_unbind_all_user_contexts(struct ivpu_device *vdev)
614{
615	struct ivpu_file_priv *file_priv;
616	unsigned long ctx_id;
617
618	mutex_lock(&vdev->context_list_lock);
619
620	xa_for_each(&vdev->context_xa, ctx_id, file_priv)
621		file_priv_unbind(vdev, file_priv);
622
623	mutex_unlock(&vdev->context_list_lock);
624}
625
626static void ivpu_dev_fini(struct ivpu_device *vdev)
627{
628	ivpu_pm_disable(vdev);
629	ivpu_shutdown(vdev);
630	if (IVPU_WA(d3hot_after_power_off))
631		pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D3hot);
632
633	ivpu_jobs_abort_all(vdev);
634	ivpu_job_done_consumer_fini(vdev);
635	ivpu_pm_cancel_recovery(vdev);
636	ivpu_bo_unbind_all_user_contexts(vdev);
637
638	ivpu_ipc_fini(vdev);
639	ivpu_fw_fini(vdev);
640	ivpu_mmu_reserved_context_fini(vdev);
641	ivpu_mmu_global_context_fini(vdev);
642
643	drm_WARN_ON(&vdev->drm, !xa_empty(&vdev->submitted_jobs_xa));
644	xa_destroy(&vdev->submitted_jobs_xa);
645	drm_WARN_ON(&vdev->drm, !xa_empty(&vdev->context_xa));
646	xa_destroy(&vdev->context_xa);
647}
648
649static struct pci_device_id ivpu_pci_ids[] = {
650	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_MTL) },
651	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_ARL) },
652	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_LNL) },
653	{ }
654};
655MODULE_DEVICE_TABLE(pci, ivpu_pci_ids);
656
657static int ivpu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
658{
659	struct ivpu_device *vdev;
660	int ret;
661
662	vdev = devm_drm_dev_alloc(&pdev->dev, &driver, struct ivpu_device, drm);
663	if (IS_ERR(vdev))
664		return PTR_ERR(vdev);
665
666	pci_set_drvdata(pdev, vdev);
667
668	ret = ivpu_dev_init(vdev);
669	if (ret)
670		return ret;
671
672	ivpu_debugfs_init(vdev);
673
674	ret = drm_dev_register(&vdev->drm, 0);
675	if (ret) {
676		dev_err(&pdev->dev, "Failed to register DRM device: %d\n", ret);
677		ivpu_dev_fini(vdev);
678	}
679
680	return ret;
681}
682
683static void ivpu_remove(struct pci_dev *pdev)
684{
685	struct ivpu_device *vdev = pci_get_drvdata(pdev);
686
687	drm_dev_unplug(&vdev->drm);
688	ivpu_dev_fini(vdev);
689}
690
691static const struct dev_pm_ops ivpu_drv_pci_pm = {
692	SET_SYSTEM_SLEEP_PM_OPS(ivpu_pm_suspend_cb, ivpu_pm_resume_cb)
693	SET_RUNTIME_PM_OPS(ivpu_pm_runtime_suspend_cb, ivpu_pm_runtime_resume_cb, NULL)
694};
695
696static const struct pci_error_handlers ivpu_drv_pci_err = {
697	.reset_prepare = ivpu_pm_reset_prepare_cb,
698	.reset_done = ivpu_pm_reset_done_cb,
699};
700
701static struct pci_driver ivpu_pci_driver = {
702	.name = KBUILD_MODNAME,
703	.id_table = ivpu_pci_ids,
704	.probe = ivpu_probe,
705	.remove = ivpu_remove,
706	.driver = {
707		.pm = &ivpu_drv_pci_pm,
708	},
709	.err_handler = &ivpu_drv_pci_err,
710};
711
712module_pci_driver(ivpu_pci_driver);
713
714MODULE_AUTHOR("Intel Corporation");
715MODULE_DESCRIPTION(DRIVER_DESC);
716MODULE_LICENSE("GPL and additional rights");
717MODULE_VERSION(DRIVER_VERSION_STR);
drivers/accel/ivpu/ivpu_drv.c (v6.13.7)
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * Copyright (C) 2020-2024 Intel Corporation
  4 */
  5
  6#include <linux/firmware.h>
  7#include <linux/module.h>
  8#include <linux/pci.h>
  9#include <linux/pm_runtime.h>
 10#include <generated/utsrelease.h>
 11
 12#include <drm/drm_accel.h>
 13#include <drm/drm_file.h>
 14#include <drm/drm_gem.h>
 15#include <drm/drm_ioctl.h>
 16#include <drm/drm_prime.h>
 17
 18#include "ivpu_coredump.h"
 19#include "ivpu_debugfs.h"
 20#include "ivpu_drv.h"
 21#include "ivpu_fw.h"
 22#include "ivpu_fw_log.h"
 23#include "ivpu_gem.h"
 24#include "ivpu_hw.h"
 25#include "ivpu_ipc.h"
 26#include "ivpu_job.h"
 27#include "ivpu_jsm_msg.h"
 28#include "ivpu_mmu.h"
 29#include "ivpu_mmu_context.h"
 30#include "ivpu_ms.h"
 31#include "ivpu_pm.h"
 32#include "ivpu_sysfs.h"
 33#include "vpu_boot_api.h"
 34
 35#ifndef DRIVER_VERSION_STR
 36#define DRIVER_VERSION_STR "1.0.0 " UTS_RELEASE
 37#endif
 38
 39static struct lock_class_key submitted_jobs_xa_lock_class_key;
 40
 41int ivpu_dbg_mask;
 42module_param_named(dbg_mask, ivpu_dbg_mask, int, 0644);
 43MODULE_PARM_DESC(dbg_mask, "Driver debug mask. See IVPU_DBG_* macros.");
 44
 45int ivpu_test_mode;
 46#if IS_ENABLED(CONFIG_DRM_ACCEL_IVPU_DEBUG)
 47module_param_named_unsafe(test_mode, ivpu_test_mode, int, 0644);
 48MODULE_PARM_DESC(test_mode, "Test mode mask. See IVPU_TEST_MODE_* macros.");
 49#endif
 50
 51u8 ivpu_pll_min_ratio;
 52module_param_named(pll_min_ratio, ivpu_pll_min_ratio, byte, 0644);
 53MODULE_PARM_DESC(pll_min_ratio, "Minimum PLL ratio used to set NPU frequency");
 54
 55u8 ivpu_pll_max_ratio = U8_MAX;
 56module_param_named(pll_max_ratio, ivpu_pll_max_ratio, byte, 0644);
 57MODULE_PARM_DESC(pll_max_ratio, "Maximum PLL ratio used to set NPU frequency");
 58
 59int ivpu_sched_mode = IVPU_SCHED_MODE_AUTO;
 60module_param_named(sched_mode, ivpu_sched_mode, int, 0444);
 61MODULE_PARM_DESC(sched_mode, "Scheduler mode: -1 - Use default scheduler, 0 - Use OS scheduler, 1 - Use HW scheduler");
 62
 63bool ivpu_disable_mmu_cont_pages;
 64module_param_named(disable_mmu_cont_pages, ivpu_disable_mmu_cont_pages, bool, 0444);
 65MODULE_PARM_DESC(disable_mmu_cont_pages, "Disable MMU contiguous pages optimization");
 66
 67bool ivpu_force_snoop;
 68module_param_named(force_snoop, ivpu_force_snoop, bool, 0444);
 69MODULE_PARM_DESC(force_snoop, "Force snooping for NPU host memory access");
 70
 71struct ivpu_file_priv *ivpu_file_priv_get(struct ivpu_file_priv *file_priv)
 72{
 73	struct ivpu_device *vdev = file_priv->vdev;
 74
 75	kref_get(&file_priv->ref);
 76
 77	ivpu_dbg(vdev, KREF, "file_priv get: ctx %u refcount %u\n",
 78		 file_priv->ctx.id, kref_read(&file_priv->ref));
 79
 80	return file_priv;
 81}
 82
 83static void file_priv_unbind(struct ivpu_device *vdev, struct ivpu_file_priv *file_priv)
 84{
 85	mutex_lock(&file_priv->lock);
 86	if (file_priv->bound) {
 87		ivpu_dbg(vdev, FILE, "file_priv unbind: ctx %u\n", file_priv->ctx.id);
 88
 89		ivpu_cmdq_release_all_locked(file_priv);
 90		ivpu_bo_unbind_all_bos_from_context(vdev, &file_priv->ctx);
 91		ivpu_mmu_context_fini(vdev, &file_priv->ctx);
 92		file_priv->bound = false;
 93		drm_WARN_ON(&vdev->drm, !xa_erase_irq(&vdev->context_xa, file_priv->ctx.id));
 94	}
 95	mutex_unlock(&file_priv->lock);
 96}
 97
 98static void file_priv_release(struct kref *ref)
 99{
100	struct ivpu_file_priv *file_priv = container_of(ref, struct ivpu_file_priv, ref);
101	struct ivpu_device *vdev = file_priv->vdev;
102
103	ivpu_dbg(vdev, FILE, "file_priv release: ctx %u bound %d\n",
104		 file_priv->ctx.id, (bool)file_priv->bound);
105
106	pm_runtime_get_sync(vdev->drm.dev);
107	mutex_lock(&vdev->context_list_lock);
108	file_priv_unbind(vdev, file_priv);
109	drm_WARN_ON(&vdev->drm, !xa_empty(&file_priv->cmdq_xa));
110	xa_destroy(&file_priv->cmdq_xa);
111	mutex_unlock(&vdev->context_list_lock);
112	pm_runtime_put_autosuspend(vdev->drm.dev);
113
114	mutex_destroy(&file_priv->ms_lock);
115	mutex_destroy(&file_priv->lock);
116	kfree(file_priv);
117}
118
119void ivpu_file_priv_put(struct ivpu_file_priv **link)
120{
121	struct ivpu_file_priv *file_priv = *link;
122	struct ivpu_device *vdev = file_priv->vdev;
123
124	ivpu_dbg(vdev, KREF, "file_priv put: ctx %u refcount %u\n",
125		 file_priv->ctx.id, kref_read(&file_priv->ref));
126
127	*link = NULL;
128	kref_put(&file_priv->ref, file_priv_release);
129}
130
131static int ivpu_get_capabilities(struct ivpu_device *vdev, struct drm_ivpu_param *args)
132{
133	switch (args->index) {
134	case DRM_IVPU_CAP_METRIC_STREAMER:
135		args->value = 1;
136		break;
137	case DRM_IVPU_CAP_DMA_MEMORY_RANGE:
138		args->value = 1;
139		break;
140	default:
141		return -EINVAL;
142	}
143
144	return 0;
145}
146
147static int ivpu_get_param_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
148{
149	struct ivpu_file_priv *file_priv = file->driver_priv;
150	struct ivpu_device *vdev = file_priv->vdev;
151	struct pci_dev *pdev = to_pci_dev(vdev->drm.dev);
152	struct drm_ivpu_param *args = data;
153	int ret = 0;
154	int idx;
155
156	if (!drm_dev_enter(dev, &idx))
157		return -ENODEV;
158
159	switch (args->param) {
160	case DRM_IVPU_PARAM_DEVICE_ID:
161		args->value = pdev->device;
162		break;
163	case DRM_IVPU_PARAM_DEVICE_REVISION:
164		args->value = pdev->revision;
165		break;
166	case DRM_IVPU_PARAM_PLATFORM_TYPE:
167		args->value = vdev->platform;
168		break;
169	case DRM_IVPU_PARAM_CORE_CLOCK_RATE:
170		args->value = ivpu_hw_ratio_to_freq(vdev, vdev->hw->pll.max_ratio);
171		break;
172	case DRM_IVPU_PARAM_NUM_CONTEXTS:
173		args->value = ivpu_get_context_count(vdev);
174		break;
175	case DRM_IVPU_PARAM_CONTEXT_BASE_ADDRESS:
176		args->value = vdev->hw->ranges.user.start;
177		break;
178	case DRM_IVPU_PARAM_CONTEXT_ID:
179		args->value = file_priv->ctx.id;
180		break;
181	case DRM_IVPU_PARAM_FW_API_VERSION:
182		if (args->index < VPU_FW_API_VER_NUM) {
183			struct vpu_firmware_header *fw_hdr;
184
185			fw_hdr = (struct vpu_firmware_header *)vdev->fw->file->data;
186			args->value = fw_hdr->api_version[args->index];
187		} else {
188			ret = -EINVAL;
189		}
190		break;
191	case DRM_IVPU_PARAM_ENGINE_HEARTBEAT:
192		ret = ivpu_jsm_get_heartbeat(vdev, args->index, &args->value);
193		break;
194	case DRM_IVPU_PARAM_UNIQUE_INFERENCE_ID:
195		args->value = (u64)atomic64_inc_return(&vdev->unique_id_counter);
196		break;
197	case DRM_IVPU_PARAM_TILE_CONFIG:
198		args->value = vdev->hw->tile_fuse;
199		break;
200	case DRM_IVPU_PARAM_SKU:
201		args->value = vdev->hw->sku;
202		break;
203	case DRM_IVPU_PARAM_CAPABILITIES:
204		ret = ivpu_get_capabilities(vdev, args);
205		break;
206	default:
207		ret = -EINVAL;
208		break;
209	}
210
211	drm_dev_exit(idx);
212	return ret;
213}
214
215static int ivpu_set_param_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
216{
217	struct drm_ivpu_param *args = data;
218	int ret = 0;
219
220	switch (args->param) {
221	default:
222		ret = -EINVAL;
223	}
224
225	return ret;
226}
227
228static int ivpu_open(struct drm_device *dev, struct drm_file *file)
229{
230	struct ivpu_device *vdev = to_ivpu_device(dev);
231	struct ivpu_file_priv *file_priv;
232	u32 ctx_id;
233	int idx, ret;
234
235	if (!drm_dev_enter(dev, &idx))
236		return -ENODEV;
237
238	file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
239	if (!file_priv) {
240		ret = -ENOMEM;
241		goto err_dev_exit;
242	}
243
244	INIT_LIST_HEAD(&file_priv->ms_instance_list);
245
246	file_priv->vdev = vdev;
247	file_priv->bound = true;
248	kref_init(&file_priv->ref);
249	mutex_init(&file_priv->lock);
250	mutex_init(&file_priv->ms_lock);
251
252	mutex_lock(&vdev->context_list_lock);
253
254	ret = xa_alloc_irq(&vdev->context_xa, &ctx_id, file_priv,
255			   vdev->context_xa_limit, GFP_KERNEL);
256	if (ret) {
257		ivpu_err(vdev, "Failed to allocate context id: %d\n", ret);
258		goto err_unlock;
259	}
260
261	ivpu_mmu_context_init(vdev, &file_priv->ctx, ctx_id);
262
263	file_priv->job_limit.min = FIELD_PREP(IVPU_JOB_ID_CONTEXT_MASK, (file_priv->ctx.id - 1));
264	file_priv->job_limit.max = file_priv->job_limit.min | IVPU_JOB_ID_JOB_MASK;
265
266	xa_init_flags(&file_priv->cmdq_xa, XA_FLAGS_ALLOC1);
267	file_priv->cmdq_limit.min = IVPU_CMDQ_MIN_ID;
268	file_priv->cmdq_limit.max = IVPU_CMDQ_MAX_ID;
269
270	mutex_unlock(&vdev->context_list_lock);
271	drm_dev_exit(idx);
272
273	file->driver_priv = file_priv;
274
275	ivpu_dbg(vdev, FILE, "file_priv create: ctx %u process %s pid %d\n",
276		 ctx_id, current->comm, task_pid_nr(current));
277
278	return 0;
279
280err_unlock:
281	mutex_unlock(&vdev->context_list_lock);
282	mutex_destroy(&file_priv->ms_lock);
283	mutex_destroy(&file_priv->lock);
284	kfree(file_priv);
285err_dev_exit:
286	drm_dev_exit(idx);
287	return ret;
288}
289
290static void ivpu_postclose(struct drm_device *dev, struct drm_file *file)
291{
292	struct ivpu_file_priv *file_priv = file->driver_priv;
293	struct ivpu_device *vdev = to_ivpu_device(dev);
294
295	ivpu_dbg(vdev, FILE, "file_priv close: ctx %u process %s pid %d\n",
296		 file_priv->ctx.id, current->comm, task_pid_nr(current));
297
298	ivpu_ms_cleanup(file_priv);
299	ivpu_file_priv_put(&file_priv);
300}
301
302static const struct drm_ioctl_desc ivpu_drm_ioctls[] = {
303	DRM_IOCTL_DEF_DRV(IVPU_GET_PARAM, ivpu_get_param_ioctl, 0),
304	DRM_IOCTL_DEF_DRV(IVPU_SET_PARAM, ivpu_set_param_ioctl, 0),
305	DRM_IOCTL_DEF_DRV(IVPU_BO_CREATE, ivpu_bo_create_ioctl, 0),
306	DRM_IOCTL_DEF_DRV(IVPU_BO_INFO, ivpu_bo_info_ioctl, 0),
307	DRM_IOCTL_DEF_DRV(IVPU_SUBMIT, ivpu_submit_ioctl, 0),
308	DRM_IOCTL_DEF_DRV(IVPU_BO_WAIT, ivpu_bo_wait_ioctl, 0),
309	DRM_IOCTL_DEF_DRV(IVPU_METRIC_STREAMER_START, ivpu_ms_start_ioctl, 0),
310	DRM_IOCTL_DEF_DRV(IVPU_METRIC_STREAMER_GET_DATA, ivpu_ms_get_data_ioctl, 0),
311	DRM_IOCTL_DEF_DRV(IVPU_METRIC_STREAMER_STOP, ivpu_ms_stop_ioctl, 0),
312	DRM_IOCTL_DEF_DRV(IVPU_METRIC_STREAMER_GET_INFO, ivpu_ms_get_info_ioctl, 0),
313};
314
315static int ivpu_wait_for_ready(struct ivpu_device *vdev)
316{
317	struct ivpu_ipc_consumer cons;
318	struct ivpu_ipc_hdr ipc_hdr;
319	unsigned long timeout;
320	int ret;
321
322	if (ivpu_test_mode & IVPU_TEST_MODE_FW_TEST)
323		return 0;
324
325	ivpu_ipc_consumer_add(vdev, &cons, IVPU_IPC_CHAN_BOOT_MSG, NULL);
326
327	timeout = jiffies + msecs_to_jiffies(vdev->timeout.boot);
328	while (1) {
329		ivpu_ipc_irq_handler(vdev);
330		ret = ivpu_ipc_receive(vdev, &cons, &ipc_hdr, NULL, 0);
331		if (ret != -ETIMEDOUT || time_after_eq(jiffies, timeout))
332			break;
333
334		cond_resched();
335	}
336
337	ivpu_ipc_consumer_del(vdev, &cons);
338
339	if (!ret && ipc_hdr.data_addr != IVPU_IPC_BOOT_MSG_DATA_ADDR) {
340		ivpu_err(vdev, "Invalid NPU ready message: 0x%x\n",
341			 ipc_hdr.data_addr);
342		return -EIO;
343	}
344
345	if (!ret)
346		ivpu_dbg(vdev, PM, "NPU ready message received successfully\n");
347
348	return ret;
349}
350
351static int ivpu_hw_sched_init(struct ivpu_device *vdev)
352{
353	int ret = 0;
354
355	if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW) {
356		ret = ivpu_jsm_hws_setup_priority_bands(vdev);
357		if (ret) {
358			ivpu_err(vdev, "Failed to enable hw scheduler: %d", ret);
359			return ret;
360		}
361	}
362
363	return ret;
364}
365
366/**
367 * ivpu_boot() - Start VPU firmware
368 * @vdev: VPU device
369 *
370 * This function is paired with ivpu_shutdown() but it doesn't power up the
371 * VPU because power up has to be called very early in ivpu_probe().
372 */
373int ivpu_boot(struct ivpu_device *vdev)
374{
375	int ret;
376
377	/* Update boot params located at first 4KB of FW memory */
378	ivpu_fw_boot_params_setup(vdev, ivpu_bo_vaddr(vdev->fw->mem));
379
380	ret = ivpu_hw_boot_fw(vdev);
381	if (ret) {
382		ivpu_err(vdev, "Failed to start the firmware: %d\n", ret);
383		return ret;
384	}
385
386	ret = ivpu_wait_for_ready(vdev);
387	if (ret) {
388		ivpu_err(vdev, "Failed to boot the firmware: %d\n", ret);
389		goto err_diagnose_failure;
390	}
391
392	ivpu_hw_irq_clear(vdev);
393	enable_irq(vdev->irq);
394	ivpu_hw_irq_enable(vdev);
395	ivpu_ipc_enable(vdev);
396
397	if (ivpu_fw_is_cold_boot(vdev)) {
398		ret = ivpu_pm_dct_init(vdev);
399		if (ret)
400			goto err_disable_ipc;
401
402		ret = ivpu_hw_sched_init(vdev);
403		if (ret)
404			goto err_disable_ipc;
405	}
406
407	return 0;
408
409err_disable_ipc:
410	ivpu_ipc_disable(vdev);
411	ivpu_hw_irq_disable(vdev);
412	disable_irq(vdev->irq);
413err_diagnose_failure:
414	ivpu_hw_diagnose_failure(vdev);
415	ivpu_mmu_evtq_dump(vdev);
416	ivpu_dev_coredump(vdev);
417	return ret;
418}
419
420void ivpu_prepare_for_reset(struct ivpu_device *vdev)
421{
422	ivpu_hw_irq_disable(vdev);
423	disable_irq(vdev->irq);
424	ivpu_ipc_disable(vdev);
425	ivpu_mmu_disable(vdev);
426}
427
428int ivpu_shutdown(struct ivpu_device *vdev)
429{
430	int ret;
431
432	/* Save PCI state before powering down as it sometimes gets corrupted if NPU hangs */
433	pci_save_state(to_pci_dev(vdev->drm.dev));
434
435	ret = ivpu_hw_power_down(vdev);
436	if (ret)
437		ivpu_warn(vdev, "Failed to power down HW: %d\n", ret);
438
439	pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D3hot);
440
441	return ret;
442}
443
444static const struct file_operations ivpu_fops = {
445	.owner		= THIS_MODULE,
446	DRM_ACCEL_FOPS,
447};
448
449static const struct drm_driver driver = {
450	.driver_features = DRIVER_GEM | DRIVER_COMPUTE_ACCEL,
451
452	.open = ivpu_open,
453	.postclose = ivpu_postclose,
454
455	.gem_create_object = ivpu_gem_create_object,
456	.gem_prime_import_sg_table = drm_gem_shmem_prime_import_sg_table,
457
458	.ioctls = ivpu_drm_ioctls,
459	.num_ioctls = ARRAY_SIZE(ivpu_drm_ioctls),
460	.fops = &ivpu_fops,
461
462	.name = DRIVER_NAME,
463	.desc = DRIVER_DESC,
464
465#ifdef DRIVER_DATE
466	.date = DRIVER_DATE,
467	.major = DRIVER_MAJOR,
468	.minor = DRIVER_MINOR,
469	.patchlevel = DRIVER_PATCHLEVEL,
470#else
471	.date = UTS_RELEASE,
472	.major = 1,
473#endif
474};
475
476static void ivpu_context_abort_invalid(struct ivpu_device *vdev)
477{
478	struct ivpu_file_priv *file_priv;
479	unsigned long ctx_id;
480
481	mutex_lock(&vdev->context_list_lock);
482
483	xa_for_each(&vdev->context_xa, ctx_id, file_priv) {
484		if (!file_priv->has_mmu_faults || file_priv->aborted)
485			continue;
486
487		mutex_lock(&file_priv->lock);
488		ivpu_context_abort_locked(file_priv);
489		file_priv->aborted = true;
490		mutex_unlock(&file_priv->lock);
491	}
492
493	mutex_unlock(&vdev->context_list_lock);
494}
495
496static irqreturn_t ivpu_irq_thread_handler(int irq, void *arg)
497{
498	struct ivpu_device *vdev = arg;
499	u8 irq_src;
500
501	if (kfifo_is_empty(&vdev->hw->irq.fifo))
502		return IRQ_NONE;
503
504	while (kfifo_get(&vdev->hw->irq.fifo, &irq_src)) {
505		switch (irq_src) {
506		case IVPU_HW_IRQ_SRC_IPC:
507			ivpu_ipc_irq_thread_handler(vdev);
508			break;
509		case IVPU_HW_IRQ_SRC_MMU_EVTQ:
510			ivpu_context_abort_invalid(vdev);
511			break;
512		case IVPU_HW_IRQ_SRC_DCT:
513			ivpu_pm_dct_irq_thread_handler(vdev);
514			break;
515		default:
516			ivpu_err_ratelimited(vdev, "Unknown IRQ source: %u\n", irq_src);
517			break;
518		}
519	}
520
521	return IRQ_HANDLED;
522}
523
524static int ivpu_irq_init(struct ivpu_device *vdev)
525{
526	struct pci_dev *pdev = to_pci_dev(vdev->drm.dev);
527	int ret;
528
529	ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI | PCI_IRQ_MSIX);
530	if (ret < 0) {
531		ivpu_err(vdev, "Failed to allocate a MSI IRQ: %d\n", ret);
532		return ret;
533	}
534
535	ivpu_irq_handlers_init(vdev);
536
537	vdev->irq = pci_irq_vector(pdev, 0);
538
539	ret = devm_request_threaded_irq(vdev->drm.dev, vdev->irq, ivpu_hw_irq_handler,
540					ivpu_irq_thread_handler, IRQF_NO_AUTOEN, DRIVER_NAME, vdev);
541	if (ret)
542		ivpu_err(vdev, "Failed to request an IRQ %d\n", ret);
543
544	return ret;
545}
546
547static int ivpu_pci_init(struct ivpu_device *vdev)
548{
549	struct pci_dev *pdev = to_pci_dev(vdev->drm.dev);
550	struct resource *bar0 = &pdev->resource[0];
551	struct resource *bar4 = &pdev->resource[4];
552	int ret;
553
554	ivpu_dbg(vdev, MISC, "Mapping BAR0 (RegV) %pR\n", bar0);
555	vdev->regv = devm_ioremap_resource(vdev->drm.dev, bar0);
556	if (IS_ERR(vdev->regv)) {
557		ivpu_err(vdev, "Failed to map bar 0: %pe\n", vdev->regv);
558		return PTR_ERR(vdev->regv);
559	}
560
561	ivpu_dbg(vdev, MISC, "Mapping BAR4 (RegB) %pR\n", bar4);
562	vdev->regb = devm_ioremap_resource(vdev->drm.dev, bar4);
563	if (IS_ERR(vdev->regb)) {
564		ivpu_err(vdev, "Failed to map bar 4: %pe\n", vdev->regb);
565		return PTR_ERR(vdev->regb);
566	}
567
568	ret = dma_set_mask_and_coherent(vdev->drm.dev, DMA_BIT_MASK(vdev->hw->dma_bits));
569	if (ret) {
570		ivpu_err(vdev, "Failed to set DMA mask: %d\n", ret);
571		return ret;
572	}
573	dma_set_max_seg_size(vdev->drm.dev, UINT_MAX);
574
575	/* Clear any pending errors */
576	pcie_capability_clear_word(pdev, PCI_EXP_DEVSTA, 0x3f);
577
578	/* NPU does not require 10m D3hot delay */
579	pdev->d3hot_delay = 0;
580
581	ret = pcim_enable_device(pdev);
582	if (ret) {
583		ivpu_err(vdev, "Failed to enable PCI device: %d\n", ret);
584		return ret;
585	}
586
587	pci_set_master(pdev);
588
589	return 0;
590}
591
592static int ivpu_dev_init(struct ivpu_device *vdev)
593{
594	int ret;
595
596	vdev->hw = drmm_kzalloc(&vdev->drm, sizeof(*vdev->hw), GFP_KERNEL);
597	if (!vdev->hw)
598		return -ENOMEM;
599
600	vdev->mmu = drmm_kzalloc(&vdev->drm, sizeof(*vdev->mmu), GFP_KERNEL);
601	if (!vdev->mmu)
602		return -ENOMEM;
603
604	vdev->fw = drmm_kzalloc(&vdev->drm, sizeof(*vdev->fw), GFP_KERNEL);
605	if (!vdev->fw)
606		return -ENOMEM;
607
608	vdev->ipc = drmm_kzalloc(&vdev->drm, sizeof(*vdev->ipc), GFP_KERNEL);
609	if (!vdev->ipc)
610		return -ENOMEM;
611
612	vdev->pm = drmm_kzalloc(&vdev->drm, sizeof(*vdev->pm), GFP_KERNEL);
613	if (!vdev->pm)
614		return -ENOMEM;
615
616	if (ivpu_hw_ip_gen(vdev) >= IVPU_HW_IP_40XX)
617		vdev->hw->dma_bits = 48;
618	else
619		vdev->hw->dma_bits = 38;
620
621	vdev->platform = IVPU_PLATFORM_INVALID;
622	vdev->context_xa_limit.min = IVPU_USER_CONTEXT_MIN_SSID;
623	vdev->context_xa_limit.max = IVPU_USER_CONTEXT_MAX_SSID;
624	atomic64_set(&vdev->unique_id_counter, 0);
625	xa_init_flags(&vdev->context_xa, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
626	xa_init_flags(&vdev->submitted_jobs_xa, XA_FLAGS_ALLOC1);
627	xa_init_flags(&vdev->db_xa, XA_FLAGS_ALLOC1);
628	lockdep_set_class(&vdev->submitted_jobs_xa.xa_lock, &submitted_jobs_xa_lock_class_key);
629	INIT_LIST_HEAD(&vdev->bo_list);
630
631	vdev->db_limit.min = IVPU_MIN_DB;
632	vdev->db_limit.max = IVPU_MAX_DB;
633
634	ret = drmm_mutex_init(&vdev->drm, &vdev->context_list_lock);
635	if (ret)
636		goto err_xa_destroy;
637
638	ret = drmm_mutex_init(&vdev->drm, &vdev->bo_list_lock);
639	if (ret)
640		goto err_xa_destroy;
641
642	ret = ivpu_pci_init(vdev);
643	if (ret)
644		goto err_xa_destroy;
645
646	ret = ivpu_irq_init(vdev);
647	if (ret)
648		goto err_xa_destroy;
649
650	/* Init basic HW info based on buttress registers which are accessible before power up */
651	ret = ivpu_hw_init(vdev);
652	if (ret)
653		goto err_xa_destroy;
654
655	/* Power up early so the rest of init code can access VPU registers */
656	ret = ivpu_hw_power_up(vdev);
657	if (ret)
658		goto err_shutdown;
659
660	ivpu_mmu_global_context_init(vdev);
661
662	ret = ivpu_mmu_init(vdev);
663	if (ret)
664		goto err_mmu_gctx_fini;
665
666	ret = ivpu_mmu_reserved_context_init(vdev);
667	if (ret)
668		goto err_mmu_gctx_fini;
669
670	ret = ivpu_fw_init(vdev);
671	if (ret)
672		goto err_mmu_rctx_fini;
673
674	ret = ivpu_ipc_init(vdev);
675	if (ret)
676		goto err_fw_fini;
677
678	ivpu_pm_init(vdev);
679
680	ret = ivpu_boot(vdev);
681	if (ret)
682		goto err_ipc_fini;
683
684	ivpu_job_done_consumer_init(vdev);
685	ivpu_pm_enable(vdev);
686
687	return 0;
688
689err_ipc_fini:
690	ivpu_ipc_fini(vdev);
691err_fw_fini:
692	ivpu_fw_fini(vdev);
693err_mmu_rctx_fini:
694	ivpu_mmu_reserved_context_fini(vdev);
695err_mmu_gctx_fini:
696	ivpu_mmu_global_context_fini(vdev);
697err_shutdown:
698	ivpu_shutdown(vdev);
699err_xa_destroy:
700	xa_destroy(&vdev->db_xa);
701	xa_destroy(&vdev->submitted_jobs_xa);
702	xa_destroy(&vdev->context_xa);
703	return ret;
704}
705
706static void ivpu_bo_unbind_all_user_contexts(struct ivpu_device *vdev)
707{
708	struct ivpu_file_priv *file_priv;
709	unsigned long ctx_id;
710
711	mutex_lock(&vdev->context_list_lock);
712
713	xa_for_each(&vdev->context_xa, ctx_id, file_priv)
714		file_priv_unbind(vdev, file_priv);
715
716	mutex_unlock(&vdev->context_list_lock);
717}
718
719static void ivpu_dev_fini(struct ivpu_device *vdev)
720{
721	ivpu_jobs_abort_all(vdev);
722	ivpu_pm_cancel_recovery(vdev);
723	ivpu_pm_disable(vdev);
724	ivpu_prepare_for_reset(vdev);
725	ivpu_shutdown(vdev);
726
727	ivpu_ms_cleanup_all(vdev);
728	ivpu_job_done_consumer_fini(vdev);
729	ivpu_bo_unbind_all_user_contexts(vdev);
730
731	ivpu_ipc_fini(vdev);
732	ivpu_fw_fini(vdev);
733	ivpu_mmu_reserved_context_fini(vdev);
734	ivpu_mmu_global_context_fini(vdev);
735
736	drm_WARN_ON(&vdev->drm, !xa_empty(&vdev->db_xa));
737	xa_destroy(&vdev->db_xa);
738	drm_WARN_ON(&vdev->drm, !xa_empty(&vdev->submitted_jobs_xa));
739	xa_destroy(&vdev->submitted_jobs_xa);
740	drm_WARN_ON(&vdev->drm, !xa_empty(&vdev->context_xa));
741	xa_destroy(&vdev->context_xa);
742}
743
744static struct pci_device_id ivpu_pci_ids[] = {
745	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_MTL) },
746	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_ARL) },
747	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_LNL) },
748	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PTL_P) },
749	{ }
750};
751MODULE_DEVICE_TABLE(pci, ivpu_pci_ids);
752
753static int ivpu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
754{
755	struct ivpu_device *vdev;
756	int ret;
757
758	vdev = devm_drm_dev_alloc(&pdev->dev, &driver, struct ivpu_device, drm);
759	if (IS_ERR(vdev))
760		return PTR_ERR(vdev);
761
762	pci_set_drvdata(pdev, vdev);
763
764	ret = ivpu_dev_init(vdev);
765	if (ret)
766		return ret;
767
768	ivpu_debugfs_init(vdev);
769	ivpu_sysfs_init(vdev);
770
771	ret = drm_dev_register(&vdev->drm, 0);
772	if (ret) {
773		dev_err(&pdev->dev, "Failed to register DRM device: %d\n", ret);
774		ivpu_dev_fini(vdev);
775	}
776
777	return ret;
778}
779
780static void ivpu_remove(struct pci_dev *pdev)
781{
782	struct ivpu_device *vdev = pci_get_drvdata(pdev);
783
784	drm_dev_unplug(&vdev->drm);
785	ivpu_dev_fini(vdev);
786}
787
788static const struct dev_pm_ops ivpu_drv_pci_pm = {
789	SET_SYSTEM_SLEEP_PM_OPS(ivpu_pm_suspend_cb, ivpu_pm_resume_cb)
790	SET_RUNTIME_PM_OPS(ivpu_pm_runtime_suspend_cb, ivpu_pm_runtime_resume_cb, NULL)
791};
792
793static const struct pci_error_handlers ivpu_drv_pci_err = {
794	.reset_prepare = ivpu_pm_reset_prepare_cb,
795	.reset_done = ivpu_pm_reset_done_cb,
796};
797
798static struct pci_driver ivpu_pci_driver = {
799	.name = KBUILD_MODNAME,
800	.id_table = ivpu_pci_ids,
801	.probe = ivpu_probe,
802	.remove = ivpu_remove,
803	.driver = {
804		.pm = &ivpu_drv_pci_pm,
805	},
806	.err_handler = &ivpu_drv_pci_err,
807};
808
809module_pci_driver(ivpu_pci_driver);
810
811MODULE_AUTHOR("Intel Corporation");
812MODULE_DESCRIPTION(DRIVER_DESC);
813MODULE_LICENSE("GPL and additional rights");
814MODULE_VERSION(DRIVER_VERSION_STR);
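
Both versions dispatch userspace requests through ivpu_get_param_ioctl(), registered in ivpu_drm_ioctls[] as IVPU_GET_PARAM. A minimal userspace sketch of that path, assuming the kernel uAPI header <drm/ivpu_accel.h> and an NPU exposed as /dev/accel/accel0 (neither is defined in this file), could look like:

/* Hedged example, not part of ivpu_drv.c: query the NPU's PCI device ID via
 * DRM_IOCTL_IVPU_GET_PARAM. Assumes the installed uAPI header <drm/ivpu_accel.h>
 * and that the accel node is /dev/accel/accel0; error handling is minimal. */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <drm/ivpu_accel.h>

int main(void)
{
	struct drm_ivpu_param args = { .param = DRM_IVPU_PARAM_DEVICE_ID };
	int fd = open("/dev/accel/accel0", O_RDWR);

	if (fd < 0)
		return 1;

	/* Lands in ivpu_get_param_ioctl(), which copies pdev->device into value */
	if (ioctl(fd, DRM_IOCTL_IVPU_GET_PARAM, &args) == 0)
		printf("NPU PCI device id: 0x%llx\n", (unsigned long long)args.value);

	close(fd);
	return 0;
}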