// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020-2023 Intel Corporation
 */

#include <linux/circ_buf.h>
#include <linux/highmem.h>

#include "ivpu_drv.h"
#include "ivpu_hw.h"
#include "ivpu_hw_reg_io.h"
#include "ivpu_mmu.h"
#include "ivpu_mmu_context.h"
#include "ivpu_pm.h"

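/*
 * The register offsets below match the Arm SMMUv3 programming model
 * (IDRn, CR0/CR0ACK, CMDQ, EVTQ, STRTAB), exposed by the VPU's MMU at
 * offset 0x00200000 in the VPU register space.
 */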
#define IVPU_MMU_REG_IDR0		      0x00200000u
#define IVPU_MMU_REG_IDR1		      0x00200004u
#define IVPU_MMU_REG_IDR3		      0x0020000cu
#define IVPU_MMU_REG_IDR5		      0x00200014u
#define IVPU_MMU_REG_CR0		      0x00200020u
#define IVPU_MMU_REG_CR0ACK		      0x00200024u
#define IVPU_MMU_REG_CR0ACK_VAL_MASK	      GENMASK(31, 0)
#define IVPU_MMU_REG_CR1		      0x00200028u
#define IVPU_MMU_REG_CR2		      0x0020002cu
#define IVPU_MMU_REG_IRQ_CTRL		      0x00200050u
#define IVPU_MMU_REG_IRQ_CTRLACK	      0x00200054u
#define IVPU_MMU_REG_IRQ_CTRLACK_VAL_MASK     GENMASK(31, 0)

#define IVPU_MMU_REG_GERROR		      0x00200060u
#define IVPU_MMU_REG_GERROR_CMDQ_MASK	      BIT_MASK(0)
#define IVPU_MMU_REG_GERROR_EVTQ_ABT_MASK     BIT_MASK(2)
#define IVPU_MMU_REG_GERROR_PRIQ_ABT_MASK     BIT_MASK(3)
#define IVPU_MMU_REG_GERROR_MSI_CMDQ_ABT_MASK BIT_MASK(4)
#define IVPU_MMU_REG_GERROR_MSI_EVTQ_ABT_MASK BIT_MASK(5)
#define IVPU_MMU_REG_GERROR_MSI_PRIQ_ABT_MASK BIT_MASK(6)
#define IVPU_MMU_REG_GERROR_MSI_ABT_MASK      BIT_MASK(7)

#define IVPU_MMU_REG_GERRORN		      0x00200064u

#define IVPU_MMU_REG_STRTAB_BASE	      0x00200080u
#define IVPU_MMU_REG_STRTAB_BASE_CFG	      0x00200088u
#define IVPU_MMU_REG_CMDQ_BASE		      0x00200090u
#define IVPU_MMU_REG_CMDQ_PROD		      0x00200098u
#define IVPU_MMU_REG_CMDQ_CONS		      0x0020009cu
#define IVPU_MMU_REG_CMDQ_CONS_VAL_MASK	      GENMASK(23, 0)
#define IVPU_MMU_REG_CMDQ_CONS_ERR_MASK	      GENMASK(30, 24)
#define IVPU_MMU_REG_EVTQ_BASE		      0x002000a0u
#define IVPU_MMU_REG_EVTQ_PROD		      0x002000a8u
#define IVPU_MMU_REG_EVTQ_CONS		      0x002000acu
#define IVPU_MMU_REG_EVTQ_PROD_SEC	      (0x002000a8u + SZ_64K)
#define IVPU_MMU_REG_EVTQ_CONS_SEC	      (0x002000acu + SZ_64K)

#define IVPU_MMU_IDR0_REF		0x080f3e0f
#define IVPU_MMU_IDR0_REF_SIMICS	0x080f3e1f
#define IVPU_MMU_IDR1_REF		0x0e739d18
#define IVPU_MMU_IDR3_REF		0x0000003c
#define IVPU_MMU_IDR5_REF		0x00040070
#define IVPU_MMU_IDR5_REF_SIMICS	0x00000075
#define IVPU_MMU_IDR5_REF_FPGA		0x00800075

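/* Context Descriptor table: one 64-byte CD per SSID, 256 entries total */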
#define IVPU_MMU_CDTAB_ENT_SIZE		64
#define IVPU_MMU_CDTAB_ENT_COUNT_LOG2	8 /* 256 entries */
#define IVPU_MMU_CDTAB_ENT_COUNT	((u32)1 << IVPU_MMU_CDTAB_ENT_COUNT_LOG2)

#define IVPU_MMU_STREAM_ID0		0
#define IVPU_MMU_STREAM_ID3		3

#define IVPU_MMU_STRTAB_ENT_SIZE	64
#define IVPU_MMU_STRTAB_ENT_COUNT	4
#define IVPU_MMU_STRTAB_CFG_LOG2SIZE	2
#define IVPU_MMU_STRTAB_CFG		IVPU_MMU_STRTAB_CFG_LOG2SIZE

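/*
 * Queue pointers encode the entry index in the low bits and a wrap bit
 * one position above it; IVPU_MMU_Q_IDX() and IVPU_MMU_Q_WRP() extract
 * the two parts.
 */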
#define IVPU_MMU_Q_COUNT_LOG2		4 /* 16 entries */
#define IVPU_MMU_Q_COUNT		((u32)1 << IVPU_MMU_Q_COUNT_LOG2)
#define IVPU_MMU_Q_WRAP_MASK		GENMASK(IVPU_MMU_Q_COUNT_LOG2, 0)
#define IVPU_MMU_Q_IDX_MASK		(IVPU_MMU_Q_COUNT - 1)
#define IVPU_MMU_Q_IDX(val)		((val) & IVPU_MMU_Q_IDX_MASK)
#define IVPU_MMU_Q_WRP(val)		((val) & IVPU_MMU_Q_COUNT)

#define IVPU_MMU_CMDQ_CMD_SIZE		16
#define IVPU_MMU_CMDQ_SIZE		(IVPU_MMU_Q_COUNT * IVPU_MMU_CMDQ_CMD_SIZE)

#define IVPU_MMU_EVTQ_CMD_SIZE		32
#define IVPU_MMU_EVTQ_SIZE		(IVPU_MMU_Q_COUNT * IVPU_MMU_EVTQ_CMD_SIZE)

#define IVPU_MMU_CMD_OPCODE		GENMASK(7, 0)

#define IVPU_MMU_CMD_SYNC_0_CS		GENMASK(13, 12)
#define IVPU_MMU_CMD_SYNC_0_MSH		GENMASK(23, 22)
#define IVPU_MMU_CMD_SYNC_0_MSI_ATTR	GENMASK(27, 24)
#define IVPU_MMU_CMD_SYNC_0_MSI_DATA	GENMASK(63, 32)

#define IVPU_MMU_CMD_CFGI_0_SSEC	BIT(10)
#define IVPU_MMU_CMD_CFGI_0_SSV		BIT(11)
#define IVPU_MMU_CMD_CFGI_0_SSID	GENMASK(31, 12)
#define IVPU_MMU_CMD_CFGI_0_SID		GENMASK(63, 32)
#define IVPU_MMU_CMD_CFGI_1_RANGE	GENMASK(4, 0)

#define IVPU_MMU_CMD_TLBI_0_ASID	GENMASK(63, 48)
#define IVPU_MMU_CMD_TLBI_0_VMID	GENMASK(47, 32)

#define CMD_PREFETCH_CFG		0x1
#define CMD_CFGI_STE			0x3
#define CMD_CFGI_ALL			0x4
#define CMD_CFGI_CD			0x5
#define CMD_CFGI_CD_ALL			0x6
#define CMD_TLBI_NH_ASID		0x11
#define CMD_TLBI_EL2_ALL		0x20
#define CMD_TLBI_NSNH_ALL		0x30
#define CMD_SYNC			0x46

#define IVPU_MMU_EVT_F_UUT		0x01
#define IVPU_MMU_EVT_C_BAD_STREAMID	0x02
#define IVPU_MMU_EVT_F_STE_FETCH	0x03
#define IVPU_MMU_EVT_C_BAD_STE		0x04
#define IVPU_MMU_EVT_F_BAD_ATS_TREQ	0x05
#define IVPU_MMU_EVT_F_STREAM_DISABLED	0x06
#define IVPU_MMU_EVT_F_TRANSL_FORBIDDEN	0x07
#define IVPU_MMU_EVT_C_BAD_SUBSTREAMID	0x08
#define IVPU_MMU_EVT_F_CD_FETCH		0x09
#define IVPU_MMU_EVT_C_BAD_CD		0x0a
#define IVPU_MMU_EVT_F_WALK_EABT	0x0b
#define IVPU_MMU_EVT_F_TRANSLATION	0x10
#define IVPU_MMU_EVT_F_ADDR_SIZE	0x11
#define IVPU_MMU_EVT_F_ACCESS		0x12
#define IVPU_MMU_EVT_F_PERMISSION	0x13
#define IVPU_MMU_EVT_F_TLB_CONFLICT	0x20
#define IVPU_MMU_EVT_F_CFG_CONFLICT	0x21
#define IVPU_MMU_EVT_E_PAGE_REQUEST	0x24
#define IVPU_MMU_EVT_F_VMS_FETCH	0x25

#define IVPU_MMU_EVT_OP_MASK		GENMASK_ULL(7, 0)
#define IVPU_MMU_EVT_SSID_MASK		GENMASK_ULL(31, 12)

#define IVPU_MMU_Q_BASE_RWA		BIT(62)
#define IVPU_MMU_Q_BASE_ADDR_MASK	GENMASK_ULL(51, 5)
#define IVPU_MMU_STRTAB_BASE_RA		BIT(62)
#define IVPU_MMU_STRTAB_BASE_ADDR_MASK	GENMASK_ULL(51, 6)

#define IVPU_MMU_IRQ_EVTQ_EN		BIT(2)
#define IVPU_MMU_IRQ_GERROR_EN		BIT(0)

#define IVPU_MMU_CR0_ATSCHK		BIT(4)
#define IVPU_MMU_CR0_CMDQEN		BIT(3)
#define IVPU_MMU_CR0_EVTQEN		BIT(2)
#define IVPU_MMU_CR0_PRIQEN		BIT(1)
#define IVPU_MMU_CR0_SMMUEN		BIT(0)

#define IVPU_MMU_CR1_TABLE_SH		GENMASK(11, 10)
#define IVPU_MMU_CR1_TABLE_OC		GENMASK(9, 8)
#define IVPU_MMU_CR1_TABLE_IC		GENMASK(7, 6)
#define IVPU_MMU_CR1_QUEUE_SH		GENMASK(5, 4)
#define IVPU_MMU_CR1_QUEUE_OC		GENMASK(3, 2)
#define IVPU_MMU_CR1_QUEUE_IC		GENMASK(1, 0)
#define IVPU_MMU_CACHE_NC		0
#define IVPU_MMU_CACHE_WB		1
#define IVPU_MMU_CACHE_WT		2
#define IVPU_MMU_SH_NSH			0
#define IVPU_MMU_SH_OSH			2
#define IVPU_MMU_SH_ISH			3

#define IVPU_MMU_CMDQ_OP		GENMASK_ULL(7, 0)

#define IVPU_MMU_CD_0_TCR_T0SZ		GENMASK_ULL(5, 0)
#define IVPU_MMU_CD_0_TCR_TG0		GENMASK_ULL(7, 6)
#define IVPU_MMU_CD_0_TCR_IRGN0		GENMASK_ULL(9, 8)
#define IVPU_MMU_CD_0_TCR_ORGN0		GENMASK_ULL(11, 10)
#define IVPU_MMU_CD_0_TCR_SH0		GENMASK_ULL(13, 12)
#define IVPU_MMU_CD_0_TCR_EPD0		BIT_ULL(14)
#define IVPU_MMU_CD_0_TCR_EPD1		BIT_ULL(30)
#define IVPU_MMU_CD_0_ENDI		BIT(15)
#define IVPU_MMU_CD_0_V			BIT(31)
#define IVPU_MMU_CD_0_TCR_IPS		GENMASK_ULL(34, 32)
#define IVPU_MMU_CD_0_TCR_TBI0		BIT_ULL(38)
#define IVPU_MMU_CD_0_AA64		BIT(41)
#define IVPU_MMU_CD_0_S			BIT(44)
#define IVPU_MMU_CD_0_R			BIT(45)
#define IVPU_MMU_CD_0_A			BIT(46)
#define IVPU_MMU_CD_0_ASET		BIT(47)
#define IVPU_MMU_CD_0_ASID		GENMASK_ULL(63, 48)

#define IVPU_MMU_T0SZ_48BIT		16
#define IVPU_MMU_T0SZ_38BIT		26

#define IVPU_MMU_IPS_48BIT		5
#define IVPU_MMU_IPS_44BIT		4
#define IVPU_MMU_IPS_42BIT		3
#define IVPU_MMU_IPS_40BIT		2
#define IVPU_MMU_IPS_36BIT		1
#define IVPU_MMU_IPS_32BIT		0

#define IVPU_MMU_CD_1_TTB0_MASK		GENMASK_ULL(51, 4)

#define IVPU_MMU_STE_0_S1CDMAX		GENMASK_ULL(63, 59)
#define IVPU_MMU_STE_0_S1FMT		GENMASK_ULL(5, 4)
#define IVPU_MMU_STE_0_S1FMT_LINEAR	0
#define IVPU_MMU_STE_DWORDS		8
#define IVPU_MMU_STE_0_CFG_S1_TRANS	5
#define IVPU_MMU_STE_0_CFG		GENMASK_ULL(3, 1)
#define IVPU_MMU_STE_0_S1CTXPTR_MASK	GENMASK_ULL(51, 6)
#define IVPU_MMU_STE_0_V		BIT(0)

#define IVPU_MMU_STE_1_STRW_NSEL1	0ul
#define IVPU_MMU_STE_1_CONT		GENMASK_ULL(16, 13)
#define IVPU_MMU_STE_1_STRW		GENMASK_ULL(31, 30)
#define IVPU_MMU_STE_1_PRIVCFG		GENMASK_ULL(49, 48)
#define IVPU_MMU_STE_1_PRIVCFG_UNPRIV	2ul
#define IVPU_MMU_STE_1_INSTCFG		GENMASK_ULL(51, 50)
#define IVPU_MMU_STE_1_INSTCFG_DATA	2ul
#define IVPU_MMU_STE_1_MEV		BIT(19)
#define IVPU_MMU_STE_1_S1STALLD		BIT(27)
#define IVPU_MMU_STE_1_S1C_CACHE_NC	0ul
#define IVPU_MMU_STE_1_S1C_CACHE_WBRA	1ul
#define IVPU_MMU_STE_1_S1C_CACHE_WT	2ul
#define IVPU_MMU_STE_1_S1C_CACHE_WB	3ul
#define IVPU_MMU_STE_1_S1CIR		GENMASK_ULL(3, 2)
#define IVPU_MMU_STE_1_S1COR		GENMASK_ULL(5, 4)
#define IVPU_MMU_STE_1_S1CSH		GENMASK_ULL(7, 6)
#define IVPU_MMU_STE_1_S1DSS		GENMASK_ULL(1, 0)
#define IVPU_MMU_STE_1_S1DSS_TERMINATE	0x0

#define IVPU_MMU_REG_TIMEOUT_US		(10 * USEC_PER_MSEC)
#define IVPU_MMU_QUEUE_TIMEOUT_US	(100 * USEC_PER_MSEC)

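/* All global error conditions handled by ivpu_mmu_irq_gerr_handler() */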
#define IVPU_MMU_GERROR_ERR_MASK ((REG_FLD(IVPU_MMU_REG_GERROR, CMDQ)) | \
				  (REG_FLD(IVPU_MMU_REG_GERROR, EVTQ_ABT)) | \
				  (REG_FLD(IVPU_MMU_REG_GERROR, PRIQ_ABT)) | \
				  (REG_FLD(IVPU_MMU_REG_GERROR, MSI_CMDQ_ABT)) | \
				  (REG_FLD(IVPU_MMU_REG_GERROR, MSI_EVTQ_ABT)) | \
				  (REG_FLD(IVPU_MMU_REG_GERROR, MSI_PRIQ_ABT)) | \
				  (REG_FLD(IVPU_MMU_REG_GERROR, MSI_ABT)))

#define IVPU_MMU_CERROR_NONE		0x0
#define IVPU_MMU_CERROR_ILL		0x1
#define IVPU_MMU_CERROR_ABT		0x2
#define IVPU_MMU_CERROR_ATC_INV_SYNC	0x3

static const char *ivpu_mmu_event_to_str(u32 cmd)
{
	switch (cmd) {
	case IVPU_MMU_EVT_F_UUT:
		return "Unsupported Upstream Transaction";
	case IVPU_MMU_EVT_C_BAD_STREAMID:
		return "Transaction StreamID out of range";
	case IVPU_MMU_EVT_F_STE_FETCH:
		return "Fetch of STE caused external abort";
	case IVPU_MMU_EVT_C_BAD_STE:
		return "Used STE invalid";
	case IVPU_MMU_EVT_F_BAD_ATS_TREQ:
		return "Address Request disallowed for a StreamID";
	case IVPU_MMU_EVT_F_STREAM_DISABLED:
		return "Transaction marks non-substream disabled";
	case IVPU_MMU_EVT_F_TRANSL_FORBIDDEN:
		return "MMU bypass is disallowed for this StreamID";
	case IVPU_MMU_EVT_C_BAD_SUBSTREAMID:
		return "Invalid SubstreamID";
	case IVPU_MMU_EVT_F_CD_FETCH:
		return "Fetch of CD caused external abort";
	case IVPU_MMU_EVT_C_BAD_CD:
		return "Fetched CD invalid";
	case IVPU_MMU_EVT_F_WALK_EABT:
		return "An external abort occurred fetching a TLB";
	case IVPU_MMU_EVT_F_TRANSLATION:
		return "Translation fault";
	case IVPU_MMU_EVT_F_ADDR_SIZE:
		return "Output address caused address size fault";
	case IVPU_MMU_EVT_F_ACCESS:
		return "Access flag fault";
	case IVPU_MMU_EVT_F_PERMISSION:
		return "Permission fault occurred on page access";
	case IVPU_MMU_EVT_F_TLB_CONFLICT:
		return "A TLB conflict";
	case IVPU_MMU_EVT_F_CFG_CONFLICT:
		return "A configuration cache conflict";
	case IVPU_MMU_EVT_E_PAGE_REQUEST:
		return "Page request hint from a client device";
	case IVPU_MMU_EVT_F_VMS_FETCH:
		return "Fetch of VMS caused external abort";
	default:
		return "Unknown event";
	}
}

static const char *ivpu_mmu_cmdq_err_to_str(u32 err)
{
	switch (err) {
	case IVPU_MMU_CERROR_NONE:
		return "No CMDQ Error";
	case IVPU_MMU_CERROR_ILL:
		return "Illegal command";
	case IVPU_MMU_CERROR_ABT:
		return "External abort on CMDQ read";
	case IVPU_MMU_CERROR_ATC_INV_SYNC:
		return "Sync failed to complete ATS invalidation";
	default:
		return "Unknown CMDQ Error";
	}
}

static void ivpu_mmu_config_check(struct ivpu_device *vdev)
{
	u32 val_ref;
	u32 val;

	if (ivpu_is_simics(vdev))
		val_ref = IVPU_MMU_IDR0_REF_SIMICS;
	else
		val_ref = IVPU_MMU_IDR0_REF;

	val = REGV_RD32(IVPU_MMU_REG_IDR0);
	if (val != val_ref)
		ivpu_dbg(vdev, MMU, "IDR0 0x%x != IDR0_REF 0x%x\n", val, val_ref);

	val = REGV_RD32(IVPU_MMU_REG_IDR1);
	if (val != IVPU_MMU_IDR1_REF)
		ivpu_dbg(vdev, MMU, "IDR1 0x%x != IDR1_REF 0x%x\n", val, IVPU_MMU_IDR1_REF);

	val = REGV_RD32(IVPU_MMU_REG_IDR3);
	if (val != IVPU_MMU_IDR3_REF)
		ivpu_dbg(vdev, MMU, "IDR3 0x%x != IDR3_REF 0x%x\n", val, IVPU_MMU_IDR3_REF);

	if (ivpu_is_simics(vdev))
		val_ref = IVPU_MMU_IDR5_REF_SIMICS;
	else if (ivpu_is_fpga(vdev))
		val_ref = IVPU_MMU_IDR5_REF_FPGA;
	else
		val_ref = IVPU_MMU_IDR5_REF;

	val = REGV_RD32(IVPU_MMU_REG_IDR5);
	if (val != val_ref)
		ivpu_dbg(vdev, MMU, "IDR5 0x%x != IDR5_REF 0x%x\n", val, val_ref);
}

static int ivpu_mmu_cdtab_alloc(struct ivpu_device *vdev)
{
	struct ivpu_mmu_info *mmu = vdev->mmu;
	struct ivpu_mmu_cdtab *cdtab = &mmu->cdtab;
	size_t size = IVPU_MMU_CDTAB_ENT_COUNT * IVPU_MMU_CDTAB_ENT_SIZE;

	cdtab->base = dmam_alloc_coherent(vdev->drm.dev, size, &cdtab->dma, GFP_KERNEL);
	if (!cdtab->base)
		return -ENOMEM;

	ivpu_dbg(vdev, MMU, "CDTAB alloc: dma=%pad size=%zu\n", &cdtab->dma, size);

	return 0;
}

static int ivpu_mmu_strtab_alloc(struct ivpu_device *vdev)
{
	struct ivpu_mmu_info *mmu = vdev->mmu;
	struct ivpu_mmu_strtab *strtab = &mmu->strtab;
	size_t size = IVPU_MMU_STRTAB_ENT_COUNT * IVPU_MMU_STRTAB_ENT_SIZE;

	strtab->base = dmam_alloc_coherent(vdev->drm.dev, size, &strtab->dma, GFP_KERNEL);
	if (!strtab->base)
		return -ENOMEM;

	strtab->base_cfg = IVPU_MMU_STRTAB_CFG;
	strtab->dma_q = IVPU_MMU_STRTAB_BASE_RA;
	strtab->dma_q |= strtab->dma & IVPU_MMU_STRTAB_BASE_ADDR_MASK;

	ivpu_dbg(vdev, MMU, "STRTAB alloc: dma=%pad dma_q=%pad size=%zu\n",
		 &strtab->dma, &strtab->dma_q, size);

	return 0;
}

static int ivpu_mmu_cmdq_alloc(struct ivpu_device *vdev)
{
	struct ivpu_mmu_info *mmu = vdev->mmu;
	struct ivpu_mmu_queue *q = &mmu->cmdq;

	q->base = dmam_alloc_coherent(vdev->drm.dev, IVPU_MMU_CMDQ_SIZE, &q->dma, GFP_KERNEL);
	if (!q->base)
		return -ENOMEM;

	q->dma_q = IVPU_MMU_Q_BASE_RWA;
	q->dma_q |= q->dma & IVPU_MMU_Q_BASE_ADDR_MASK;
	q->dma_q |= IVPU_MMU_Q_COUNT_LOG2;

	ivpu_dbg(vdev, MMU, "CMDQ alloc: dma=%pad dma_q=%pad size=%u\n",
		 &q->dma, &q->dma_q, IVPU_MMU_CMDQ_SIZE);

	return 0;
}

static int ivpu_mmu_evtq_alloc(struct ivpu_device *vdev)
{
	struct ivpu_mmu_info *mmu = vdev->mmu;
	struct ivpu_mmu_queue *q = &mmu->evtq;

	q->base = dmam_alloc_coherent(vdev->drm.dev, IVPU_MMU_EVTQ_SIZE, &q->dma, GFP_KERNEL);
	if (!q->base)
		return -ENOMEM;

	q->dma_q = IVPU_MMU_Q_BASE_RWA;
	q->dma_q |= q->dma & IVPU_MMU_Q_BASE_ADDR_MASK;
	q->dma_q |= IVPU_MMU_Q_COUNT_LOG2;

	ivpu_dbg(vdev, MMU, "EVTQ alloc: dma=%pad dma_q=%pad size=%u\n",
		 &q->dma, &q->dma_q, IVPU_MMU_EVTQ_SIZE);

	return 0;
}

static int ivpu_mmu_structs_alloc(struct ivpu_device *vdev)
{
	int ret;

	ret = ivpu_mmu_cdtab_alloc(vdev);
	if (ret) {
		ivpu_err(vdev, "Failed to allocate cdtab: %d\n", ret);
		return ret;
	}

	ret = ivpu_mmu_strtab_alloc(vdev);
	if (ret) {
		ivpu_err(vdev, "Failed to allocate strtab: %d\n", ret);
		return ret;
	}

	ret = ivpu_mmu_cmdq_alloc(vdev);
	if (ret) {
		ivpu_err(vdev, "Failed to allocate cmdq: %d\n", ret);
		return ret;
	}

	ret = ivpu_mmu_evtq_alloc(vdev);
	if (ret)
		ivpu_err(vdev, "Failed to allocate evtq: %d\n", ret);

	return ret;
}

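/* Write CR0 and poll CR0ACK until the MMU acknowledges the new value */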
static int ivpu_mmu_reg_write_cr0(struct ivpu_device *vdev, u32 val)
{
	REGV_WR32(IVPU_MMU_REG_CR0, val);

	return REGV_POLL_FLD(IVPU_MMU_REG_CR0ACK, VAL, val, IVPU_MMU_REG_TIMEOUT_US);
}

static int ivpu_mmu_reg_write_irq_ctrl(struct ivpu_device *vdev, u32 val)
{
	REGV_WR32(IVPU_MMU_REG_IRQ_CTRL, val);

	return REGV_POLL_FLD(IVPU_MMU_REG_IRQ_CTRLACK, VAL, val, IVPU_MMU_REG_TIMEOUT_US);
}

static int ivpu_mmu_irqs_setup(struct ivpu_device *vdev)
{
	u32 irq_ctrl = IVPU_MMU_IRQ_EVTQ_EN | IVPU_MMU_IRQ_GERROR_EN;
	int ret;

	ret = ivpu_mmu_reg_write_irq_ctrl(vdev, 0);
	if (ret)
		return ret;

	return ivpu_mmu_reg_write_irq_ctrl(vdev, irq_ctrl);
}

static int ivpu_mmu_cmdq_wait_for_cons(struct ivpu_device *vdev)
{
	struct ivpu_mmu_queue *cmdq = &vdev->mmu->cmdq;
	int ret;

	ret = REGV_POLL_FLD(IVPU_MMU_REG_CMDQ_CONS, VAL, cmdq->prod,
			    IVPU_MMU_QUEUE_TIMEOUT_US);
	if (ret)
		return ret;

	cmdq->cons = cmdq->prod;

	return 0;
}

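/*
 * With equal entry indices, the wrap bits distinguish a full queue (the
 * producer has lapped the consumer) from an empty one (both are in the
 * same lap).
 */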
static bool ivpu_mmu_queue_is_full(struct ivpu_mmu_queue *q)
{
	return ((IVPU_MMU_Q_IDX(q->prod) == IVPU_MMU_Q_IDX(q->cons)) &&
		(IVPU_MMU_Q_WRP(q->prod) != IVPU_MMU_Q_WRP(q->cons)));
}

static bool ivpu_mmu_queue_is_empty(struct ivpu_mmu_queue *q)
{
	return ((IVPU_MMU_Q_IDX(q->prod) == IVPU_MMU_Q_IDX(q->cons)) &&
		(IVPU_MMU_Q_WRP(q->prod) == IVPU_MMU_Q_WRP(q->cons)));
}

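/* Write one 16-byte command at the producer slot and advance the producer */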
static int ivpu_mmu_cmdq_cmd_write(struct ivpu_device *vdev, const char *name, u64 data0, u64 data1)
{
	struct ivpu_mmu_queue *cmdq = &vdev->mmu->cmdq;
	u64 *queue_buffer = cmdq->base;
	int idx = IVPU_MMU_Q_IDX(cmdq->prod) * (IVPU_MMU_CMDQ_CMD_SIZE / sizeof(*queue_buffer));

	if (ivpu_mmu_queue_is_full(cmdq)) {
		ivpu_err(vdev, "Failed to write MMU CMD %s\n", name);
		return -EBUSY;
	}

	queue_buffer[idx] = data0;
	queue_buffer[idx + 1] = data1;
	cmdq->prod = (cmdq->prod + 1) & IVPU_MMU_Q_WRAP_MASK;

	ivpu_dbg(vdev, MMU, "CMD write: %s data: 0x%llx 0x%llx\n", name, data0, data1);

	return 0;
}

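/*
 * Queue a CMD_SYNC, flush the queue memory, then make the new producer
 * value visible to the MMU and wait until the consumer catches up, which
 * guarantees that all previously queued commands have completed.
 */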
static int ivpu_mmu_cmdq_sync(struct ivpu_device *vdev)
{
	struct ivpu_mmu_queue *q = &vdev->mmu->cmdq;
	u64 val;
	int ret;

	val = FIELD_PREP(IVPU_MMU_CMD_OPCODE, CMD_SYNC);

	ret = ivpu_mmu_cmdq_cmd_write(vdev, "SYNC", val, 0);
	if (ret)
		return ret;

	clflush_cache_range(q->base, IVPU_MMU_CMDQ_SIZE);
	REGV_WR32(IVPU_MMU_REG_CMDQ_PROD, q->prod);

	ret = ivpu_mmu_cmdq_wait_for_cons(vdev);
	if (ret) {
		u32 err;

		val = REGV_RD32(IVPU_MMU_REG_CMDQ_CONS);
		err = REG_GET_FLD(IVPU_MMU_REG_CMDQ_CONS, ERR, val);

		ivpu_err(vdev, "Timed out waiting for MMU consumer: %d, error: %s\n", ret,
			 ivpu_mmu_cmdq_err_to_str(err));
		ivpu_hw_diagnose_failure(vdev);
	}

	return ret;
}

static int ivpu_mmu_cmdq_write_cfgi_all(struct ivpu_device *vdev)
{
	u64 data0 = FIELD_PREP(IVPU_MMU_CMD_OPCODE, CMD_CFGI_ALL);
	u64 data1 = FIELD_PREP(IVPU_MMU_CMD_CFGI_1_RANGE, 0x1f);

	return ivpu_mmu_cmdq_cmd_write(vdev, "CFGI_ALL", data0, data1);
}

static int ivpu_mmu_cmdq_write_tlbi_nh_asid(struct ivpu_device *vdev, u16 ssid)
{
	u64 val = FIELD_PREP(IVPU_MMU_CMD_OPCODE, CMD_TLBI_NH_ASID) |
		  FIELD_PREP(IVPU_MMU_CMD_TLBI_0_ASID, ssid);

	return ivpu_mmu_cmdq_cmd_write(vdev, "TLBI_NH_ASID", val, 0);
}

static int ivpu_mmu_cmdq_write_tlbi_nsnh_all(struct ivpu_device *vdev)
{
	u64 val = FIELD_PREP(IVPU_MMU_CMD_OPCODE, CMD_TLBI_NSNH_ALL);

	return ivpu_mmu_cmdq_cmd_write(vdev, "TLBI_NSNH_ALL", val, 0);
}

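/*
 * Bring the MMU to a known state: clear both queues, program the table
 * and queue base registers, then step CR0 one capability at a time,
 * enabling CMDQ, EVTQ, ATS checking, IRQs and finally the SMMU itself.
 */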
static int ivpu_mmu_reset(struct ivpu_device *vdev)
{
	struct ivpu_mmu_info *mmu = vdev->mmu;
	u32 val;
	int ret;

	memset(mmu->cmdq.base, 0, IVPU_MMU_CMDQ_SIZE);
	clflush_cache_range(mmu->cmdq.base, IVPU_MMU_CMDQ_SIZE);
	mmu->cmdq.prod = 0;
	mmu->cmdq.cons = 0;

	memset(mmu->evtq.base, 0, IVPU_MMU_EVTQ_SIZE);
	mmu->evtq.prod = 0;
	mmu->evtq.cons = 0;

	ret = ivpu_mmu_reg_write_cr0(vdev, 0);
	if (ret)
		return ret;

	val = FIELD_PREP(IVPU_MMU_CR1_TABLE_SH, IVPU_MMU_SH_ISH) |
	      FIELD_PREP(IVPU_MMU_CR1_TABLE_OC, IVPU_MMU_CACHE_WB) |
	      FIELD_PREP(IVPU_MMU_CR1_TABLE_IC, IVPU_MMU_CACHE_WB) |
	      FIELD_PREP(IVPU_MMU_CR1_QUEUE_SH, IVPU_MMU_SH_ISH) |
	      FIELD_PREP(IVPU_MMU_CR1_QUEUE_OC, IVPU_MMU_CACHE_WB) |
	      FIELD_PREP(IVPU_MMU_CR1_QUEUE_IC, IVPU_MMU_CACHE_WB);
	REGV_WR32(IVPU_MMU_REG_CR1, val);

	REGV_WR64(IVPU_MMU_REG_STRTAB_BASE, mmu->strtab.dma_q);
	REGV_WR32(IVPU_MMU_REG_STRTAB_BASE_CFG, mmu->strtab.base_cfg);

	REGV_WR64(IVPU_MMU_REG_CMDQ_BASE, mmu->cmdq.dma_q);
	REGV_WR32(IVPU_MMU_REG_CMDQ_PROD, 0);
	REGV_WR32(IVPU_MMU_REG_CMDQ_CONS, 0);

	val = IVPU_MMU_CR0_CMDQEN;
	ret = ivpu_mmu_reg_write_cr0(vdev, val);
	if (ret)
		return ret;

	ret = ivpu_mmu_cmdq_write_cfgi_all(vdev);
	if (ret)
		return ret;

	ret = ivpu_mmu_cmdq_write_tlbi_nsnh_all(vdev);
	if (ret)
		return ret;

	ret = ivpu_mmu_cmdq_sync(vdev);
	if (ret)
		return ret;

	REGV_WR64(IVPU_MMU_REG_EVTQ_BASE, mmu->evtq.dma_q);
	REGV_WR32(IVPU_MMU_REG_EVTQ_PROD_SEC, 0);
	REGV_WR32(IVPU_MMU_REG_EVTQ_CONS_SEC, 0);

	val |= IVPU_MMU_CR0_EVTQEN;
	ret = ivpu_mmu_reg_write_cr0(vdev, val);
	if (ret)
		return ret;

	val |= IVPU_MMU_CR0_ATSCHK;
	ret = ivpu_mmu_reg_write_cr0(vdev, val);
	if (ret)
		return ret;

	ret = ivpu_mmu_irqs_setup(vdev);
	if (ret)
		return ret;

	val |= IVPU_MMU_CR0_SMMUEN;
	return ivpu_mmu_reg_write_cr0(vdev, val);
}

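/* Build the Stream Table Entry for @sid and point it at the shared CD table */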
static void ivpu_mmu_strtab_link_cd(struct ivpu_device *vdev, u32 sid)
{
	struct ivpu_mmu_info *mmu = vdev->mmu;
	struct ivpu_mmu_strtab *strtab = &mmu->strtab;
	struct ivpu_mmu_cdtab *cdtab = &mmu->cdtab;
	u64 *entry = strtab->base + (sid * IVPU_MMU_STRTAB_ENT_SIZE);
	u64 str[2];

	str[0] = FIELD_PREP(IVPU_MMU_STE_0_CFG, IVPU_MMU_STE_0_CFG_S1_TRANS) |
		 FIELD_PREP(IVPU_MMU_STE_0_S1CDMAX, IVPU_MMU_CDTAB_ENT_COUNT_LOG2) |
		 FIELD_PREP(IVPU_MMU_STE_0_S1FMT, IVPU_MMU_STE_0_S1FMT_LINEAR) |
		 IVPU_MMU_STE_0_V |
		 (cdtab->dma & IVPU_MMU_STE_0_S1CTXPTR_MASK);

	str[1] = FIELD_PREP(IVPU_MMU_STE_1_S1DSS, IVPU_MMU_STE_1_S1DSS_TERMINATE) |
		 FIELD_PREP(IVPU_MMU_STE_1_S1CIR, IVPU_MMU_STE_1_S1C_CACHE_NC) |
		 FIELD_PREP(IVPU_MMU_STE_1_S1COR, IVPU_MMU_STE_1_S1C_CACHE_NC) |
		 FIELD_PREP(IVPU_MMU_STE_1_S1CSH, IVPU_MMU_SH_NSH) |
		 FIELD_PREP(IVPU_MMU_STE_1_PRIVCFG, IVPU_MMU_STE_1_PRIVCFG_UNPRIV) |
		 FIELD_PREP(IVPU_MMU_STE_1_INSTCFG, IVPU_MMU_STE_1_INSTCFG_DATA) |
		 FIELD_PREP(IVPU_MMU_STE_1_STRW, IVPU_MMU_STE_1_STRW_NSEL1) |
		 FIELD_PREP(IVPU_MMU_STE_1_CONT, IVPU_MMU_STRTAB_CFG_LOG2SIZE) |
		 IVPU_MMU_STE_1_MEV |
		 IVPU_MMU_STE_1_S1STALLD;

	WRITE_ONCE(entry[1], str[1]);
	WRITE_ONCE(entry[0], str[0]);

	clflush_cache_range(entry, IVPU_MMU_STRTAB_ENT_SIZE);

	ivpu_dbg(vdev, MMU, "STRTAB write entry (SID=%u): 0x%llx, 0x%llx\n", sid, str[0], str[1]);
}

static int ivpu_mmu_strtab_init(struct ivpu_device *vdev)
{
	ivpu_mmu_strtab_link_cd(vdev, IVPU_MMU_STREAM_ID0);
	ivpu_mmu_strtab_link_cd(vdev, IVPU_MMU_STREAM_ID3);

	return 0;
}

int ivpu_mmu_invalidate_tlb(struct ivpu_device *vdev, u16 ssid)
{
	struct ivpu_mmu_info *mmu = vdev->mmu;
	int ret = 0;

	mutex_lock(&mmu->lock);
	if (!mmu->on)
		goto unlock;

	ret = ivpu_mmu_cmdq_write_tlbi_nh_asid(vdev, ssid);
	if (ret)
		goto unlock;

	ret = ivpu_mmu_cmdq_sync(vdev);
unlock:
	mutex_unlock(&mmu->lock);
	return ret;
}

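/*
 * Program (or clear, when @cd_dma is 0) the context descriptor for @ssid.
 * The CD carries the ASID and the page table base for that context.
 */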
static int ivpu_mmu_cd_add(struct ivpu_device *vdev, u32 ssid, u64 cd_dma)
{
	struct ivpu_mmu_info *mmu = vdev->mmu;
	struct ivpu_mmu_cdtab *cdtab = &mmu->cdtab;
	u64 *entry;
	u64 cd[4];
	int ret = 0;

	if (ssid >= IVPU_MMU_CDTAB_ENT_COUNT)
		return -EINVAL;

	entry = cdtab->base + (ssid * IVPU_MMU_CDTAB_ENT_SIZE);

	if (cd_dma != 0) {
		cd[0] = FIELD_PREP(IVPU_MMU_CD_0_TCR_T0SZ, IVPU_MMU_T0SZ_48BIT) |
			FIELD_PREP(IVPU_MMU_CD_0_TCR_TG0, 0) |
			FIELD_PREP(IVPU_MMU_CD_0_TCR_IRGN0, 0) |
			FIELD_PREP(IVPU_MMU_CD_0_TCR_ORGN0, 0) |
			FIELD_PREP(IVPU_MMU_CD_0_TCR_SH0, 0) |
			FIELD_PREP(IVPU_MMU_CD_0_TCR_IPS, IVPU_MMU_IPS_48BIT) |
			FIELD_PREP(IVPU_MMU_CD_0_ASID, ssid) |
			IVPU_MMU_CD_0_TCR_EPD1 |
			IVPU_MMU_CD_0_AA64 |
			IVPU_MMU_CD_0_R |
			IVPU_MMU_CD_0_ASET |
			IVPU_MMU_CD_0_V;
		cd[1] = cd_dma & IVPU_MMU_CD_1_TTB0_MASK;
		cd[2] = 0;
		cd[3] = 0x0000000000007444;

		/* For global context generate memory fault on VPU */
		if (ssid == IVPU_GLOBAL_CONTEXT_MMU_SSID)
			cd[0] |= IVPU_MMU_CD_0_A;
	} else {
		memset(cd, 0, sizeof(cd));
	}

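	/* cd[0] holds the valid bit, so write it last to avoid exposing a half-built CD */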
	WRITE_ONCE(entry[1], cd[1]);
	WRITE_ONCE(entry[2], cd[2]);
	WRITE_ONCE(entry[3], cd[3]);
	WRITE_ONCE(entry[0], cd[0]);

	clflush_cache_range(entry, IVPU_MMU_CDTAB_ENT_SIZE);

	ivpu_dbg(vdev, MMU, "CDTAB %s entry (SSID=%u, dma=%pad): 0x%llx, 0x%llx, 0x%llx, 0x%llx\n",
		 cd_dma ? "write" : "clear", ssid, &cd_dma, cd[0], cd[1], cd[2], cd[3]);

	mutex_lock(&mmu->lock);
	if (!mmu->on)
		goto unlock;

	ret = ivpu_mmu_cmdq_write_cfgi_all(vdev);
	if (ret)
		goto unlock;

	ret = ivpu_mmu_cmdq_sync(vdev);
unlock:
	mutex_unlock(&mmu->lock);
	return ret;
}

static int ivpu_mmu_cd_add_gbl(struct ivpu_device *vdev)
{
	int ret;

	ret = ivpu_mmu_cd_add(vdev, 0, vdev->gctx.pgtable.pgd_dma);
	if (ret)
		ivpu_err(vdev, "Failed to add global CD entry: %d\n", ret);

	return ret;
}

static int ivpu_mmu_cd_add_user(struct ivpu_device *vdev, u32 ssid, dma_addr_t cd_dma)
{
	int ret;

	if (ssid == 0) {
		ivpu_err(vdev, "Invalid SSID: %u\n", ssid);
		return -EINVAL;
	}

	ret = ivpu_mmu_cd_add(vdev, ssid, cd_dma);
	if (ret)
		ivpu_err(vdev, "Failed to add CD entry SSID=%u: %d\n", ssid, ret);

	return ret;
}

int ivpu_mmu_init(struct ivpu_device *vdev)
{
	struct ivpu_mmu_info *mmu = vdev->mmu;
	int ret;

	ivpu_dbg(vdev, MMU, "Init..\n");

	ivpu_mmu_config_check(vdev);

	ret = drmm_mutex_init(&vdev->drm, &mmu->lock);
	if (ret)
		return ret;

	ret = ivpu_mmu_structs_alloc(vdev);
	if (ret)
		return ret;

	ret = ivpu_mmu_strtab_init(vdev);
	if (ret) {
		ivpu_err(vdev, "Failed to initialize strtab: %d\n", ret);
		return ret;
	}

	ret = ivpu_mmu_cd_add_gbl(vdev);
	if (ret) {
		ivpu_err(vdev, "Failed to add global CD: %d\n", ret);
		return ret;
	}

	ret = ivpu_mmu_enable(vdev);
	if (ret) {
		ivpu_err(vdev, "Failed to resume MMU: %d\n", ret);
		return ret;
	}

	ivpu_dbg(vdev, MMU, "Init done\n");

	return 0;
}

int ivpu_mmu_enable(struct ivpu_device *vdev)
{
	struct ivpu_mmu_info *mmu = vdev->mmu;
	int ret;

	mutex_lock(&mmu->lock);

	mmu->on = true;

	ret = ivpu_mmu_reset(vdev);
	if (ret) {
		ivpu_err(vdev, "Failed to reset MMU: %d\n", ret);
		goto err;
	}

	ret = ivpu_mmu_cmdq_write_cfgi_all(vdev);
	if (ret)
		goto err;

	ret = ivpu_mmu_cmdq_write_tlbi_nsnh_all(vdev);
	if (ret)
		goto err;

	ret = ivpu_mmu_cmdq_sync(vdev);
	if (ret)
		goto err;

	mutex_unlock(&mmu->lock);

	return 0;
err:
	mmu->on = false;
	mutex_unlock(&mmu->lock);
	return ret;
}

void ivpu_mmu_disable(struct ivpu_device *vdev)
{
	struct ivpu_mmu_info *mmu = vdev->mmu;

	mutex_lock(&mmu->lock);
	mmu->on = false;
	mutex_unlock(&mmu->lock);
}

static void ivpu_mmu_dump_event(struct ivpu_device *vdev, u32 *event)
{
	u32 ssid = FIELD_GET(IVPU_MMU_EVT_SSID_MASK, event[0]);
	u32 op = FIELD_GET(IVPU_MMU_EVT_OP_MASK, event[0]);
	u64 fetch_addr = ((u64)event[7]) << 32 | event[6];
	u64 in_addr = ((u64)event[5]) << 32 | event[4];
	u32 sid = event[1];

	ivpu_err(vdev, "MMU EVTQ: 0x%x (%s) SSID: %d SID: %d, e[2] %08x, e[3] %08x, in addr: 0x%llx, fetch addr: 0x%llx\n",
		 op, ivpu_mmu_event_to_str(op), ssid, sid, event[2], event[3], in_addr, fetch_addr);
}

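/* Return the next unprocessed event, or NULL if the event queue is empty */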
static u32 *ivpu_mmu_get_event(struct ivpu_device *vdev)
{
	struct ivpu_mmu_queue *evtq = &vdev->mmu->evtq;
	u32 idx = IVPU_MMU_Q_IDX(evtq->cons);
	u32 *evt = evtq->base + (idx * IVPU_MMU_EVTQ_CMD_SIZE);

	evtq->prod = REGV_RD32(IVPU_MMU_REG_EVTQ_PROD_SEC);
	if (ivpu_mmu_queue_is_empty(evtq))
		return NULL;

	evtq->cons = (evtq->cons + 1) & IVPU_MMU_Q_WRAP_MASK;
	return evt;
}

void ivpu_mmu_irq_evtq_handler(struct ivpu_device *vdev)
{
	u32 *event;
	u32 ssid;

	ivpu_dbg(vdev, IRQ, "MMU event queue\n");

	while ((event = ivpu_mmu_get_event(vdev)) != NULL) {
		ivpu_mmu_dump_event(vdev, event);

		ssid = FIELD_GET(IVPU_MMU_EVT_SSID_MASK, event[0]);
		if (ssid == IVPU_GLOBAL_CONTEXT_MMU_SSID) {
			ivpu_pm_trigger_recovery(vdev, "MMU event");
			return;
		}

		ivpu_mmu_user_context_mark_invalid(vdev, ssid);
		REGV_WR32(IVPU_MMU_REG_EVTQ_CONS_SEC, vdev->mmu->evtq.cons);
	}
}

void ivpu_mmu_evtq_dump(struct ivpu_device *vdev)
{
	u32 *event;

	while ((event = ivpu_mmu_get_event(vdev)) != NULL)
		ivpu_mmu_dump_event(vdev, event);
}

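/*
 * GERROR/GERRORN form a toggle pair: a bit that differs between the two
 * registers marks an active error. Writing the GERROR value back to
 * GERRORN acknowledges everything reported in this pass.
 */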
void ivpu_mmu_irq_gerr_handler(struct ivpu_device *vdev)
{
	u32 gerror_val, gerrorn_val, active;

	ivpu_dbg(vdev, IRQ, "MMU error\n");

	gerror_val = REGV_RD32(IVPU_MMU_REG_GERROR);
	gerrorn_val = REGV_RD32(IVPU_MMU_REG_GERRORN);

	active = gerror_val ^ gerrorn_val;
	if (!(active & IVPU_MMU_GERROR_ERR_MASK))
		return;

	if (REG_TEST_FLD(IVPU_MMU_REG_GERROR, MSI_ABT, active))
		ivpu_warn_ratelimited(vdev, "MMU MSI ABT write aborted\n");

	if (REG_TEST_FLD(IVPU_MMU_REG_GERROR, MSI_PRIQ_ABT, active))
		ivpu_warn_ratelimited(vdev, "MMU PRIQ MSI ABT write aborted\n");

	if (REG_TEST_FLD(IVPU_MMU_REG_GERROR, MSI_EVTQ_ABT, active))
		ivpu_warn_ratelimited(vdev, "MMU EVTQ MSI ABT write aborted\n");

	if (REG_TEST_FLD(IVPU_MMU_REG_GERROR, MSI_CMDQ_ABT, active))
		ivpu_warn_ratelimited(vdev, "MMU CMDQ MSI ABT write aborted\n");

	if (REG_TEST_FLD(IVPU_MMU_REG_GERROR, PRIQ_ABT, active))
		ivpu_err_ratelimited(vdev, "MMU PRIQ write aborted\n");

	if (REG_TEST_FLD(IVPU_MMU_REG_GERROR, EVTQ_ABT, active))
		ivpu_err_ratelimited(vdev, "MMU EVTQ write aborted\n");

	if (REG_TEST_FLD(IVPU_MMU_REG_GERROR, CMDQ, active))
		ivpu_err_ratelimited(vdev, "MMU CMDQ write aborted\n");

	REGV_WR32(IVPU_MMU_REG_GERRORN, gerror_val);
}

int ivpu_mmu_set_pgtable(struct ivpu_device *vdev, int ssid, struct ivpu_mmu_pgtable *pgtable)
{
	return ivpu_mmu_cd_add_user(vdev, ssid, pgtable->pgd_dma);
}

void ivpu_mmu_clear_pgtable(struct ivpu_device *vdev, int ssid)
{
	ivpu_mmu_cd_add_user(vdev, ssid, 0); /* 0 will clear CD entry */
}