v6.8
// SPDX-License-Identifier: GPL-2.0-only
/*
 * CPU-agnostic AMD IO page table allocator.
 *
 * Copyright (C) 2020 Advanced Micro Devices, Inc.
 * Author: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
 */

#define pr_fmt(fmt)     "AMD-Vi: " fmt
#define dev_fmt(fmt)    pr_fmt(fmt)

#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/io-pgtable.h>
#include <linux/kernel.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/dma-mapping.h>

#include <asm/barrier.h>

#include "amd_iommu_types.h"
#include "amd_iommu.h"

static void v1_tlb_flush_all(void *cookie)
{
}

static void v1_tlb_flush_walk(unsigned long iova, size_t size,
			      size_t granule, void *cookie)
{
}

static void v1_tlb_add_page(struct iommu_iotlb_gather *gather,
			    unsigned long iova, size_t granule,
			    void *cookie)
{
}

static const struct iommu_flush_ops v1_flush_ops = {
	.tlb_flush_all	= v1_tlb_flush_all,
	.tlb_flush_walk = v1_tlb_flush_walk,
	.tlb_add_page	= v1_tlb_add_page,
};

/*
 * Helper function to get the first pte of a large mapping
 */
static u64 *first_pte_l7(u64 *pte, unsigned long *page_size,
			 unsigned long *count)
{
	unsigned long pte_mask, pg_size, cnt;
	u64 *fpte;

	pg_size  = PTE_PAGE_SIZE(*pte);
	cnt      = PAGE_SIZE_PTE_COUNT(pg_size);
	pte_mask = ~((cnt << 3) - 1);
	fpte     = (u64 *)(((unsigned long)pte) & pte_mask);

	if (page_size)
		*page_size = pg_size;

	if (count)
		*count = cnt;

	return fpte;
}
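To make the mask arithmetic concrete, here is a minimal worked example (illustrative only, not part of the file; it assumes the PAGE_SIZE_PTE_COUNT() definition from amd_iommu_types.h, where an N-byte large page is replicated across 1 << ((__ffs(N) - 12) % 9) PTEs):

/*
 * Example: a 32 KiB mapping is built from 8 replicated 4 KiB PTEs,
 * since PAGE_SIZE_PTE_COUNT(SZ_32K) == 1 << ((15 - 12) % 9) == 8.
 * With 8-byte PTEs, cnt << 3 == 64, so pte_mask == ~63UL and
 * first_pte_l7() rounds the PTE pointer down to the 64-byte-aligned
 * start of the replicated group:
 *
 *   pte  == table + 0x1cb   (byte offset 0xe58)
 *   fpte == table + 0x1c8   (byte offset 0xe58 & ~63UL == 0xe40)
 */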

/****************************************************************************
 *
 * The functions below are used to create the page table mappings for
 * unity mapped regions.
 *
 ****************************************************************************/

static void free_pt_page(u64 *pt, struct list_head *freelist)
{
	struct page *p = virt_to_page(pt);

	list_add_tail(&p->lru, freelist);
}

static void free_pt_lvl(u64 *pt, struct list_head *freelist, int lvl)
{
	u64 *p;
	int i;

	for (i = 0; i < 512; ++i) {
		/* PTE present? */
		if (!IOMMU_PTE_PRESENT(pt[i]))
			continue;

		/* Large PTE? */
		if (PM_PTE_LEVEL(pt[i]) == 0 ||
		    PM_PTE_LEVEL(pt[i]) == 7)
			continue;

		/*
		 * Free the next level. No need to look at l1 tables here since
		 * they can only contain leaf PTEs; just free them directly.
		 */
		p = IOMMU_PTE_PAGE(pt[i]);
		if (lvl > 2)
			free_pt_lvl(p, freelist, lvl - 1);
		else
			free_pt_page(p, freelist);
	}

	free_pt_page(pt, freelist);
}

static void free_sub_pt(u64 *root, int mode, struct list_head *freelist)
{
	switch (mode) {
	case PAGE_MODE_NONE:
	case PAGE_MODE_7_LEVEL:
		break;
	case PAGE_MODE_1_LEVEL:
		free_pt_page(root, freelist);
		break;
	case PAGE_MODE_2_LEVEL:
	case PAGE_MODE_3_LEVEL:
	case PAGE_MODE_4_LEVEL:
	case PAGE_MODE_5_LEVEL:
	case PAGE_MODE_6_LEVEL:
		free_pt_lvl(root, freelist, mode);
		break;
	default:
		BUG();
	}
}

void amd_iommu_domain_set_pgtable(struct protection_domain *domain,
				  u64 *root, int mode)
{
	u64 pt_root;

	/* lowest 3 bits encode pgtable mode */
	pt_root = mode & 7;
	pt_root |= (u64)root;

	amd_iommu_domain_set_pt_root(domain, pt_root);
}
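A sketch of the inverse operation may help; pt_root_decode() is a hypothetical helper, not part of this file. It relies only on page-table pages being 4 KiB aligned, which leaves the low 12 bits of the root address zero, so the lowest 3 are free to carry the mode:

/* Hypothetical decoding helper (illustration only): the inverse of the
 * packing done in amd_iommu_domain_set_pgtable() above. */
static void pt_root_decode(u64 pt_root, u64 **root, int *mode)
{
	*mode = pt_root & 7;			/* lowest 3 bits: pgtable mode */
	*root = (u64 *)(pt_root & PAGE_MASK);	/* the rest: root table address */
}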

/*
 * This function is used to add another level to an IO page table. Adding
 * another level increases the size of the address space by 9 bits to a size up
 * to 64 bits.
 */
static bool increase_address_space(struct protection_domain *domain,
				   unsigned long address,
				   gfp_t gfp)
{
	unsigned long flags;
	bool ret = true;
	u64 *pte;

	pte = alloc_pgtable_page(domain->nid, gfp);
	if (!pte)
		return false;

	spin_lock_irqsave(&domain->lock, flags);

	if (address <= PM_LEVEL_SIZE(domain->iop.mode))
		goto out;

	ret = false;
	if (WARN_ON_ONCE(domain->iop.mode == PAGE_MODE_6_LEVEL))
		goto out;

	*pte = PM_LEVEL_PDE(domain->iop.mode, iommu_virt_to_phys(domain->iop.root));

	domain->iop.root  = pte;
	domain->iop.mode += 1;
	amd_iommu_update_and_flush_device_table(domain);
	amd_iommu_domain_flush_complete(domain);

	/*
	 * Device Table needs to be updated and flushed before the new root can
	 * be published.
	 */
	amd_iommu_domain_set_pgtable(domain, pte, domain->iop.mode);

	pte = NULL;
	ret = true;

out:
	spin_unlock_irqrestore(&domain->lock, flags);
	free_page((unsigned long)pte);

	return ret;
}
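The 9 bits per level come from 512-entry tables (4 KiB pages holding 8-byte PTEs). A minimal sketch of the resulting limits, assuming PM_LEVEL_SIZE() uses the 12 + 9 * mode shift from amd_iommu_types.h:

/* Illustration only: the maximum IOVA reachable at a given page-table
 * mode; each added level multiplies the addressable range by 512. */
static inline u64 max_iova_for_mode(int mode)
{
	return (mode < 6) ? (1ULL << (12 + 9 * mode)) - 1 : ~0ULL;
}
/* mode 3 -> 2^39 - 1 (512 GiB), mode 4 -> 2^48 - 1 (256 TiB),
 * mode 5 -> 2^57 - 1 (128 PiB), mode 6 -> the full 64-bit space. */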

static u64 *alloc_pte(struct protection_domain *domain,
		      unsigned long address,
		      unsigned long page_size,
		      u64 **pte_page,
		      gfp_t gfp,
		      bool *updated)
{
	int level, end_lvl;
	u64 *pte, *page;

	BUG_ON(!is_power_of_2(page_size));

	while (address > PM_LEVEL_SIZE(domain->iop.mode)) {
		/*
		 * Return an error if there is no memory to update the
		 * page-table.
		 */
		if (!increase_address_space(domain, address, gfp))
			return NULL;
	}

	level   = domain->iop.mode - 1;
	pte     = &domain->iop.root[PM_LEVEL_INDEX(level, address)];
	address = PAGE_SIZE_ALIGN(address, page_size);
	end_lvl = PAGE_SIZE_LEVEL(page_size);

	while (level > end_lvl) {
		u64 __pte, __npte;
		int pte_level;

		__pte     = *pte;
		pte_level = PM_PTE_LEVEL(__pte);

		/*
		 * If we replace a series of large PTEs, we need
		 * to tear down all of them.
		 */
		if (IOMMU_PTE_PRESENT(__pte) &&
		    pte_level == PAGE_MODE_7_LEVEL) {
			unsigned long count, i;
			u64 *lpte;

			lpte = first_pte_l7(pte, NULL, &count);

			/*
			 * Unmap the replicated PTEs that still match the
			 * original large mapping
			 */
			for (i = 0; i < count; ++i)
				cmpxchg64(&lpte[i], __pte, 0ULL);

			*updated = true;
			continue;
		}

		if (!IOMMU_PTE_PRESENT(__pte) ||
		    pte_level == PAGE_MODE_NONE) {
			page = alloc_pgtable_page(domain->nid, gfp);

			if (!page)
				return NULL;

			__npte = PM_LEVEL_PDE(level, iommu_virt_to_phys(page));

			/* pte could have been changed somewhere. */
			if (!try_cmpxchg64(pte, &__pte, __npte))
				free_page((unsigned long)page);
			else if (IOMMU_PTE_PRESENT(__pte))
				*updated = true;

			continue;
		}

		/* No level skipping support yet */
		if (pte_level != level)
			return NULL;

		level -= 1;

		pte = IOMMU_PTE_PAGE(__pte);

		if (pte_page && level == end_lvl)
			*pte_page = pte;

		pte = &pte[PM_LEVEL_INDEX(level, address)];
	}

	return pte;
}
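The table-install step in alloc_pte() is a lock-free publish-or-retry pattern. The sketch below restates it in isolation (install_pde() is hypothetical, not in this file); note that try_cmpxchg64() updates its second argument with the current value on failure, which alloc_pte() relies on when it loops with continue:

/* Hypothetical restatement of the install step: publish a new
 * lower-level table only if the PTE still holds the value we read. */
static bool install_pde(u64 *pte, u64 seen, u64 new_pde, u64 *new_table)
{
	if (try_cmpxchg64(pte, &seen, new_pde))
		return true;			/* our table is now visible */
	free_page((unsigned long)new_table);	/* lost the race; the winner's
						 * table will be walked instead */
	return false;
}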

/*
 * This function checks if there is a PTE for a given dma address. If
 * there is one, it returns the pointer to it.
 */
static u64 *fetch_pte(struct amd_io_pgtable *pgtable,
		      unsigned long address,
		      unsigned long *page_size)
{
	int level;
	u64 *pte;

	*page_size = 0;

	if (address > PM_LEVEL_SIZE(pgtable->mode))
		return NULL;

	level	   = pgtable->mode - 1;
	pte	   = &pgtable->root[PM_LEVEL_INDEX(level, address)];
	*page_size = PTE_LEVEL_PAGE_SIZE(level);

	while (level > 0) {

		/* Not Present */
		if (!IOMMU_PTE_PRESENT(*pte))
			return NULL;

		/* Large PTE */
		if (PM_PTE_LEVEL(*pte) == PAGE_MODE_7_LEVEL ||
		    PM_PTE_LEVEL(*pte) == PAGE_MODE_NONE)
			break;

		/* No level skipping support yet */
		if (PM_PTE_LEVEL(*pte) != level)
			return NULL;

		level -= 1;

		/* Walk to the next level */
		pte	   = IOMMU_PTE_PAGE(*pte);
		pte	   = &pte[PM_LEVEL_INDEX(level, address)];
		*page_size = PTE_LEVEL_PAGE_SIZE(level);
	}

	/*
	 * If we have a series of large PTEs, make
	 * sure to return a pointer to the first one.
	 */
	if (PM_PTE_LEVEL(*pte) == PAGE_MODE_7_LEVEL)
		pte = first_pte_l7(pte, page_size, NULL);

	return pte;
}

static void free_clear_pte(u64 *pte, u64 pteval, struct list_head *freelist)
{
	u64 *pt;
	int mode;

	while (!try_cmpxchg64(pte, &pteval, 0))
		pr_warn("AMD-Vi: IOMMU pte changed since we read it\n");

	if (!IOMMU_PTE_PRESENT(pteval))
		return;

	pt   = IOMMU_PTE_PAGE(pteval);
	mode = IOMMU_PTE_MODE(pteval);

	free_sub_pt(pt, mode, freelist);
}

/*
 * Generic mapping function. It maps a physical address into a DMA
 * address space. It allocates the page table pages if necessary.
 * In the future it can be extended to a generic mapping function
 * supporting all features of AMD IOMMU page tables like level skipping
 * and full 64 bit address spaces.
 */
static int iommu_v1_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
			      phys_addr_t paddr, size_t pgsize, size_t pgcount,
			      int prot, gfp_t gfp, size_t *mapped)
{
	struct protection_domain *dom = io_pgtable_ops_to_domain(ops);
	LIST_HEAD(freelist);
	bool updated = false;
	u64 __pte, *pte;
	int ret, i, count;
	size_t size = pgcount << __ffs(pgsize);
	unsigned long o_iova = iova;

	BUG_ON(!IS_ALIGNED(iova, pgsize));
	BUG_ON(!IS_ALIGNED(paddr, pgsize));

	ret = -EINVAL;
	if (!(prot & IOMMU_PROT_MASK))
		goto out;

	while (pgcount > 0) {
		count = PAGE_SIZE_PTE_COUNT(pgsize);
		pte   = alloc_pte(dom, iova, pgsize, NULL, gfp, &updated);

		ret = -ENOMEM;
		if (!pte)
			goto out;

		for (i = 0; i < count; ++i)
			free_clear_pte(&pte[i], pte[i], &freelist);

		if (!list_empty(&freelist))
			updated = true;

		if (count > 1) {
			__pte = PAGE_SIZE_PTE(__sme_set(paddr), pgsize);
			__pte |= PM_LEVEL_ENC(7) | IOMMU_PTE_PR | IOMMU_PTE_FC;
		} else
			__pte = __sme_set(paddr) | IOMMU_PTE_PR | IOMMU_PTE_FC;

		if (prot & IOMMU_PROT_IR)
			__pte |= IOMMU_PTE_IR;
		if (prot & IOMMU_PROT_IW)
			__pte |= IOMMU_PTE_IW;

		for (i = 0; i < count; ++i)
			pte[i] = __pte;

		iova  += pgsize;
		paddr += pgsize;
		pgcount--;
		if (mapped)
			*mapped += pgsize;
	}

	ret = 0;

out:
	if (updated) {
		unsigned long flags;

		spin_lock_irqsave(&dom->lock, flags);
		/*
		 * Flush domain TLB(s) and wait for completion. Any Device-Table
		 * Updates and flushing already happened in
		 * increase_address_space().
		 */
		amd_iommu_domain_flush_pages(dom, o_iova, size);
		spin_unlock_irqrestore(&dom->lock, flags);
	}

	/* Everything flushed out, free pages now */
	put_pages_list(&freelist);

	return ret;
}

static unsigned long iommu_v1_unmap_pages(struct io_pgtable_ops *ops,
					  unsigned long iova,
					  size_t pgsize, size_t pgcount,
					  struct iommu_iotlb_gather *gather)
{
	struct amd_io_pgtable *pgtable = io_pgtable_ops_to_data(ops);
	unsigned long long unmapped;
	unsigned long unmap_size;
	u64 *pte;
	size_t size = pgcount << __ffs(pgsize);

	BUG_ON(!is_power_of_2(pgsize));

	unmapped = 0;

	while (unmapped < size) {
		pte = fetch_pte(pgtable, iova, &unmap_size);
		if (pte) {
			int i, count;

			count = PAGE_SIZE_PTE_COUNT(unmap_size);
			for (i = 0; i < count; i++)
				pte[i] = 0ULL;
		} else {
			return unmapped;
		}

		iova = (iova & ~(unmap_size - 1)) + unmap_size;
		unmapped += unmap_size;
	}

	return unmapped;
}
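Note that the walk advances by the size of the mapping actually found, not by pgsize. A worked example with illustrative numbers:

/*
 * If iova == 0x201000 falls inside a 2 MiB mapping, fetch_pte() reports
 * unmap_size == SZ_2M and the whole replicated PTE group is cleared, so
 * the walk resumes at the next 2 MiB boundary:
 *   (0x201000 & ~(SZ_2M - 1)) + SZ_2M == 0x400000
 */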

static phys_addr_t iommu_v1_iova_to_phys(struct io_pgtable_ops *ops, unsigned long iova)
{
	struct amd_io_pgtable *pgtable = io_pgtable_ops_to_data(ops);
	unsigned long offset_mask, pte_pgsize;
	u64 *pte, __pte;

	pte = fetch_pte(pgtable, iova, &pte_pgsize);

	if (!pte || !IOMMU_PTE_PRESENT(*pte))
		return 0;

	offset_mask = pte_pgsize - 1;
	__pte	    = __sme_clr(*pte & PM_ADDR_MASK);

	return (__pte & ~offset_mask) | (iova & offset_mask);
}
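A worked example with illustrative values: for a 2 MiB mapping, fetch_pte() returns pte_pgsize == SZ_2M, so offset_mask == 0x1fffff and the low 21 bits of the IOVA pass through:

/*
 *   pte frame == 0x80000000, iova == 0x1234567
 *   phys = (0x80000000 & ~0x1fffff) | (0x1234567 & 0x1fffff)
 *        = 0x80000000 | 0x34567
 *        = 0x80034567
 */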

static bool pte_test_and_clear_dirty(u64 *ptep, unsigned long size,
				     unsigned long flags)
{
	bool test_only = flags & IOMMU_DIRTY_NO_CLEAR;
	bool dirty = false;
	int i, count;

	/*
	 * 2.2.3.2 Host Dirty Support
	 * When a non-default page size is used, software must OR the
	 * Dirty bits in all of the replicated host PTEs used to map
	 * the page. The IOMMU does not guarantee the Dirty bits are
	 * set in all of the replicated PTEs. Any portion of the page
	 * may have been written even if the Dirty bit is set in only
	 * one of the replicated PTEs.
	 */
	count = PAGE_SIZE_PTE_COUNT(size);
	for (i = 0; i < count && test_only; i++) {
		if (test_bit(IOMMU_PTE_HD_BIT, (unsigned long *)&ptep[i])) {
			dirty = true;
			break;
		}
	}

	for (i = 0; i < count && !test_only; i++) {
		if (test_and_clear_bit(IOMMU_PTE_HD_BIT,
				       (unsigned long *)&ptep[i])) {
			dirty = true;
		}
	}

	return dirty;
}

static int iommu_v1_read_and_clear_dirty(struct io_pgtable_ops *ops,
					 unsigned long iova, size_t size,
					 unsigned long flags,
					 struct iommu_dirty_bitmap *dirty)
{
	struct amd_io_pgtable *pgtable = io_pgtable_ops_to_data(ops);
	unsigned long end = iova + size - 1;

	do {
		unsigned long pgsize = 0;
		u64 *ptep, pte;

		ptep = fetch_pte(pgtable, iova, &pgsize);
		if (ptep)
			pte = READ_ONCE(*ptep);
		if (!ptep || !IOMMU_PTE_PRESENT(pte)) {
			pgsize = pgsize ?: PTE_LEVEL_PAGE_SIZE(0);
			iova += pgsize;
			continue;
		}

		/*
		 * Mark the whole IOVA range as dirty even if only one of
		 * the replicated PTEs was marked dirty.
		 */
		if (pte_test_and_clear_dirty(ptep, pgsize, flags))
			iommu_dirty_bitmap_record(dirty, iova, pgsize);
		iova += pgsize;
	} while (iova < end);

	return 0;
}

/*
 * ----------------------------------------------------
 */
static void v1_free_pgtable(struct io_pgtable *iop)
{
	struct amd_io_pgtable *pgtable = container_of(iop, struct amd_io_pgtable, iop);
	struct protection_domain *dom;
	LIST_HEAD(freelist);

	if (pgtable->mode == PAGE_MODE_NONE)
		return;

	dom = container_of(pgtable, struct protection_domain, iop);

	/* Page-table is not visible to IOMMU anymore, so free it */
	BUG_ON(pgtable->mode < PAGE_MODE_NONE ||
	       pgtable->mode > PAGE_MODE_6_LEVEL);

	free_sub_pt(pgtable->root, pgtable->mode, &freelist);

	/* Update data structure */
	amd_iommu_domain_clr_pt_root(dom);

	/* Make changes visible to IOMMUs */
	amd_iommu_domain_update(dom);

	put_pages_list(&freelist);
}

static struct io_pgtable *v1_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
{
	struct amd_io_pgtable *pgtable = io_pgtable_cfg_to_data(cfg);

	cfg->pgsize_bitmap  = AMD_IOMMU_PGSIZES,
	cfg->ias            = IOMMU_IN_ADDR_BIT_SIZE,
	cfg->oas            = IOMMU_OUT_ADDR_BIT_SIZE,
	cfg->tlb            = &v1_flush_ops;

	pgtable->iop.ops.map_pages    = iommu_v1_map_pages;
	pgtable->iop.ops.unmap_pages  = iommu_v1_unmap_pages;
	pgtable->iop.ops.iova_to_phys = iommu_v1_iova_to_phys;
	pgtable->iop.ops.read_and_clear_dirty = iommu_v1_read_and_clear_dirty;

	return &pgtable->iop;
}

struct io_pgtable_init_fns io_pgtable_amd_iommu_v1_init_fns = {
	.alloc	= v1_alloc_pgtable,
	.free	= v1_free_pgtable,
};
v6.13.7
// SPDX-License-Identifier: GPL-2.0-only
/*
 * CPU-agnostic AMD IO page table allocator.
 *
 * Copyright (C) 2020 Advanced Micro Devices, Inc.
 * Author: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
 */

#define pr_fmt(fmt)     "AMD-Vi: " fmt
#define dev_fmt(fmt)    pr_fmt(fmt)

#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/io-pgtable.h>
#include <linux/kernel.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/dma-mapping.h>

#include <asm/barrier.h>

#include "amd_iommu_types.h"
#include "amd_iommu.h"
#include "../iommu-pages.h"

/*
 * Helper function to get the first pte of a large mapping
 */
static u64 *first_pte_l7(u64 *pte, unsigned long *page_size,
			 unsigned long *count)
{
	unsigned long pte_mask, pg_size, cnt;
	u64 *fpte;

	pg_size  = PTE_PAGE_SIZE(*pte);
	cnt      = PAGE_SIZE_PTE_COUNT(pg_size);
	pte_mask = ~((cnt << 3) - 1);
	fpte     = (u64 *)(((unsigned long)pte) & pte_mask);

	if (page_size)
		*page_size = pg_size;

	if (count)
		*count = cnt;

	return fpte;
}

/****************************************************************************
 *
 * The functions below are used to create the page table mappings for
 * unity mapped regions.
 *
 ****************************************************************************/

static void free_pt_page(u64 *pt, struct list_head *freelist)
{
	struct page *p = virt_to_page(pt);

	list_add_tail(&p->lru, freelist);
}

static void free_pt_lvl(u64 *pt, struct list_head *freelist, int lvl)
{
	u64 *p;
	int i;

	for (i = 0; i < 512; ++i) {
		/* PTE present? */
		if (!IOMMU_PTE_PRESENT(pt[i]))
			continue;

		/* Large PTE? */
		if (PM_PTE_LEVEL(pt[i]) == 0 ||
		    PM_PTE_LEVEL(pt[i]) == 7)
			continue;

		/*
		 * Free the next level. No need to look at l1 tables here since
		 * they can only contain leaf PTEs; just free them directly.
		 */
		p = IOMMU_PTE_PAGE(pt[i]);
		if (lvl > 2)
			free_pt_lvl(p, freelist, lvl - 1);
		else
			free_pt_page(p, freelist);
	}

	free_pt_page(pt, freelist);
}

static void free_sub_pt(u64 *root, int mode, struct list_head *freelist)
{
	switch (mode) {
	case PAGE_MODE_NONE:
	case PAGE_MODE_7_LEVEL:
		break;
	case PAGE_MODE_1_LEVEL:
		free_pt_page(root, freelist);
		break;
	case PAGE_MODE_2_LEVEL:
	case PAGE_MODE_3_LEVEL:
	case PAGE_MODE_4_LEVEL:
	case PAGE_MODE_5_LEVEL:
	case PAGE_MODE_6_LEVEL:
		free_pt_lvl(root, freelist, mode);
		break;
	default:
		BUG();
	}
}

/*
 * This function is used to add another level to an IO page table. Adding
 * another level increases the size of the address space by 9 bits to a size up
 * to 64 bits.
 */
static bool increase_address_space(struct amd_io_pgtable *pgtable,
				   unsigned long address,
				   unsigned int page_size_level,
				   gfp_t gfp)
{
	struct io_pgtable_cfg *cfg = &pgtable->pgtbl.cfg;
	struct protection_domain *domain =
		container_of(pgtable, struct protection_domain, iop);
	unsigned long flags;
	bool ret = true;
	u64 *pte;

	pte = iommu_alloc_page_node(cfg->amd.nid, gfp);
	if (!pte)
		return false;

	spin_lock_irqsave(&domain->lock, flags);

	if (address <= PM_LEVEL_SIZE(pgtable->mode) &&
	    pgtable->mode - 1 >= page_size_level)
		goto out;

	ret = false;
	if (WARN_ON_ONCE(pgtable->mode == PAGE_MODE_6_LEVEL))
		goto out;

	*pte = PM_LEVEL_PDE(pgtable->mode, iommu_virt_to_phys(pgtable->root));

	pgtable->root  = pte;
	pgtable->mode += 1;
	amd_iommu_update_and_flush_device_table(domain);

	pte = NULL;
	ret = true;

out:
	spin_unlock_irqrestore(&domain->lock, flags);
	iommu_free_page(pte);

	return ret;
}
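Relative to the v6.8 version above, the extra page_size_level argument makes the table grow even when the IOVA itself already fits: a large page of size S can only be written at level PAGE_SIZE_LEVEL(S), so the walk must be able to stop there. Assuming the PAGE_SIZE_LEVEL() definition from amd_iommu_types.h, (__ffs(pgsize) - 12) / 9:

/* Illustration only: the level at which a PTE of a given size lives,
 * and the minimum mode required before alloc_pte() can write it.
 *   4 KiB  -> level 0  (mode >= 1)
 *   2 MiB  -> level 1  (mode >= 2)
 *   1 GiB  -> level 2  (mode >= 3)
 */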

static u64 *alloc_pte(struct amd_io_pgtable *pgtable,
		      unsigned long address,
		      unsigned long page_size,
		      u64 **pte_page,
		      gfp_t gfp,
		      bool *updated)
{
	unsigned long last_addr = address + (page_size - 1);
	struct io_pgtable_cfg *cfg = &pgtable->pgtbl.cfg;
	int level, end_lvl;
	u64 *pte, *page;

	BUG_ON(!is_power_of_2(page_size));

	while (last_addr > PM_LEVEL_SIZE(pgtable->mode) ||
	       pgtable->mode - 1 < PAGE_SIZE_LEVEL(page_size)) {
		/*
		 * Return an error if there is no memory to update the
		 * page-table.
		 */
		if (!increase_address_space(pgtable, last_addr,
					    PAGE_SIZE_LEVEL(page_size), gfp))
			return NULL;
	}

	level   = pgtable->mode - 1;
	pte     = &pgtable->root[PM_LEVEL_INDEX(level, address)];
	address = PAGE_SIZE_ALIGN(address, page_size);
	end_lvl = PAGE_SIZE_LEVEL(page_size);

	while (level > end_lvl) {
		u64 __pte, __npte;
		int pte_level;

		__pte     = *pte;
		pte_level = PM_PTE_LEVEL(__pte);

		/*
		 * If we replace a series of large PTEs, we need
		 * to tear down all of them.
		 */
		if (IOMMU_PTE_PRESENT(__pte) &&
		    pte_level == PAGE_MODE_7_LEVEL) {
			unsigned long count, i;
			u64 *lpte;

			lpte = first_pte_l7(pte, NULL, &count);

			/*
			 * Unmap the replicated PTEs that still match the
			 * original large mapping
			 */
			for (i = 0; i < count; ++i)
				cmpxchg64(&lpte[i], __pte, 0ULL);

			*updated = true;
			continue;
		}

		if (!IOMMU_PTE_PRESENT(__pte) ||
		    pte_level == PAGE_MODE_NONE) {
			page = iommu_alloc_page_node(cfg->amd.nid, gfp);

			if (!page)
				return NULL;

			__npte = PM_LEVEL_PDE(level, iommu_virt_to_phys(page));

			/* pte could have been changed somewhere. */
			if (!try_cmpxchg64(pte, &__pte, __npte))
				iommu_free_page(page);
			else if (IOMMU_PTE_PRESENT(__pte))
				*updated = true;

			continue;
		}

		/* No level skipping support yet */
		if (pte_level != level)
			return NULL;

		level -= 1;

		pte = IOMMU_PTE_PAGE(__pte);

		if (pte_page && level == end_lvl)
			*pte_page = pte;

		pte = &pte[PM_LEVEL_INDEX(level, address)];
	}

	return pte;
}

/*
 * This function checks if there is a PTE for a given dma address. If
 * there is one, it returns the pointer to it.
 */
static u64 *fetch_pte(struct amd_io_pgtable *pgtable,
		      unsigned long address,
		      unsigned long *page_size)
{
	int level;
	u64 *pte;

	*page_size = 0;

	if (address > PM_LEVEL_SIZE(pgtable->mode))
		return NULL;

	level	   = pgtable->mode - 1;
	pte	   = &pgtable->root[PM_LEVEL_INDEX(level, address)];
	*page_size = PTE_LEVEL_PAGE_SIZE(level);

	while (level > 0) {

		/* Not Present */
		if (!IOMMU_PTE_PRESENT(*pte))
			return NULL;

		/* Large PTE */
		if (PM_PTE_LEVEL(*pte) == PAGE_MODE_7_LEVEL ||
		    PM_PTE_LEVEL(*pte) == PAGE_MODE_NONE)
			break;

		/* No level skipping support yet */
		if (PM_PTE_LEVEL(*pte) != level)
			return NULL;

		level -= 1;

		/* Walk to the next level */
		pte	   = IOMMU_PTE_PAGE(*pte);
		pte	   = &pte[PM_LEVEL_INDEX(level, address)];
		*page_size = PTE_LEVEL_PAGE_SIZE(level);
	}

	/*
	 * If we have a series of large PTEs, make
	 * sure to return a pointer to the first one.
	 */
	if (PM_PTE_LEVEL(*pte) == PAGE_MODE_7_LEVEL)
		pte = first_pte_l7(pte, page_size, NULL);

	return pte;
}

static void free_clear_pte(u64 *pte, u64 pteval, struct list_head *freelist)
{
	u64 *pt;
	int mode;

	while (!try_cmpxchg64(pte, &pteval, 0))
		pr_warn("AMD-Vi: IOMMU pte changed since we read it\n");

	if (!IOMMU_PTE_PRESENT(pteval))
		return;

	pt   = IOMMU_PTE_PAGE(pteval);
	mode = IOMMU_PTE_MODE(pteval);

	free_sub_pt(pt, mode, freelist);
}

/*
 * Generic mapping function. It maps a physical address into a DMA
 * address space. It allocates the page table pages if necessary.
 * In the future it can be extended to a generic mapping function
 * supporting all features of AMD IOMMU page tables like level skipping
 * and full 64 bit address spaces.
 */
static int iommu_v1_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
			      phys_addr_t paddr, size_t pgsize, size_t pgcount,
			      int prot, gfp_t gfp, size_t *mapped)
{
	struct amd_io_pgtable *pgtable = io_pgtable_ops_to_data(ops);
	LIST_HEAD(freelist);
	bool updated = false;
	u64 __pte, *pte;
	int ret, i, count;
	size_t size = pgcount << __ffs(pgsize);
	unsigned long o_iova = iova;

	BUG_ON(!IS_ALIGNED(iova, pgsize));
	BUG_ON(!IS_ALIGNED(paddr, pgsize));

	ret = -EINVAL;
	if (!(prot & IOMMU_PROT_MASK))
		goto out;

	while (pgcount > 0) {
		count = PAGE_SIZE_PTE_COUNT(pgsize);
		pte   = alloc_pte(pgtable, iova, pgsize, NULL, gfp, &updated);

		ret = -ENOMEM;
		if (!pte)
			goto out;

		for (i = 0; i < count; ++i)
			free_clear_pte(&pte[i], pte[i], &freelist);

		if (!list_empty(&freelist))
			updated = true;

		if (count > 1) {
			__pte = PAGE_SIZE_PTE(__sme_set(paddr), pgsize);
			__pte |= PM_LEVEL_ENC(7) | IOMMU_PTE_PR | IOMMU_PTE_FC;
		} else
			__pte = __sme_set(paddr) | IOMMU_PTE_PR | IOMMU_PTE_FC;

		if (prot & IOMMU_PROT_IR)
			__pte |= IOMMU_PTE_IR;
		if (prot & IOMMU_PROT_IW)
			__pte |= IOMMU_PTE_IW;

		for (i = 0; i < count; ++i)
			pte[i] = __pte;

		iova  += pgsize;
		paddr += pgsize;
		pgcount--;
		if (mapped)
			*mapped += pgsize;
	}

	ret = 0;

out:
	if (updated) {
		struct protection_domain *dom = io_pgtable_ops_to_domain(ops);
		unsigned long flags;

		spin_lock_irqsave(&dom->lock, flags);
		/*
		 * Flush domain TLB(s) and wait for completion. Any Device-Table
		 * Updates and flushing already happened in
		 * increase_address_space().
		 */
		amd_iommu_domain_flush_pages(dom, o_iova, size);
		spin_unlock_irqrestore(&dom->lock, flags);
	}

	/* Everything flushed out, free pages now */
	iommu_put_pages_list(&freelist);

	return ret;
}

static unsigned long iommu_v1_unmap_pages(struct io_pgtable_ops *ops,
					  unsigned long iova,
					  size_t pgsize, size_t pgcount,
					  struct iommu_iotlb_gather *gather)
{
	struct amd_io_pgtable *pgtable = io_pgtable_ops_to_data(ops);
	unsigned long long unmapped;
	unsigned long unmap_size;
	u64 *pte;
	size_t size = pgcount << __ffs(pgsize);

	BUG_ON(!is_power_of_2(pgsize));

	unmapped = 0;

	while (unmapped < size) {
		pte = fetch_pte(pgtable, iova, &unmap_size);
		if (pte) {
			int i, count;

			count = PAGE_SIZE_PTE_COUNT(unmap_size);
			for (i = 0; i < count; i++)
				pte[i] = 0ULL;
		} else {
			return unmapped;
		}

		iova = (iova & ~(unmap_size - 1)) + unmap_size;
		unmapped += unmap_size;
	}

	return unmapped;
}

static phys_addr_t iommu_v1_iova_to_phys(struct io_pgtable_ops *ops, unsigned long iova)
{
	struct amd_io_pgtable *pgtable = io_pgtable_ops_to_data(ops);
	unsigned long offset_mask, pte_pgsize;
	u64 *pte, __pte;

	pte = fetch_pte(pgtable, iova, &pte_pgsize);

	if (!pte || !IOMMU_PTE_PRESENT(*pte))
		return 0;

	offset_mask = pte_pgsize - 1;
	__pte	    = __sme_clr(*pte & PM_ADDR_MASK);

	return (__pte & ~offset_mask) | (iova & offset_mask);
}

static bool pte_test_and_clear_dirty(u64 *ptep, unsigned long size,
				     unsigned long flags)
{
	bool test_only = flags & IOMMU_DIRTY_NO_CLEAR;
	bool dirty = false;
	int i, count;

	/*
	 * 2.2.3.2 Host Dirty Support
	 * When a non-default page size is used, software must OR the
	 * Dirty bits in all of the replicated host PTEs used to map
	 * the page. The IOMMU does not guarantee the Dirty bits are
	 * set in all of the replicated PTEs. Any portion of the page
	 * may have been written even if the Dirty bit is set in only
	 * one of the replicated PTEs.
	 */
	count = PAGE_SIZE_PTE_COUNT(size);
	for (i = 0; i < count && test_only; i++) {
		if (test_bit(IOMMU_PTE_HD_BIT, (unsigned long *)&ptep[i])) {
			dirty = true;
			break;
		}
	}

	for (i = 0; i < count && !test_only; i++) {
		if (test_and_clear_bit(IOMMU_PTE_HD_BIT,
				       (unsigned long *)&ptep[i])) {
			dirty = true;
		}
	}

	return dirty;
}

static int iommu_v1_read_and_clear_dirty(struct io_pgtable_ops *ops,
					 unsigned long iova, size_t size,
					 unsigned long flags,
					 struct iommu_dirty_bitmap *dirty)
{
	struct amd_io_pgtable *pgtable = io_pgtable_ops_to_data(ops);
	unsigned long end = iova + size - 1;

	do {
		unsigned long pgsize = 0;
		u64 *ptep, pte;

		ptep = fetch_pte(pgtable, iova, &pgsize);
		if (ptep)
			pte = READ_ONCE(*ptep);
		if (!ptep || !IOMMU_PTE_PRESENT(pte)) {
			pgsize = pgsize ?: PTE_LEVEL_PAGE_SIZE(0);
			iova += pgsize;
			continue;
		}

		/*
		 * Mark the whole IOVA range as dirty even if only one of
		 * the replicated PTEs was marked dirty.
		 */
		if (pte_test_and_clear_dirty(ptep, pgsize, flags))
			iommu_dirty_bitmap_record(dirty, iova, pgsize);
		iova += pgsize;
	} while (iova < end);

	return 0;
}

/*
 * ----------------------------------------------------
 */
static void v1_free_pgtable(struct io_pgtable *iop)
{
	struct amd_io_pgtable *pgtable = container_of(iop, struct amd_io_pgtable, pgtbl);
	LIST_HEAD(freelist);

	if (pgtable->mode == PAGE_MODE_NONE)
		return;

	/* Page-table is not visible to IOMMU anymore, so free it */
	BUG_ON(pgtable->mode < PAGE_MODE_NONE ||
	       pgtable->mode > PAGE_MODE_6_LEVEL);

	free_sub_pt(pgtable->root, pgtable->mode, &freelist);
	iommu_put_pages_list(&freelist);
}

static struct io_pgtable *v1_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
{
	struct amd_io_pgtable *pgtable = io_pgtable_cfg_to_data(cfg);

	pgtable->root = iommu_alloc_page_node(cfg->amd.nid, GFP_KERNEL);
	if (!pgtable->root)
		return NULL;
	pgtable->mode = PAGE_MODE_3_LEVEL;

	cfg->pgsize_bitmap  = amd_iommu_pgsize_bitmap;
	cfg->ias            = IOMMU_IN_ADDR_BIT_SIZE;
	cfg->oas            = IOMMU_OUT_ADDR_BIT_SIZE;

	pgtable->pgtbl.ops.map_pages    = iommu_v1_map_pages;
	pgtable->pgtbl.ops.unmap_pages  = iommu_v1_unmap_pages;
	pgtable->pgtbl.ops.iova_to_phys = iommu_v1_iova_to_phys;
	pgtable->pgtbl.ops.read_and_clear_dirty = iommu_v1_read_and_clear_dirty;

	return &pgtable->pgtbl;
}
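For context, a usage sketch (hypothetical caller, not from this file): v1_alloc_pgtable() is not called directly but reached through the generic io-pgtable framework, which dispatches on the AMD_IOMMU_V1 format; teardown via free_io_pgtable_ops() ends up in v1_free_pgtable().

#include <linux/io-pgtable.h>

static struct io_pgtable_ops *example_attach(struct io_pgtable_cfg *cfg,
					     void *cookie)
{
	/* selects io_pgtable_amd_iommu_v1_init_fns via the format enum */
	return alloc_io_pgtable_ops(AMD_IOMMU_V1, cfg, cookie);
}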

struct io_pgtable_init_fns io_pgtable_amd_iommu_v1_init_fns = {
	.alloc	= v1_alloc_pgtable,
	.free	= v1_free_pgtable,
};