arch/arm/xen/p2m.c (Linux v6.13.7)
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/memblock.h>
#include <linux/gfp.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <linux/vmalloc.h>
#include <linux/swiotlb.h>

#include <xen/xen.h>
#include <xen/interface/memory.h>
#include <xen/grant_table.h>
#include <xen/page.h>
#include <xen/swiotlb-xen.h>

#include <asm/cacheflush.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/interface.h>

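/*
 * One contiguous pfn -> mfn range of nr_pages frames, kept in an
 * rbtree keyed by the starting pfn.
 */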
struct xen_p2m_entry {
	unsigned long pfn;
	unsigned long mfn;
	unsigned long nr_pages;
	struct rb_node rbnode_phys;
};

static rwlock_t p2m_lock;
struct rb_root phys_to_mach = RB_ROOT;
EXPORT_SYMBOL_GPL(phys_to_mach);

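/*
 * Link a new entry into the rbtree, walking down from the root by pfn.
 * A second entry with the same starting pfn is rejected with -EINVAL.
 * Callers hold p2m_lock for writing.
 */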
static int xen_add_phys_to_mach_entry(struct xen_p2m_entry *new)
{
	struct rb_node **link = &phys_to_mach.rb_node;
	struct rb_node *parent = NULL;
	struct xen_p2m_entry *entry;
	int rc = 0;

	while (*link) {
		parent = *link;
		entry = rb_entry(parent, struct xen_p2m_entry, rbnode_phys);

		if (new->pfn == entry->pfn)
			goto err_out;

		if (new->pfn < entry->pfn)
			link = &(*link)->rb_left;
		else
			link = &(*link)->rb_right;
	}
	rb_link_node(&new->rbnode_phys, parent, link);
	rb_insert_color(&new->rbnode_phys, &phys_to_mach);
	goto out;

err_out:
	rc = -EINVAL;
	pr_warn("%s: cannot add pfn=%pa -> mfn=%pa: pfn=%pa -> mfn=%pa already exists\n",
			__func__, &new->pfn, &new->mfn, &entry->pfn, &entry->mfn);
out:
	return rc;
}

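/*
 * Translate @pfn to an mfn under the read lock. An entry covers
 * [pfn, pfn + nr_pages), so the result is entry->mfn plus the offset
 * into the range; INVALID_P2M_ENTRY means no entry covers @pfn.
 */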
unsigned long __pfn_to_mfn(unsigned long pfn)
{
	struct rb_node *n;
	struct xen_p2m_entry *entry;
	unsigned long irqflags;

	read_lock_irqsave(&p2m_lock, irqflags);
	n = phys_to_mach.rb_node;
	while (n) {
		entry = rb_entry(n, struct xen_p2m_entry, rbnode_phys);
		if (entry->pfn <= pfn &&
				entry->pfn + entry->nr_pages > pfn) {
			unsigned long mfn = entry->mfn + (pfn - entry->pfn);
			read_unlock_irqrestore(&p2m_lock, irqflags);
			return mfn;
		}
		if (pfn < entry->pfn)
			n = n->rb_left;
		else
			n = n->rb_right;
	}
	read_unlock_irqrestore(&p2m_lock, irqflags);

	return INVALID_P2M_ENTRY;
}
EXPORT_SYMBOL_GPL(__pfn_to_mfn);

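/*
 * Record p2m entries for freshly mapped grant pages. If an entry
 * cannot be recorded, the slot is flagged GNTST_general_error, its
 * handle is invalidated, and the grant is unmapped again immediately
 * so it cannot leak. kmap_ops and pages are unused on Arm.
 */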
int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
			    struct gnttab_map_grant_ref *kmap_ops,
			    struct page **pages, unsigned int count)
{
	int i;

	for (i = 0; i < count; i++) {
		struct gnttab_unmap_grant_ref unmap;
		int rc;

		if (map_ops[i].status)
			continue;
		if (likely(set_phys_to_machine(map_ops[i].host_addr >> XEN_PAGE_SHIFT,
				    map_ops[i].dev_bus_addr >> XEN_PAGE_SHIFT)))
			continue;

		/*
		 * Signal an error for this slot. This in turn requires
		 * immediate unmapping.
		 */
		map_ops[i].status = GNTST_general_error;
		unmap.host_addr = map_ops[i].host_addr;
		unmap.handle = map_ops[i].handle;
		map_ops[i].handle = INVALID_GRANT_HANDLE;
		if (map_ops[i].flags & GNTMAP_device_map)
			unmap.dev_bus_addr = map_ops[i].dev_bus_addr;
		else
			unmap.dev_bus_addr = 0;

		/*
		 * Pre-populate the status field, to be recognizable in
		 * the log message below.
		 */
		unmap.status = 1;

		rc = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref,
					       &unmap, 1);
		if (rc || unmap.status != GNTST_okay)
			pr_err_once("gnttab unmap failed: rc=%d st=%d\n",
				    rc, unmap.status);
	}

	return 0;
}

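/*
 * Drop the p2m entries that were recorded for mapped grants.
 * kunmap_ops and pages are likewise unused on Arm.
 */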
int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
			      struct gnttab_unmap_grant_ref *kunmap_ops,
			      struct page **pages, unsigned int count)
{
	int i;

	for (i = 0; i < count; i++) {
		set_phys_to_machine(unmap_ops[i].host_addr >> XEN_PAGE_SHIFT,
				    INVALID_P2M_ENTRY);
	}

	return 0;
}

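/*
 * Insert a pfn -> mfn mapping covering nr_pages frames, or remove the
 * entry covering @pfn when @mfn is INVALID_P2M_ENTRY. The entry is
 * allocated with GFP_NOWAIT, so this never sleeps but may fail under
 * memory pressure, in which case false is returned.
 */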
bool __set_phys_to_machine_multi(unsigned long pfn,
		unsigned long mfn, unsigned long nr_pages)
{
	int rc;
	unsigned long irqflags;
	struct xen_p2m_entry *p2m_entry;
	struct rb_node *n;

	if (mfn == INVALID_P2M_ENTRY) {
		write_lock_irqsave(&p2m_lock, irqflags);
		n = phys_to_mach.rb_node;
		while (n) {
			p2m_entry = rb_entry(n, struct xen_p2m_entry, rbnode_phys);
			if (p2m_entry->pfn <= pfn &&
					p2m_entry->pfn + p2m_entry->nr_pages > pfn) {
				rb_erase(&p2m_entry->rbnode_phys, &phys_to_mach);
				write_unlock_irqrestore(&p2m_lock, irqflags);
				kfree(p2m_entry);
				return true;
			}
			if (pfn < p2m_entry->pfn)
				n = n->rb_left;
			else
				n = n->rb_right;
		}
		write_unlock_irqrestore(&p2m_lock, irqflags);
		return true;
	}

	p2m_entry = kzalloc(sizeof(*p2m_entry), GFP_NOWAIT);
	if (!p2m_entry)
		return false;

	p2m_entry->pfn = pfn;
	p2m_entry->nr_pages = nr_pages;
	p2m_entry->mfn = mfn;

	write_lock_irqsave(&p2m_lock, irqflags);
	rc = xen_add_phys_to_mach_entry(p2m_entry);
	if (rc < 0) {
		write_unlock_irqrestore(&p2m_lock, irqflags);
		kfree(p2m_entry);
		return false;
	}
	write_unlock_irqrestore(&p2m_lock, irqflags);
	return true;
}
EXPORT_SYMBOL_GPL(__set_phys_to_machine_multi);

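/* Single-frame wrapper around __set_phys_to_machine_multi(). */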
bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn)
{
	return __set_phys_to_machine_multi(pfn, mfn, 1);
}
EXPORT_SYMBOL_GPL(__set_phys_to_machine);

static int p2m_init(void)
{
	rwlock_init(&p2m_lock);
	return 0;
}
arch_initcall(p2m_init);
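
For orientation, a minimal sketch of how the exported helpers above fit together. The helper names and their semantics come from this file; the calling function, the frame numbers, and the range length are hypothetical, and a real caller would run in kernel context in a Xen guest.

/*
 * Hypothetical illustration only: p2m_usage_sketch(), the frame
 * numbers, and the 16-page range are made up; only the helpers are
 * real.
 */
#include <linux/bug.h>
#include <xen/page.h>

static void p2m_usage_sketch(void)
{
	unsigned long pfn = 0x80000;	/* hypothetical guest frame */
	unsigned long mfn = 0x12345;	/* hypothetical machine frame */

	/* Record one contiguous 16-page pfn -> mfn range. */
	if (!__set_phys_to_machine_multi(pfn, mfn, 16))
		return;		/* GFP_NOWAIT allocation failed */

	/* Any pfn inside the range translates by offset: pfn+3 -> mfn+3. */
	WARN_ON(__pfn_to_mfn(pfn + 3) != mfn + 3);

	/* Passing INVALID_P2M_ENTRY removes the entry covering pfn. */
	__set_phys_to_machine_multi(pfn, INVALID_P2M_ENTRY, 16);
}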