drivers/nvdimm/claim.c, v5.9
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
  4 */
  5#include <linux/device.h>
  6#include <linux/sizes.h>
  7#include "nd-core.h"
  8#include "pmem.h"
  9#include "pfn.h"
 10#include "btt.h"
 11#include "nd.h"
 12
 13void __nd_detach_ndns(struct device *dev, struct nd_namespace_common **_ndns)
 14{
 15	struct nd_namespace_common *ndns = *_ndns;
 16	struct nvdimm_bus *nvdimm_bus;
 17
 18	if (!ndns)
 19		return;
 20
 21	nvdimm_bus = walk_to_nvdimm_bus(&ndns->dev);
 22	lockdep_assert_held(&nvdimm_bus->reconfig_mutex);
 23	dev_WARN_ONCE(dev, ndns->claim != dev, "%s: invalid claim\n", __func__);
 24	ndns->claim = NULL;
 25	*_ndns = NULL;
 26	put_device(&ndns->dev);
 27}
 28
 29void nd_detach_ndns(struct device *dev,
 30		struct nd_namespace_common **_ndns)
 31{
 32	struct nd_namespace_common *ndns = *_ndns;
 33
 34	if (!ndns)
 35		return;
 36	get_device(&ndns->dev);
 37	nvdimm_bus_lock(&ndns->dev);
 38	__nd_detach_ndns(dev, _ndns);
 39	nvdimm_bus_unlock(&ndns->dev);
 40	put_device(&ndns->dev);
 41}
 42
 43bool __nd_attach_ndns(struct device *dev, struct nd_namespace_common *attach,
 44		struct nd_namespace_common **_ndns)
 45{
 46	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&attach->dev);
 47
 48	if (attach->claim)
 49		return false;
 50	lockdep_assert_held(&nvdimm_bus->reconfig_mutex);
 51	dev_WARN_ONCE(dev, *_ndns, "%s: invalid claim\n", __func__);
 52	attach->claim = dev;
 53	*_ndns = attach;
 54	get_device(&attach->dev);
 55	return true;
 56}
 57
 58bool nd_attach_ndns(struct device *dev, struct nd_namespace_common *attach,
 59		struct nd_namespace_common **_ndns)
 60{
 61	bool claimed;
 62
 63	nvdimm_bus_lock(&attach->dev);
 64	claimed = __nd_attach_ndns(dev, attach, _ndns);
 65	nvdimm_bus_unlock(&attach->dev);
 66	return claimed;
 67}
 68
 69static int namespace_match(struct device *dev, void *data)
 70{
 71	char *name = data;
 72
 73	return strcmp(name, dev_name(dev)) == 0;
 74}
 75
 76static bool is_idle(struct device *dev, struct nd_namespace_common *ndns)
 77{
 78	struct nd_region *nd_region = to_nd_region(dev->parent);
 79	struct device *seed = NULL;
 80
 81	if (is_nd_btt(dev))
 82		seed = nd_region->btt_seed;
 83	else if (is_nd_pfn(dev))
 84		seed = nd_region->pfn_seed;
 85	else if (is_nd_dax(dev))
 86		seed = nd_region->dax_seed;
 87
 88	if (seed == dev || ndns || dev->driver)
 89		return false;
 90	return true;
 91}
 92
 93struct nd_pfn *to_nd_pfn_safe(struct device *dev)
 94{
 95	/*
 96	 * pfn device attributes are re-used by dax device instances, so we
  97	 * need to be careful to do the correct device-to-nd_pfn conversion.
 98	 */
 99	if (is_nd_pfn(dev))
100		return to_nd_pfn(dev);
101
102	if (is_nd_dax(dev)) {
103		struct nd_dax *nd_dax = to_nd_dax(dev);
104
105		return &nd_dax->nd_pfn;
106	}
107
108	WARN_ON(1);
109	return NULL;
110}
111
112static void nd_detach_and_reset(struct device *dev,
113		struct nd_namespace_common **_ndns)
114{
115	/* detach the namespace and destroy / reset the device */
116	__nd_detach_ndns(dev, _ndns);
117	if (is_idle(dev, *_ndns)) {
118		nd_device_unregister(dev, ND_ASYNC);
119	} else if (is_nd_btt(dev)) {
120		struct nd_btt *nd_btt = to_nd_btt(dev);
121
122		nd_btt->lbasize = 0;
123		kfree(nd_btt->uuid);
124		nd_btt->uuid = NULL;
125	} else if (is_nd_pfn(dev) || is_nd_dax(dev)) {
126		struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
127
128		kfree(nd_pfn->uuid);
129		nd_pfn->uuid = NULL;
130		nd_pfn->mode = PFN_MODE_NONE;
131	}
132}
133
134ssize_t nd_namespace_store(struct device *dev,
135		struct nd_namespace_common **_ndns, const char *buf,
136		size_t len)
137{
138	struct nd_namespace_common *ndns;
139	struct device *found;
140	char *name;
141
142	if (dev->driver) {
143		dev_dbg(dev, "namespace already active\n");
144		return -EBUSY;
145	}
146
147	name = kstrndup(buf, len, GFP_KERNEL);
148	if (!name)
149		return -ENOMEM;
150	strim(name);
151
152	if (strncmp(name, "namespace", 9) == 0 || strcmp(name, "") == 0)
153		/* pass */;
154	else {
155		len = -EINVAL;
156		goto out;
157	}
158
159	ndns = *_ndns;
160	if (strcmp(name, "") == 0) {
161		nd_detach_and_reset(dev, _ndns);
162		goto out;
163	} else if (ndns) {
164		dev_dbg(dev, "namespace already set to: %s\n",
165				dev_name(&ndns->dev));
166		len = -EBUSY;
167		goto out;
168	}
169
170	found = device_find_child(dev->parent, name, namespace_match);
171	if (!found) {
172		dev_dbg(dev, "'%s' not found under %s\n", name,
173				dev_name(dev->parent));
174		len = -ENODEV;
175		goto out;
176	}
177
178	ndns = to_ndns(found);
179
180	switch (ndns->claim_class) {
181	case NVDIMM_CCLASS_NONE:
182		break;
183	case NVDIMM_CCLASS_BTT:
184	case NVDIMM_CCLASS_BTT2:
185		if (!is_nd_btt(dev)) {
186			len = -EBUSY;
187			goto out_attach;
188		}
189		break;
190	case NVDIMM_CCLASS_PFN:
191		if (!is_nd_pfn(dev)) {
192			len = -EBUSY;
193			goto out_attach;
194		}
195		break;
196	case NVDIMM_CCLASS_DAX:
197		if (!is_nd_dax(dev)) {
198			len = -EBUSY;
199			goto out_attach;
200		}
201		break;
202	default:
203		len = -EBUSY;
204		goto out_attach;
205		break;
206	}
207
208	if (__nvdimm_namespace_capacity(ndns) < SZ_16M) {
209		dev_dbg(dev, "%s too small to host\n", name);
210		len = -ENXIO;
211		goto out_attach;
212	}
213
214	WARN_ON_ONCE(!is_nvdimm_bus_locked(dev));
215	if (!__nd_attach_ndns(dev, ndns, _ndns)) {
216		dev_dbg(dev, "%s already claimed\n",
217				dev_name(&ndns->dev));
218		len = -EBUSY;
219	}
220
221 out_attach:
222	put_device(&ndns->dev); /* from device_find_child */
223 out:
224	kfree(name);
225	return len;
226}
227
228/*
229 * nd_sb_checksum: compute checksum for a generic info block
230 *
231 * Returns a fletcher64 checksum of everything in the given info block
232 * except the last field (since that's where the checksum lives).
233 */
234u64 nd_sb_checksum(struct nd_gen_sb *nd_gen_sb)
235{
236	u64 sum;
237	__le64 sum_save;
238
239	BUILD_BUG_ON(sizeof(struct btt_sb) != SZ_4K);
240	BUILD_BUG_ON(sizeof(struct nd_pfn_sb) != SZ_4K);
241	BUILD_BUG_ON(sizeof(struct nd_gen_sb) != SZ_4K);
242
243	sum_save = nd_gen_sb->checksum;
244	nd_gen_sb->checksum = 0;
245	sum = nd_fletcher64(nd_gen_sb, sizeof(*nd_gen_sb), 1);
246	nd_gen_sb->checksum = sum_save;
247	return sum;
248}
249EXPORT_SYMBOL(nd_sb_checksum);
250
251static int nsio_rw_bytes(struct nd_namespace_common *ndns,
252		resource_size_t offset, void *buf, size_t size, int rw,
253		unsigned long flags)
254{
255	struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
256	unsigned int sz_align = ALIGN(size + (offset & (512 - 1)), 512);
257	sector_t sector = offset >> 9;
258	int rc = 0, ret = 0;
259
260	if (unlikely(!size))
261		return 0;
262
263	if (unlikely(offset + size > nsio->size)) {
264		dev_WARN_ONCE(&ndns->dev, 1, "request out of range\n");
265		return -EFAULT;
266	}
267
268	if (rw == READ) {
269		if (unlikely(is_bad_pmem(&nsio->bb, sector, sz_align)))
270			return -EIO;
271		if (memcpy_mcsafe(buf, nsio->addr + offset, size) != 0)
272			return -EIO;
273		return 0;
274	}
275
276	if (unlikely(is_bad_pmem(&nsio->bb, sector, sz_align))) {
277		if (IS_ALIGNED(offset, 512) && IS_ALIGNED(size, 512)
278				&& !(flags & NVDIMM_IO_ATOMIC)) {
279			long cleared;
280
281			might_sleep();
282			cleared = nvdimm_clear_poison(&ndns->dev,
283					nsio->res.start + offset, size);
284			if (cleared < size)
285				rc = -EIO;
286			if (cleared > 0 && cleared / 512) {
287				cleared /= 512;
288				badblocks_clear(&nsio->bb, sector, cleared);
289			}
290			arch_invalidate_pmem(nsio->addr + offset, size);
291		} else
292			rc = -EIO;
293	}
294
295	memcpy_flushcache(nsio->addr + offset, buf, size);
296	ret = nvdimm_flush(to_nd_region(ndns->dev.parent), NULL);
297	if (ret)
298		rc = ret;
299
300	return rc;
301}
302
303int devm_nsio_enable(struct device *dev, struct nd_namespace_io *nsio,
304		resource_size_t size)
305{
306	struct resource *res = &nsio->res;
307	struct nd_namespace_common *ndns = &nsio->common;
308
309	nsio->size = size;
310	if (!devm_request_mem_region(dev, res->start, size,
311				dev_name(&ndns->dev))) {
312		dev_warn(dev, "could not reserve region %pR\n", res);
313		return -EBUSY;
314	}
315
316	ndns->rw_bytes = nsio_rw_bytes;
317	if (devm_init_badblocks(dev, &nsio->bb))
318		return -ENOMEM;
319	nvdimm_badblocks_populate(to_nd_region(ndns->dev.parent), &nsio->bb,
320			&nsio->res);
321
322	nsio->addr = devm_memremap(dev, res->start, size, ARCH_MEMREMAP_PMEM);
323
324	return PTR_ERR_OR_ZERO(nsio->addr);
325}
326
327void devm_nsio_disable(struct device *dev, struct nd_namespace_io *nsio)
328{
329	struct resource *res = &nsio->res;
330
331	devm_memunmap(dev, nsio->addr);
332	devm_exit_badblocks(dev, &nsio->bb);
333	devm_release_mem_region(dev, res->start, nsio->size);
334}
drivers/nvdimm/claim.c, v6.8
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
  4 */
  5#include <linux/device.h>
  6#include <linux/sizes.h>
  7#include <linux/badblocks.h>
  8#include "nd-core.h"
  9#include "pmem.h"
 10#include "pfn.h"
 11#include "btt.h"
 12#include "nd.h"
 13
 14void __nd_detach_ndns(struct device *dev, struct nd_namespace_common **_ndns)
 15{
 16	struct nd_namespace_common *ndns = *_ndns;
 17	struct nvdimm_bus *nvdimm_bus;
 18
 19	if (!ndns)
 20		return;
 21
 22	nvdimm_bus = walk_to_nvdimm_bus(&ndns->dev);
 23	lockdep_assert_held(&nvdimm_bus->reconfig_mutex);
 24	dev_WARN_ONCE(dev, ndns->claim != dev, "%s: invalid claim\n", __func__);
 25	ndns->claim = NULL;
 26	*_ndns = NULL;
 27	put_device(&ndns->dev);
 28}
 29
 30void nd_detach_ndns(struct device *dev,
 31		struct nd_namespace_common **_ndns)
 32{
 33	struct nd_namespace_common *ndns = *_ndns;
 34
 35	if (!ndns)
 36		return;
 37	get_device(&ndns->dev);
 38	nvdimm_bus_lock(&ndns->dev);
 39	__nd_detach_ndns(dev, _ndns);
 40	nvdimm_bus_unlock(&ndns->dev);
 41	put_device(&ndns->dev);
 42}
 43
 44bool __nd_attach_ndns(struct device *dev, struct nd_namespace_common *attach,
 45		struct nd_namespace_common **_ndns)
 46{
 47	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&attach->dev);
 48
 49	if (attach->claim)
 50		return false;
 51	lockdep_assert_held(&nvdimm_bus->reconfig_mutex);
 52	dev_WARN_ONCE(dev, *_ndns, "%s: invalid claim\n", __func__);
 53	attach->claim = dev;
 54	*_ndns = attach;
 55	get_device(&attach->dev);
 56	return true;
 57}
 58
 59bool nd_attach_ndns(struct device *dev, struct nd_namespace_common *attach,
 60		struct nd_namespace_common **_ndns)
 61{
 62	bool claimed;
 63
 64	nvdimm_bus_lock(&attach->dev);
 65	claimed = __nd_attach_ndns(dev, attach, _ndns);
 66	nvdimm_bus_unlock(&attach->dev);
 67	return claimed;
 68}
 69
 70static int namespace_match(struct device *dev, void *data)
 71{
 72	char *name = data;
 73
 74	return strcmp(name, dev_name(dev)) == 0;
 75}
 76
 77static bool is_idle(struct device *dev, struct nd_namespace_common *ndns)
 78{
 79	struct nd_region *nd_region = to_nd_region(dev->parent);
 80	struct device *seed = NULL;
 81
 82	if (is_nd_btt(dev))
 83		seed = nd_region->btt_seed;
 84	else if (is_nd_pfn(dev))
 85		seed = nd_region->pfn_seed;
 86	else if (is_nd_dax(dev))
 87		seed = nd_region->dax_seed;
 88
 89	if (seed == dev || ndns || dev->driver)
 90		return false;
 91	return true;
 92}
 93
 94struct nd_pfn *to_nd_pfn_safe(struct device *dev)
 95{
 96	/*
 97	 * pfn device attributes are re-used by dax device instances, so we
 98	 * need to be careful to do the correct device-to-nd_pfn conversion.
 99	 */
100	if (is_nd_pfn(dev))
101		return to_nd_pfn(dev);
102
103	if (is_nd_dax(dev)) {
104		struct nd_dax *nd_dax = to_nd_dax(dev);
105
106		return &nd_dax->nd_pfn;
107	}
108
109	WARN_ON(1);
110	return NULL;
111}
112
113static void nd_detach_and_reset(struct device *dev,
114		struct nd_namespace_common **_ndns)
115{
116	/* detach the namespace and destroy / reset the device */
117	__nd_detach_ndns(dev, _ndns);
118	if (is_idle(dev, *_ndns)) {
119		nd_device_unregister(dev, ND_ASYNC);
120	} else if (is_nd_btt(dev)) {
121		struct nd_btt *nd_btt = to_nd_btt(dev);
122
123		nd_btt->lbasize = 0;
124		kfree(nd_btt->uuid);
125		nd_btt->uuid = NULL;
126	} else if (is_nd_pfn(dev) || is_nd_dax(dev)) {
127		struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
128
129		kfree(nd_pfn->uuid);
130		nd_pfn->uuid = NULL;
131		nd_pfn->mode = PFN_MODE_NONE;
132	}
133}
134
135ssize_t nd_namespace_store(struct device *dev,
136		struct nd_namespace_common **_ndns, const char *buf,
137		size_t len)
138{
139	struct nd_namespace_common *ndns;
140	struct device *found;
141	char *name;
142
143	if (dev->driver) {
144		dev_dbg(dev, "namespace already active\n");
145		return -EBUSY;
146	}
147
148	name = kstrndup(buf, len, GFP_KERNEL);
149	if (!name)
150		return -ENOMEM;
151	strim(name);
152
153	if (strncmp(name, "namespace", 9) == 0 || strcmp(name, "") == 0)
154		/* pass */;
155	else {
156		len = -EINVAL;
157		goto out;
158	}
159
160	ndns = *_ndns;
161	if (strcmp(name, "") == 0) {
162		nd_detach_and_reset(dev, _ndns);
163		goto out;
164	} else if (ndns) {
165		dev_dbg(dev, "namespace already set to: %s\n",
166				dev_name(&ndns->dev));
167		len = -EBUSY;
168		goto out;
169	}
170
171	found = device_find_child(dev->parent, name, namespace_match);
172	if (!found) {
173		dev_dbg(dev, "'%s' not found under %s\n", name,
174				dev_name(dev->parent));
175		len = -ENODEV;
176		goto out;
177	}
178
179	ndns = to_ndns(found);
180
181	switch (ndns->claim_class) {
182	case NVDIMM_CCLASS_NONE:
183		break;
184	case NVDIMM_CCLASS_BTT:
185	case NVDIMM_CCLASS_BTT2:
186		if (!is_nd_btt(dev)) {
187			len = -EBUSY;
188			goto out_attach;
189		}
190		break;
191	case NVDIMM_CCLASS_PFN:
192		if (!is_nd_pfn(dev)) {
193			len = -EBUSY;
194			goto out_attach;
195		}
196		break;
197	case NVDIMM_CCLASS_DAX:
198		if (!is_nd_dax(dev)) {
199			len = -EBUSY;
200			goto out_attach;
201		}
202		break;
203	default:
204		len = -EBUSY;
205		goto out_attach;
206		break;
207	}
208
209	if (__nvdimm_namespace_capacity(ndns) < SZ_16M) {
210		dev_dbg(dev, "%s too small to host\n", name);
211		len = -ENXIO;
212		goto out_attach;
213	}
214
215	WARN_ON_ONCE(!is_nvdimm_bus_locked(dev));
216	if (!__nd_attach_ndns(dev, ndns, _ndns)) {
217		dev_dbg(dev, "%s already claimed\n",
218				dev_name(&ndns->dev));
219		len = -EBUSY;
220	}
221
222 out_attach:
223	put_device(&ndns->dev); /* from device_find_child */
224 out:
225	kfree(name);
226	return len;
227}
228
229/*
230 * nd_sb_checksum: compute checksum for a generic info block
231 *
232 * Returns a fletcher64 checksum of everything in the given info block
233 * except the last field (since that's where the checksum lives).
234 */
235u64 nd_sb_checksum(struct nd_gen_sb *nd_gen_sb)
236{
237	u64 sum;
238	__le64 sum_save;
239
240	BUILD_BUG_ON(sizeof(struct btt_sb) != SZ_4K);
241	BUILD_BUG_ON(sizeof(struct nd_pfn_sb) != SZ_4K);
242	BUILD_BUG_ON(sizeof(struct nd_gen_sb) != SZ_4K);
243
244	sum_save = nd_gen_sb->checksum;
245	nd_gen_sb->checksum = 0;
246	sum = nd_fletcher64(nd_gen_sb, sizeof(*nd_gen_sb), 1);
247	nd_gen_sb->checksum = sum_save;
248	return sum;
249}
250EXPORT_SYMBOL(nd_sb_checksum);
251
252static int nsio_rw_bytes(struct nd_namespace_common *ndns,
253		resource_size_t offset, void *buf, size_t size, int rw,
254		unsigned long flags)
255{
256	struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
257	unsigned int sz_align = ALIGN(size + (offset & (512 - 1)), 512);
258	sector_t sector = offset >> 9;
259	int rc = 0, ret = 0;
260
261	if (unlikely(!size))
262		return 0;
263
264	if (unlikely(offset + size > nsio->size)) {
265		dev_WARN_ONCE(&ndns->dev, 1, "request out of range\n");
266		return -EFAULT;
267	}
268
269	if (rw == READ) {
270		if (unlikely(is_bad_pmem(&nsio->bb, sector, sz_align)))
271			return -EIO;
272		if (copy_mc_to_kernel(buf, nsio->addr + offset, size) != 0)
273			return -EIO;
274		return 0;
275	}
276
277	if (unlikely(is_bad_pmem(&nsio->bb, sector, sz_align))) {
278		if (IS_ALIGNED(offset, 512) && IS_ALIGNED(size, 512)
279				&& !(flags & NVDIMM_IO_ATOMIC)) {
280			long cleared;
281
282			might_sleep();
283			cleared = nvdimm_clear_poison(&ndns->dev,
284					nsio->res.start + offset, size);
285			if (cleared < size)
286				rc = -EIO;
287			if (cleared > 0 && cleared / 512) {
288				cleared /= 512;
289				badblocks_clear(&nsio->bb, sector, cleared);
290			}
291			arch_invalidate_pmem(nsio->addr + offset, size);
292		} else
293			rc = -EIO;
294	}
295
296	memcpy_flushcache(nsio->addr + offset, buf, size);
297	ret = nvdimm_flush(to_nd_region(ndns->dev.parent), NULL);
298	if (ret)
299		rc = ret;
300
301	return rc;
302}
303
304int devm_nsio_enable(struct device *dev, struct nd_namespace_io *nsio,
305		resource_size_t size)
306{
307	struct nd_namespace_common *ndns = &nsio->common;
308	struct range range = {
309		.start = nsio->res.start,
310		.end = nsio->res.end,
311	};
312
313	nsio->size = size;
314	if (!devm_request_mem_region(dev, range.start, size,
315				dev_name(&ndns->dev))) {
316		dev_warn(dev, "could not reserve region %pR\n", &nsio->res);
317		return -EBUSY;
318	}
319
320	ndns->rw_bytes = nsio_rw_bytes;
321	if (devm_init_badblocks(dev, &nsio->bb))
322		return -ENOMEM;
323	nvdimm_badblocks_populate(to_nd_region(ndns->dev.parent), &nsio->bb,
324			&range);
325
326	nsio->addr = devm_memremap(dev, range.start, size, ARCH_MEMREMAP_PMEM);
327
328	return PTR_ERR_OR_ZERO(nsio->addr);
329}
330
331void devm_nsio_disable(struct device *dev, struct nd_namespace_io *nsio)
332{
333	struct resource *res = &nsio->res;
334
335	devm_memunmap(dev, nsio->addr);
336	devm_exit_badblocks(dev, &nsio->bb);
337	devm_release_mem_region(dev, res->start, nsio->size);
338}
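
As context for nd_sb_checksum() shown above: callers such as the BTT and PFN info-block parsers validate an on-media superblock by comparing the stored checksum against a recomputation over the block. The sketch below illustrates that pattern; it is not part of claim.c, the helper name pfn_sb_checksum_ok() is hypothetical, and it assumes the local nvdimm headers (pfn.h for struct nd_pfn_sb, nd.h for struct nd_gen_sb and nd_sb_checksum()) are available, as they are in claim.c itself.

#include "pfn.h"	/* struct nd_pfn_sb (4K info block) */
#include "nd.h"		/* struct nd_gen_sb, nd_sb_checksum() */

/*
 * Illustrative sketch only: check a pfn info block against its stored
 * fletcher64 checksum.  nd_sb_checksum() zeroes the checksum field
 * internally before summing and restores it afterwards, so the caller
 * only needs to compare the stored value with the recomputed one.
 */
static bool pfn_sb_checksum_ok(struct nd_pfn_sb *pfn_sb)
{
	u64 stored = le64_to_cpu(pfn_sb->checksum);

	return stored == nd_sb_checksum((struct nd_gen_sb *) pfn_sb);
}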