v5.9
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 */
#include <linux/cpumask.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/nd.h>
#include "nd-core.h"
#include "nd.h"

static int nd_region_probe(struct device *dev)
{
	int err, rc;
	static unsigned long once;
	struct nd_region_data *ndrd;
	struct nd_region *nd_region = to_nd_region(dev);

	if (nd_region->num_lanes > num_online_cpus()
			&& nd_region->num_lanes < num_possible_cpus()
			&& !test_and_set_bit(0, &once)) {
		dev_dbg(dev, "online cpus (%d) < concurrent i/o lanes (%d) < possible cpus (%d)\n",
				num_online_cpus(), nd_region->num_lanes,
				num_possible_cpus());
		dev_dbg(dev, "setting nr_cpus=%d may yield better libnvdimm device performance\n",
				nd_region->num_lanes);
	}

	rc = nd_region_activate(nd_region);
	if (rc)
		return rc;

	rc = nd_blk_region_init(nd_region);
	if (rc)
		return rc;

	if (is_memory(&nd_region->dev)) {
		struct resource ndr_res;

		if (devm_init_badblocks(dev, &nd_region->bb))
			return -ENODEV;
		nd_region->bb_state = sysfs_get_dirent(nd_region->dev.kobj.sd,
						       "badblocks");
		if (!nd_region->bb_state)
			dev_warn(&nd_region->dev,
					"'badblocks' notification disabled\n");
		ndr_res.start = nd_region->ndr_start;
		ndr_res.end = nd_region->ndr_start + nd_region->ndr_size - 1;
		nvdimm_badblocks_populate(nd_region, &nd_region->bb, &ndr_res);
	}

	rc = nd_region_register_namespaces(nd_region, &err);
	if (rc < 0)
		return rc;

	ndrd = dev_get_drvdata(dev);
	ndrd->ns_active = rc;
	ndrd->ns_count = rc + err;

	if (rc && err && rc == err)
		return -ENODEV;

	nd_region->btt_seed = nd_btt_create(nd_region);
	nd_region->pfn_seed = nd_pfn_create(nd_region);
	nd_region->dax_seed = nd_dax_create(nd_region);
	if (err == 0)
		return 0;

	/*
	 * Given multiple namespaces per region, we do not want to
	 * disable all the successfully registered peer namespaces upon
	 * a single registration failure.  If userspace is missing a
	 * namespace that it expects it can disable/re-enable the region
	 * to retry discovery after correcting the failure.
	 * <regionX>/namespaces returns the current
	 * "<async-registered>/<total>" namespace count.
	 */
	dev_err(dev, "failed to register %d namespace%s, continuing...\n",
			err, err == 1 ? "" : "s");
	return 0;
}

static int child_unregister(struct device *dev, void *data)
{
	nd_device_unregister(dev, ND_SYNC);
	return 0;
}

static int nd_region_remove(struct device *dev)
{
	struct nd_region *nd_region = to_nd_region(dev);

	device_for_each_child(dev, NULL, child_unregister);

	/* flush attribute readers and disable */
	nvdimm_bus_lock(dev);
	nd_region->ns_seed = NULL;
	nd_region->btt_seed = NULL;
	nd_region->pfn_seed = NULL;
	nd_region->dax_seed = NULL;
	dev_set_drvdata(dev, NULL);
	nvdimm_bus_unlock(dev);

	/*
	 * Note, this assumes nd_device_lock() context to not race
	 * nd_region_notify()
	 */
	sysfs_put(nd_region->bb_state);
	nd_region->bb_state = NULL;

	return 0;
}

static int child_notify(struct device *dev, void *data)
{
	nd_device_notify(dev, *(enum nvdimm_event *) data);
	return 0;
}

static void nd_region_notify(struct device *dev, enum nvdimm_event event)
{
	if (event == NVDIMM_REVALIDATE_POISON) {
		struct nd_region *nd_region = to_nd_region(dev);
		struct resource res;

		if (is_memory(&nd_region->dev)) {
			res.start = nd_region->ndr_start;
			res.end = nd_region->ndr_start +
				nd_region->ndr_size - 1;
			nvdimm_badblocks_populate(nd_region,
					&nd_region->bb, &res);
			if (nd_region->bb_state)
				sysfs_notify_dirent(nd_region->bb_state);
		}
	}
	device_for_each_child(dev, &event, child_notify);
}

static struct nd_device_driver nd_region_driver = {
	.probe = nd_region_probe,
	.remove = nd_region_remove,
	.notify = nd_region_notify,
	.drv = {
		.name = "nd_region",
	},
	.type = ND_DRIVER_REGION_BLK | ND_DRIVER_REGION_PMEM,
};

int __init nd_region_init(void)
{
	return nd_driver_register(&nd_region_driver);
}

void nd_region_exit(void)
{
	driver_unregister(&nd_region_driver.drv);
}

MODULE_ALIAS_ND_DEVICE(ND_DEVICE_REGION_PMEM);
MODULE_ALIAS_ND_DEVICE(ND_DEVICE_REGION_BLK);
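In both the probe and notify paths of the v5.9 listing above, the bad-blocks scan span covers the whole region with an inclusive end address, i.e. [ndr_start, ndr_start + ndr_size - 1]. A minimal standalone sketch of that arithmetic follows; the field names ndr_start/ndr_size come from the listing, while the numeric values are purely illustrative:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical region geometry, for illustration only. */
	uint64_t ndr_start = 0x100000000ULL;	/* region base: 4 GiB */
	uint64_t ndr_size  = 0x40000000ULL;	/* region size: 1 GiB */

	/* Inclusive end, as computed for nvdimm_badblocks_populate() above. */
	uint64_t ndr_end = ndr_start + ndr_size - 1;

	printf("badblocks span: [0x%llx, 0x%llx]\n",
	       (unsigned long long)ndr_start, (unsigned long long)ndr_end);
	return 0;
}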
v5.14.15
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 */
#include <linux/cpumask.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/nd.h>
#include "nd-core.h"
#include "nd.h"

static int nd_region_probe(struct device *dev)
{
	int err, rc;
	static unsigned long once;
	struct nd_region_data *ndrd;
	struct nd_region *nd_region = to_nd_region(dev);

	if (nd_region->num_lanes > num_online_cpus()
			&& nd_region->num_lanes < num_possible_cpus()
			&& !test_and_set_bit(0, &once)) {
		dev_dbg(dev, "online cpus (%d) < concurrent i/o lanes (%d) < possible cpus (%d)\n",
				num_online_cpus(), nd_region->num_lanes,
				num_possible_cpus());
		dev_dbg(dev, "setting nr_cpus=%d may yield better libnvdimm device performance\n",
				nd_region->num_lanes);
	}

	rc = nd_region_activate(nd_region);
	if (rc)
		return rc;

	rc = nd_blk_region_init(nd_region);
	if (rc)
		return rc;

	if (is_memory(&nd_region->dev)) {
		struct range range = {
			.start = nd_region->ndr_start,
			.end = nd_region->ndr_start + nd_region->ndr_size - 1,
		};

		if (devm_init_badblocks(dev, &nd_region->bb))
			return -ENODEV;
		nd_region->bb_state = sysfs_get_dirent(nd_region->dev.kobj.sd,
						       "badblocks");
		if (!nd_region->bb_state)
			dev_warn(&nd_region->dev,
					"'badblocks' notification disabled\n");
		nvdimm_badblocks_populate(nd_region, &nd_region->bb, &range);
	}

	rc = nd_region_register_namespaces(nd_region, &err);
	if (rc < 0)
		return rc;

	ndrd = dev_get_drvdata(dev);
	ndrd->ns_active = rc;
	ndrd->ns_count = rc + err;

	if (rc && err && rc == err)
		return -ENODEV;

	nd_region->btt_seed = nd_btt_create(nd_region);
	nd_region->pfn_seed = nd_pfn_create(nd_region);
	nd_region->dax_seed = nd_dax_create(nd_region);
	if (err == 0)
		return 0;

	/*
	 * Given multiple namespaces per region, we do not want to
	 * disable all the successfully registered peer namespaces upon
	 * a single registration failure.  If userspace is missing a
	 * namespace that it expects it can disable/re-enable the region
	 * to retry discovery after correcting the failure.
	 * <regionX>/namespaces returns the current
	 * "<async-registered>/<total>" namespace count.
	 */
	dev_err(dev, "failed to register %d namespace%s, continuing...\n",
			err, err == 1 ? "" : "s");
	return 0;
}

static int child_unregister(struct device *dev, void *data)
{
	nd_device_unregister(dev, ND_SYNC);
	return 0;
}

static void nd_region_remove(struct device *dev)
{
	struct nd_region *nd_region = to_nd_region(dev);

	device_for_each_child(dev, NULL, child_unregister);

	/* flush attribute readers and disable */
	nvdimm_bus_lock(dev);
	nd_region->ns_seed = NULL;
	nd_region->btt_seed = NULL;
	nd_region->pfn_seed = NULL;
	nd_region->dax_seed = NULL;
	dev_set_drvdata(dev, NULL);
	nvdimm_bus_unlock(dev);

	/*
	 * Note, this assumes nd_device_lock() context to not race
	 * nd_region_notify()
	 */
	sysfs_put(nd_region->bb_state);
	nd_region->bb_state = NULL;
}

static int child_notify(struct device *dev, void *data)
{
	nd_device_notify(dev, *(enum nvdimm_event *) data);
	return 0;
}

static void nd_region_notify(struct device *dev, enum nvdimm_event event)
{
	if (event == NVDIMM_REVALIDATE_POISON) {
		struct nd_region *nd_region = to_nd_region(dev);

		if (is_memory(&nd_region->dev)) {
			struct range range = {
				.start = nd_region->ndr_start,
				.end = nd_region->ndr_start +
					nd_region->ndr_size - 1,
			};

			nvdimm_badblocks_populate(nd_region,
					&nd_region->bb, &range);
			if (nd_region->bb_state)
				sysfs_notify_dirent(nd_region->bb_state);
		}
	}
	device_for_each_child(dev, &event, child_notify);
}

static struct nd_device_driver nd_region_driver = {
	.probe = nd_region_probe,
	.remove = nd_region_remove,
	.notify = nd_region_notify,
	.drv = {
		.name = "nd_region",
	},
	.type = ND_DRIVER_REGION_BLK | ND_DRIVER_REGION_PMEM,
};

int __init nd_region_init(void)
{
	return nd_driver_register(&nd_region_driver);
}

void nd_region_exit(void)
{
	driver_unregister(&nd_region_driver.drv);
}

MODULE_ALIAS_ND_DEVICE(ND_DEVICE_REGION_PMEM);
MODULE_ALIAS_ND_DEVICE(ND_DEVICE_REGION_BLK);
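Across both versions, the file registers a single bus driver that serves PMEM and BLK regions: nd_region_driver bundles the probe/remove/notify callbacks with a .type mask, nd_driver_register() hooks it onto the nd bus, and the MODULE_ALIAS_ND_DEVICE() lines let the module autoload for either region type. Below is a minimal sketch of the same registration pattern for a hypothetical nd driver, written against the v5.14.15 shape shown above (void remove); the my_* names are made up and nothing beyond the calls visible in the listing is assumed:

// SPDX-License-Identifier: GPL-2.0-only
#include <linux/module.h>
#include <linux/device.h>
#include <linux/nd.h>

static int my_nd_probe(struct device *dev)
{
	/* bring the device up; a non-zero return fails the bind */
	dev_info(dev, "my_nd_driver bound\n");
	return 0;
}

static void my_nd_remove(struct device *dev)
{
	/* undo whatever probe set up (returns void as of v5.14) */
	dev_info(dev, "my_nd_driver unbound\n");
}

static struct nd_device_driver my_nd_driver = {
	.probe = my_nd_probe,
	.remove = my_nd_remove,
	.drv = {
		.name = "my_nd_driver",
	},
	.type = ND_DRIVER_REGION_PMEM,	/* only match PMEM regions */
};

static int __init my_nd_init(void)
{
	return nd_driver_register(&my_nd_driver);
}
module_init(my_nd_init);

static void __exit my_nd_exit(void)
{
	driver_unregister(&my_nd_driver.drv);
}
module_exit(my_nd_exit);

MODULE_LICENSE("GPL");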