Linux v5.9 (drivers/nvdimm/dimm_devs.c)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/moduleparam.h>
#include <linux/vmalloc.h>
#include <linux/device.h>
#include <linux/ndctl.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include "nd-core.h"
#include "label.h"
#include "pmem.h"
#include "nd.h"

static DEFINE_IDA(dimm_ida);

static bool noblk;
module_param(noblk, bool, 0444);
MODULE_PARM_DESC(noblk, "force disable BLK / local alias support");

/*
 * Retrieve the bus and dimm handle and return whether this bus
 * supports get_config_data commands.
 */
int nvdimm_check_config_data(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	if (!nvdimm->cmd_mask ||
	    !test_bit(ND_CMD_GET_CONFIG_DATA, &nvdimm->cmd_mask)) {
		if (test_bit(NDD_LABELING, &nvdimm->flags))
			return -ENXIO;
		else
			return -ENOTTY;
	}

	return 0;
}

static int validate_dimm(struct nvdimm_drvdata *ndd)
{
	int rc;

	if (!ndd)
		return -EINVAL;

	rc = nvdimm_check_config_data(ndd->dev);
	if (rc)
		dev_dbg(ndd->dev, "%ps: %s error: %d\n",
				__builtin_return_address(0), __func__, rc);
	return rc;
}

/**
 * nvdimm_init_nsarea - determine the geometry of a dimm's namespace area
 * @ndd: dimm driver-data to initialize
 */
int nvdimm_init_nsarea(struct nvdimm_drvdata *ndd)
{
	struct nd_cmd_get_config_size *cmd = &ndd->nsarea;
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
	struct nvdimm_bus_descriptor *nd_desc;
	int rc = validate_dimm(ndd);
	int cmd_rc = 0;

	if (rc)
		return rc;

	if (cmd->config_size)
		return 0; /* already valid */

	memset(cmd, 0, sizeof(*cmd));
	nd_desc = nvdimm_bus->nd_desc;
	rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
			ND_CMD_GET_CONFIG_SIZE, cmd, sizeof(*cmd), &cmd_rc);
	if (rc < 0)
		return rc;
	return cmd_rc;
}

int nvdimm_get_config_data(struct nvdimm_drvdata *ndd, void *buf,
			   size_t offset, size_t len)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
	struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
	int rc = validate_dimm(ndd), cmd_rc = 0;
	struct nd_cmd_get_config_data_hdr *cmd;
	size_t max_cmd_size, buf_offset;

	if (rc)
		return rc;

	if (offset + len > ndd->nsarea.config_size)
		return -ENXIO;

	max_cmd_size = min_t(u32, len, ndd->nsarea.max_xfer);
	cmd = kvzalloc(max_cmd_size + sizeof(*cmd), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

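	/*
	 * Transfer the config area in max_xfer-sized chunks: each pass
	 * re-issues the command with an updated in_offset/in_length
	 * window until the requested length is exhausted.
	 */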
	for (buf_offset = 0; len;
	     len -= cmd->in_length, buf_offset += cmd->in_length) {
		size_t cmd_size;

		cmd->in_offset = offset + buf_offset;
		cmd->in_length = min(max_cmd_size, len);

		cmd_size = sizeof(*cmd) + cmd->in_length;

		rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
				ND_CMD_GET_CONFIG_DATA, cmd, cmd_size, &cmd_rc);
		if (rc < 0)
			break;
		if (cmd_rc < 0) {
			rc = cmd_rc;
			break;
		}

		/* out_buf should be valid, copy it into our output buffer */
		memcpy(buf + buf_offset, cmd->out_buf, cmd->in_length);
	}
	kvfree(cmd);

	return rc;
}

int nvdimm_set_config_data(struct nvdimm_drvdata *ndd, size_t offset,
		void *buf, size_t len)
{
	size_t max_cmd_size, buf_offset;
	struct nd_cmd_set_config_hdr *cmd;
	int rc = validate_dimm(ndd), cmd_rc = 0;
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
	struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;

	if (rc)
		return rc;

	if (offset + len > ndd->nsarea.config_size)
		return -ENXIO;

	max_cmd_size = min_t(u32, len, ndd->nsarea.max_xfer);
	cmd = kvzalloc(max_cmd_size + sizeof(*cmd) + sizeof(u32), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

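	/*
	 * Mirror of the get path: write the payload in max_xfer-sized
	 * chunks, with a trailing u32 reserved in each command for the
	 * returned status.
	 */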
	for (buf_offset = 0; len; len -= cmd->in_length,
			buf_offset += cmd->in_length) {
		size_t cmd_size;

		cmd->in_offset = offset + buf_offset;
		cmd->in_length = min(max_cmd_size, len);
		memcpy(cmd->in_buf, buf + buf_offset, cmd->in_length);

		/* status is output in the last 4-bytes of the command buffer */
		cmd_size = sizeof(*cmd) + cmd->in_length + sizeof(u32);

		rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
				ND_CMD_SET_CONFIG_DATA, cmd, cmd_size, &cmd_rc);
		if (rc < 0)
			break;
		if (cmd_rc < 0) {
			rc = cmd_rc;
			break;
		}
	}
	kvfree(cmd);

	return rc;
}

void nvdimm_set_labeling(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	set_bit(NDD_LABELING, &nvdimm->flags);
}

void nvdimm_set_locked(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	set_bit(NDD_LOCKED, &nvdimm->flags);
}

void nvdimm_clear_locked(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	clear_bit(NDD_LOCKED, &nvdimm->flags);
}

static void nvdimm_release(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	ida_simple_remove(&dimm_ida, nvdimm->id);
	kfree(nvdimm);
}

struct nvdimm *to_nvdimm(struct device *dev)
{
	struct nvdimm *nvdimm = container_of(dev, struct nvdimm, dev);

	WARN_ON(!is_nvdimm(dev));
	return nvdimm;
}
EXPORT_SYMBOL_GPL(to_nvdimm);

struct nvdimm *nd_blk_region_to_dimm(struct nd_blk_region *ndbr)
{
	struct nd_region *nd_region = &ndbr->nd_region;
	struct nd_mapping *nd_mapping = &nd_region->mapping[0];

	return nd_mapping->nvdimm;
}
EXPORT_SYMBOL_GPL(nd_blk_region_to_dimm);

unsigned long nd_blk_memremap_flags(struct nd_blk_region *ndbr)
{
	/* pmem mapping properties are private to libnvdimm */
	return ARCH_MEMREMAP_PMEM;
}
EXPORT_SYMBOL_GPL(nd_blk_memremap_flags);

struct nvdimm_drvdata *to_ndd(struct nd_mapping *nd_mapping)
{
	struct nvdimm *nvdimm = nd_mapping->nvdimm;

	WARN_ON_ONCE(!is_nvdimm_bus_locked(&nvdimm->dev));

	return dev_get_drvdata(&nvdimm->dev);
}
EXPORT_SYMBOL(to_ndd);

void nvdimm_drvdata_release(struct kref *kref)
{
	struct nvdimm_drvdata *ndd = container_of(kref, typeof(*ndd), kref);
	struct device *dev = ndd->dev;
	struct resource *res, *_r;

	dev_dbg(dev, "trace\n");
	nvdimm_bus_lock(dev);
	for_each_dpa_resource_safe(ndd, res, _r)
		nvdimm_free_dpa(ndd, res);
	nvdimm_bus_unlock(dev);

	kvfree(ndd->data);
	kfree(ndd);
	put_device(dev);
}

void get_ndd(struct nvdimm_drvdata *ndd)
{
	kref_get(&ndd->kref);
}

void put_ndd(struct nvdimm_drvdata *ndd)
{
	if (ndd)
		kref_put(&ndd->kref, nvdimm_drvdata_release);
}

const char *nvdimm_name(struct nvdimm *nvdimm)
{
	return dev_name(&nvdimm->dev);
}
EXPORT_SYMBOL_GPL(nvdimm_name);

struct kobject *nvdimm_kobj(struct nvdimm *nvdimm)
{
	return &nvdimm->dev.kobj;
}
EXPORT_SYMBOL_GPL(nvdimm_kobj);

unsigned long nvdimm_cmd_mask(struct nvdimm *nvdimm)
{
	return nvdimm->cmd_mask;
}
EXPORT_SYMBOL_GPL(nvdimm_cmd_mask);

void *nvdimm_provider_data(struct nvdimm *nvdimm)
{
	if (nvdimm)
		return nvdimm->provider_data;
	return NULL;
}
EXPORT_SYMBOL_GPL(nvdimm_provider_data);

static ssize_t commands_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	int cmd, len = 0;

	if (!nvdimm->cmd_mask)
		return sprintf(buf, "\n");

	for_each_set_bit(cmd, &nvdimm->cmd_mask, BITS_PER_LONG)
		len += sprintf(buf + len, "%s ", nvdimm_cmd_name(cmd));
	len += sprintf(buf + len, "\n");
	return len;
}
static DEVICE_ATTR_RO(commands);

static ssize_t flags_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	return sprintf(buf, "%s%s%s\n",
			test_bit(NDD_ALIASING, &nvdimm->flags) ? "alias " : "",
			test_bit(NDD_LABELING, &nvdimm->flags) ? "label " : "",
			test_bit(NDD_LOCKED, &nvdimm->flags) ? "lock " : "");
}
static DEVICE_ATTR_RO(flags);

static ssize_t state_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	/*
	 * The state may be in the process of changing, userspace should
	 * quiesce probing if it wants a static answer
	 */
	nvdimm_bus_lock(dev);
	nvdimm_bus_unlock(dev);
	return sprintf(buf, "%s\n", atomic_read(&nvdimm->busy)
			? "active" : "idle");
}
static DEVICE_ATTR_RO(state);

static ssize_t available_slots_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm_drvdata *ndd = dev_get_drvdata(dev);
	ssize_t rc;
	u32 nfree;

	if (!ndd)
		return -ENXIO;

	nvdimm_bus_lock(dev);
	nfree = nd_label_nfree(ndd);
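	/*
	 * nfree - 1 underflows exactly when nfree == 0 (hence the
	 * dev_WARN below); one slot less than free is reported, likely
	 * keeping a slot in reserve so an existing label can be
	 * rewritten before the old one is retired.
	 */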
	if (nfree - 1 > nfree) {
		dev_WARN_ONCE(dev, 1, "we ate our last label?\n");
		nfree = 0;
	} else
		nfree--;
	rc = sprintf(buf, "%d\n", nfree);
	nvdimm_bus_unlock(dev);
	return rc;
}
static DEVICE_ATTR_RO(available_slots);

__weak ssize_t security_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	if (test_bit(NVDIMM_SECURITY_OVERWRITE, &nvdimm->sec.flags))
		return sprintf(buf, "overwrite\n");
	if (test_bit(NVDIMM_SECURITY_DISABLED, &nvdimm->sec.flags))
		return sprintf(buf, "disabled\n");
	if (test_bit(NVDIMM_SECURITY_UNLOCKED, &nvdimm->sec.flags))
		return sprintf(buf, "unlocked\n");
	if (test_bit(NVDIMM_SECURITY_LOCKED, &nvdimm->sec.flags))
		return sprintf(buf, "locked\n");
	return -ENOTTY;
}

static ssize_t frozen_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	return sprintf(buf, "%d\n", test_bit(NVDIMM_SECURITY_FROZEN,
				&nvdimm->sec.flags));
}
static DEVICE_ATTR_RO(frozen);

static ssize_t security_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	ssize_t rc;

	/*
	 * Require all userspace triggered security management to be
	 * done while probing is idle and the DIMM is not in active use
	 * in any region.
	 */
	nd_device_lock(dev);
	nvdimm_bus_lock(dev);
	wait_nvdimm_bus_probe_idle(dev);
	rc = nvdimm_security_store(dev, buf, len);
	nvdimm_bus_unlock(dev);
	nd_device_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RW(security);

static struct attribute *nvdimm_attributes[] = {
	&dev_attr_state.attr,
	&dev_attr_flags.attr,
	&dev_attr_commands.attr,
	&dev_attr_available_slots.attr,
	&dev_attr_security.attr,
	&dev_attr_frozen.attr,
	NULL,
};

static umode_t nvdimm_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, typeof(*dev), kobj);
	struct nvdimm *nvdimm = to_nvdimm(dev);

	if (a != &dev_attr_security.attr && a != &dev_attr_frozen.attr)
		return a->mode;
	if (!nvdimm->sec.flags)
		return 0;

	if (a == &dev_attr_security.attr) {
		/* Are there any state mutation ops (make writable)? */
		if (nvdimm->sec.ops->freeze || nvdimm->sec.ops->disable
				|| nvdimm->sec.ops->change_key
				|| nvdimm->sec.ops->erase
				|| nvdimm->sec.ops->overwrite)
			return a->mode;
		return 0444;
	}

	if (nvdimm->sec.ops->freeze)
		return a->mode;
	return 0;
}

static const struct attribute_group nvdimm_attribute_group = {
	.attrs = nvdimm_attributes,
	.is_visible = nvdimm_visible,
};

static ssize_t result_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	enum nvdimm_fwa_result result;

	if (!nvdimm->fw_ops)
		return -EOPNOTSUPP;

	nvdimm_bus_lock(dev);
	result = nvdimm->fw_ops->activate_result(nvdimm);
	nvdimm_bus_unlock(dev);

	switch (result) {
	case NVDIMM_FWA_RESULT_NONE:
		return sprintf(buf, "none\n");
	case NVDIMM_FWA_RESULT_SUCCESS:
		return sprintf(buf, "success\n");
	case NVDIMM_FWA_RESULT_FAIL:
		return sprintf(buf, "fail\n");
	case NVDIMM_FWA_RESULT_NOTSTAGED:
		return sprintf(buf, "not_staged\n");
	case NVDIMM_FWA_RESULT_NEEDRESET:
		return sprintf(buf, "need_reset\n");
	default:
		return -ENXIO;
	}
}
static DEVICE_ATTR_ADMIN_RO(result);

static ssize_t activate_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	enum nvdimm_fwa_state state;

	if (!nvdimm->fw_ops)
		return -EOPNOTSUPP;

	nvdimm_bus_lock(dev);
	state = nvdimm->fw_ops->activate_state(nvdimm);
	nvdimm_bus_unlock(dev);

	switch (state) {
	case NVDIMM_FWA_IDLE:
		return sprintf(buf, "idle\n");
	case NVDIMM_FWA_BUSY:
		return sprintf(buf, "busy\n");
	case NVDIMM_FWA_ARMED:
		return sprintf(buf, "armed\n");
	default:
		return -ENXIO;
	}
}

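/*
 * Example usage from userspace, assuming a DIMM registered as nmem0 on
 * the nd bus:
 *
 *   echo arm > /sys/bus/nd/devices/nmem0/firmware/activate
 *   cat /sys/bus/nd/devices/nmem0/firmware/result
 */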
static ssize_t activate_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t len)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	enum nvdimm_fwa_trigger arg;
	int rc;

	if (!nvdimm->fw_ops)
		return -EOPNOTSUPP;

	if (sysfs_streq(buf, "arm"))
		arg = NVDIMM_FWA_ARM;
	else if (sysfs_streq(buf, "disarm"))
		arg = NVDIMM_FWA_DISARM;
	else
		return -EINVAL;

	nvdimm_bus_lock(dev);
	rc = nvdimm->fw_ops->arm(nvdimm, arg);
	nvdimm_bus_unlock(dev);

	if (rc < 0)
		return rc;
	return len;
}
static DEVICE_ATTR_ADMIN_RW(activate);

static struct attribute *nvdimm_firmware_attributes[] = {
	&dev_attr_activate.attr,
	&dev_attr_result.attr,
	NULL,
};

static umode_t nvdimm_firmware_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, typeof(*dev), kobj);
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
	struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
	struct nvdimm *nvdimm = to_nvdimm(dev);
	enum nvdimm_fwa_capability cap;

	if (!nd_desc->fw_ops)
		return 0;
	if (!nvdimm->fw_ops)
		return 0;

	nvdimm_bus_lock(dev);
	cap = nd_desc->fw_ops->capability(nd_desc);
	nvdimm_bus_unlock(dev);

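	/* Hide the firmware/ group unless the bus can at least quiesce for activation */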
	if (cap < NVDIMM_FWA_CAP_QUIESCE)
		return 0;

	return a->mode;
}

static const struct attribute_group nvdimm_firmware_attribute_group = {
	.name = "firmware",
	.attrs = nvdimm_firmware_attributes,
	.is_visible = nvdimm_firmware_visible,
};

static const struct attribute_group *nvdimm_attribute_groups[] = {
	&nd_device_attribute_group,
	&nvdimm_attribute_group,
	&nvdimm_firmware_attribute_group,
	NULL,
};

static const struct device_type nvdimm_device_type = {
	.name = "nvdimm",
	.release = nvdimm_release,
	.groups = nvdimm_attribute_groups,
};

bool is_nvdimm(struct device *dev)
{
	return dev->type == &nvdimm_device_type;
}

struct nvdimm *__nvdimm_create(struct nvdimm_bus *nvdimm_bus,
		void *provider_data, const struct attribute_group **groups,
		unsigned long flags, unsigned long cmd_mask, int num_flush,
		struct resource *flush_wpq, const char *dimm_id,
		const struct nvdimm_security_ops *sec_ops,
		const struct nvdimm_fw_ops *fw_ops)
{
	struct nvdimm *nvdimm = kzalloc(sizeof(*nvdimm), GFP_KERNEL);
	struct device *dev;

	if (!nvdimm)
		return NULL;

	nvdimm->id = ida_simple_get(&dimm_ida, 0, 0, GFP_KERNEL);
	if (nvdimm->id < 0) {
		kfree(nvdimm);
		return NULL;
	}

	nvdimm->dimm_id = dimm_id;
	nvdimm->provider_data = provider_data;
	if (noblk)
		flags |= 1 << NDD_NOBLK;
	nvdimm->flags = flags;
	nvdimm->cmd_mask = cmd_mask;
	nvdimm->num_flush = num_flush;
	nvdimm->flush_wpq = flush_wpq;
	atomic_set(&nvdimm->busy, 0);
	dev = &nvdimm->dev;
	dev_set_name(dev, "nmem%d", nvdimm->id);
	dev->parent = &nvdimm_bus->dev;
	dev->type = &nvdimm_device_type;
	dev->devt = MKDEV(nvdimm_major, nvdimm->id);
	dev->groups = groups;
	nvdimm->sec.ops = sec_ops;
	nvdimm->fw_ops = fw_ops;
	nvdimm->sec.overwrite_tmo = 0;
	INIT_DELAYED_WORK(&nvdimm->dwork, nvdimm_security_overwrite_query);
	/*
	 * Security state must be initialized before device_add() for
	 * attribute visibility.
	 */
	/* get security state and extended (master) state */
	nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_USER);
	nvdimm->sec.ext_flags = nvdimm_security_flags(nvdimm, NVDIMM_MASTER);
	nd_device_register(dev);

	return nvdimm;
}
EXPORT_SYMBOL_GPL(__nvdimm_create);

static void shutdown_security_notify(void *data)
{
	struct nvdimm *nvdimm = data;

	sysfs_put(nvdimm->sec.overwrite_state);
}

int nvdimm_security_setup_events(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	if (!nvdimm->sec.flags || !nvdimm->sec.ops
			|| !nvdimm->sec.ops->overwrite)
		return 0;
	nvdimm->sec.overwrite_state = sysfs_get_dirent(dev->kobj.sd, "security");
	if (!nvdimm->sec.overwrite_state)
		return -ENOMEM;

	return devm_add_action_or_reset(dev, shutdown_security_notify, nvdimm);
}
EXPORT_SYMBOL_GPL(nvdimm_security_setup_events);

int nvdimm_in_overwrite(struct nvdimm *nvdimm)
{
	return test_bit(NDD_SECURITY_OVERWRITE, &nvdimm->flags);
}
EXPORT_SYMBOL_GPL(nvdimm_in_overwrite);

int nvdimm_security_freeze(struct nvdimm *nvdimm)
{
	int rc;

	WARN_ON_ONCE(!is_nvdimm_bus_locked(&nvdimm->dev));

	if (!nvdimm->sec.ops || !nvdimm->sec.ops->freeze)
		return -EOPNOTSUPP;

	if (!nvdimm->sec.flags)
		return -EIO;

	if (test_bit(NDD_SECURITY_OVERWRITE, &nvdimm->flags)) {
		dev_warn(&nvdimm->dev, "Overwrite operation in progress.\n");
		return -EBUSY;
	}

	rc = nvdimm->sec.ops->freeze(nvdimm);
	nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_USER);

	return rc;
}

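/*
 * Per-DIMM allocation granularity: the region alignment is divided
 * evenly across the interleaved mappings, so each DIMM contributes
 * nd_region->align / nd_region->ndr_mappings bytes of any aligned
 * region extent.
 */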
static unsigned long dpa_align(struct nd_region *nd_region)
{
	struct device *dev = &nd_region->dev;

	if (dev_WARN_ONCE(dev, !is_nvdimm_bus_locked(dev),
				"bus lock required for capacity provision\n"))
		return 0;
	if (dev_WARN_ONCE(dev, !nd_region->ndr_mappings || nd_region->align
				% nd_region->ndr_mappings,
				"invalid region align %#lx mappings: %d\n",
				nd_region->align, nd_region->ndr_mappings))
		return 0;
	return nd_region->align / nd_region->ndr_mappings;
}

int alias_dpa_busy(struct device *dev, void *data)
{
	resource_size_t map_end, blk_start, new;
	struct blk_alloc_info *info = data;
	struct nd_mapping *nd_mapping;
	struct nd_region *nd_region;
	struct nvdimm_drvdata *ndd;
	struct resource *res;
	unsigned long align;
	int i;

	if (!is_memory(dev))
		return 0;

	nd_region = to_nd_region(dev);
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		nd_mapping = &nd_region->mapping[i];
		if (nd_mapping->nvdimm == info->nd_mapping->nvdimm)
			break;
	}

	if (i >= nd_region->ndr_mappings)
		return 0;

	ndd = to_ndd(nd_mapping);
	map_end = nd_mapping->start + nd_mapping->size - 1;
	blk_start = nd_mapping->start;

	/*
	 * In the allocation case ->res is set to free space that we are
	 * looking to validate against PMEM aliasing collision rules
	 * (i.e. BLK is allocated after all aliased PMEM).
	 */
	if (info->res) {
		if (info->res->start >= nd_mapping->start
				&& info->res->start < map_end)
			/* pass */;
		else
			return 0;
	}

 retry:
	/*
	 * Find the free dpa from the end of the last pmem allocation to
	 * the end of the interleave-set mapping.
	 */
	align = dpa_align(nd_region);
	if (!align)
		return 0;

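	/*
	 * The dpa resource list is not sorted, so whenever blk_start
	 * advances, restart the scan so that earlier pmem entries are
	 * re-checked against the new window.
	 */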
	for_each_dpa_resource(ndd, res) {
		resource_size_t start, end;

		if (strncmp(res->name, "pmem", 4) != 0)
			continue;

		start = ALIGN_DOWN(res->start, align);
		end = ALIGN(res->end + 1, align) - 1;
		if ((start >= blk_start && start < map_end)
				|| (end >= blk_start && end <= map_end)) {
			new = max(blk_start, min(map_end, end) + 1);
			if (new != blk_start) {
				blk_start = new;
				goto retry;
			}
		}
	}

	/* update the free space range with the probed blk_start */
	if (info->res && blk_start > info->res->start) {
		info->res->start = max(info->res->start, blk_start);
		if (info->res->start > info->res->end)
			info->res->end = info->res->start - 1;
		return 1;
	}

	info->available -= blk_start - nd_mapping->start;

	return 0;
}

/**
 * nd_blk_available_dpa - account the unused dpa of BLK region
 * @nd_region: region to assess for available BLK dpa
 *
 * Unlike PMEM, BLK namespaces can occupy discontiguous DPA ranges, but
 * we arrange for them to never start at a lower dpa than the last
 * PMEM allocation in an aliased region.
 */
resource_size_t nd_blk_available_dpa(struct nd_region *nd_region)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);
	struct nd_mapping *nd_mapping = &nd_region->mapping[0];
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct blk_alloc_info info = {
		.nd_mapping = nd_mapping,
		.available = nd_mapping->size,
		.res = NULL,
	};
	struct resource *res;
	unsigned long align;

	if (!ndd)
		return 0;

	device_for_each_child(&nvdimm_bus->dev, &info, alias_dpa_busy);

	/* now account for busy blk allocations in unaliased dpa */
	align = dpa_align(nd_region);
	if (!align)
		return 0;
	for_each_dpa_resource(ndd, res) {
		resource_size_t start, end, size;

		if (strncmp(res->name, "blk", 3) != 0)
			continue;
		start = ALIGN_DOWN(res->start, align);
		end = ALIGN(res->end + 1, align) - 1;
		size = end - start + 1;
		if (size >= info.available)
			return 0;
		info.available -= size;
	}

	return info.available;
}

/**
 * nd_pmem_max_contiguous_dpa - For the given dimm+region, return the max
 *			   contiguous unallocated dpa range.
 * @nd_region: constrain available space check to this reference region
 * @nd_mapping: container of dpa-resource-root + labels
 */
resource_size_t nd_pmem_max_contiguous_dpa(struct nd_region *nd_region,
					   struct nd_mapping *nd_mapping)
{
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct nvdimm_bus *nvdimm_bus;
	resource_size_t max = 0;
	struct resource *res;
	unsigned long align;

	/* if a dimm is disabled the available capacity is zero */
	if (!ndd)
		return 0;

	align = dpa_align(nd_region);
	if (!align)
		return 0;

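	/*
	 * __reserve_free_pmem() temporarily claims each free extent as
	 * a "pmem-reserve" placeholder resource; measuring those
	 * placeholders below yields the largest contiguous free range,
	 * and release_free_pmem() drops them again.
	 */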
	nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
	if (__reserve_free_pmem(&nd_region->dev, nd_mapping->nvdimm))
		return 0;
	for_each_dpa_resource(ndd, res) {
		resource_size_t start, end;

		if (strcmp(res->name, "pmem-reserve") != 0)
			continue;
		/* trim free space relative to current alignment setting */
		start = ALIGN(res->start, align);
		end = ALIGN_DOWN(res->end + 1, align) - 1;
		if (end < start)
			continue;
		if (end - start + 1 > max)
			max = end - start + 1;
	}
	release_free_pmem(nvdimm_bus, nd_mapping);
	return max;
}

/**
 * nd_pmem_available_dpa - for the given dimm+region account unallocated dpa
 * @nd_region: constrain available space check to this reference region
 * @nd_mapping: container of dpa-resource-root + labels
 * @overlap: calculate available space assuming this level of overlap
 *
 * Validate that a PMEM label, if present, aligns with the start of an
 * interleave set and truncate the available size at the lowest BLK
 * overlap point.
 *
 * The expectation is that this routine is called multiple times as it
 * probes for the largest BLK encroachment for any single member DIMM of
 * the interleave set.  Once that value is determined the PMEM-limit for
 * the set can be established.
 */
resource_size_t nd_pmem_available_dpa(struct nd_region *nd_region,
		struct nd_mapping *nd_mapping, resource_size_t *overlap)
{
	resource_size_t map_start, map_end, busy = 0, available, blk_start;
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct resource *res;
	const char *reason;
	unsigned long align;

	if (!ndd)
		return 0;

	align = dpa_align(nd_region);
	if (!align)
		return 0;

	map_start = nd_mapping->start;
	map_end = map_start + nd_mapping->size - 1;
	blk_start = max(map_start, map_end + 1 - *overlap);
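	/*
	 * Classify each dpa allocation against [map_start, map_end]:
	 * busy pmem extents reduce the available total, blk extents can
	 * only pull blk_start downward, and an allocation straddling
	 * map_start eclipses the whole mapping.
	 */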
	for_each_dpa_resource(ndd, res) {
		resource_size_t start, end;

		start = ALIGN_DOWN(res->start, align);
		end = ALIGN(res->end + 1, align) - 1;
		if (start >= map_start && start < map_end) {
			if (strncmp(res->name, "blk", 3) == 0)
				blk_start = min(blk_start,
						max(map_start, start));
			else if (end > map_end) {
				reason = "misaligned to iset";
				goto err;
			} else
				busy += end - start + 1;
		} else if (end >= map_start && end <= map_end) {
			if (strncmp(res->name, "blk", 3) == 0) {
				/*
				 * If a BLK allocation overlaps the start of
				 * PMEM the entire interleave set may now only
				 * be used for BLK.
				 */
				blk_start = map_start;
			} else
				busy += end - start + 1;
		} else if (map_start > start && map_start < end) {
			/* total eclipse of the mapping */
			busy += nd_mapping->size;
			blk_start = map_start;
		}
	}

	*overlap = map_end + 1 - blk_start;
	available = blk_start - map_start;
	if (busy < available)
		return ALIGN_DOWN(available - busy, align);
	return 0;

 err:
	nd_dbg_dpa(nd_region, ndd, res, "%s\n", reason);
	return 0;
}

void nvdimm_free_dpa(struct nvdimm_drvdata *ndd, struct resource *res)
{
	WARN_ON_ONCE(!is_nvdimm_bus_locked(ndd->dev));
	kfree(res->name);
	__release_region(&ndd->dpa, res->start, resource_size(res));
}

struct resource *nvdimm_allocate_dpa(struct nvdimm_drvdata *ndd,
		struct nd_label_id *label_id, resource_size_t start,
		resource_size_t n)
{
	char *name = kmemdup(label_id, sizeof(*label_id), GFP_KERNEL);
	struct resource *res;

	if (!name)
		return NULL;

	WARN_ON_ONCE(!is_nvdimm_bus_locked(ndd->dev));
	res = __request_region(&ndd->dpa, start, n, name, 0);
	if (!res)
		kfree(name);
	return res;
}

/**
 * nvdimm_allocated_dpa - sum up the dpa currently allocated to this label_id
 * @ndd: container of dpa-resource-root + labels
 * @label_id: dpa resource name of the form {pmem|blk}-<human readable uuid>
 */
resource_size_t nvdimm_allocated_dpa(struct nvdimm_drvdata *ndd,
		struct nd_label_id *label_id)
{
	resource_size_t allocated = 0;
	struct resource *res;

	for_each_dpa_resource(ndd, res)
		if (strcmp(res->name, label_id->id) == 0)
			allocated += resource_size(res);

	return allocated;
}

static int count_dimms(struct device *dev, void *c)
{
	int *count = c;

	if (is_nvdimm(dev))
		(*count)++;
	return 0;
}

int nvdimm_bus_check_dimm_count(struct nvdimm_bus *nvdimm_bus, int dimm_count)
{
	int count = 0;
	/* Flush any possible dimm registration failures */
	nd_synchronize();

	device_for_each_child(&nvdimm_bus->dev, &count, count_dimms);
	dev_dbg(&nvdimm_bus->dev, "count: %d\n", count);
	if (count != dimm_count)
		return -ENXIO;
	return 0;
}
EXPORT_SYMBOL_GPL(nvdimm_bus_check_dimm_count);

void __exit nvdimm_devs_exit(void)
{
	ida_destroy(&dimm_ida);
}