/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include <linux/list_sort.h>
#include <linux/libnvdimm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/ndctl.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/acpi.h>
#include <linux/sort.h>
#include <linux/pmem.h>
#include <linux/io.h>
#include <linux/nd.h>
#include <asm/cacheflush.h>
#include "nfit.h"

/*
 * For readq() and writeq() on 32-bit builds, the hi-lo, lo-hi order is
 * irrelevant.
 */
#include <linux/io-64-nonatomic-hi-lo.h>

static bool force_enable_dimms;
module_param(force_enable_dimms, bool, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(force_enable_dimms, "Ignore _STA (ACPI DIMM device) status");

static unsigned int scrub_timeout = NFIT_ARS_TIMEOUT;
module_param(scrub_timeout, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(scrub_timeout, "Initial scrub timeout in seconds");

/* after three payloads of overflow, it's dead jim */
static unsigned int scrub_overflow_abort = 3;
module_param(scrub_overflow_abort, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(scrub_overflow_abort,
		"Number of times we overflow ARS results before abort");

static struct workqueue_struct *nfit_wq;

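/*
 * Lists of tables seen on a previous NFIT scan.  On re-enumeration,
 * tables that are still present are moved from here back onto the live
 * lists on acpi_desc (see add_spa() and friends below).
 */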
struct nfit_table_prev {
	struct list_head spas;
	struct list_head memdevs;
	struct list_head dcrs;
	struct list_head bdws;
	struct list_head idts;
	struct list_head flushes;
};

static u8 nfit_uuid[NFIT_UUID_MAX][16];

const u8 *to_nfit_uuid(enum nfit_uuids id)
{
	return nfit_uuid[id];
}
EXPORT_SYMBOL(to_nfit_uuid);

static struct acpi_nfit_desc *to_acpi_nfit_desc(
		struct nvdimm_bus_descriptor *nd_desc)
{
	return container_of(nd_desc, struct acpi_nfit_desc, nd_desc);
}

static struct acpi_device *to_acpi_dev(struct acpi_nfit_desc *acpi_desc)
{
	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;

	/*
	 * If provider == 'ACPI.NFIT' we can assume 'dev' is a struct
	 * acpi_device.
	 */
	if (!nd_desc->provider_name
			|| strcmp(nd_desc->provider_name, "ACPI.NFIT") != 0)
		return NULL;

	return to_acpi_device(acpi_desc->dev);
}

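/*
 * Translate the _DSM status word into an errno.  The low 16 bits carry
 * the command status and the upper 16 bits carry command-specific
 * extended status, so, for example, an ND_CMD_ARS_STATUS result of
 * 1 << 16 (NFIT_ARS_STATUS_BUSY) means the command itself succeeded
 * but a scrub is still in flight.
 */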
static int xlat_status(void *buf, unsigned int cmd)
{
	struct nd_cmd_clear_error *clear_err;
	struct nd_cmd_ars_status *ars_status;
	struct nd_cmd_ars_start *ars_start;
	struct nd_cmd_ars_cap *ars_cap;
	u16 flags;

	switch (cmd) {
	case ND_CMD_ARS_CAP:
		ars_cap = buf;
		if ((ars_cap->status & 0xffff) == NFIT_ARS_CAP_NONE)
			return -ENOTTY;

		/* Command failed */
		if (ars_cap->status & 0xffff)
			return -EIO;

		/* No supported scan types for this range */
		flags = ND_ARS_PERSISTENT | ND_ARS_VOLATILE;
		if ((ars_cap->status >> 16 & flags) == 0)
			return -ENOTTY;
		break;
	case ND_CMD_ARS_START:
		ars_start = buf;
		/* ARS is in progress */
		if ((ars_start->status & 0xffff) == NFIT_ARS_START_BUSY)
			return -EBUSY;

		/* Command failed */
		if (ars_start->status & 0xffff)
			return -EIO;
		break;
	case ND_CMD_ARS_STATUS:
		ars_status = buf;
		/* Command failed */
		if (ars_status->status & 0xffff)
			return -EIO;
		/* Check extended status (Upper two bytes) */
		if (ars_status->status == NFIT_ARS_STATUS_DONE)
			return 0;

		/* ARS is in progress */
		if (ars_status->status == NFIT_ARS_STATUS_BUSY)
			return -EBUSY;

		/* No ARS performed for the current boot */
		if (ars_status->status == NFIT_ARS_STATUS_NONE)
			return -EAGAIN;

		/*
		 * ARS interrupted, either we overflowed or some other
		 * agent wants the scan to stop.  If we didn't overflow
		 * then just continue with the returned results.
		 */
		if (ars_status->status == NFIT_ARS_STATUS_INTR) {
			if (ars_status->flags & NFIT_ARS_F_OVERFLOW)
				return -ENOSPC;
			return 0;
		}

		/* Unknown status */
		if (ars_status->status >> 16)
			return -EIO;
		break;
	case ND_CMD_CLEAR_ERROR:
		clear_err = buf;
		if (clear_err->status & 0xffff)
			return -EIO;
		if (!clear_err->cleared)
			return -EIO;
		if (clear_err->length > clear_err->cleared)
			return clear_err->cleared;
		break;
	default:
		break;
	}

	return 0;
}

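/*
 * Single entry point for both bus-scope and dimm-scope commands.  The
 * caller's 'buf' is a packed envelope: input fields at offset 0,
 * followed by space for the output fields.  A positive return value is
 * the number of output bytes the platform left unfilled, while the
 * translated command status is reported through 'cmd_rc'.
 */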
static int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc,
		struct nvdimm *nvdimm, unsigned int cmd, void *buf,
		unsigned int buf_len, int *cmd_rc)
{
	struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);
	const struct nd_cmd_desc *desc = NULL;
	union acpi_object in_obj, in_buf, *out_obj;
	struct device *dev = acpi_desc->dev;
	const char *cmd_name, *dimm_name;
	unsigned long dsm_mask;
	acpi_handle handle;
	const u8 *uuid;
	u32 offset;
	int rc, i;

	if (nvdimm) {
		struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
		struct acpi_device *adev = nfit_mem->adev;

		if (!adev)
			return -ENOTTY;
		dimm_name = nvdimm_name(nvdimm);
		cmd_name = nvdimm_cmd_name(cmd);
		dsm_mask = nfit_mem->dsm_mask;
		desc = nd_cmd_dimm_desc(cmd);
		uuid = to_nfit_uuid(NFIT_DEV_DIMM);
		handle = adev->handle;
	} else {
		struct acpi_device *adev = to_acpi_dev(acpi_desc);

		cmd_name = nvdimm_bus_cmd_name(cmd);
		dsm_mask = nd_desc->dsm_mask;
		desc = nd_cmd_bus_desc(cmd);
		uuid = to_nfit_uuid(NFIT_DEV_BUS);
		handle = adev->handle;
		dimm_name = "bus";
	}

	if (!desc || (cmd && (desc->out_num + desc->in_num == 0)))
		return -ENOTTY;

	if (!test_bit(cmd, &dsm_mask))
		return -ENOTTY;

	in_obj.type = ACPI_TYPE_PACKAGE;
	in_obj.package.count = 1;
	in_obj.package.elements = &in_buf;
	in_buf.type = ACPI_TYPE_BUFFER;
	in_buf.buffer.pointer = buf;
	in_buf.buffer.length = 0;

	/* libnvdimm has already validated the input envelope */
	for (i = 0; i < desc->in_num; i++)
		in_buf.buffer.length += nd_cmd_in_size(nvdimm, cmd, desc,
				i, buf);

	if (IS_ENABLED(CONFIG_ACPI_NFIT_DEBUG)) {
		dev_dbg(dev, "%s:%s cmd: %s input length: %d\n", __func__,
				dimm_name, cmd_name, in_buf.buffer.length);
		print_hex_dump_debug(cmd_name, DUMP_PREFIX_OFFSET, 4,
				4, in_buf.buffer.pointer, min_t(u32, 128,
					in_buf.buffer.length), true);
	}

	out_obj = acpi_evaluate_dsm(handle, uuid, 1, cmd, &in_obj);
	if (!out_obj) {
		dev_dbg(dev, "%s:%s _DSM failed cmd: %s\n", __func__, dimm_name,
				cmd_name);
		return -EINVAL;
	}

	if (out_obj->package.type != ACPI_TYPE_BUFFER) {
		dev_dbg(dev, "%s:%s unexpected output object type cmd: %s type: %d\n",
				__func__, dimm_name, cmd_name, out_obj->type);
		rc = -EINVAL;
		goto out;
	}

	if (IS_ENABLED(CONFIG_ACPI_NFIT_DEBUG)) {
		dev_dbg(dev, "%s:%s cmd: %s output length: %d\n", __func__,
				dimm_name, cmd_name, out_obj->buffer.length);
		print_hex_dump_debug(cmd_name, DUMP_PREFIX_OFFSET, 4,
				4, out_obj->buffer.pointer, min_t(u32, 128,
					out_obj->buffer.length), true);
	}

	for (i = 0, offset = 0; i < desc->out_num; i++) {
		u32 out_size = nd_cmd_out_size(nvdimm, cmd, desc, i, buf,
				(u32 *) out_obj->buffer.pointer);

		if (offset + out_size > out_obj->buffer.length) {
			dev_dbg(dev, "%s:%s output object underflow cmd: %s field: %d\n",
					__func__, dimm_name, cmd_name, i);
			break;
		}

		if (in_buf.buffer.length + offset + out_size > buf_len) {
			dev_dbg(dev, "%s:%s output overrun cmd: %s field: %d\n",
					__func__, dimm_name, cmd_name, i);
			rc = -ENXIO;
			goto out;
		}
		memcpy(buf + in_buf.buffer.length + offset,
				out_obj->buffer.pointer + offset, out_size);
		offset += out_size;
	}
	if (offset + in_buf.buffer.length < buf_len) {
		if (i >= 1) {
			/*
			 * status valid, return the number of bytes left
			 * unfilled in the output buffer
			 */
			rc = buf_len - offset - in_buf.buffer.length;
			if (cmd_rc)
				*cmd_rc = xlat_status(buf, cmd);
		} else {
			dev_err(dev, "%s:%s underrun cmd: %s buf_len: %d out_len: %d\n",
					__func__, dimm_name, cmd_name, buf_len,
					offset);
			rc = -ENXIO;
		}
	} else {
		rc = 0;
		if (cmd_rc)
			*cmd_rc = xlat_status(buf, cmd);
	}

 out:
	ACPI_FREE(out_obj);

	return rc;
}

static const char *spa_type_name(u16 type)
{
	static const char *to_name[] = {
		[NFIT_SPA_VOLATILE] = "volatile",
		[NFIT_SPA_PM] = "pmem",
		[NFIT_SPA_DCR] = "dimm-control-region",
		[NFIT_SPA_BDW] = "block-data-window",
		[NFIT_SPA_VDISK] = "volatile-disk",
		[NFIT_SPA_VCD] = "volatile-cd",
		[NFIT_SPA_PDISK] = "persistent-disk",
		[NFIT_SPA_PCD] = "persistent-cd",
	};

	if (type > NFIT_SPA_PCD)
		return "unknown";

	return to_name[type];
}

static int nfit_spa_type(struct acpi_nfit_system_address *spa)
{
	int i;

	for (i = 0; i < NFIT_UUID_MAX; i++)
		if (memcmp(to_nfit_uuid(i), spa->range_guid, 16) == 0)
			return i;
	return -1;
}

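/*
 * The add_* helpers below implement the re-enumeration scheme: if an
 * identical table was seen on the previous scan it is moved from the
 * 'prev' list back onto the live acpi_desc list, otherwise a new
 * devm-allocated wrapper for the table is appended.
 */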
static bool add_spa(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_system_address *spa)
{
	size_t length = min_t(size_t, sizeof(*spa), spa->header.length);
	struct device *dev = acpi_desc->dev;
	struct nfit_spa *nfit_spa;

	list_for_each_entry(nfit_spa, &prev->spas, list) {
		if (memcmp(nfit_spa->spa, spa, length) == 0) {
			list_move_tail(&nfit_spa->list, &acpi_desc->spas);
			return true;
		}
	}

	nfit_spa = devm_kzalloc(dev, sizeof(*nfit_spa), GFP_KERNEL);
	if (!nfit_spa)
		return false;
	INIT_LIST_HEAD(&nfit_spa->list);
	nfit_spa->spa = spa;
	list_add_tail(&nfit_spa->list, &acpi_desc->spas);
	dev_dbg(dev, "%s: spa index: %d type: %s\n", __func__,
			spa->range_index,
			spa_type_name(nfit_spa_type(spa)));
	return true;
}

static bool add_memdev(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_memory_map *memdev)
{
	size_t length = min_t(size_t, sizeof(*memdev), memdev->header.length);
	struct device *dev = acpi_desc->dev;
	struct nfit_memdev *nfit_memdev;

	list_for_each_entry(nfit_memdev, &prev->memdevs, list)
		if (memcmp(nfit_memdev->memdev, memdev, length) == 0) {
			list_move_tail(&nfit_memdev->list, &acpi_desc->memdevs);
			return true;
		}

	nfit_memdev = devm_kzalloc(dev, sizeof(*nfit_memdev), GFP_KERNEL);
	if (!nfit_memdev)
		return false;
	INIT_LIST_HEAD(&nfit_memdev->list);
	nfit_memdev->memdev = memdev;
	list_add_tail(&nfit_memdev->list, &acpi_desc->memdevs);
	dev_dbg(dev, "%s: memdev handle: %#x spa: %d dcr: %d\n",
			__func__, memdev->device_handle, memdev->range_index,
			memdev->region_index);
	return true;
}

static bool add_dcr(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_control_region *dcr)
{
	size_t length = min_t(size_t, sizeof(*dcr), dcr->header.length);
	struct device *dev = acpi_desc->dev;
	struct nfit_dcr *nfit_dcr;

	list_for_each_entry(nfit_dcr, &prev->dcrs, list)
		if (memcmp(nfit_dcr->dcr, dcr, length) == 0) {
			list_move_tail(&nfit_dcr->list, &acpi_desc->dcrs);
			return true;
		}

	nfit_dcr = devm_kzalloc(dev, sizeof(*nfit_dcr), GFP_KERNEL);
	if (!nfit_dcr)
		return false;
	INIT_LIST_HEAD(&nfit_dcr->list);
	nfit_dcr->dcr = dcr;
	list_add_tail(&nfit_dcr->list, &acpi_desc->dcrs);
	dev_dbg(dev, "%s: dcr index: %d windows: %d\n", __func__,
			dcr->region_index, dcr->windows);
	return true;
}

static bool add_bdw(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_data_region *bdw)
{
	size_t length = min_t(size_t, sizeof(*bdw), bdw->header.length);
	struct device *dev = acpi_desc->dev;
	struct nfit_bdw *nfit_bdw;

	list_for_each_entry(nfit_bdw, &prev->bdws, list)
		if (memcmp(nfit_bdw->bdw, bdw, length) == 0) {
			list_move_tail(&nfit_bdw->list, &acpi_desc->bdws);
			return true;
		}

	nfit_bdw = devm_kzalloc(dev, sizeof(*nfit_bdw), GFP_KERNEL);
	if (!nfit_bdw)
		return false;
	INIT_LIST_HEAD(&nfit_bdw->list);
	nfit_bdw->bdw = bdw;
	list_add_tail(&nfit_bdw->list, &acpi_desc->bdws);
	dev_dbg(dev, "%s: bdw dcr: %d windows: %d\n", __func__,
			bdw->region_index, bdw->windows);
	return true;
}

static bool add_idt(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_interleave *idt)
{
	size_t length = min_t(size_t, sizeof(*idt), idt->header.length);
	struct device *dev = acpi_desc->dev;
	struct nfit_idt *nfit_idt;

	list_for_each_entry(nfit_idt, &prev->idts, list)
		if (memcmp(nfit_idt->idt, idt, length) == 0) {
			list_move_tail(&nfit_idt->list, &acpi_desc->idts);
			return true;
		}

	nfit_idt = devm_kzalloc(dev, sizeof(*nfit_idt), GFP_KERNEL);
	if (!nfit_idt)
		return false;
	INIT_LIST_HEAD(&nfit_idt->list);
	nfit_idt->idt = idt;
	list_add_tail(&nfit_idt->list, &acpi_desc->idts);
	dev_dbg(dev, "%s: idt index: %d num_lines: %d\n", __func__,
			idt->interleave_index, idt->line_count);
	return true;
}

static bool add_flush(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_flush_address *flush)
{
	size_t length = min_t(size_t, sizeof(*flush), flush->header.length);
	struct device *dev = acpi_desc->dev;
	struct nfit_flush *nfit_flush;

	list_for_each_entry(nfit_flush, &prev->flushes, list)
		if (memcmp(nfit_flush->flush, flush, length) == 0) {
			list_move_tail(&nfit_flush->list, &acpi_desc->flushes);
			return true;
		}

	nfit_flush = devm_kzalloc(dev, sizeof(*nfit_flush), GFP_KERNEL);
	if (!nfit_flush)
		return false;
	INIT_LIST_HEAD(&nfit_flush->list);
	nfit_flush->flush = flush;
	list_add_tail(&nfit_flush->list, &acpi_desc->flushes);
	dev_dbg(dev, "%s: nfit_flush handle: %d hint_count: %d\n", __func__,
			flush->device_handle, flush->hint_count);
	return true;
}

static void *add_table(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev, void *table, const void *end)
{
	struct device *dev = acpi_desc->dev;
	struct acpi_nfit_header *hdr;
	void *err = ERR_PTR(-ENOMEM);

	if (table >= end)
		return NULL;

	hdr = table;
	if (!hdr->length) {
		dev_warn(dev, "found a zero length table '%d' parsing nfit\n",
			hdr->type);
		return NULL;
	}

	switch (hdr->type) {
	case ACPI_NFIT_TYPE_SYSTEM_ADDRESS:
		if (!add_spa(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_MEMORY_MAP:
		if (!add_memdev(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_CONTROL_REGION:
		if (!add_dcr(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_DATA_REGION:
		if (!add_bdw(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_INTERLEAVE:
		if (!add_idt(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_FLUSH_ADDRESS:
		if (!add_flush(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_SMBIOS:
		dev_dbg(dev, "%s: smbios\n", __func__);
		break;
	default:
		dev_err(dev, "unknown table '%d' parsing nfit\n", hdr->type);
		break;
	}

	return table + hdr->length;
}

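/*
 * A BDW aperture is tied back to its dimm by matching the triple
 * (device_handle, range_index, region_index) between a SPA-BDW range
 * and the memdev entries, i.e. the memdev acts as the join table
 * between the SPA and DCR tables.
 */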
static void nfit_mem_find_spa_bdw(struct acpi_nfit_desc *acpi_desc,
		struct nfit_mem *nfit_mem)
{
	u32 device_handle = __to_nfit_memdev(nfit_mem)->device_handle;
	u16 dcr = nfit_mem->dcr->region_index;
	struct nfit_spa *nfit_spa;

	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
		u16 range_index = nfit_spa->spa->range_index;
		int type = nfit_spa_type(nfit_spa->spa);
		struct nfit_memdev *nfit_memdev;

		if (type != NFIT_SPA_BDW)
			continue;

		list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
			if (nfit_memdev->memdev->range_index != range_index)
				continue;
			if (nfit_memdev->memdev->device_handle != device_handle)
				continue;
			if (nfit_memdev->memdev->region_index != dcr)
				continue;

			nfit_mem->spa_bdw = nfit_spa->spa;
			return;
		}
	}

	dev_dbg(acpi_desc->dev, "SPA-BDW not found for SPA-DCR %d\n",
			nfit_mem->spa_dcr->range_index);
	nfit_mem->bdw = NULL;
}

static void nfit_mem_init_bdw(struct acpi_nfit_desc *acpi_desc,
		struct nfit_mem *nfit_mem, struct acpi_nfit_system_address *spa)
{
	u16 dcr = __to_nfit_memdev(nfit_mem)->region_index;
	struct nfit_memdev *nfit_memdev;
	struct nfit_flush *nfit_flush;
	struct nfit_bdw *nfit_bdw;
	struct nfit_idt *nfit_idt;
	u16 idt_idx, range_index;

	list_for_each_entry(nfit_bdw, &acpi_desc->bdws, list) {
		if (nfit_bdw->bdw->region_index != dcr)
			continue;
		nfit_mem->bdw = nfit_bdw->bdw;
		break;
	}

	if (!nfit_mem->bdw)
		return;

	nfit_mem_find_spa_bdw(acpi_desc, nfit_mem);

	if (!nfit_mem->spa_bdw)
		return;

	range_index = nfit_mem->spa_bdw->range_index;
	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
		if (nfit_memdev->memdev->range_index != range_index ||
				nfit_memdev->memdev->region_index != dcr)
			continue;
		nfit_mem->memdev_bdw = nfit_memdev->memdev;
		idt_idx = nfit_memdev->memdev->interleave_index;
		list_for_each_entry(nfit_idt, &acpi_desc->idts, list) {
			if (nfit_idt->idt->interleave_index != idt_idx)
				continue;
			nfit_mem->idt_bdw = nfit_idt->idt;
			break;
		}

		list_for_each_entry(nfit_flush, &acpi_desc->flushes, list) {
			if (nfit_flush->flush->device_handle !=
					nfit_memdev->memdev->device_handle)
				continue;
			nfit_mem->nfit_flush = nfit_flush;
			break;
		}
		break;
	}
}

static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc,
		struct acpi_nfit_system_address *spa)
{
	struct nfit_mem *nfit_mem, *found;
	struct nfit_memdev *nfit_memdev;
	int type = nfit_spa_type(spa);

	switch (type) {
	case NFIT_SPA_DCR:
	case NFIT_SPA_PM:
		break;
	default:
		return 0;
	}

	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
		struct nfit_dcr *nfit_dcr;
		u32 device_handle;
		u16 dcr;

		if (nfit_memdev->memdev->range_index != spa->range_index)
			continue;
		found = NULL;
		dcr = nfit_memdev->memdev->region_index;
		device_handle = nfit_memdev->memdev->device_handle;
		list_for_each_entry(nfit_mem, &acpi_desc->dimms, list)
			if (__to_nfit_memdev(nfit_mem)->device_handle
					== device_handle) {
				found = nfit_mem;
				break;
			}

		if (found)
			nfit_mem = found;
		else {
			nfit_mem = devm_kzalloc(acpi_desc->dev,
					sizeof(*nfit_mem), GFP_KERNEL);
			if (!nfit_mem)
				return -ENOMEM;
			INIT_LIST_HEAD(&nfit_mem->list);
			list_add(&nfit_mem->list, &acpi_desc->dimms);
		}

		list_for_each_entry(nfit_dcr, &acpi_desc->dcrs, list) {
			if (nfit_dcr->dcr->region_index != dcr)
				continue;
			/*
			 * Record the control region for the dimm.  For
			 * the ACPI 6.1 case, where there are separate
			 * control regions for the pmem vs blk
			 * interfaces, be sure to record the extended
			 * blk details.
			 */
			if (!nfit_mem->dcr)
				nfit_mem->dcr = nfit_dcr->dcr;
			else if (nfit_mem->dcr->windows == 0
					&& nfit_dcr->dcr->windows)
				nfit_mem->dcr = nfit_dcr->dcr;
			break;
		}

		if (dcr && !nfit_mem->dcr) {
			dev_err(acpi_desc->dev, "SPA %d missing DCR %d\n",
					spa->range_index, dcr);
			return -ENODEV;
		}

		if (type == NFIT_SPA_DCR) {
			struct nfit_idt *nfit_idt;
			u16 idt_idx;

			/* multiple dimms may share a SPA when interleaved */
			nfit_mem->spa_dcr = spa;
			nfit_mem->memdev_dcr = nfit_memdev->memdev;
			idt_idx = nfit_memdev->memdev->interleave_index;
			list_for_each_entry(nfit_idt, &acpi_desc->idts, list) {
				if (nfit_idt->idt->interleave_index != idt_idx)
					continue;
				nfit_mem->idt_dcr = nfit_idt->idt;
				break;
			}
			nfit_mem_init_bdw(acpi_desc, nfit_mem, spa);
		} else {
			/*
			 * A single dimm may belong to multiple SPA-PM
			 * ranges, record at least one in addition to
			 * any SPA-DCR range.
			 */
			nfit_mem->memdev_pmem = nfit_memdev->memdev;
		}
	}

	return 0;
}

static int nfit_mem_cmp(void *priv, struct list_head *_a, struct list_head *_b)
{
	struct nfit_mem *a = container_of(_a, typeof(*a), list);
	struct nfit_mem *b = container_of(_b, typeof(*b), list);
	u32 handleA, handleB;

	handleA = __to_nfit_memdev(a)->device_handle;
	handleB = __to_nfit_memdev(b)->device_handle;
	if (handleA < handleB)
		return -1;
	else if (handleA > handleB)
		return 1;
	return 0;
}

static int nfit_mem_init(struct acpi_nfit_desc *acpi_desc)
{
	struct nfit_spa *nfit_spa;

	/*
	 * For each SPA-DCR or SPA-PMEM address range find its
	 * corresponding MEMDEV(s).  From each MEMDEV find the
	 * corresponding DCR.  Then, if we're operating on a SPA-DCR,
	 * try to find a SPA-BDW and a corresponding BDW that references
	 * the DCR.  Throw it all into an nfit_mem object.  Note, that
	 * BDWs are optional.
	 */
	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
		int rc;

		rc = nfit_mem_dcr_init(acpi_desc, nfit_spa->spa);
		if (rc)
			return rc;
	}

	list_sort(NULL, &acpi_desc->dimms, nfit_mem_cmp);

	return 0;
}

static ssize_t revision_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
	struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);

	return sprintf(buf, "%d\n", acpi_desc->acpi_header.revision);
}
static DEVICE_ATTR_RO(revision);

static struct attribute *acpi_nfit_attributes[] = {
	&dev_attr_revision.attr,
	NULL,
};

static struct attribute_group acpi_nfit_attribute_group = {
	.name = "nfit",
	.attrs = acpi_nfit_attributes,
};

static const struct attribute_group *acpi_nfit_attribute_groups[] = {
	&nvdimm_bus_attribute_group,
	&acpi_nfit_attribute_group,
	NULL,
};

static struct acpi_nfit_memory_map *to_nfit_memdev(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

	return __to_nfit_memdev(nfit_mem);
}

static struct acpi_nfit_control_region *to_nfit_dcr(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

	return nfit_mem->dcr;
}

static ssize_t handle_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_memory_map *memdev = to_nfit_memdev(dev);

	return sprintf(buf, "%#x\n", memdev->device_handle);
}
static DEVICE_ATTR_RO(handle);

static ssize_t phys_id_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_memory_map *memdev = to_nfit_memdev(dev);

	return sprintf(buf, "%#x\n", memdev->physical_id);
}
static DEVICE_ATTR_RO(phys_id);

static ssize_t vendor_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "%#x\n", dcr->vendor_id);
}
static DEVICE_ATTR_RO(vendor);

static ssize_t rev_id_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "%#x\n", dcr->revision_id);
}
static DEVICE_ATTR_RO(rev_id);

static ssize_t device_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "%#x\n", dcr->device_id);
}
static DEVICE_ATTR_RO(device);

static ssize_t format_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "%#x\n", dcr->code);
}
static DEVICE_ATTR_RO(format);

static ssize_t serial_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "%#x\n", dcr->serial_number);
}
static DEVICE_ATTR_RO(serial);

static ssize_t flags_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u16 flags = to_nfit_memdev(dev)->flags;

	return sprintf(buf, "%s%s%s%s%s\n",
		flags & ACPI_NFIT_MEM_SAVE_FAILED ? "save_fail " : "",
		flags & ACPI_NFIT_MEM_RESTORE_FAILED ? "restore_fail " : "",
		flags & ACPI_NFIT_MEM_FLUSH_FAILED ? "flush_fail " : "",
		flags & ACPI_NFIT_MEM_NOT_ARMED ? "not_armed " : "",
		flags & ACPI_NFIT_MEM_HEALTH_OBSERVED ? "smart_event " : "");
}
static DEVICE_ATTR_RO(flags);

static struct attribute *acpi_nfit_dimm_attributes[] = {
	&dev_attr_handle.attr,
	&dev_attr_phys_id.attr,
	&dev_attr_vendor.attr,
	&dev_attr_device.attr,
	&dev_attr_format.attr,
	&dev_attr_serial.attr,
	&dev_attr_rev_id.attr,
	&dev_attr_flags.attr,
	NULL,
};

static umode_t acpi_nfit_dimm_attr_visible(struct kobject *kobj,
		struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);

	if (to_nfit_dcr(dev))
		return a->mode;
	else
		return 0;
}

static struct attribute_group acpi_nfit_dimm_attribute_group = {
	.name = "nfit",
	.attrs = acpi_nfit_dimm_attributes,
	.is_visible = acpi_nfit_dimm_attr_visible,
};

static const struct attribute_group *acpi_nfit_dimm_attribute_groups[] = {
	&nvdimm_attribute_group,
	&nd_device_attribute_group,
	&acpi_nfit_dimm_attribute_group,
	NULL,
};

static struct nvdimm *acpi_nfit_dimm_by_handle(struct acpi_nfit_desc *acpi_desc,
		u32 device_handle)
{
	struct nfit_mem *nfit_mem;

	list_for_each_entry(nfit_mem, &acpi_desc->dimms, list)
		if (__to_nfit_memdev(nfit_mem)->device_handle == device_handle)
			return nfit_mem->nvdimm;

	return NULL;
}

static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
		struct nfit_mem *nfit_mem, u32 device_handle)
{
	struct acpi_device *adev, *adev_dimm;
	struct device *dev = acpi_desc->dev;
	const u8 *uuid = to_nfit_uuid(NFIT_DEV_DIMM);
	int i;

	nfit_mem->dsm_mask = acpi_desc->dimm_dsm_force_en;
	adev = to_acpi_dev(acpi_desc);
	if (!adev)
		return 0;

	adev_dimm = acpi_find_child_device(adev, device_handle, false);
	nfit_mem->adev = adev_dimm;
	if (!adev_dimm) {
		dev_err(dev, "no ACPI.NFIT device with _ADR %#x, disabling...\n",
				device_handle);
		return force_enable_dimms ? 0 : -ENODEV;
	}

	for (i = ND_CMD_SMART; i <= ND_CMD_VENDOR; i++)
		if (acpi_check_dsm(adev_dimm->handle, uuid, 1, 1ULL << i))
			set_bit(i, &nfit_mem->dsm_mask);

	return 0;
}

static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc)
{
	struct nfit_mem *nfit_mem;
	int dimm_count = 0;

	list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
		struct nvdimm *nvdimm;
		unsigned long flags = 0;
		u32 device_handle;
		u16 mem_flags;
		int rc;

		device_handle = __to_nfit_memdev(nfit_mem)->device_handle;
		nvdimm = acpi_nfit_dimm_by_handle(acpi_desc, device_handle);
		if (nvdimm) {
			dimm_count++;
			continue;
		}

		if (nfit_mem->bdw && nfit_mem->memdev_pmem)
			flags |= NDD_ALIASING;

		mem_flags = __to_nfit_memdev(nfit_mem)->flags;
		if (mem_flags & ACPI_NFIT_MEM_NOT_ARMED)
			flags |= NDD_UNARMED;

		rc = acpi_nfit_add_dimm(acpi_desc, nfit_mem, device_handle);
		if (rc)
			continue;

		nvdimm = nvdimm_create(acpi_desc->nvdimm_bus, nfit_mem,
				acpi_nfit_dimm_attribute_groups,
				flags, &nfit_mem->dsm_mask);
		if (!nvdimm)
			return -ENOMEM;

		nfit_mem->nvdimm = nvdimm;
		dimm_count++;

		if ((mem_flags & ACPI_NFIT_MEM_FAILED_MASK) == 0)
			continue;

		dev_info(acpi_desc->dev, "%s flags:%s%s%s%s\n",
				nvdimm_name(nvdimm),
		  mem_flags & ACPI_NFIT_MEM_SAVE_FAILED ? " save_fail" : "",
		  mem_flags & ACPI_NFIT_MEM_RESTORE_FAILED ? " restore_fail":"",
		  mem_flags & ACPI_NFIT_MEM_FLUSH_FAILED ? " flush_fail" : "",
		  mem_flags & ACPI_NFIT_MEM_NOT_ARMED ? " not_armed" : "");

	}

	return nvdimm_bus_check_dimm_count(acpi_desc->nvdimm_bus, dimm_count);
}

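/*
 * Probe the bus-scope _DSM functions (ARS capability/start/status,
 * clear-error) the same way acpi_nfit_add_dimm() probes the dimm-scope
 * functions, accumulating the results in nd_desc->dsm_mask.
 */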
static void acpi_nfit_init_dsms(struct acpi_nfit_desc *acpi_desc)
{
	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
	const u8 *uuid = to_nfit_uuid(NFIT_DEV_BUS);
	struct acpi_device *adev;
	int i;

	nd_desc->dsm_mask = acpi_desc->bus_dsm_force_en;
	adev = to_acpi_dev(acpi_desc);
	if (!adev)
		return;

	for (i = ND_CMD_ARS_CAP; i <= ND_CMD_CLEAR_ERROR; i++)
		if (acpi_check_dsm(adev->handle, uuid, 1, 1ULL << i))
			set_bit(i, &nd_desc->dsm_mask);
}

static ssize_t range_index_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	struct nfit_spa *nfit_spa = nd_region_provider_data(nd_region);

	return sprintf(buf, "%d\n", nfit_spa->spa->range_index);
}
static DEVICE_ATTR_RO(range_index);

static struct attribute *acpi_nfit_region_attributes[] = {
	&dev_attr_range_index.attr,
	NULL,
};

static struct attribute_group acpi_nfit_region_attribute_group = {
	.name = "nfit",
	.attrs = acpi_nfit_region_attributes,
};

static const struct attribute_group *acpi_nfit_region_attribute_groups[] = {
	&nd_region_attribute_group,
	&nd_mapping_attribute_group,
	&nd_device_attribute_group,
	&nd_numa_attribute_group,
	&acpi_nfit_region_attribute_group,
	NULL,
};

/* enough info to uniquely specify an interleave set */
struct nfit_set_info {
	struct nfit_set_info_map {
		u64 region_offset;
		u32 serial_number;
		u32 pad;
	} mapping[0];
};

static size_t sizeof_nfit_set_info(int num_mappings)
{
	return sizeof(struct nfit_set_info)
		+ num_mappings * sizeof(struct nfit_set_info_map);
}

static int cmp_map(const void *m0, const void *m1)
{
	const struct nfit_set_info_map *map0 = m0;
	const struct nfit_set_info_map *map1 = m1;

	return memcmp(&map0->region_offset, &map1->region_offset,
			sizeof(u64));
}

/* Retrieve the nth entry referencing this spa */
static struct acpi_nfit_memory_map *memdev_from_spa(
		struct acpi_nfit_desc *acpi_desc, u16 range_index, int n)
{
	struct nfit_memdev *nfit_memdev;

	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list)
		if (nfit_memdev->memdev->range_index == range_index)
			if (n-- == 0)
				return nfit_memdev->memdev;
	return NULL;
}

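/*
 * The interleave-set cookie is a fletcher64 checksum over the
 * (region_offset, serial_number) pairs of all mappings in the set,
 * sorted by region_offset so the result is stable regardless of
 * enumeration order.  Labels stamped with this cookie can later detect
 * that the dimm population of the set has changed.
 */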
static int acpi_nfit_init_interleave_set(struct acpi_nfit_desc *acpi_desc,
		struct nd_region_desc *ndr_desc,
		struct acpi_nfit_system_address *spa)
{
	int i, spa_type = nfit_spa_type(spa);
	struct device *dev = acpi_desc->dev;
	struct nd_interleave_set *nd_set;
	u16 nr = ndr_desc->num_mappings;
	struct nfit_set_info *info;

	if (spa_type == NFIT_SPA_PM || spa_type == NFIT_SPA_VOLATILE)
		/* pass */;
	else
		return 0;

	nd_set = devm_kzalloc(dev, sizeof(*nd_set), GFP_KERNEL);
	if (!nd_set)
		return -ENOMEM;

	info = devm_kzalloc(dev, sizeof_nfit_set_info(nr), GFP_KERNEL);
	if (!info)
		return -ENOMEM;
	for (i = 0; i < nr; i++) {
		struct nd_mapping *nd_mapping = &ndr_desc->nd_mapping[i];
		struct nfit_set_info_map *map = &info->mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;
		struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
		struct acpi_nfit_memory_map *memdev = memdev_from_spa(acpi_desc,
				spa->range_index, i);

		if (!memdev || !nfit_mem->dcr) {
			dev_err(dev, "%s: failed to find DCR\n", __func__);
			return -ENODEV;
		}

		map->region_offset = memdev->region_offset;
		map->serial_number = nfit_mem->dcr->serial_number;
	}

	sort(&info->mapping[0], nr, sizeof(struct nfit_set_info_map),
			cmp_map, NULL);
	nd_set->cookie = nd_fletcher64(info, sizeof_nfit_set_info(nr), 0);
	ndr_desc->nd_set = nd_set;
	devm_kfree(dev, info);

	return 0;
}

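/*
 * Convert a linear offset within a BLK aperture / control region into
 * the interleaved system-physical offset.  As an illustrative example,
 * with line_size = 256, num_lines = 2 and a 2-way interleave
 * (table_size = 2 * 2 * 256 = 1024), offset 520 yields line_no = 2,
 * sub_line_offset = 8, table_skip_count = 1, line_index = 0, i.e. the
 * access lands 8 bytes into the line described by line_offset[0], one
 * full table (1024 bytes) past base_offset.
 */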
static u64 to_interleave_offset(u64 offset, struct nfit_blk_mmio *mmio)
{
	struct acpi_nfit_interleave *idt = mmio->idt;
	u32 sub_line_offset, line_index, line_offset;
	u64 line_no, table_skip_count, table_offset;

	line_no = div_u64_rem(offset, mmio->line_size, &sub_line_offset);
	table_skip_count = div_u64_rem(line_no, mmio->num_lines, &line_index);
	line_offset = idt->line_offset[line_index]
		* mmio->line_size;
	table_offset = table_skip_count * mmio->table_size;

	return mmio->base_offset + line_offset + table_offset + sub_line_offset;
}

static void wmb_blk(struct nfit_blk *nfit_blk)
{
	if (nfit_blk->nvdimm_flush) {
		/*
		 * The first wmb() is needed to 'sfence' all previous writes
		 * such that they are architecturally visible for the platform
		 * buffer flush.  Note that we've already arranged for pmem
		 * writes to avoid the cache via arch_memcpy_to_pmem().  The
		 * final wmb() ensures ordering for the NVDIMM flush write.
		 */
		wmb();
		writeq(1, nfit_blk->nvdimm_flush);
		wmb();
	} else
		wmb_pmem();
}

static u32 read_blk_stat(struct nfit_blk *nfit_blk, unsigned int bw)
{
	struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR];
	u64 offset = nfit_blk->stat_offset + mmio->size * bw;

	if (mmio->num_lines)
		offset = to_interleave_offset(offset, mmio);

	return readl(mmio->addr.base + offset);
}

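/*
 * Program the block control window.  The command register packs the
 * dpa (in cache-line units) into bits 47:0, the transfer length in
 * cache lines into bits 55:48, and the read/write flag into bit 56.
 * For example, assuming 64-byte cache lines, dpa = 0x2000 and
 * len = 256 with write = 1 encode as
 * (0x2000 >> 6) | (4ULL << 48) | (1ULL << 56) = 0x0104000000000080.
 */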
static void write_blk_ctl(struct nfit_blk *nfit_blk, unsigned int bw,
		resource_size_t dpa, unsigned int len, unsigned int write)
{
	u64 cmd, offset;
	struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR];

	enum {
		BCW_OFFSET_MASK = (1ULL << 48)-1,
		BCW_LEN_SHIFT = 48,
		BCW_LEN_MASK = (1ULL << 8) - 1,
		BCW_CMD_SHIFT = 56,
	};

	cmd = (dpa >> L1_CACHE_SHIFT) & BCW_OFFSET_MASK;
	len = len >> L1_CACHE_SHIFT;
	cmd |= ((u64) len & BCW_LEN_MASK) << BCW_LEN_SHIFT;
	cmd |= ((u64) write) << BCW_CMD_SHIFT;

	offset = nfit_blk->cmd_offset + mmio->size * bw;
	if (mmio->num_lines)
		offset = to_interleave_offset(offset, mmio);

	writeq(cmd, mmio->addr.base + offset);
	wmb_blk(nfit_blk);

	if (nfit_blk->dimm_flags & NFIT_BLK_DCR_LATCH)
		readq(mmio->addr.base + offset);
}

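/*
 * One aperture-sized (or smaller) transfer on a single lane: program
 * the control window for the target dpa, copy through the data window
 * in interleave-line-sized chunks, then consult the status register.
 */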
static int acpi_nfit_blk_single_io(struct nfit_blk *nfit_blk,
		resource_size_t dpa, void *iobuf, size_t len, int rw,
		unsigned int lane)
{
	struct nfit_blk_mmio *mmio = &nfit_blk->mmio[BDW];
	unsigned int copied = 0;
	u64 base_offset;
	int rc;

	base_offset = nfit_blk->bdw_offset + dpa % L1_CACHE_BYTES
		+ lane * mmio->size;
	write_blk_ctl(nfit_blk, lane, dpa, len, rw);
	while (len) {
		unsigned int c;
		u64 offset;

		if (mmio->num_lines) {
			u32 line_offset;

			offset = to_interleave_offset(base_offset + copied,
					mmio);
			div_u64_rem(offset, mmio->line_size, &line_offset);
			c = min_t(size_t, len, mmio->line_size - line_offset);
		} else {
			offset = base_offset + nfit_blk->bdw_offset;
			c = len;
		}

		if (rw)
			memcpy_to_pmem(mmio->addr.aperture + offset,
					iobuf + copied, c);
		else {
			if (nfit_blk->dimm_flags & NFIT_BLK_READ_FLUSH)
				mmio_flush_range((void __force *)
					mmio->addr.aperture + offset, c);

			memcpy_from_pmem(iobuf + copied,
					mmio->addr.aperture + offset, c);
		}

		copied += c;
		len -= c;
	}

	if (rw)
		wmb_blk(nfit_blk);

	rc = read_blk_stat(nfit_blk, lane) ? -EIO : 0;
	return rc;
}

static int acpi_nfit_blk_region_do_io(struct nd_blk_region *ndbr,
		resource_size_t dpa, void *iobuf, u64 len, int rw)
{
	struct nfit_blk *nfit_blk = nd_blk_region_provider_data(ndbr);
	struct nfit_blk_mmio *mmio = &nfit_blk->mmio[BDW];
	struct nd_region *nd_region = nfit_blk->nd_region;
	unsigned int lane, copied = 0;
	int rc = 0;

	lane = nd_region_acquire_lane(nd_region);
	while (len) {
		u64 c = min(len, mmio->size);

		rc = acpi_nfit_blk_single_io(nfit_blk, dpa + copied,
				iobuf + copied, c, rw, lane);
		if (rc)
			break;

		copied += c;
		len -= c;
	}
	nd_region_release_lane(nd_region, lane);

	return rc;
}

static void nfit_spa_mapping_release(struct kref *kref)
{
	struct nfit_spa_mapping *spa_map = to_spa_map(kref);
	struct acpi_nfit_system_address *spa = spa_map->spa;
	struct acpi_nfit_desc *acpi_desc = spa_map->acpi_desc;

	WARN_ON(!mutex_is_locked(&acpi_desc->spa_map_mutex));
	dev_dbg(acpi_desc->dev, "%s: SPA%d\n", __func__, spa->range_index);
	if (spa_map->type == SPA_MAP_APERTURE)
		memunmap((void __force *)spa_map->addr.aperture);
	else
		iounmap(spa_map->addr.base);
	release_mem_region(spa->address, spa->length);
	list_del(&spa_map->list);
	kfree(spa_map);
}

static struct nfit_spa_mapping *find_spa_mapping(
		struct acpi_nfit_desc *acpi_desc,
		struct acpi_nfit_system_address *spa)
{
	struct nfit_spa_mapping *spa_map;

	WARN_ON(!mutex_is_locked(&acpi_desc->spa_map_mutex));
	list_for_each_entry(spa_map, &acpi_desc->spa_maps, list)
		if (spa_map->spa == spa)
			return spa_map;

	return NULL;
}

static void nfit_spa_unmap(struct acpi_nfit_desc *acpi_desc,
		struct acpi_nfit_system_address *spa)
{
	struct nfit_spa_mapping *spa_map;

	mutex_lock(&acpi_desc->spa_map_mutex);
	spa_map = find_spa_mapping(acpi_desc, spa);

	if (spa_map)
		kref_put(&spa_map->kref, nfit_spa_mapping_release);
	mutex_unlock(&acpi_desc->spa_map_mutex);
}

static void __iomem *__nfit_spa_map(struct acpi_nfit_desc *acpi_desc,
		struct acpi_nfit_system_address *spa, enum spa_map_type type)
{
	resource_size_t start = spa->address;
	resource_size_t n = spa->length;
	struct nfit_spa_mapping *spa_map;
	struct resource *res;

	WARN_ON(!mutex_is_locked(&acpi_desc->spa_map_mutex));

	spa_map = find_spa_mapping(acpi_desc, spa);
	if (spa_map) {
		kref_get(&spa_map->kref);
		return spa_map->addr.base;
	}

	spa_map = kzalloc(sizeof(*spa_map), GFP_KERNEL);
	if (!spa_map)
		return NULL;

	INIT_LIST_HEAD(&spa_map->list);
	spa_map->spa = spa;
	kref_init(&spa_map->kref);
	spa_map->acpi_desc = acpi_desc;

	res = request_mem_region(start, n, dev_name(acpi_desc->dev));
	if (!res)
		goto err_mem;

	spa_map->type = type;
	if (type == SPA_MAP_APERTURE)
		spa_map->addr.aperture = (void __pmem *)memremap(start, n,
							ARCH_MEMREMAP_PMEM);
	else
		spa_map->addr.base = ioremap_nocache(start, n);

	if (!spa_map->addr.base)
		goto err_map;

	list_add_tail(&spa_map->list, &acpi_desc->spa_maps);
	return spa_map->addr.base;

 err_map:
	release_mem_region(start, n);
 err_mem:
	kfree(spa_map);
	return NULL;
}

/**
 * nfit_spa_map - interleave-aware managed-mappings of acpi_nfit_system_address ranges
 * @acpi_desc: NFIT-bus descriptor that provided the spa table entry
 * @spa: spa table entry to map
 * @type: aperture or control region
 *
 * In the case where block-data-window apertures and
 * dimm-control-regions are interleaved they will end up sharing a
 * single request_mem_region() + ioremap() for the address range.  In
 * the style of devm nfit_spa_map() mappings are automatically dropped
 * when all region devices referencing the same mapping are disabled /
 * unbound.
 */
static void __iomem *nfit_spa_map(struct acpi_nfit_desc *acpi_desc,
		struct acpi_nfit_system_address *spa, enum spa_map_type type)
{
	void __iomem *iomem;

	mutex_lock(&acpi_desc->spa_map_mutex);
	iomem = __nfit_spa_map(acpi_desc, spa, type);
	mutex_unlock(&acpi_desc->spa_map_mutex);

	return iomem;
}

static int nfit_blk_init_interleave(struct nfit_blk_mmio *mmio,
		struct acpi_nfit_interleave *idt, u16 interleave_ways)
{
	if (idt) {
		mmio->num_lines = idt->line_count;
		mmio->line_size = idt->line_size;
		if (interleave_ways == 0)
			return -ENXIO;
		mmio->table_size = mmio->num_lines * interleave_ways
			* mmio->line_size;
	}

	return 0;
}

static int acpi_nfit_blk_get_flags(struct nvdimm_bus_descriptor *nd_desc,
		struct nvdimm *nvdimm, struct nfit_blk *nfit_blk)
{
	struct nd_cmd_dimm_flags flags;
	int rc;

	memset(&flags, 0, sizeof(flags));
	rc = nd_desc->ndctl(nd_desc, nvdimm, ND_CMD_DIMM_FLAGS, &flags,
			sizeof(flags), NULL);

	if (rc >= 0 && flags.status == 0)
		nfit_blk->dimm_flags = flags.flags;
	else if (rc == -ENOTTY) {
		/* fall back to a conservative default */
		nfit_blk->dimm_flags = NFIT_BLK_DCR_LATCH | NFIT_BLK_READ_FLUSH;
		rc = 0;
	} else
		rc = -ENXIO;

	return rc;
}

static int acpi_nfit_blk_region_enable(struct nvdimm_bus *nvdimm_bus,
		struct device *dev)
{
	struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
	struct nd_blk_region *ndbr = to_nd_blk_region(dev);
	struct nfit_flush *nfit_flush;
	struct nfit_blk_mmio *mmio;
	struct nfit_blk *nfit_blk;
	struct nfit_mem *nfit_mem;
	struct nvdimm *nvdimm;
	int rc;

	nvdimm = nd_blk_region_to_dimm(ndbr);
	nfit_mem = nvdimm_provider_data(nvdimm);
	if (!nfit_mem || !nfit_mem->dcr || !nfit_mem->bdw) {
		dev_dbg(dev, "%s: missing%s%s%s\n", __func__,
				nfit_mem ? "" : " nfit_mem",
				(nfit_mem && nfit_mem->dcr) ? "" : " dcr",
				(nfit_mem && nfit_mem->bdw) ? "" : " bdw");
		return -ENXIO;
	}

	nfit_blk = devm_kzalloc(dev, sizeof(*nfit_blk), GFP_KERNEL);
	if (!nfit_blk)
		return -ENOMEM;
	nd_blk_region_set_provider_data(ndbr, nfit_blk);
	nfit_blk->nd_region = to_nd_region(dev);

	/* map block aperture memory */
	nfit_blk->bdw_offset = nfit_mem->bdw->offset;
	mmio = &nfit_blk->mmio[BDW];
	mmio->addr.base = nfit_spa_map(acpi_desc, nfit_mem->spa_bdw,
			SPA_MAP_APERTURE);
	if (!mmio->addr.base) {
		dev_dbg(dev, "%s: %s failed to map bdw\n", __func__,
				nvdimm_name(nvdimm));
		return -ENOMEM;
	}
	mmio->size = nfit_mem->bdw->size;
	mmio->base_offset = nfit_mem->memdev_bdw->region_offset;
	mmio->idt = nfit_mem->idt_bdw;
	mmio->spa = nfit_mem->spa_bdw;
	rc = nfit_blk_init_interleave(mmio, nfit_mem->idt_bdw,
			nfit_mem->memdev_bdw->interleave_ways);
	if (rc) {
		dev_dbg(dev, "%s: %s failed to init bdw interleave\n",
				__func__, nvdimm_name(nvdimm));
		return rc;
	}

	/* map block control memory */
	nfit_blk->cmd_offset = nfit_mem->dcr->command_offset;
	nfit_blk->stat_offset = nfit_mem->dcr->status_offset;
	mmio = &nfit_blk->mmio[DCR];
	mmio->addr.base = nfit_spa_map(acpi_desc, nfit_mem->spa_dcr,
			SPA_MAP_CONTROL);
	if (!mmio->addr.base) {
		dev_dbg(dev, "%s: %s failed to map dcr\n", __func__,
				nvdimm_name(nvdimm));
		return -ENOMEM;
	}
	mmio->size = nfit_mem->dcr->window_size;
	mmio->base_offset = nfit_mem->memdev_dcr->region_offset;
	mmio->idt = nfit_mem->idt_dcr;
	mmio->spa = nfit_mem->spa_dcr;
	rc = nfit_blk_init_interleave(mmio, nfit_mem->idt_dcr,
			nfit_mem->memdev_dcr->interleave_ways);
	if (rc) {
		dev_dbg(dev, "%s: %s failed to init dcr interleave\n",
				__func__, nvdimm_name(nvdimm));
		return rc;
	}

	rc = acpi_nfit_blk_get_flags(nd_desc, nvdimm, nfit_blk);
	if (rc < 0) {
		dev_dbg(dev, "%s: %s failed get DIMM flags\n",
				__func__, nvdimm_name(nvdimm));
		return rc;
	}

	nfit_flush = nfit_mem->nfit_flush;
	if (nfit_flush && nfit_flush->flush->hint_count != 0) {
		nfit_blk->nvdimm_flush = devm_ioremap_nocache(dev,
				nfit_flush->flush->hint_address[0], 8);
		if (!nfit_blk->nvdimm_flush)
			return -ENOMEM;
	}

	if (!arch_has_wmb_pmem() && !nfit_blk->nvdimm_flush)
		dev_warn(dev, "unable to guarantee persistence of writes\n");

	if (mmio->line_size == 0)
		return 0;

	if ((u32) nfit_blk->cmd_offset % mmio->line_size
			+ 8 > mmio->line_size) {
		dev_dbg(dev, "cmd_offset crosses interleave boundary\n");
		return -ENXIO;
	} else if ((u32) nfit_blk->stat_offset % mmio->line_size
			+ 8 > mmio->line_size) {
		dev_dbg(dev, "stat_offset crosses interleave boundary\n");
		return -ENXIO;
	}

	return 0;
}

static void acpi_nfit_blk_region_disable(struct nvdimm_bus *nvdimm_bus,
		struct device *dev)
{
	struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
	struct nd_blk_region *ndbr = to_nd_blk_region(dev);
	struct nfit_blk *nfit_blk = nd_blk_region_provider_data(ndbr);
	int i;

	if (!nfit_blk)
		return; /* never enabled */

	/* auto-free BLK spa mappings */
	for (i = 0; i < 2; i++) {
		struct nfit_blk_mmio *mmio = &nfit_blk->mmio[i];

		if (mmio->addr.base)
			nfit_spa_unmap(acpi_desc, mmio->spa);
	}
	nd_blk_region_set_provider_data(ndbr, NULL);
	/* devm will free nfit_blk */
}

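/*
 * Address Range Scrub (ARS) flow: ars_get_cap() sizes the status
 * buffer and validates the supported scrub types, ars_start() kicks
 * off a scan, ars_get_status() polls for results, and ars_continue()
 * restarts a scan that returned with an overflowed record buffer.
 */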
1574static int ars_get_cap(struct acpi_nfit_desc *acpi_desc,
1575		struct nd_cmd_ars_cap *cmd, struct nfit_spa *nfit_spa)
1576{
1577	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
1578	struct acpi_nfit_system_address *spa = nfit_spa->spa;
1579	int cmd_rc, rc;
1580
1581	cmd->address = spa->address;
1582	cmd->length = spa->length;
1583	rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_CAP, cmd,
1584			sizeof(*cmd), &cmd_rc);
1585	if (rc < 0)
1586		return rc;
1587	return cmd_rc;
1588}
1589
1590static int ars_start(struct acpi_nfit_desc *acpi_desc, struct nfit_spa *nfit_spa)
1591{
1592	int rc;
1593	int cmd_rc;
1594	struct nd_cmd_ars_start ars_start;
1595	struct acpi_nfit_system_address *spa = nfit_spa->spa;
1596	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
1597
1598	memset(&ars_start, 0, sizeof(ars_start));
1599	ars_start.address = spa->address;
1600	ars_start.length = spa->length;
1601	if (nfit_spa_type(spa) == NFIT_SPA_PM)
1602		ars_start.type = ND_ARS_PERSISTENT;
1603	else if (nfit_spa_type(spa) == NFIT_SPA_VOLATILE)
1604		ars_start.type = ND_ARS_VOLATILE;
1605	else
1606		return -ENOTTY;
1607
1608	rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_START, &ars_start,
1609			sizeof(ars_start), &cmd_rc);
1610
1611	if (rc < 0)
1612		return rc;
1613	return cmd_rc;
1614}
1615
1616static int ars_continue(struct acpi_nfit_desc *acpi_desc)
1617{
1618	int rc, cmd_rc;
1619	struct nd_cmd_ars_start ars_start;
1620	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
1621	struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status;
1622
1623	memset(&ars_start, 0, sizeof(ars_start));
1624	ars_start.address = ars_status->restart_address;
1625	ars_start.length = ars_status->restart_length;
1626	ars_start.type = ars_status->type;
1627	rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_START, &ars_start,
1628			sizeof(ars_start), &cmd_rc);
1629	if (rc < 0)
1630		return rc;
1631	return cmd_rc;
1632}
1633
1634static int ars_get_status(struct acpi_nfit_desc *acpi_desc)
1635{
1636	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
1637	struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status;
1638	int rc, cmd_rc;
1639
1640	rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_STATUS, ars_status,
1641			acpi_desc->ars_status_size, &cmd_rc);
1642	if (rc < 0)
1643		return rc;
1644	return cmd_rc;
1645}
1646
1647static int ars_status_process_records(struct nvdimm_bus *nvdimm_bus,
1648		struct nd_cmd_ars_status *ars_status)
1649{
1650	int rc;
1651	u32 i;
1652
1653	for (i = 0; i < ars_status->num_records; i++) {
1654		rc = nvdimm_bus_add_poison(nvdimm_bus,
1655				ars_status->records[i].err_address,
1656				ars_status->records[i].length);
1657		if (rc)
1658			return rc;
1659	}
1660
1661	return 0;
1662}
1663
1664static void acpi_nfit_remove_resource(void *data)
1665{
1666	struct resource *res = data;
1667
1668	remove_resource(res);
1669}
1670
1671static int acpi_nfit_insert_resource(struct acpi_nfit_desc *acpi_desc,
1672		struct nd_region_desc *ndr_desc)
1673{
1674	struct resource *res, *nd_res = ndr_desc->res;
1675	int is_pmem, ret;
1676
1677	/* No operation if the region is already registered as PMEM */
1678	is_pmem = region_intersects(nd_res->start, resource_size(nd_res),
1679				IORESOURCE_MEM, IORES_DESC_PERSISTENT_MEMORY);
1680	if (is_pmem == REGION_INTERSECTS)
1681		return 0;
1682
1683	res = devm_kzalloc(acpi_desc->dev, sizeof(*res), GFP_KERNEL);
1684	if (!res)
1685		return -ENOMEM;
1686
1687	res->name = "Persistent Memory";
1688	res->start = nd_res->start;
1689	res->end = nd_res->end;
1690	res->flags = IORESOURCE_MEM;
1691	res->desc = IORES_DESC_PERSISTENT_MEMORY;
1692
1693	ret = insert_resource(&iomem_resource, res);
1694	if (ret)
1695		return ret;
1696
1697	ret = devm_add_action(acpi_desc->dev, acpi_nfit_remove_resource, res);
1698	if (ret) {
1699		remove_resource(res);
1700		return ret;
1701	}
1702
1703	return 0;
1704}
1705
1706static int acpi_nfit_init_mapping(struct acpi_nfit_desc *acpi_desc,
1707		struct nd_mapping *nd_mapping, struct nd_region_desc *ndr_desc,
1708		struct acpi_nfit_memory_map *memdev,
1709		struct nfit_spa *nfit_spa)
1710{
1711	struct nvdimm *nvdimm = acpi_nfit_dimm_by_handle(acpi_desc,
1712			memdev->device_handle);
1713	struct acpi_nfit_system_address *spa = nfit_spa->spa;
1714	struct nd_blk_region_desc *ndbr_desc;
1715	struct nfit_mem *nfit_mem;
1716	int blk_valid = 0;
1717
1718	if (!nvdimm) {
		dev_err(acpi_desc->dev, "spa%d dimm: %#x not found\n",
				spa->range_index, memdev->device_handle);
		return -ENODEV;
	}

	nd_mapping->nvdimm = nvdimm;
	switch (nfit_spa_type(spa)) {
	case NFIT_SPA_PM:
	case NFIT_SPA_VOLATILE:
		nd_mapping->start = memdev->address;
		nd_mapping->size = memdev->region_size;
		break;
	case NFIT_SPA_DCR:
		nfit_mem = nvdimm_provider_data(nvdimm);
		if (!nfit_mem || !nfit_mem->bdw) {
			dev_dbg(acpi_desc->dev, "spa%d %s missing bdw\n",
					spa->range_index, nvdimm_name(nvdimm));
		} else {
			nd_mapping->size = nfit_mem->bdw->capacity;
			nd_mapping->start = nfit_mem->bdw->start_address;
			ndr_desc->num_lanes = nfit_mem->bdw->windows;
			blk_valid = 1;
		}

		ndr_desc->nd_mapping = nd_mapping;
		ndr_desc->num_mappings = blk_valid;
		ndbr_desc = to_blk_region_desc(ndr_desc);
		ndbr_desc->enable = acpi_nfit_blk_region_enable;
		ndbr_desc->disable = acpi_nfit_blk_region_disable;
		ndbr_desc->do_io = acpi_desc->blk_do_io;
		nfit_spa->nd_region = nvdimm_blk_region_create(acpi_desc->nvdimm_bus,
				ndr_desc);
		if (!nfit_spa->nd_region)
			return -ENOMEM;
		break;
	}

	return 0;
}

static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc,
		struct nfit_spa *nfit_spa)
{
	static struct nd_mapping nd_mappings[ND_MAX_MAPPINGS];
	struct acpi_nfit_system_address *spa = nfit_spa->spa;
	struct nd_blk_region_desc ndbr_desc;
	struct nd_region_desc *ndr_desc;
	struct nfit_memdev *nfit_memdev;
	struct nvdimm_bus *nvdimm_bus;
	struct resource res;
	int count = 0, rc;

	if (nfit_spa->nd_region)
		return 0;

	if (spa->range_index == 0) {
		dev_dbg(acpi_desc->dev, "%s: detected invalid spa index\n",
				__func__);
		return 0;
	}

	memset(&res, 0, sizeof(res));
	memset(&nd_mappings, 0, sizeof(nd_mappings));
	memset(&ndbr_desc, 0, sizeof(ndbr_desc));
	res.start = spa->address;
	res.end = res.start + spa->length - 1;
	ndr_desc = &ndbr_desc.ndr_desc;
	ndr_desc->res = &res;
	ndr_desc->provider_data = nfit_spa;
	ndr_desc->attr_groups = acpi_nfit_region_attribute_groups;
	if (spa->flags & ACPI_NFIT_PROXIMITY_VALID)
		ndr_desc->numa_node = acpi_map_pxm_to_online_node(
						spa->proximity_domain);
	else
		ndr_desc->numa_node = NUMA_NO_NODE;

	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
		struct acpi_nfit_memory_map *memdev = nfit_memdev->memdev;
		struct nd_mapping *nd_mapping;

		if (memdev->range_index != spa->range_index)
			continue;
		if (count >= ND_MAX_MAPPINGS) {
			dev_err(acpi_desc->dev, "spa%d exceeds max mappings %d\n",
					spa->range_index, ND_MAX_MAPPINGS);
			return -ENXIO;
		}
		nd_mapping = &nd_mappings[count++];
		rc = acpi_nfit_init_mapping(acpi_desc, nd_mapping, ndr_desc,
				memdev, nfit_spa);
		if (rc)
			goto out;
	}

	ndr_desc->nd_mapping = nd_mappings;
	ndr_desc->num_mappings = count;
	rc = acpi_nfit_init_interleave_set(acpi_desc, ndr_desc, spa);
	if (rc)
		goto out;

	nvdimm_bus = acpi_desc->nvdimm_bus;
	if (nfit_spa_type(spa) == NFIT_SPA_PM) {
		rc = acpi_nfit_insert_resource(acpi_desc, ndr_desc);
		if (rc) {
			dev_warn(acpi_desc->dev,
				"failed to insert pmem resource to iomem: %d\n",
				rc);
			goto out;
		}

		nfit_spa->nd_region = nvdimm_pmem_region_create(nvdimm_bus,
				ndr_desc);
		if (!nfit_spa->nd_region)
			rc = -ENOMEM;
	} else if (nfit_spa_type(spa) == NFIT_SPA_VOLATILE) {
		nfit_spa->nd_region = nvdimm_volatile_region_create(nvdimm_bus,
				ndr_desc);
		if (!nfit_spa->nd_region)
			rc = -ENOMEM;
	}

 out:
	if (rc)
		dev_err(acpi_desc->dev, "failed to register spa range %d\n",
				nfit_spa->spa->range_index);
	return rc;
}
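
/*
 * Illustrative sketch (not called by the driver): how a SPA range's
 * base/length pair maps onto the closed-interval convention of
 * struct resource used by acpi_nfit_register_region() above.  The
 * demo_* name is hypothetical.
 */
static void __maybe_unused demo_spa_to_resource(
		const struct acpi_nfit_system_address *spa,
		struct resource *res)
{
	memset(res, 0, sizeof(*res));
	res->start = spa->address;			/* first byte */
	res->end = spa->address + spa->length - 1;	/* last byte, inclusive */
}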

static int ars_status_alloc(struct acpi_nfit_desc *acpi_desc,
		u32 max_ars)
{
	struct device *dev = acpi_desc->dev;
	struct nd_cmd_ars_status *ars_status;

	if (acpi_desc->ars_status && acpi_desc->ars_status_size >= max_ars) {
		memset(acpi_desc->ars_status, 0, acpi_desc->ars_status_size);
		return 0;
	}

	if (acpi_desc->ars_status)
		devm_kfree(dev, acpi_desc->ars_status);
	acpi_desc->ars_status = NULL;
	ars_status = devm_kzalloc(dev, max_ars, GFP_KERNEL);
	if (!ars_status)
		return -ENOMEM;
	acpi_desc->ars_status = ars_status;
	acpi_desc->ars_status_size = max_ars;
	return 0;
}
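
/*
 * Sketch of the grow-only, devm-managed buffer pattern that
 * ars_status_alloc() implements: reuse and clear the existing
 * allocation when it is already big enough, otherwise release it
 * and allocate a fresh zeroed buffer.  The demo_* names are
 * hypothetical.
 */
struct demo_buf {
	void *data;
	size_t size;
};

static int __maybe_unused demo_buf_ensure(struct device *dev,
		struct demo_buf *buf, size_t need)
{
	if (buf->data && buf->size >= need) {
		memset(buf->data, 0, buf->size);
		return 0;
	}

	if (buf->data)
		devm_kfree(dev, buf->data);
	buf->data = devm_kzalloc(dev, need, GFP_KERNEL);
	if (!buf->data) {
		buf->size = 0;
		return -ENOMEM;
	}
	buf->size = need;
	return 0;
}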

static int acpi_nfit_query_poison(struct acpi_nfit_desc *acpi_desc,
		struct nfit_spa *nfit_spa)
{
	struct acpi_nfit_system_address *spa = nfit_spa->spa;
	int rc;

	if (!nfit_spa->max_ars) {
		struct nd_cmd_ars_cap ars_cap;

		memset(&ars_cap, 0, sizeof(ars_cap));
		rc = ars_get_cap(acpi_desc, &ars_cap, nfit_spa);
		if (rc < 0)
			return rc;
		nfit_spa->max_ars = ars_cap.max_ars_out;
		nfit_spa->clear_err_unit = ars_cap.clear_err_unit;
		/* check that the supported scrub types match the spa type */
		if (nfit_spa_type(spa) == NFIT_SPA_VOLATILE &&
				((ars_cap.status >> 16) & ND_ARS_VOLATILE) == 0)
			return -ENOTTY;
		else if (nfit_spa_type(spa) == NFIT_SPA_PM &&
				((ars_cap.status >> 16) & ND_ARS_PERSISTENT) == 0)
			return -ENOTTY;
	}

	if (ars_status_alloc(acpi_desc, nfit_spa->max_ars))
		return -ENOMEM;

	rc = ars_get_status(acpi_desc);
	if (rc < 0 && rc != -ENOSPC)
		return rc;

	if (ars_status_process_records(acpi_desc->nvdimm_bus,
				acpi_desc->ars_status))
		return -ENOMEM;

	return 0;
}

static void acpi_nfit_async_scrub(struct acpi_nfit_desc *acpi_desc,
		struct nfit_spa *nfit_spa)
{
	struct acpi_nfit_system_address *spa = nfit_spa->spa;
	unsigned int overflow_retry = scrub_overflow_abort;
	u64 init_ars_start = 0, init_ars_len = 0;
	struct device *dev = acpi_desc->dev;
	unsigned int tmo = scrub_timeout;
	int rc;

	if (nfit_spa->ars_done || !nfit_spa->nd_region)
		return;

	rc = ars_start(acpi_desc, nfit_spa);
	/*
	 * If we timed out the initial scan we'll still be busy here,
	 * and will wait another timeout before giving up permanently.
	 */
	if (rc < 0 && rc != -EBUSY)
		return;

	do {
		u64 ars_start, ars_len;

		if (acpi_desc->cancel)
			break;
		rc = acpi_nfit_query_poison(acpi_desc, nfit_spa);
		if (rc == -ENOTTY)
			break;
		if (rc == -EBUSY && !tmo) {
			dev_warn(dev, "range %d ars timeout, aborting\n",
					spa->range_index);
			break;
		}

		if (rc == -EBUSY) {
			/*
			 * Note, entries may be appended to the list
			 * while the lock is dropped, but the workqueue
			 * being active prevents entries being deleted /
			 * freed.
			 */
			mutex_unlock(&acpi_desc->init_mutex);
			ssleep(1);
			tmo--;
			mutex_lock(&acpi_desc->init_mutex);
			continue;
		}

		/* we got some results, but there are more pending... */
		if (rc == -ENOSPC && overflow_retry--) {
			if (!init_ars_len) {
				init_ars_len = acpi_desc->ars_status->length;
				init_ars_start = acpi_desc->ars_status->address;
			}
			rc = ars_continue(acpi_desc);
		}

		if (rc < 0) {
			dev_warn(dev, "range %d ars continuation failed\n",
					spa->range_index);
			break;
		}

		if (init_ars_len) {
			ars_start = init_ars_start;
			ars_len = init_ars_len;
		} else {
			ars_start = acpi_desc->ars_status->address;
			ars_len = acpi_desc->ars_status->length;
		}
		dev_dbg(dev, "spa range: %d ars from %#llx + %#llx complete\n",
				spa->range_index, ars_start, ars_len);
		/* notify the region about new poison entries */
		nvdimm_region_notify(nfit_spa->nd_region,
				NVDIMM_REVALIDATE_POISON);
		break;
	} while (1);
}
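
/*
 * Sketch of the poll-under-mutex idiom used by acpi_nfit_async_scrub()
 * above: while the operation reports -EBUSY, drop the lock, sleep one
 * second, and retry until a budget of 'timeout' seconds is spent.  The
 * demo_* names are hypothetical.
 */
static int __maybe_unused demo_poll_busy(struct mutex *lock,
		unsigned int timeout, int (*poll)(void *data), void *data)
{
	int rc;

	do {
		rc = poll(data);
		if (rc != -EBUSY || !timeout--)
			return rc;
		/* shared state may change while the lock is dropped */
		mutex_unlock(lock);
		ssleep(1);
		mutex_lock(lock);
	} while (1);
}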

static void acpi_nfit_scrub(struct work_struct *work)
{
	struct device *dev;
	u64 init_scrub_length = 0;
	struct nfit_spa *nfit_spa;
	u64 init_scrub_address = 0;
	bool init_ars_done = false;
	struct acpi_nfit_desc *acpi_desc;
	unsigned int tmo = scrub_timeout;
	unsigned int overflow_retry = scrub_overflow_abort;

	acpi_desc = container_of(work, typeof(*acpi_desc), work);
	dev = acpi_desc->dev;

	/*
	 * We scrub in two phases.  The first phase waits for any platform
	 * firmware initiated scrubs to complete and then searches for the
	 * affected spa regions to mark them scanned.  In the second phase we
	 * initiate a directed scrub for every range that was not scrubbed in
	 * phase 1.
	 */

	/* process platform firmware initiated scrubs */
 retry:
	mutex_lock(&acpi_desc->init_mutex);
	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
		struct nd_cmd_ars_status *ars_status;
		struct acpi_nfit_system_address *spa;
		u64 ars_start, ars_len;
		int rc;

		if (acpi_desc->cancel)
			break;

		if (nfit_spa->nd_region)
			continue;

		if (init_ars_done) {
			/*
			 * No need to re-query, we're now just
			 * reconciling all the ranges covered by the
			 * initial scrub
			 */
			rc = 0;
		} else
			rc = acpi_nfit_query_poison(acpi_desc, nfit_spa);

		if (rc == -ENOTTY) {
			/* no ars capability, just register spa and move on */
			acpi_nfit_register_region(acpi_desc, nfit_spa);
			continue;
		}

		if (rc == -EBUSY && !tmo) {
			/* fallthrough to directed scrub in phase 2 */
			dev_warn(dev, "timeout awaiting ars results, continuing...\n");
			break;
		} else if (rc == -EBUSY) {
			mutex_unlock(&acpi_desc->init_mutex);
			ssleep(1);
			tmo--;
			goto retry;
		}

		/* we got some results, but there are more pending... */
		if (rc == -ENOSPC && overflow_retry--) {
			ars_status = acpi_desc->ars_status;
			/*
			 * Record the original scrub range, so that we
			 * can recall all the ranges impacted by the
			 * initial scrub.
			 */
			if (!init_scrub_length) {
				init_scrub_length = ars_status->length;
				init_scrub_address = ars_status->address;
			}
			rc = ars_continue(acpi_desc);
			if (rc == 0) {
				mutex_unlock(&acpi_desc->init_mutex);
				goto retry;
			}
		}

		if (rc < 0) {
			/*
			 * Initial scrub failed, we'll give it one more
			 * try below...
			 */
			break;
		}

		/* We got some final results, record completed ranges */
		ars_status = acpi_desc->ars_status;
		if (init_scrub_length) {
			ars_start = init_scrub_address;
			ars_len = init_scrub_length;
		} else {
			ars_start = ars_status->address;
			ars_len = ars_status->length;
		}
		spa = nfit_spa->spa;

		if (!init_ars_done) {
			init_ars_done = true;
			dev_dbg(dev, "init scrub %#llx + %#llx complete\n",
					ars_start, ars_len);
		}
		if (ars_start <= spa->address && ars_start + ars_len
				>= spa->address + spa->length)
			acpi_nfit_register_region(acpi_desc, nfit_spa);
	}

	/*
	 * For all the ranges not covered by an initial scrub we still
	 * want to see if there are errors, but it's ok to discover them
	 * asynchronously.
	 */
	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
		/*
		 * Flag all the ranges that still need scrubbing, but
		 * register them now to make data available.
		 */
		if (nfit_spa->nd_region)
			nfit_spa->ars_done = 1;
		else
			acpi_nfit_register_region(acpi_desc, nfit_spa);
	}

	list_for_each_entry(nfit_spa, &acpi_desc->spas, list)
		acpi_nfit_async_scrub(acpi_desc, nfit_spa);
	mutex_unlock(&acpi_desc->init_mutex);
}
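
/*
 * Sketch of the context-recovery idiom used by acpi_nfit_scrub()
 * above: the work_struct is embedded in the descriptor, so
 * container_of() walks back from the member pointer the workqueue
 * hands us to the enclosing acpi_nfit_desc.  demo_scrub_worker()
 * is hypothetical.
 */
static void __maybe_unused demo_scrub_worker(struct work_struct *work)
{
	struct acpi_nfit_desc *acpi_desc;

	acpi_desc = container_of(work, typeof(*acpi_desc), work);
	dev_dbg(acpi_desc->dev, "scrub work context recovered\n");
}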

static int acpi_nfit_register_regions(struct acpi_nfit_desc *acpi_desc)
{
	struct nfit_spa *nfit_spa;
	int rc;

	list_for_each_entry(nfit_spa, &acpi_desc->spas, list)
		if (nfit_spa_type(nfit_spa->spa) == NFIT_SPA_DCR) {
			/* BLK regions don't need to wait for ars results */
			rc = acpi_nfit_register_region(acpi_desc, nfit_spa);
			if (rc)
				return rc;
		}

	queue_work(nfit_wq, &acpi_desc->work);
	return 0;
}

static int acpi_nfit_check_deletions(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev)
{
	struct device *dev = acpi_desc->dev;

	if (!list_empty(&prev->spas) ||
			!list_empty(&prev->memdevs) ||
			!list_empty(&prev->dcrs) ||
			!list_empty(&prev->bdws) ||
			!list_empty(&prev->idts) ||
			!list_empty(&prev->flushes)) {
		dev_err(dev, "new nfit deletes entries (unsupported)\n");
		return -ENXIO;
	}
	return 0;
}

int acpi_nfit_init(struct acpi_nfit_desc *acpi_desc, acpi_size sz)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_table_prev prev;
	const void *end;
	u8 *data;
	int rc;

	mutex_lock(&acpi_desc->init_mutex);

	INIT_LIST_HEAD(&prev.spas);
	INIT_LIST_HEAD(&prev.memdevs);
	INIT_LIST_HEAD(&prev.dcrs);
	INIT_LIST_HEAD(&prev.bdws);
	INIT_LIST_HEAD(&prev.idts);
	INIT_LIST_HEAD(&prev.flushes);

	list_cut_position(&prev.spas, &acpi_desc->spas,
				acpi_desc->spas.prev);
	list_cut_position(&prev.memdevs, &acpi_desc->memdevs,
				acpi_desc->memdevs.prev);
	list_cut_position(&prev.dcrs, &acpi_desc->dcrs,
				acpi_desc->dcrs.prev);
	list_cut_position(&prev.bdws, &acpi_desc->bdws,
				acpi_desc->bdws.prev);
	list_cut_position(&prev.idts, &acpi_desc->idts,
				acpi_desc->idts.prev);
	list_cut_position(&prev.flushes, &acpi_desc->flushes,
				acpi_desc->flushes.prev);

	data = (u8 *) acpi_desc->nfit;
	end = data + sz;
	while (!IS_ERR_OR_NULL(data))
		data = add_table(acpi_desc, &prev, data, end);

	if (IS_ERR(data)) {
		dev_dbg(dev, "%s: nfit table parsing error: %ld\n", __func__,
				PTR_ERR(data));
		rc = PTR_ERR(data);
		goto out_unlock;
	}

	rc = acpi_nfit_check_deletions(acpi_desc, &prev);
	if (rc)
		goto out_unlock;

	if (nfit_mem_init(acpi_desc) != 0) {
		rc = -ENOMEM;
		goto out_unlock;
	}

	acpi_nfit_init_dsms(acpi_desc);

	rc = acpi_nfit_register_dimms(acpi_desc);
	if (rc)
		goto out_unlock;

	rc = acpi_nfit_register_regions(acpi_desc);

 out_unlock:
	mutex_unlock(&acpi_desc->init_mutex);
	return rc;
}
EXPORT_SYMBOL_GPL(acpi_nfit_init);

struct acpi_nfit_flush_work {
	struct work_struct work;
	struct completion cmp;
};

static void flush_probe(struct work_struct *work)
{
	struct acpi_nfit_flush_work *flush;

	flush = container_of(work, typeof(*flush), work);
	complete(&flush->cmp);
}

static int acpi_nfit_flush_probe(struct nvdimm_bus_descriptor *nd_desc)
{
	struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);
	struct device *dev = acpi_desc->dev;
	struct acpi_nfit_flush_work flush;

	/* bounce the device lock to flush acpi_nfit_add / acpi_nfit_notify */
	device_lock(dev);
	device_unlock(dev);

	/*
	 * Scrub work could take 10s of seconds, userspace may give up so we
	 * need to be interruptible while waiting.
	 */
	INIT_WORK_ONSTACK(&flush.work, flush_probe);
	init_completion(&flush.cmp);
	queue_work(nfit_wq, &flush.work);
	return wait_for_completion_interruptible(&flush.cmp);
}
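
/*
 * Sketch of the on-stack flush idiom used by acpi_nfit_flush_probe()
 * above: queue a trivial work item behind everything already submitted
 * and wait (interruptibly) for its completion, which proves the queue
 * has drained past the point of submission.  demo_flush_queue() is
 * hypothetical.
 */
static int __maybe_unused demo_flush_queue(struct workqueue_struct *wq)
{
	struct acpi_nfit_flush_work flush;

	INIT_WORK_ONSTACK(&flush.work, flush_probe);
	init_completion(&flush.cmp);
	queue_work(wq, &flush.work);
	return wait_for_completion_interruptible(&flush.cmp);
}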

static int acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc,
		struct nvdimm *nvdimm, unsigned int cmd)
{
	struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);

	if (nvdimm)
		return 0;
	if (cmd != ND_CMD_ARS_START)
		return 0;

	/*
	 * The kernel and userspace may race to initiate a scrub, but
	 * the scrub thread is prepared to lose that initial race.  It
	 * just needs guarantees that any ARS it initiates is not
	 * interrupted by intervening start requests from userspace.
	 */
	if (work_busy(&acpi_desc->work))
		return -EBUSY;

	return 0;
}

void acpi_nfit_desc_init(struct acpi_nfit_desc *acpi_desc, struct device *dev)
{
	struct nvdimm_bus_descriptor *nd_desc;

	dev_set_drvdata(dev, acpi_desc);
	acpi_desc->dev = dev;
	acpi_desc->blk_do_io = acpi_nfit_blk_region_do_io;
	nd_desc = &acpi_desc->nd_desc;
	nd_desc->provider_name = "ACPI.NFIT";
	nd_desc->ndctl = acpi_nfit_ctl;
	nd_desc->flush_probe = acpi_nfit_flush_probe;
	nd_desc->clear_to_send = acpi_nfit_clear_to_send;
	nd_desc->attr_groups = acpi_nfit_attribute_groups;

	INIT_LIST_HEAD(&acpi_desc->spa_maps);
	INIT_LIST_HEAD(&acpi_desc->spas);
	INIT_LIST_HEAD(&acpi_desc->dcrs);
	INIT_LIST_HEAD(&acpi_desc->bdws);
	INIT_LIST_HEAD(&acpi_desc->idts);
	INIT_LIST_HEAD(&acpi_desc->flushes);
	INIT_LIST_HEAD(&acpi_desc->memdevs);
	INIT_LIST_HEAD(&acpi_desc->dimms);
	mutex_init(&acpi_desc->spa_map_mutex);
	mutex_init(&acpi_desc->init_mutex);
	INIT_WORK(&acpi_desc->work, acpi_nfit_scrub);
}
EXPORT_SYMBOL_GPL(acpi_nfit_desc_init);
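
/*
 * Sketch of the sequence a caller follows to bring up an NFIT-backed
 * bus (mirroring acpi_nfit_add() and acpi_nfit_notify() below):
 * initialize the descriptor, register the nvdimm bus, then hand 'sz'
 * bytes of sub-tables to acpi_nfit_init().  demo_attach() is
 * hypothetical.
 */
static int __maybe_unused demo_attach(struct device *dev,
		struct acpi_nfit_header *nfit, acpi_size sz)
{
	struct acpi_nfit_desc *acpi_desc;

	acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL);
	if (!acpi_desc)
		return -ENOMEM;
	acpi_nfit_desc_init(acpi_desc, dev);
	acpi_desc->nvdimm_bus = nvdimm_bus_register(dev, &acpi_desc->nd_desc);
	if (!acpi_desc->nvdimm_bus)
		return -ENOMEM;
	acpi_desc->nfit = nfit;
	return acpi_nfit_init(acpi_desc, sz);
}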

static int acpi_nfit_add(struct acpi_device *adev)
{
	struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
	struct acpi_nfit_desc *acpi_desc;
	struct device *dev = &adev->dev;
	struct acpi_table_header *tbl;
	acpi_status status = AE_OK;
	acpi_size sz;
	int rc;

	status = acpi_get_table_with_size("NFIT", 0, &tbl, &sz);
	if (ACPI_FAILURE(status)) {
		/* This is ok, we could have an nvdimm hotplugged later */
		dev_dbg(dev, "failed to find NFIT at startup\n");
		return 0;
	}

	acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL);
	if (!acpi_desc)
		return -ENOMEM;
	acpi_nfit_desc_init(acpi_desc, &adev->dev);
	acpi_desc->nvdimm_bus = nvdimm_bus_register(dev, &acpi_desc->nd_desc);
	if (!acpi_desc->nvdimm_bus)
		return -ENOMEM;

	/*
	 * Save the acpi header for later and then skip it,
	 * making nfit point to the first nfit table header.
	 */
	acpi_desc->acpi_header = *tbl;
	acpi_desc->nfit = (void *) tbl + sizeof(struct acpi_table_nfit);
	sz -= sizeof(struct acpi_table_nfit);

	/* Evaluate _FIT and override with that if present */
	status = acpi_evaluate_object(adev->handle, "_FIT", NULL, &buf);
	if (ACPI_SUCCESS(status) && buf.length > 0) {
		union acpi_object *obj;
		/*
		 * Adjust for the acpi_object header of the _FIT
		 */
		obj = buf.pointer;
		if (obj->type == ACPI_TYPE_BUFFER) {
			acpi_desc->nfit =
				(struct acpi_nfit_header *)obj->buffer.pointer;
			sz = obj->buffer.length;
		} else
			dev_dbg(dev, "%s invalid type %d, ignoring _FIT\n",
				 __func__, (int) obj->type);
	}

	rc = acpi_nfit_init(acpi_desc, sz);
	if (rc) {
		nvdimm_bus_unregister(acpi_desc->nvdimm_bus);
		return rc;
	}
	return 0;
}

static int acpi_nfit_remove(struct acpi_device *adev)
{
	struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(&adev->dev);

	acpi_desc->cancel = 1;
	flush_workqueue(nfit_wq);
	nvdimm_bus_unregister(acpi_desc->nvdimm_bus);
	return 0;
}

static void acpi_nfit_notify(struct acpi_device *adev, u32 event)
{
	struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(&adev->dev);
	struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
	struct acpi_nfit_header *nfit_saved;
	union acpi_object *obj;
	struct device *dev = &adev->dev;
	acpi_status status;
	int ret;

	dev_dbg(dev, "%s: event: %d\n", __func__, event);

	device_lock(dev);
	if (!dev->driver) {
		/* dev->driver may be null if we're being removed */
		dev_dbg(dev, "%s: no driver found for dev\n", __func__);
		goto out_unlock;
	}

	if (!acpi_desc) {
		acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL);
		if (!acpi_desc)
			goto out_unlock;
		acpi_nfit_desc_init(acpi_desc, &adev->dev);
		acpi_desc->nvdimm_bus = nvdimm_bus_register(dev, &acpi_desc->nd_desc);
		if (!acpi_desc->nvdimm_bus)
			goto out_unlock;
	} else {
		/*
		 * Finish previous registration before considering new
		 * regions.
		 */
		flush_workqueue(nfit_wq);
	}

	/* Evaluate _FIT */
	status = acpi_evaluate_object(adev->handle, "_FIT", NULL, &buf);
	if (ACPI_FAILURE(status)) {
		dev_err(dev, "failed to evaluate _FIT\n");
		goto out_unlock;
	}

	nfit_saved = acpi_desc->nfit;
	obj = buf.pointer;
	if (obj->type == ACPI_TYPE_BUFFER) {
		acpi_desc->nfit =
			(struct acpi_nfit_header *)obj->buffer.pointer;
		ret = acpi_nfit_init(acpi_desc, obj->buffer.length);
		if (ret) {
			/* Merge failed, restore old nfit, and exit */
			acpi_desc->nfit = nfit_saved;
			dev_err(dev, "failed to merge updated NFIT\n");
		}
	} else {
		/* Bad _FIT, restore old nfit */
		dev_err(dev, "Invalid _FIT\n");
	}
	kfree(buf.pointer);

 out_unlock:
	device_unlock(dev);
}
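
/*
 * Sketch of the ACPI_ALLOCATE_BUFFER contract relied on by
 * acpi_nfit_notify() above: on success ACPICA allocates buf.pointer
 * and the caller owns it, so it must be freed with kfree() on every
 * path.  demo_check_fit() is hypothetical.
 */
static acpi_status __maybe_unused demo_check_fit(acpi_handle handle)
{
	struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *obj;
	acpi_status status;

	status = acpi_evaluate_object(handle, "_FIT", NULL, &buf);
	if (ACPI_FAILURE(status))
		return status;

	obj = buf.pointer;
	if (obj->type != ACPI_TYPE_BUFFER)
		status = AE_TYPE;
	kfree(buf.pointer);
	return status;
}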

static const struct acpi_device_id acpi_nfit_ids[] = {
	{ "ACPI0012", 0 },
	{ "", 0 },
};
MODULE_DEVICE_TABLE(acpi, acpi_nfit_ids);

static struct acpi_driver acpi_nfit_driver = {
	.name = KBUILD_MODNAME,
	.ids = acpi_nfit_ids,
	.ops = {
		.add = acpi_nfit_add,
		.remove = acpi_nfit_remove,
		.notify = acpi_nfit_notify,
	},
};

static __init int nfit_init(void)
{
	BUILD_BUG_ON(sizeof(struct acpi_table_nfit) != 40);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_system_address) != 56);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_memory_map) != 48);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_interleave) != 20);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_smbios) != 9);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_control_region) != 80);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_data_region) != 40);

	acpi_str_to_uuid(UUID_VOLATILE_MEMORY, nfit_uuid[NFIT_SPA_VOLATILE]);
	acpi_str_to_uuid(UUID_PERSISTENT_MEMORY, nfit_uuid[NFIT_SPA_PM]);
	acpi_str_to_uuid(UUID_CONTROL_REGION, nfit_uuid[NFIT_SPA_DCR]);
	acpi_str_to_uuid(UUID_DATA_REGION, nfit_uuid[NFIT_SPA_BDW]);
	acpi_str_to_uuid(UUID_VOLATILE_VIRTUAL_DISK, nfit_uuid[NFIT_SPA_VDISK]);
	acpi_str_to_uuid(UUID_VOLATILE_VIRTUAL_CD, nfit_uuid[NFIT_SPA_VCD]);
	acpi_str_to_uuid(UUID_PERSISTENT_VIRTUAL_DISK, nfit_uuid[NFIT_SPA_PDISK]);
	acpi_str_to_uuid(UUID_PERSISTENT_VIRTUAL_CD, nfit_uuid[NFIT_SPA_PCD]);
	acpi_str_to_uuid(UUID_NFIT_BUS, nfit_uuid[NFIT_DEV_BUS]);
	acpi_str_to_uuid(UUID_NFIT_DIMM, nfit_uuid[NFIT_DEV_DIMM]);

	nfit_wq = create_singlethread_workqueue("nfit");
	if (!nfit_wq)
		return -ENOMEM;

	return acpi_bus_register_driver(&acpi_nfit_driver);
}

static __exit void nfit_exit(void)
{
	acpi_bus_unregister_driver(&acpi_nfit_driver);
	destroy_workqueue(nfit_wq);
}

module_init(nfit_init);
module_exit(nfit_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");