// SPDX-License-Identifier: GPL-2.0-only
/*
 *  dcdbas.c: Dell Systems Management Base Driver
 *
 *  The Dell Systems Management Base Driver provides a sysfs interface for
 *  systems management software to perform System Management Interrupts (SMIs)
 *  and Host Control Actions (power cycle or power off after OS shutdown) on
 *  Dell systems.
 *
 *  See Documentation/userspace-api/dcdbas.rst for more information.
 *
 *  Copyright (C) 1995-2006 Dell Inc.
 */

#include <linux/platform_device.h>
#include <linux/acpi.h>
#include <linux/dma-mapping.h>
#include <linux/dmi.h>
#include <linux/errno.h>
#include <linux/cpu.h>
#include <linux/gfp.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/mc146818rtc.h>
#include <linux/module.h>
#include <linux/reboot.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mutex.h>

#include "dcdbas.h"

#define DRIVER_NAME		"dcdbas"
#define DRIVER_VERSION		"5.6.0-3.4"
#define DRIVER_DESCRIPTION	"Dell Systems Management Base Driver"

static struct platform_device *dcdbas_pdev;

static unsigned long max_smi_data_buf_size = MAX_SMI_DATA_BUF_SIZE;
static DEFINE_MUTEX(smi_data_lock);
static u8 *bios_buffer;
static struct smi_buffer smi_buf;

static unsigned int host_control_action;
static unsigned int host_control_smi_type;
static unsigned int host_control_on_shutdown;

static bool wsmt_enabled;

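/**
 * dcdbas_smi_alloc: allocate a DMA-coherent buffer for SMI data
 */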
int dcdbas_smi_alloc(struct smi_buffer *smi_buffer, unsigned long size)
{
	smi_buffer->virt = dma_alloc_coherent(&dcdbas_pdev->dev, size,
					      &smi_buffer->dma, GFP_KERNEL);
	if (!smi_buffer->virt) {
		dev_dbg(&dcdbas_pdev->dev,
			"%s: failed to allocate memory size %lu\n",
			__func__, size);
		return -ENOMEM;
	}
	smi_buffer->size = size;

	dev_dbg(&dcdbas_pdev->dev, "%s: phys: %x size: %lu\n",
		__func__, (u32)smi_buffer->dma, smi_buffer->size);

	return 0;
}
EXPORT_SYMBOL_GPL(dcdbas_smi_alloc);

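/**
 * dcdbas_smi_free: free a buffer allocated with dcdbas_smi_alloc
 */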
void dcdbas_smi_free(struct smi_buffer *smi_buffer)
{
	if (!smi_buffer->virt)
		return;

	dev_dbg(&dcdbas_pdev->dev, "%s: phys: %x size: %lu\n",
		__func__, (u32)smi_buffer->dma, smi_buffer->size);
	dma_free_coherent(&dcdbas_pdev->dev, smi_buffer->size,
			  smi_buffer->virt, smi_buffer->dma);
	smi_buffer->virt = NULL;
	smi_buffer->dma = 0;
	smi_buffer->size = 0;
}
EXPORT_SYMBOL_GPL(dcdbas_smi_free);

/**
 * smi_data_buf_free: free SMI data buffer
 */
static void smi_data_buf_free(void)
{
	if (!smi_buf.virt || wsmt_enabled)
		return;

	dcdbas_smi_free(&smi_buf);
}

/**
 * smi_data_buf_realloc: grow SMI data buffer if needed
 */
static int smi_data_buf_realloc(unsigned long size)
{
	struct smi_buffer tmp;
	int ret;

	if (smi_buf.size >= size)
		return 0;

	if (size > max_smi_data_buf_size)
		return -EINVAL;

	/* new buffer is needed */
	ret = dcdbas_smi_alloc(&tmp, size);
	if (ret)
		return ret;

	/* memory zeroed by dma_alloc_coherent */
	if (smi_buf.virt)
		memcpy(tmp.virt, smi_buf.virt, smi_buf.size);

	/* free any existing buffer */
	smi_data_buf_free();

	/* set up new buffer for use */
	smi_buf = tmp;

	return 0;
}

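/*
 * The sysfs show/store handlers below back the attributes registered in
 * dcdbas_attr_group at the bottom of this file.
 */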
static ssize_t smi_data_buf_phys_addr_show(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	return sprintf(buf, "%x\n", (u32)smi_buf.dma);
}

static ssize_t smi_data_buf_size_show(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	return sprintf(buf, "%lu\n", smi_buf.size);
}

static ssize_t smi_data_buf_size_store(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t count)
{
	unsigned long buf_size;
	ssize_t ret;

	buf_size = simple_strtoul(buf, NULL, 10);

	/* make sure SMI data buffer is at least buf_size */
	mutex_lock(&smi_data_lock);
	ret = smi_data_buf_realloc(buf_size);
	mutex_unlock(&smi_data_lock);
	if (ret)
		return ret;

	return count;
}

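/*
 * smi_data is a binary sysfs attribute: userspace reads and writes the
 * contents of the SMI data buffer through it directly.
 */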
static ssize_t smi_data_read(struct file *filp, struct kobject *kobj,
			     struct bin_attribute *bin_attr,
			     char *buf, loff_t pos, size_t count)
{
	ssize_t ret;

	mutex_lock(&smi_data_lock);
	ret = memory_read_from_buffer(buf, count, &pos, smi_buf.virt,
					smi_buf.size);
	mutex_unlock(&smi_data_lock);
	return ret;
}

static ssize_t smi_data_write(struct file *filp, struct kobject *kobj,
			      struct bin_attribute *bin_attr,
			      char *buf, loff_t pos, size_t count)
{
	ssize_t ret;

	if ((pos + count) > max_smi_data_buf_size)
		return -EINVAL;

	mutex_lock(&smi_data_lock);

	ret = smi_data_buf_realloc(pos + count);
	if (ret)
		goto out;

	memcpy(smi_buf.virt + pos, buf, count);
	ret = count;
out:
	mutex_unlock(&smi_data_lock);
	return ret;
}

static ssize_t host_control_action_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	return sprintf(buf, "%u\n", host_control_action);
}

static ssize_t host_control_action_store(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t count)
{
	ssize_t ret;

	/* make sure buffer is available for host control command */
	mutex_lock(&smi_data_lock);
	ret = smi_data_buf_realloc(sizeof(struct apm_cmd));
	mutex_unlock(&smi_data_lock);
	if (ret)
		return ret;

	host_control_action = simple_strtoul(buf, NULL, 10);
	return count;
}

static ssize_t host_control_smi_type_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	return sprintf(buf, "%u\n", host_control_smi_type);
}

static ssize_t host_control_smi_type_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	host_control_smi_type = simple_strtoul(buf, NULL, 10);
	return count;
}

static ssize_t host_control_on_shutdown_show(struct device *dev,
					     struct device_attribute *attr,
					     char *buf)
{
	return sprintf(buf, "%u\n", host_control_on_shutdown);
}

static ssize_t host_control_on_shutdown_store(struct device *dev,
					      struct device_attribute *attr,
					      const char *buf, size_t count)
{
	host_control_on_shutdown = simple_strtoul(buf, NULL, 10);
	return count;
}

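/*
 * raise_smi: generate the SMI via an I/O port write.  The SMI must be
 * raised on CPU 0, so dcdbas_smi_request() invokes this through
 * smp_call_on_cpu().
 */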
static int raise_smi(void *par)
{
	struct smi_cmd *smi_cmd = par;

	if (smp_processor_id() != 0) {
		dev_dbg(&dcdbas_pdev->dev, "%s: failed to get CPU 0\n",
			__func__);
		return -EBUSY;
	}

	/* generate SMI */
	/* inb to force posted write through and make SMI happen now */
	asm volatile (
		"outb %b0,%w1\n"
		"inb %w1"
		: /* no output args */
		: "a" (smi_cmd->command_code),
		  "d" (smi_cmd->command_address),
		  "b" (smi_cmd->ebx),
		  "c" (smi_cmd->ecx)
		: "memory"
	);

	return 0;
}

/**
 * dcdbas_smi_request: generate SMI request
 *
 * Called with smi_data_lock.
 */
int dcdbas_smi_request(struct smi_cmd *smi_cmd)
{
	int ret;

	if (smi_cmd->magic != SMI_CMD_MAGIC) {
		dev_info(&dcdbas_pdev->dev, "%s: invalid magic value\n",
			 __func__);
		return -EBADR;
	}

	/* SMI requires CPU 0 */
	cpus_read_lock();
	ret = smp_call_on_cpu(0, raise_smi, smi_cmd, true);
	cpus_read_unlock();

	return ret;
}
EXPORT_SYMBOL(dcdbas_smi_request);

/**
 * smi_request_store:
 *
 * The valid values are:
 * 0: zero SMI data buffer
 * 1: generate calling interface SMI
 * 2: generate raw SMI
 *
 * User application writes smi_cmd to smi_data before telling driver
 * to generate SMI.
 */
static ssize_t smi_request_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct smi_cmd *smi_cmd;
	unsigned long val = simple_strtoul(buf, NULL, 10);
	ssize_t ret;

	mutex_lock(&smi_data_lock);

	if (smi_buf.size < sizeof(struct smi_cmd)) {
		ret = -ENODEV;
		goto out;
	}
	smi_cmd = (struct smi_cmd *)smi_buf.virt;

	switch (val) {
	case 2:
		/* Raw SMI */
		ret = dcdbas_smi_request(smi_cmd);
		if (!ret)
			ret = count;
		break;
	case 1:
		/*
		 * Calling Interface SMI
		 *
		 * Provide physical address of command buffer field within
		 * the struct smi_cmd to BIOS.
		 *
		 * Because the address that smi_cmd (smi_buf.virt) points to
		 * will be from memremap() of a non-memory address if WSMT
		 * is present, we can't use virt_to_phys() on smi_cmd, so
		 * we have to use the physical address that was saved when
		 * the virtual address for smi_cmd was received.
		 */
		smi_cmd->ebx = (u32)smi_buf.dma +
				offsetof(struct smi_cmd, command_buffer);
		ret = dcdbas_smi_request(smi_cmd);
		if (!ret)
			ret = count;
		break;
	case 0:
		memset(smi_buf.virt, 0, smi_buf.size);
		ret = count;
		break;
	default:
		ret = -EINVAL;
		break;
	}

out:
	mutex_unlock(&smi_data_lock);
	return ret;
}

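/*
 * Typical userspace sequence (see Documentation/userspace-api/dcdbas.rst;
 * the attributes normally appear under /sys/devices/platform/dcdbas):
 *   1. write the required size to smi_data_buf_size
 *   2. write the populated struct smi_cmd to the smi_data binary attribute
 *   3. write 1 (calling interface SMI) or 2 (raw SMI) to smi_request
 */
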
/**
 * host_control_smi: generate host control SMI
 *
 * Caller must set up the host control command in smi_buf.virt.
 */
static int host_control_smi(void)
{
	struct apm_cmd *apm_cmd;
	u8 *data;
	unsigned long flags;
	u32 num_ticks;
	s8 cmd_status;
	u8 index;

	apm_cmd = (struct apm_cmd *)smi_buf.virt;
	apm_cmd->status = ESM_STATUS_CMD_UNSUCCESSFUL;

	switch (host_control_smi_type) {
	case HC_SMITYPE_TYPE1:
		spin_lock_irqsave(&rtc_lock, flags);
		/* write SMI data buffer physical address */
		data = (u8 *)&smi_buf.dma;
		for (index = PE1300_CMOS_CMD_STRUCT_PTR;
		     index < (PE1300_CMOS_CMD_STRUCT_PTR + 4);
		     index++, data++) {
			outb(index,
			     (CMOS_BASE_PORT + CMOS_PAGE2_INDEX_PORT_PIIX4));
			outb(*data,
			     (CMOS_BASE_PORT + CMOS_PAGE2_DATA_PORT_PIIX4));
		}

		/* first set status to -1 as called by spec */
		cmd_status = ESM_STATUS_CMD_UNSUCCESSFUL;
		outb((u8) cmd_status, PCAT_APM_STATUS_PORT);

		/* generate SMM call */
		outb(ESM_APM_CMD, PCAT_APM_CONTROL_PORT);
		spin_unlock_irqrestore(&rtc_lock, flags);

		/* wait a few to see if it executed */
		num_ticks = TIMEOUT_USEC_SHORT_SEMA_BLOCKING;
		while ((s8)inb(PCAT_APM_STATUS_PORT) == ESM_STATUS_CMD_UNSUCCESSFUL) {
			num_ticks--;
			if (num_ticks == EXPIRED_TIMER)
				return -ETIME;
		}
		break;

	case HC_SMITYPE_TYPE2:
	case HC_SMITYPE_TYPE3:
		spin_lock_irqsave(&rtc_lock, flags);
		/* write SMI data buffer physical address */
		data = (u8 *)&smi_buf.dma;
		for (index = PE1400_CMOS_CMD_STRUCT_PTR;
		     index < (PE1400_CMOS_CMD_STRUCT_PTR + 4);
		     index++, data++) {
			outb(index, (CMOS_BASE_PORT + CMOS_PAGE1_INDEX_PORT));
			outb(*data, (CMOS_BASE_PORT + CMOS_PAGE1_DATA_PORT));
		}

		/* generate SMM call */
		if (host_control_smi_type == HC_SMITYPE_TYPE3)
			outb(ESM_APM_CMD, PCAT_APM_CONTROL_PORT);
		else
			outb(ESM_APM_CMD, PE1400_APM_CONTROL_PORT);

		/* restore RTC index pointer since it was written to above */
		CMOS_READ(RTC_REG_C);
		spin_unlock_irqrestore(&rtc_lock, flags);

		/* read control port back to serialize write */
		cmd_status = inb(PE1400_APM_CONTROL_PORT);

		/* wait a few to see if it executed */
		num_ticks = TIMEOUT_USEC_SHORT_SEMA_BLOCKING;
		while (apm_cmd->status == ESM_STATUS_CMD_UNSUCCESSFUL) {
			num_ticks--;
			if (num_ticks == EXPIRED_TIMER)
				return -ETIME;
		}
		break;

	default:
		dev_dbg(&dcdbas_pdev->dev, "%s: invalid SMI type %u\n",
			__func__, host_control_smi_type);
		return -ENOSYS;
	}

	return 0;
}

/**
 * dcdbas_host_control: initiate host control
 *
 * This function is called by the driver after the system has
 * finished shutting down if the user application specified a
 * host control action to perform on shutdown.  It is safe to
 * use smi_buf.virt at this point because the system has finished
 * shutting down and no userspace apps are running.
 */
static void dcdbas_host_control(void)
{
	struct apm_cmd *apm_cmd;
	u8 action;

	if (host_control_action == HC_ACTION_NONE)
		return;

	action = host_control_action;
	host_control_action = HC_ACTION_NONE;

	if (!smi_buf.virt) {
		dev_dbg(&dcdbas_pdev->dev, "%s: no SMI buffer\n", __func__);
		return;
	}

	if (smi_buf.size < sizeof(struct apm_cmd)) {
		dev_dbg(&dcdbas_pdev->dev, "%s: SMI buffer too small\n",
			__func__);
		return;
	}

	apm_cmd = (struct apm_cmd *)smi_buf.virt;

	/* power off takes precedence */
	if (action & HC_ACTION_HOST_CONTROL_POWEROFF) {
		apm_cmd->command = ESM_APM_POWER_CYCLE;
		apm_cmd->reserved = 0;
		*((s16 *)&apm_cmd->parameters.shortreq.parm[0]) = (s16) 0;
		host_control_smi();
	} else if (action & HC_ACTION_HOST_CONTROL_POWERCYCLE) {
		apm_cmd->command = ESM_APM_POWER_CYCLE;
		apm_cmd->reserved = 0;
		*((s16 *)&apm_cmd->parameters.shortreq.parm[0]) = (s16) 20;
		host_control_smi();
	}
}

/* WSMT */
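/*
 * If the ACPI WSMT table indicates that fixed, protected communication
 * buffers are required, the driver must use the firmware-provided buffer
 * instead of allocating its own; dcdbas_check_wsmt() locates and maps it.
 */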

static u8 checksum(u8 *buffer, u8 length)
{
	u8 sum = 0;
	u8 *end = buffer + length;

	while (buffer < end)
		sum += *buffer++;
	return sum;
}

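/* Validate a candidate SMM EPS (entry point structure): signature and checksum. */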
static inline struct smm_eps_table *check_eps_table(u8 *addr)
{
	struct smm_eps_table *eps = (struct smm_eps_table *)addr;

	if (strncmp(eps->smm_comm_buff_anchor, SMM_EPS_SIG, 4) != 0)
		return NULL;

	if (checksum(addr, eps->length) != 0)
		return NULL;

	return eps;
}

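/**
 * dcdbas_check_wsmt: look for a WSMT-protected SMI buffer
 *
 * Returns 1 if a firmware-provided buffer was found and mapped, 0 if WSMT
 * is absent or does not require a fixed buffer, or a negative errno on
 * failure.
 */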
static int dcdbas_check_wsmt(void)
{
	const struct dmi_device *dev = NULL;
	struct acpi_table_wsmt *wsmt = NULL;
	struct smm_eps_table *eps = NULL;
	u64 bios_buf_paddr;
	u64 remap_size;
	u8 *addr;

	acpi_get_table(ACPI_SIG_WSMT, 0, (struct acpi_table_header **)&wsmt);
	if (!wsmt)
		return 0;

	/* Check if WSMT ACPI table shows that protection is enabled */
	if (!(wsmt->protection_flags & ACPI_WSMT_FIXED_COMM_BUFFERS) ||
	    !(wsmt->protection_flags & ACPI_WSMT_COMM_BUFFER_NESTED_PTR_PROTECTION))
		return 0;

	/*
	 * BIOS could provide the address/size of the protected buffer
	 * in an SMBIOS string or in an EPS structure in 0xFxxxx.
	 */

	/* Check SMBIOS for buffer address */
	while ((dev = dmi_find_device(DMI_DEV_TYPE_OEM_STRING, NULL, dev)))
		if (sscanf(dev->name, "30[%16llx;%8llx]", &bios_buf_paddr,
		    &remap_size) == 2)
			goto remap;

	/* Scan for EPS (entry point structure) */
	for (addr = (u8 *)__va(0xf0000);
	     addr < (u8 *)__va(0x100000 - sizeof(struct smm_eps_table));
	     addr += 16) {
		eps = check_eps_table(addr);
		if (eps)
			break;
	}

	if (!eps) {
		dev_dbg(&dcdbas_pdev->dev, "found WSMT, but no firmware buffer found\n");
		return -ENODEV;
	}
	bios_buf_paddr = eps->smm_comm_buff_addr;
	remap_size = eps->num_of_4k_pages * PAGE_SIZE;

remap:
	/*
	 * Get physical address of buffer and map to virtual address.
	 * Table gives size in 4K pages, regardless of actual system page size.
	 */
	if (upper_32_bits(bios_buf_paddr + 8)) {
		dev_warn(&dcdbas_pdev->dev, "found WSMT, but buffer address is above 4GB\n");
		return -EINVAL;
	}
	/*
	 * Limit remap size to MAX_SMI_DATA_BUF_SIZE + 8 (since the first 8
	 * bytes are used for a semaphore, not the data buffer itself).
	 */
	if (remap_size > MAX_SMI_DATA_BUF_SIZE + 8)
		remap_size = MAX_SMI_DATA_BUF_SIZE + 8;

	bios_buffer = memremap(bios_buf_paddr, remap_size, MEMREMAP_WB);
	if (!bios_buffer) {
		dev_warn(&dcdbas_pdev->dev, "found WSMT, but failed to map buffer\n");
		return -ENOMEM;
	}

	/* First 8 bytes is for a semaphore, not part of the smi_buf.virt */
	smi_buf.dma = bios_buf_paddr + 8;
	smi_buf.virt = bios_buffer + 8;
	smi_buf.size = remap_size - 8;
	max_smi_data_buf_size = smi_buf.size;
	wsmt_enabled = true;
	dev_info(&dcdbas_pdev->dev,
		 "WSMT found, using firmware-provided SMI buffer.\n");
	return 1;
}

/**
 * dcdbas_reboot_notify: handle reboot notification for host control
 */
static int dcdbas_reboot_notify(struct notifier_block *nb, unsigned long code,
				void *unused)
{
	switch (code) {
	case SYS_DOWN:
	case SYS_HALT:
	case SYS_POWER_OFF:
		if (host_control_on_shutdown) {
			/* firmware is going to perform host control action */
			printk(KERN_WARNING "Please wait for shutdown "
			       "action to complete...\n");
			dcdbas_host_control();
		}
		break;
	}

	return NOTIFY_DONE;
}

static struct notifier_block dcdbas_reboot_nb = {
	.notifier_call = dcdbas_reboot_notify,
	.next = NULL,
	.priority = INT_MIN
};

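/*
 * Attribute declarations; the DCDBAS_* helper macros are defined in
 * dcdbas.h and wrap the usual DEVICE_ATTR()/BIN_ATTR() declarations.
 */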
static DCDBAS_BIN_ATTR_RW(smi_data);

static struct bin_attribute *dcdbas_bin_attrs[] = {
	&bin_attr_smi_data,
	NULL
};

static DCDBAS_DEV_ATTR_RW(smi_data_buf_size);
static DCDBAS_DEV_ATTR_RO(smi_data_buf_phys_addr);
static DCDBAS_DEV_ATTR_WO(smi_request);
static DCDBAS_DEV_ATTR_RW(host_control_action);
static DCDBAS_DEV_ATTR_RW(host_control_smi_type);
static DCDBAS_DEV_ATTR_RW(host_control_on_shutdown);

static struct attribute *dcdbas_dev_attrs[] = {
	&dev_attr_smi_data_buf_size.attr,
	&dev_attr_smi_data_buf_phys_addr.attr,
	&dev_attr_smi_request.attr,
	&dev_attr_host_control_action.attr,
	&dev_attr_host_control_smi_type.attr,
	&dev_attr_host_control_on_shutdown.attr,
	NULL
};

static const struct attribute_group dcdbas_attr_group = {
	.attrs = dcdbas_dev_attrs,
	.bin_attrs = dcdbas_bin_attrs,
};

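/**
 * dcdbas_probe: check for a WSMT-protected buffer, restrict coherent DMA
 * to 32-bit addresses (BIOS SMI calls take 32-bit physical addresses),
 * create the sysfs group, and register the reboot notifier.
 */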
static int dcdbas_probe(struct platform_device *dev)
{
	int error;

	host_control_action = HC_ACTION_NONE;
	host_control_smi_type = HC_SMITYPE_NONE;

	dcdbas_pdev = dev;

	/* Check if ACPI WSMT table specifies protected SMI buffer address */
	error = dcdbas_check_wsmt();
	if (error < 0)
		return error;

	/*
	 * BIOS SMI calls require buffer addresses be in 32-bit address space.
	 * This is done by setting the DMA mask below.
	 */
	error = dma_set_coherent_mask(&dcdbas_pdev->dev, DMA_BIT_MASK(32));
	if (error)
		return error;

	error = sysfs_create_group(&dev->dev.kobj, &dcdbas_attr_group);
	if (error)
		return error;

	register_reboot_notifier(&dcdbas_reboot_nb);

	dev_info(&dev->dev, "%s (version %s)\n",
		 DRIVER_DESCRIPTION, DRIVER_VERSION);

	return 0;
}

static void dcdbas_remove(struct platform_device *dev)
{
	unregister_reboot_notifier(&dcdbas_reboot_nb);
	sysfs_remove_group(&dev->dev.kobj, &dcdbas_attr_group);
}

static struct platform_driver dcdbas_driver = {
	.driver		= {
		.name	= DRIVER_NAME,
	},
	.probe		= dcdbas_probe,
	.remove_new	= dcdbas_remove,
};

static const struct platform_device_info dcdbas_dev_info __initconst = {
	.name		= DRIVER_NAME,
	.id		= PLATFORM_DEVID_NONE,
	.dma_mask	= DMA_BIT_MASK(32),
};

static struct platform_device *dcdbas_pdev_reg;

/**
 * dcdbas_init: initialize driver
 */
static int __init dcdbas_init(void)
{
	int error;

	error = platform_driver_register(&dcdbas_driver);
	if (error)
		return error;

	dcdbas_pdev_reg = platform_device_register_full(&dcdbas_dev_info);
	if (IS_ERR(dcdbas_pdev_reg)) {
		error = PTR_ERR(dcdbas_pdev_reg);
		goto err_unregister_driver;
	}

	return 0;

 err_unregister_driver:
	platform_driver_unregister(&dcdbas_driver);
	return error;
}

/**
 * dcdbas_exit: perform driver cleanup
 */
static void __exit dcdbas_exit(void)
{
	/*
	 * make sure functions that use dcdbas_pdev are called
	 * before platform_device_unregister
	 */
	unregister_reboot_notifier(&dcdbas_reboot_nb);

	/*
	 * We have to free the buffer here instead of dcdbas_remove
	 * because only in module exit function we can be sure that
	 * all sysfs attributes belonging to this module have been
	 * released.
	 */
	if (dcdbas_pdev)
		smi_data_buf_free();
	if (bios_buffer)
		memunmap(bios_buffer);
	platform_device_unregister(dcdbas_pdev_reg);
	platform_driver_unregister(&dcdbas_driver);
}

subsys_initcall_sync(dcdbas_init);
module_exit(dcdbas_exit);

MODULE_DESCRIPTION(DRIVER_DESCRIPTION " (version " DRIVER_VERSION ")");
MODULE_VERSION(DRIVER_VERSION);
MODULE_AUTHOR("Dell Inc.");
MODULE_LICENSE("GPL");
/* Any System or BIOS claiming to be by Dell */
MODULE_ALIAS("dmi:*:[bs]vnD[Ee][Ll][Ll]*:*");