// SPDX-License-Identifier: GPL-2.0-only
/*
 * IBM Accelerator Family 'GenWQE'
 *
 * (C) Copyright IBM Corp. 2013
 *
 * Author: Frank Haverkamp <haver@linux.vnet.ibm.com>
 * Author: Joerg-Stephan Vogt <jsvogt@de.ibm.com>
 * Author: Michael Jung <mijung@gmx.net>
 * Author: Michael Ruettger <michael@ibmra.de>
 */

/*
 * Character device representation of the GenWQE device. This allows
 * user-space applications to communicate with the card.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/string.h>
#include <linux/fs.h>
#include <linux/sched/signal.h>
#include <linux/wait.h>
#include <linux/delay.h>
#include <linux/atomic.h>

#include "card_base.h"
#include "card_ddcb.h"

static int genwqe_open_files(struct genwqe_dev *cd)
{
	int rc;
	unsigned long flags;

	spin_lock_irqsave(&cd->file_lock, flags);
	rc = list_empty(&cd->file_list);
	spin_unlock_irqrestore(&cd->file_lock, flags);
	return !rc;
}

static void genwqe_add_file(struct genwqe_dev *cd, struct genwqe_file *cfile)
{
	unsigned long flags;

	cfile->opener = get_pid(task_tgid(current));
	spin_lock_irqsave(&cd->file_lock, flags);
	list_add(&cfile->list, &cd->file_list);
	spin_unlock_irqrestore(&cd->file_lock, flags);
}

static int genwqe_del_file(struct genwqe_dev *cd, struct genwqe_file *cfile)
{
	unsigned long flags;

	spin_lock_irqsave(&cd->file_lock, flags);
	list_del(&cfile->list);
	spin_unlock_irqrestore(&cd->file_lock, flags);
	put_pid(cfile->opener);

	return 0;
}

static void genwqe_add_pin(struct genwqe_file *cfile, struct dma_mapping *m)
{
	unsigned long flags;

	spin_lock_irqsave(&cfile->pin_lock, flags);
	list_add(&m->pin_list, &cfile->pin_list);
	spin_unlock_irqrestore(&cfile->pin_lock, flags);
}

static int genwqe_del_pin(struct genwqe_file *cfile, struct dma_mapping *m)
{
	unsigned long flags;

	spin_lock_irqsave(&cfile->pin_lock, flags);
	list_del(&m->pin_list);
	spin_unlock_irqrestore(&cfile->pin_lock, flags);

	return 0;
}

/**
 * genwqe_search_pin() - Search for the mapping for a userspace address
 * @cfile:	Descriptor of opened file
 * @u_addr:	User virtual address
 * @size:	Size of buffer
 * @virt_addr:	Kernel virtual address to be updated, if not NULL
 *
 * Return: Pointer to the corresponding mapping, or NULL if not found
 */
static struct dma_mapping *genwqe_search_pin(struct genwqe_file *cfile,
					    unsigned long u_addr,
					    unsigned int size,
					    void **virt_addr)
{
	unsigned long flags;
	struct dma_mapping *m;

	spin_lock_irqsave(&cfile->pin_lock, flags);

	list_for_each_entry(m, &cfile->pin_list, pin_list) {
		if ((((u64)m->u_vaddr) <= (u_addr)) &&
		    (((u64)m->u_vaddr + m->size) >= (u_addr + size))) {

			if (virt_addr)
				*virt_addr = m->k_vaddr +
					(u_addr - (u64)m->u_vaddr);

			spin_unlock_irqrestore(&cfile->pin_lock, flags);
			return m;
		}
	}
	spin_unlock_irqrestore(&cfile->pin_lock, flags);
	return NULL;
}

static void __genwqe_add_mapping(struct genwqe_file *cfile,
			      struct dma_mapping *dma_map)
{
	unsigned long flags;

	spin_lock_irqsave(&cfile->map_lock, flags);
	list_add(&dma_map->card_list, &cfile->map_list);
	spin_unlock_irqrestore(&cfile->map_lock, flags);
}

static void __genwqe_del_mapping(struct genwqe_file *cfile,
			      struct dma_mapping *dma_map)
{
	unsigned long flags;

	spin_lock_irqsave(&cfile->map_lock, flags);
	list_del(&dma_map->card_list);
	spin_unlock_irqrestore(&cfile->map_lock, flags);
}


/**
 * __genwqe_search_mapping() - Search for the mapping for a userspace address
 * @cfile:	Descriptor of opened file
 * @u_addr:	User virtual address
 * @size:	Size of buffer
 * @dma_addr:	DMA address to be updated, if not NULL
 * @virt_addr:	Kernel virtual address to be updated, if not NULL
 *
 * Return: Pointer to the corresponding mapping, or NULL if not found
 */
static struct dma_mapping *__genwqe_search_mapping(struct genwqe_file *cfile,
						   unsigned long u_addr,
						   unsigned int size,
						   dma_addr_t *dma_addr,
						   void **virt_addr)
{
	unsigned long flags;
	struct dma_mapping *m;
	struct pci_dev *pci_dev = cfile->cd->pci_dev;

	spin_lock_irqsave(&cfile->map_lock, flags);
	list_for_each_entry(m, &cfile->map_list, card_list) {

		if ((((u64)m->u_vaddr) <= (u_addr)) &&
		    (((u64)m->u_vaddr + m->size) >= (u_addr + size))) {

			/* match found: address is in range */
			if (dma_addr)
				*dma_addr = m->dma_addr +
					(u_addr - (u64)m->u_vaddr);

			if (virt_addr)
				*virt_addr = m->k_vaddr +
					(u_addr - (u64)m->u_vaddr);

			spin_unlock_irqrestore(&cfile->map_lock, flags);
			return m;
		}
	}
	spin_unlock_irqrestore(&cfile->map_lock, flags);

	dev_err(&pci_dev->dev,
		"[%s] Entry not found: u_addr=%lx, size=%x\n",
		__func__, u_addr, size);

	return NULL;
}

static void genwqe_remove_mappings(struct genwqe_file *cfile)
{
	int i = 0;
	struct list_head *node, *next;
	struct dma_mapping *dma_map;
	struct genwqe_dev *cd = cfile->cd;
	struct pci_dev *pci_dev = cfile->cd->pci_dev;

	list_for_each_safe(node, next, &cfile->map_list) {
		dma_map = list_entry(node, struct dma_mapping, card_list);

		list_del_init(&dma_map->card_list);

		/*
		 * This is really a bug, because those things should
		 * have been tidied up already.
		 *
		 * GENWQE_MAPPING_RAW should have been removed via munmap().
		 * GENWQE_MAPPING_SGL_TEMP should be removed by the tidy-up code.
		 */
		dev_err(&pci_dev->dev,
			"[%s] %d. cleanup mapping: u_vaddr=%p u_kaddr=%016lx dma_addr=%lx\n",
			__func__, i++, dma_map->u_vaddr,
			(unsigned long)dma_map->k_vaddr,
			(unsigned long)dma_map->dma_addr);

		if (dma_map->type == GENWQE_MAPPING_RAW) {
			/* we allocated this dynamically */
			__genwqe_free_consistent(cd, dma_map->size,
						dma_map->k_vaddr,
						dma_map->dma_addr);
			kfree(dma_map);
		} else if (dma_map->type == GENWQE_MAPPING_SGL_TEMP) {
			/* we use dma_map statically from the request */
			genwqe_user_vunmap(cd, dma_map);
		}
	}
}

static void genwqe_remove_pinnings(struct genwqe_file *cfile)
{
	struct list_head *node, *next;
	struct dma_mapping *dma_map;
	struct genwqe_dev *cd = cfile->cd;

	list_for_each_safe(node, next, &cfile->pin_list) {
		dma_map = list_entry(node, struct dma_mapping, pin_list);

		/*
		 * This is not a bug, because a killed process might
		 * not call the unpin ioctl, which is supposed to free
		 * the resources.
		 *
		 * Pinnings are dynamically allocated and need to be
		 * deleted.
		 */
		list_del_init(&dma_map->pin_list);
		genwqe_user_vunmap(cd, dma_map);
		kfree(dma_map);
	}
}

/**
 * genwqe_kill_fasync() - Send signal to all processes with open GenWQE files
 * @cd:        GenWQE device
 * @sig:       Signal to send out, e.g. genwqe_kill_fasync(cd, SIGIO)
 *
 * Return: Number of open files the signal was sent to
 */
static int genwqe_kill_fasync(struct genwqe_dev *cd, int sig)
{
	unsigned int files = 0;
	unsigned long flags;
	struct genwqe_file *cfile;

	spin_lock_irqsave(&cd->file_lock, flags);
	list_for_each_entry(cfile, &cd->file_list, list) {
		if (cfile->async_queue)
			kill_fasync(&cfile->async_queue, sig, POLL_HUP);
		files++;
	}
	spin_unlock_irqrestore(&cd->file_lock, flags);
	return files;
}

static int genwqe_terminate(struct genwqe_dev *cd)
{
	unsigned int files = 0;
	unsigned long flags;
	struct genwqe_file *cfile;

	spin_lock_irqsave(&cd->file_lock, flags);
	list_for_each_entry(cfile, &cd->file_list, list) {
		kill_pid(cfile->opener, SIGKILL, 1);
		files++;
	}
	spin_unlock_irqrestore(&cd->file_lock, flags);
	return files;
}

/**
 * genwqe_open() - file open
 * @inode:      file system information
 * @filp:	file handle
 *
 * This function is executed whenever an application calls
 * open("/dev/genwqe",..).
 *
 * Return: 0 if successful or <0 if errors
 */
static int genwqe_open(struct inode *inode, struct file *filp)
{
	struct genwqe_dev *cd;
	struct genwqe_file *cfile;

	cfile = kzalloc(sizeof(*cfile), GFP_KERNEL);
	if (cfile == NULL)
		return -ENOMEM;

	cd = container_of(inode->i_cdev, struct genwqe_dev, cdev_genwqe);
	cfile->cd = cd;
	cfile->filp = filp;
	cfile->client = NULL;

	spin_lock_init(&cfile->map_lock);  /* list of raw memory allocations */
	INIT_LIST_HEAD(&cfile->map_list);

	spin_lock_init(&cfile->pin_lock);  /* list of user pinned memory */
	INIT_LIST_HEAD(&cfile->pin_list);

	filp->private_data = cfile;

	genwqe_add_file(cd, cfile);
	return 0;
}

/**
 * genwqe_fasync() - Setup process to receive SIGIO.
 * @fd:        file descriptor
 * @filp:      file handle
 * @mode:      file mode
 *
 * Sending a signal works as follows:
 *
 * if (cdev->async_queue)
 *         kill_fasync(&cdev->async_queue, SIGIO, POLL_IN);
 *
 * Some devices also implement asynchronous notification to indicate
 * when the device can be written; in this case, of course,
 * kill_fasync must be called with a mode of POLL_OUT.
 */
static int genwqe_fasync(int fd, struct file *filp, int mode)
{
	struct genwqe_file *cdev = (struct genwqe_file *)filp->private_data;

	return fasync_helper(fd, filp, mode, &cdev->async_queue);
}
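
/*
 * Example (illustrative sketch, not part of the driver): a user-space
 * program can request asynchronous notification so that the driver's
 * genwqe_kill_fasync() reaches it via SIGIO. The device path follows
 * the "%u_card" naming used in genwqe_device_create() below; the
 * handler name is hypothetical and error handling is omitted.
 *
 *   int fd = open("/dev/genwqe0_card", O_RDWR);
 *
 *   signal(SIGIO, my_sigio_handler);            // install a handler first
 *   fcntl(fd, F_SETOWN, getpid());              // deliver SIGIO to us
 *   fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | O_ASYNC);  // ends up in genwqe_fasync()
 */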

/**
 * genwqe_release() - file close
 * @inode:      file system information
 * @filp:       file handle
 *
 * This function is executed whenever an application calls 'close(fd_genwqe)'
 *
 * Return: always 0
 */
static int genwqe_release(struct inode *inode, struct file *filp)
{
	struct genwqe_file *cfile = (struct genwqe_file *)filp->private_data;
	struct genwqe_dev *cd = cfile->cd;

	/* there must be no entries in these lists! */
	genwqe_remove_mappings(cfile);
	genwqe_remove_pinnings(cfile);

	/* remove this filp from the asynchronously notified files */
	genwqe_fasync(-1, filp, 0);

	/*
	 * For this to work we must not release cd while this cfile is
	 * not yet released; otherwise the list entry becomes invalid,
	 * because the list itself gets reinstantiated!
	 */
	genwqe_del_file(cd, cfile);
	kfree(cfile);
	return 0;
}

static void genwqe_vma_open(struct vm_area_struct *vma)
{
	/* nothing ... */
}

/**
 * genwqe_vma_close() - Called each time the vma is unmapped
 * @vma:       VMA belonging to the GenWQE mmap()
 *
 * Free memory which got allocated by GenWQE mmap().
 */
static void genwqe_vma_close(struct vm_area_struct *vma)
{
	unsigned long vsize = vma->vm_end - vma->vm_start;
	struct inode *inode = file_inode(vma->vm_file);
	struct dma_mapping *dma_map;
	struct genwqe_dev *cd = container_of(inode->i_cdev, struct genwqe_dev,
					    cdev_genwqe);
	struct pci_dev *pci_dev = cd->pci_dev;
	dma_addr_t d_addr = 0;
	struct genwqe_file *cfile = vma->vm_private_data;

	dma_map = __genwqe_search_mapping(cfile, vma->vm_start, vsize,
					 &d_addr, NULL);
	if (dma_map == NULL) {
		dev_err(&pci_dev->dev,
			"  [%s] err: mapping not found: v=%lx, p=%lx s=%lx\n",
			__func__, vma->vm_start, vma->vm_pgoff << PAGE_SHIFT,
			vsize);
		return;
	}
	__genwqe_del_mapping(cfile, dma_map);
	__genwqe_free_consistent(cd, dma_map->size, dma_map->k_vaddr,
				 dma_map->dma_addr);
	kfree(dma_map);
}

static const struct vm_operations_struct genwqe_vma_ops = {
	.open   = genwqe_vma_open,
	.close  = genwqe_vma_close,
};

/**
 * genwqe_mmap() - Provide contiguous buffers to userspace
 * @filp:	file handle
 * @vma:	VMA area to map
 *
 * We use mmap() to allocate contiguous buffers used for DMA
 * transfers. After the buffer is allocated we remap it to user-space
 * and remember a reference to our dma_mapping data structure, where
 * we store the associated DMA address and allocated size.
 *
 * When we receive a DDCB execution request with the ATS bits set to
 * plain buffer, we lookup our dma_mapping list to find the
 * corresponding DMA address for the associated user-space address.
 */
static int genwqe_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int rc;
	unsigned long pfn, vsize = vma->vm_end - vma->vm_start;
	struct genwqe_file *cfile = (struct genwqe_file *)filp->private_data;
	struct genwqe_dev *cd = cfile->cd;
	struct dma_mapping *dma_map;

	if (vsize == 0)
		return -EINVAL;

	if (get_order(vsize) > MAX_ORDER)
		return -ENOMEM;

	dma_map = kzalloc(sizeof(struct dma_mapping), GFP_KERNEL);
	if (dma_map == NULL)
		return -ENOMEM;

	genwqe_mapping_init(dma_map, GENWQE_MAPPING_RAW);
	dma_map->u_vaddr = (void *)vma->vm_start;
	dma_map->size = vsize;
	dma_map->nr_pages = DIV_ROUND_UP(vsize, PAGE_SIZE);
	dma_map->k_vaddr = __genwqe_alloc_consistent(cd, vsize,
						     &dma_map->dma_addr);
	if (dma_map->k_vaddr == NULL) {
		rc = -ENOMEM;
		goto free_dma_map;
	}

	if (capable(CAP_SYS_ADMIN) && (vsize > sizeof(dma_addr_t)))
		*(dma_addr_t *)dma_map->k_vaddr = dma_map->dma_addr;

	pfn = virt_to_phys(dma_map->k_vaddr) >> PAGE_SHIFT;
	rc = remap_pfn_range(vma,
			     vma->vm_start,
			     pfn,
			     vsize,
			     vma->vm_page_prot);
	if (rc != 0) {
		rc = -EFAULT;
		goto free_dma_mem;
	}

	vma->vm_private_data = cfile;
	vma->vm_ops = &genwqe_vma_ops;
	__genwqe_add_mapping(cfile, dma_map);

	return 0;

 free_dma_mem:
	__genwqe_free_consistent(cd, dma_map->size,
				dma_map->k_vaddr,
				dma_map->dma_addr);
 free_dma_map:
	kfree(dma_map);
	return rc;
}
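
/*
 * Example (illustrative sketch, not part of the driver): allocating a
 * DMA-capable buffer from user-space through the mmap() path above.
 * The device path and buffer size are assumptions for illustration;
 * munmap() triggers genwqe_vma_close(), which frees the buffer again.
 *
 *   int fd = open("/dev/genwqe0_card", O_RDWR);
 *   size_t len = 2 * 1024 * 1024;
 *
 *   void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *   // ... reference buf in DDCB requests with the ATS bits set to plain buffer ...
 *   munmap(buf, len);
 */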

/**
 * do_flash_update() - Execute flash update (write image or CVPD)
 * @cfile:     descriptor of opened file
 * @load:      details about image load
 *
 * Return: 0 if successful
 */

#define	FLASH_BLOCK	0x40000	/* we use 256k blocks */

static int do_flash_update(struct genwqe_file *cfile,
			   struct genwqe_bitstream *load)
{
	int rc = 0;
	int blocks_to_flash;
	dma_addr_t dma_addr;
	u64 flash = 0;
	size_t tocopy = 0;
	u8 __user *buf;
	u8 *xbuf;
	u32 crc;
	u8 cmdopts;
	struct genwqe_dev *cd = cfile->cd;
	struct file *filp = cfile->filp;
	struct pci_dev *pci_dev = cd->pci_dev;

	if ((load->size & 0x3) != 0)
		return -EINVAL;

	if (((unsigned long)(load->data_addr) & ~PAGE_MASK) != 0)
		return -EINVAL;

	/* FIXME Bits have changed for new service layer! */
	switch ((char)load->partition) {
	case '0':
		cmdopts = 0x14;
		break;		/* download/erase_first/part_0 */
	case '1':
		cmdopts = 0x1C;
		break;		/* download/erase_first/part_1 */
	case 'v':
		cmdopts = 0x0C;
		break;		/* download/erase_first/vpd */
	default:
		return -EINVAL;
	}

	buf = (u8 __user *)load->data_addr;
	xbuf = __genwqe_alloc_consistent(cd, FLASH_BLOCK, &dma_addr);
	if (xbuf == NULL)
		return -ENOMEM;

	blocks_to_flash = load->size / FLASH_BLOCK;
	while (load->size) {
		struct genwqe_ddcb_cmd *req;

		/*
		 * We must be 4 byte aligned. The buffer must be 0 appended
		 * to have defined values when calculating the CRC.
		 */
		tocopy = min_t(size_t, load->size, FLASH_BLOCK);

		rc = copy_from_user(xbuf, buf, tocopy);
		if (rc) {
			rc = -EFAULT;
			goto free_buffer;
		}
		crc = genwqe_crc32(xbuf, tocopy, 0xffffffff);

		dev_dbg(&pci_dev->dev,
			"[%s] DMA: %lx CRC: %08x SZ: %ld %d\n",
			__func__, (unsigned long)dma_addr, crc, tocopy,
			blocks_to_flash);

		/* prepare DDCB for SLU process */
		req = ddcb_requ_alloc();
		if (req == NULL) {
			rc = -ENOMEM;
			goto free_buffer;
		}

		req->cmd = SLCMD_MOVE_FLASH;
		req->cmdopts = cmdopts;

		/* prepare invariant values */
		if (genwqe_get_slu_id(cd) <= 0x2) {
			*(__be64 *)&req->__asiv[0]  = cpu_to_be64(dma_addr);
			*(__be64 *)&req->__asiv[8]  = cpu_to_be64(tocopy);
			*(__be64 *)&req->__asiv[16] = cpu_to_be64(flash);
			*(__be32 *)&req->__asiv[24] = cpu_to_be32(0);
			req->__asiv[24]	= load->uid;
			*(__be32 *)&req->__asiv[28] = cpu_to_be32(crc);

			/* for simulation only */
			*(__be64 *)&req->__asiv[88] = cpu_to_be64(load->slu_id);
			*(__be64 *)&req->__asiv[96] = cpu_to_be64(load->app_id);
			req->asiv_length = 32; /* bytes included in crc calc */
		} else {	/* setup DDCB for ATS architecture */
			*(__be64 *)&req->asiv[0]  = cpu_to_be64(dma_addr);
			*(__be32 *)&req->asiv[8]  = cpu_to_be32(tocopy);
			*(__be32 *)&req->asiv[12] = cpu_to_be32(0); /* resvd */
			*(__be64 *)&req->asiv[16] = cpu_to_be64(flash);
			*(__be32 *)&req->asiv[24] = cpu_to_be32(load->uid<<24);
			*(__be32 *)&req->asiv[28] = cpu_to_be32(crc);

			/* for simulation only */
			*(__be64 *)&req->asiv[80] = cpu_to_be64(load->slu_id);
			*(__be64 *)&req->asiv[88] = cpu_to_be64(load->app_id);

			/* Rd only */
			req->ats = 0x4ULL << 44;
			req->asiv_length = 40; /* bytes included in crc calc */
		}
		req->asv_length  = 8;

		/* For Genwqe5 we get back the calculated CRC */
		*(u64 *)&req->asv[0] = 0ULL;			/* 0x80 */

		rc = __genwqe_execute_raw_ddcb(cd, req, filp->f_flags);

		load->retc = req->retc;
		load->attn = req->attn;
		load->progress = req->progress;

		if (rc < 0) {
			ddcb_requ_free(req);
			goto free_buffer;
		}

		if (req->retc != DDCB_RETC_COMPLETE) {
			rc = -EIO;
			ddcb_requ_free(req);
			goto free_buffer;
		}

		load->size  -= tocopy;
		flash += tocopy;
		buf += tocopy;
		blocks_to_flash--;
		ddcb_requ_free(req);
	}

 free_buffer:
	__genwqe_free_consistent(cd, FLASH_BLOCK, xbuf, dma_addr);
	return rc;
}
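
/*
 * Example (illustrative sketch, not part of the driver): issuing a
 * flash update from user-space. This requires privileges (see the
 * GENWQE_SLU_UPDATE handling in genwqe_ioctl() below); per the checks
 * above, the image buffer must be page aligned and its size a
 * multiple of 4 bytes. The image buffer and partition choice are
 * assumptions for illustration.
 *
 *   struct genwqe_bitstream load = { 0 };
 *
 *   load.data_addr = (unsigned long)image;  // page-aligned image buffer
 *   load.size      = image_size;            // multiple of 4 bytes
 *   load.partition = '0';                   // '0', '1' or 'v' (VPD)
 *
 *   rc = ioctl(fd, GENWQE_SLU_UPDATE, &load);
 *   // load.retc/load.attn/load.progress report the hardware status
 */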

static int do_flash_read(struct genwqe_file *cfile,
			 struct genwqe_bitstream *load)
{
	int rc, blocks_to_flash;
	dma_addr_t dma_addr;
	u64 flash = 0;
	size_t tocopy = 0;
	u8 __user *buf;
	u8 *xbuf;
	u8 cmdopts;
	struct genwqe_dev *cd = cfile->cd;
	struct file *filp = cfile->filp;
	struct pci_dev *pci_dev = cd->pci_dev;
	struct genwqe_ddcb_cmd *cmd;

	if ((load->size & 0x3) != 0)
		return -EINVAL;

	if (((unsigned long)(load->data_addr) & ~PAGE_MASK) != 0)
		return -EINVAL;

	/* FIXME Bits have changed for new service layer! */
	switch ((char)load->partition) {
	case '0':
		cmdopts = 0x12;
		break;		/* upload/part_0 */
	case '1':
		cmdopts = 0x1A;
		break;		/* upload/part_1 */
	case 'v':
		cmdopts = 0x0A;
		break;		/* upload/vpd */
	default:
		return -EINVAL;
	}

	buf = (u8 __user *)load->data_addr;
	xbuf = __genwqe_alloc_consistent(cd, FLASH_BLOCK, &dma_addr);
	if (xbuf == NULL)
		return -ENOMEM;

	blocks_to_flash = load->size / FLASH_BLOCK;
	while (load->size) {
		/*
		 * We must be 4 byte aligned. The buffer must be 0 appended
		 * to have defined values when calculating the CRC.
		 */
		tocopy = min_t(size_t, load->size, FLASH_BLOCK);

		dev_dbg(&pci_dev->dev,
			"[%s] DMA: %lx SZ: %ld %d\n",
			__func__, (unsigned long)dma_addr, tocopy,
			blocks_to_flash);

		/* prepare DDCB for SLU process */
		cmd = ddcb_requ_alloc();
		if (cmd == NULL) {
			rc = -ENOMEM;
			goto free_buffer;
		}
		cmd->cmd = SLCMD_MOVE_FLASH;
		cmd->cmdopts = cmdopts;

		/* prepare invariant values */
		if (genwqe_get_slu_id(cd) <= 0x2) {
			*(__be64 *)&cmd->__asiv[0]  = cpu_to_be64(dma_addr);
			*(__be64 *)&cmd->__asiv[8]  = cpu_to_be64(tocopy);
			*(__be64 *)&cmd->__asiv[16] = cpu_to_be64(flash);
			*(__be32 *)&cmd->__asiv[24] = cpu_to_be32(0);
			cmd->__asiv[24] = load->uid;
			*(__be32 *)&cmd->__asiv[28] = cpu_to_be32(0) /* CRC */;
			cmd->asiv_length = 32; /* bytes included in crc calc */
		} else {	/* setup DDCB for ATS architecture */
			*(__be64 *)&cmd->asiv[0]  = cpu_to_be64(dma_addr);
			*(__be32 *)&cmd->asiv[8]  = cpu_to_be32(tocopy);
			*(__be32 *)&cmd->asiv[12] = cpu_to_be32(0); /* resvd */
			*(__be64 *)&cmd->asiv[16] = cpu_to_be64(flash);
			*(__be32 *)&cmd->asiv[24] = cpu_to_be32(load->uid<<24);
			*(__be32 *)&cmd->asiv[28] = cpu_to_be32(0); /* CRC */

			/* rd/wr */
			cmd->ats = 0x5ULL << 44;
			cmd->asiv_length = 40; /* bytes included in crc calc */
		}
		cmd->asv_length  = 8;

		/* we only get back the calculated CRC */
		*(u64 *)&cmd->asv[0] = 0ULL;	/* 0x80 */

		rc = __genwqe_execute_raw_ddcb(cd, cmd, filp->f_flags);

		load->retc = cmd->retc;
		load->attn = cmd->attn;
		load->progress = cmd->progress;

		if ((rc < 0) && (rc != -EBADMSG)) {
			ddcb_requ_free(cmd);
			goto free_buffer;
		}

		rc = copy_to_user(buf, xbuf, tocopy);
		if (rc) {
			rc = -EFAULT;
			ddcb_requ_free(cmd);
			goto free_buffer;
		}

		/* We know that we can get retc 0x104 with CRC err */
		if (((cmd->retc == DDCB_RETC_FAULT) &&
		     (cmd->attn != 0x02)) ||  /* Normally ignore CRC error */
		    ((cmd->retc == DDCB_RETC_COMPLETE) &&
		     (cmd->attn != 0x00))) {  /* Everything was fine */
			rc = -EIO;
			ddcb_requ_free(cmd);
			goto free_buffer;
		}

		load->size  -= tocopy;
		flash += tocopy;
		buf += tocopy;
		blocks_to_flash--;
		ddcb_requ_free(cmd);
	}
	rc = 0;

 free_buffer:
	__genwqe_free_consistent(cd, FLASH_BLOCK, xbuf, dma_addr);
	return rc;
}

static int genwqe_pin_mem(struct genwqe_file *cfile, struct genwqe_mem *m)
{
	int rc;
	struct genwqe_dev *cd = cfile->cd;
	struct pci_dev *pci_dev = cfile->cd->pci_dev;
	struct dma_mapping *dma_map;
	unsigned long map_addr;
	unsigned long map_size;

	if ((m->addr == 0x0) || (m->size == 0))
		return -EINVAL;
	if (m->size > ULONG_MAX - PAGE_SIZE - (m->addr & ~PAGE_MASK))
		return -EINVAL;

	map_addr = (m->addr & PAGE_MASK);
	map_size = round_up(m->size + (m->addr & ~PAGE_MASK), PAGE_SIZE);

	dma_map = kzalloc(sizeof(struct dma_mapping), GFP_KERNEL);
	if (dma_map == NULL)
		return -ENOMEM;

	genwqe_mapping_init(dma_map, GENWQE_MAPPING_SGL_PINNED);
	rc = genwqe_user_vmap(cd, dma_map, (void *)map_addr, map_size);
	if (rc != 0) {
		dev_err(&pci_dev->dev,
			"[%s] genwqe_user_vmap rc=%d\n", __func__, rc);
		kfree(dma_map);
		return rc;
	}

	genwqe_add_pin(cfile, dma_map);
	return 0;
}

static int genwqe_unpin_mem(struct genwqe_file *cfile, struct genwqe_mem *m)
{
	struct genwqe_dev *cd = cfile->cd;
	struct dma_mapping *dma_map;
	unsigned long map_addr;
	unsigned long map_size;

	if (m->addr == 0x0)
		return -EINVAL;

	map_addr = (m->addr & PAGE_MASK);
	map_size = round_up(m->size + (m->addr & ~PAGE_MASK), PAGE_SIZE);

	dma_map = genwqe_search_pin(cfile, map_addr, map_size, NULL);
	if (dma_map == NULL)
		return -ENOENT;

	genwqe_del_pin(cfile, dma_map);
	genwqe_user_vunmap(cd, dma_map);
	kfree(dma_map);
	return 0;
}
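
/*
 * Example (illustrative sketch, not part of the driver): pinning a
 * user buffer up front so repeated DDCB executions can reuse the DMA
 * mapping instead of setting it up per request. Only the fields used
 * by genwqe_pin_mem()/genwqe_unpin_mem() above are shown; buf and
 * bufsize are assumptions for illustration.
 *
 *   struct genwqe_mem m = {
 *           .addr = (unsigned long)buf,
 *           .size = bufsize,
 *   };
 *
 *   ioctl(fd, GENWQE_PIN_MEM, &m);
 *   // ... execute DDCBs referencing buf ...
 *   ioctl(fd, GENWQE_UNPIN_MEM, &m);
 */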

/**
 * ddcb_cmd_cleanup() - Remove dynamically created fixup entries
 * @cfile:     descriptor of opened file
 * @req:       DDCB work request
 *
 * Only if there are any. Pinnings are not removed.
 */
static int ddcb_cmd_cleanup(struct genwqe_file *cfile, struct ddcb_requ *req)
{
	unsigned int i;
	struct dma_mapping *dma_map;
	struct genwqe_dev *cd = cfile->cd;

	for (i = 0; i < DDCB_FIXUPS; i++) {
		dma_map = &req->dma_mappings[i];

		if (dma_mapping_used(dma_map)) {
			__genwqe_del_mapping(cfile, dma_map);
			genwqe_user_vunmap(cd, dma_map);
		}
		if (req->sgls[i].sgl != NULL)
			genwqe_free_sync_sgl(cd, &req->sgls[i]);
	}
	return 0;
}

/**
 * ddcb_cmd_fixups() - Establish DMA fixups/sglists for user memory references
 * @cfile:     descriptor of opened file
 * @req:       DDCB work request
 *
 * Before the DDCB gets executed we need to handle the fixups. We
 * replace the user-space addresses with DMA addresses or do
 * additional setup work, e.g. generating a scatter-gather list which
 * is used to describe the memory referred to in the fixup.
 */
static int ddcb_cmd_fixups(struct genwqe_file *cfile, struct ddcb_requ *req)
{
	int rc;
	unsigned int asiv_offs, i;
	struct genwqe_dev *cd = cfile->cd;
	struct genwqe_ddcb_cmd *cmd = &req->cmd;
	struct dma_mapping *m;

	for (i = 0, asiv_offs = 0x00; asiv_offs <= 0x58;
	     i++, asiv_offs += 0x08) {

		u64 u_addr;
		dma_addr_t d_addr;
		u32 u_size = 0;
		u64 ats_flags;

		ats_flags = ATS_GET_FLAGS(cmd->ats, asiv_offs);

		switch (ats_flags) {

		case ATS_TYPE_DATA:
			break;	/* nothing to do here */

		case ATS_TYPE_FLAT_RDWR:
		case ATS_TYPE_FLAT_RD: {
			u_addr = be64_to_cpu(*((__be64 *)&cmd->
					       asiv[asiv_offs]));
			u_size = be32_to_cpu(*((__be32 *)&cmd->
					       asiv[asiv_offs + 0x08]));

			/*
			 * No data available. Ignore u_addr in this
			 * case and set addr to 0. Hardware must not
			 * fetch the buffer.
			 */
			if (u_size == 0x0) {
				*((__be64 *)&cmd->asiv[asiv_offs]) =
					cpu_to_be64(0x0);
				break;
			}

			m = __genwqe_search_mapping(cfile, u_addr, u_size,
						   &d_addr, NULL);
			if (m == NULL) {
				rc = -EFAULT;
				goto err_out;
			}

			*((__be64 *)&cmd->asiv[asiv_offs]) =
				cpu_to_be64(d_addr);
			break;
		}

		case ATS_TYPE_SGL_RDWR:
		case ATS_TYPE_SGL_RD: {
			int page_offs;

			u_addr = be64_to_cpu(*((__be64 *)
					       &cmd->asiv[asiv_offs]));
			u_size = be32_to_cpu(*((__be32 *)
					       &cmd->asiv[asiv_offs + 0x08]));

			/*
			 * No data available. Ignore u_addr in this
			 * case and set addr to 0. Hardware must not
			 * fetch the empty sgl.
			 */
			if (u_size == 0x0) {
				*((__be64 *)&cmd->asiv[asiv_offs]) =
					cpu_to_be64(0x0);
				break;
			}

			m = genwqe_search_pin(cfile, u_addr, u_size, NULL);
			if (m != NULL) {
				page_offs = (u_addr -
					     (u64)m->u_vaddr)/PAGE_SIZE;
			} else {
				m = &req->dma_mappings[i];

				genwqe_mapping_init(m,
						    GENWQE_MAPPING_SGL_TEMP);

				if (ats_flags == ATS_TYPE_SGL_RD)
					m->write = 0;

				rc = genwqe_user_vmap(cd, m, (void *)u_addr,
						      u_size);
				if (rc != 0)
					goto err_out;

				__genwqe_add_mapping(cfile, m);
				page_offs = 0;
			}

			/* create genwqe style scatter gather list */
			rc = genwqe_alloc_sync_sgl(cd, &req->sgls[i],
						   (void __user *)u_addr,
						   u_size, m->write);
			if (rc != 0)
				goto err_out;

			genwqe_setup_sgl(cd, &req->sgls[i],
					 &m->dma_list[page_offs]);

			*((__be64 *)&cmd->asiv[asiv_offs]) =
				cpu_to_be64(req->sgls[i].sgl_dma_addr);

			break;
		}
		default:
			rc = -EINVAL;
			goto err_out;
		}
	}
	return 0;

 err_out:
	ddcb_cmd_cleanup(cfile, req);
	return rc;
}

/**
 * genwqe_execute_ddcb() - Execute DDCB using userspace address fixups
 * @cfile:     descriptor of opened file
 * @cmd:       DDCB command to execute
 *
 * The code will build up the translation tables or lookup the
 * contiguous memory allocation table to find the right translations
 * and DMA addresses.
 */
static int genwqe_execute_ddcb(struct genwqe_file *cfile,
			       struct genwqe_ddcb_cmd *cmd)
{
	int rc;
	struct genwqe_dev *cd = cfile->cd;
	struct file *filp = cfile->filp;
	struct ddcb_requ *req = container_of(cmd, struct ddcb_requ, cmd);

	rc = ddcb_cmd_fixups(cfile, req);
	if (rc != 0)
		return rc;

	rc = __genwqe_execute_raw_ddcb(cd, cmd, filp->f_flags);
	ddcb_cmd_cleanup(cfile, req);
	return rc;
}

static int do_execute_ddcb(struct genwqe_file *cfile,
			   unsigned long arg, int raw)
{
	int rc;
	struct genwqe_ddcb_cmd *cmd;
	struct genwqe_dev *cd = cfile->cd;
	struct file *filp = cfile->filp;

	cmd = ddcb_requ_alloc();
	if (cmd == NULL)
		return -ENOMEM;

	if (copy_from_user(cmd, (void __user *)arg, sizeof(*cmd))) {
		ddcb_requ_free(cmd);
		return -EFAULT;
	}

	if (!raw)
		rc = genwqe_execute_ddcb(cfile, cmd);
	else
		rc = __genwqe_execute_raw_ddcb(cd, cmd, filp->f_flags);

	/*
	 * Copy back only the modified fields. Do not copy the ASIV
	 * back, since the driver modified our copy of it.
	 */
	if (copy_to_user((void __user *)arg, cmd,
			 sizeof(*cmd) - DDCB_ASIV_LENGTH)) {
		ddcb_requ_free(cmd);
		return -EFAULT;
	}

	ddcb_requ_free(cmd);
	return rc;
}
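
/*
 * Example (illustrative sketch, not part of the driver): submitting a
 * DDCB from user-space and waiting for completion. The command opcode
 * and ASIV payload are application specific and shown here only as
 * placeholders (MY_APP_COMMAND is hypothetical); retc/attn/progress
 * are filled in by the code above when the request returns.
 *
 *   struct genwqe_ddcb_cmd cmd = { 0 };
 *
 *   cmd.cmd = MY_APP_COMMAND;        // hypothetical application opcode
 *   // ... fill cmd.asiv, cmd.asiv_length, cmd.asv_length, cmd.ats ...
 *
 *   rc = ioctl(fd, GENWQE_EXECUTE_DDCB, &cmd);
 *   if (rc == 0 && cmd.retc == DDCB_RETC_COMPLETE)
 *           ; // results are in cmd.asv
 */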

/**
 * genwqe_ioctl() - IO control
 * @filp:       file handle
 * @cmd:        command identifier (passed from user)
 * @arg:        argument (passed from user)
 *
 * Return: 0 on success
 */
static long genwqe_ioctl(struct file *filp, unsigned int cmd,
			 unsigned long arg)
{
	int rc = 0;
	struct genwqe_file *cfile = (struct genwqe_file *)filp->private_data;
	struct genwqe_dev *cd = cfile->cd;
	struct pci_dev *pci_dev = cd->pci_dev;
	struct genwqe_reg_io __user *io;
	u64 val;
	u32 reg_offs;

	/* Return -EIO if card hit EEH */
	if (pci_channel_offline(pci_dev))
		return -EIO;

	if (_IOC_TYPE(cmd) != GENWQE_IOC_CODE)
		return -EINVAL;

	switch (cmd) {

	case GENWQE_GET_CARD_STATE:
		put_user(cd->card_state, (enum genwqe_card_state __user *)arg);
		return 0;

		/* Register access */
	case GENWQE_READ_REG64: {
		io = (struct genwqe_reg_io __user *)arg;

		if (get_user(reg_offs, &io->num))
			return -EFAULT;

		if ((reg_offs >= cd->mmio_len) || (reg_offs & 0x7))
			return -EINVAL;

		val = __genwqe_readq(cd, reg_offs);
		put_user(val, &io->val64);
		return 0;
	}

	case GENWQE_WRITE_REG64: {
		io = (struct genwqe_reg_io __user *)arg;

		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;

		if ((filp->f_flags & O_ACCMODE) == O_RDONLY)
			return -EPERM;

		if (get_user(reg_offs, &io->num))
			return -EFAULT;

		if ((reg_offs >= cd->mmio_len) || (reg_offs & 0x7))
			return -EINVAL;

		if (get_user(val, &io->val64))
			return -EFAULT;

		__genwqe_writeq(cd, reg_offs, val);
		return 0;
	}

	case GENWQE_READ_REG32: {
		io = (struct genwqe_reg_io __user *)arg;

		if (get_user(reg_offs, &io->num))
			return -EFAULT;

		if ((reg_offs >= cd->mmio_len) || (reg_offs & 0x3))
			return -EINVAL;

		val = __genwqe_readl(cd, reg_offs);
		put_user(val, &io->val64);
		return 0;
	}

	case GENWQE_WRITE_REG32: {
		io = (struct genwqe_reg_io __user *)arg;

		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;

		if ((filp->f_flags & O_ACCMODE) == O_RDONLY)
			return -EPERM;

		if (get_user(reg_offs, &io->num))
			return -EFAULT;

		if ((reg_offs >= cd->mmio_len) || (reg_offs & 0x3))
			return -EINVAL;

		if (get_user(val, &io->val64))
			return -EFAULT;

		__genwqe_writel(cd, reg_offs, val);
		return 0;
	}

		/* Flash update/reading */
	case GENWQE_SLU_UPDATE: {
		struct genwqe_bitstream load;

		if (!genwqe_is_privileged(cd))
			return -EPERM;

		if ((filp->f_flags & O_ACCMODE) == O_RDONLY)
			return -EPERM;

		if (copy_from_user(&load, (void __user *)arg,
				   sizeof(load)))
			return -EFAULT;

		rc = do_flash_update(cfile, &load);

		if (copy_to_user((void __user *)arg, &load, sizeof(load)))
			return -EFAULT;

		return rc;
	}

	case GENWQE_SLU_READ: {
		struct genwqe_bitstream load;

		if (!genwqe_is_privileged(cd))
			return -EPERM;

		if (genwqe_flash_readback_fails(cd))
			return -ENOSPC;	 /* known to fail for old versions */

		if (copy_from_user(&load, (void __user *)arg, sizeof(load)))
			return -EFAULT;

		rc = do_flash_read(cfile, &load);

		if (copy_to_user((void __user *)arg, &load, sizeof(load)))
			return -EFAULT;

		return rc;
	}

		/* memory pinning and unpinning */
	case GENWQE_PIN_MEM: {
		struct genwqe_mem m;

		if (copy_from_user(&m, (void __user *)arg, sizeof(m)))
			return -EFAULT;

		return genwqe_pin_mem(cfile, &m);
	}

	case GENWQE_UNPIN_MEM: {
		struct genwqe_mem m;

		if (copy_from_user(&m, (void __user *)arg, sizeof(m)))
			return -EFAULT;

		return genwqe_unpin_mem(cfile, &m);
	}

		/* launch a DDCB and wait for completion */
	case GENWQE_EXECUTE_DDCB:
		return do_execute_ddcb(cfile, arg, 0);

	case GENWQE_EXECUTE_RAW_DDCB: {

		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;

		return do_execute_ddcb(cfile, arg, 1);
	}

	default:
		return -EINVAL;
	}

	return rc;
}
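
/*
 * Example (illustrative sketch, not part of the driver): reading a
 * 64-bit card register through the ioctl interface above. The
 * register offset is an assumption for illustration; per the checks
 * in genwqe_ioctl(), it must be 8-byte aligned and below the MMIO
 * length.
 *
 *   struct genwqe_reg_io io = { .num = 0x0 };   // register offset
 *
 *   if (ioctl(fd, GENWQE_READ_REG64, &io) == 0)
 *           printf("reg 0x%llx = 0x%llx\n",
 *                  (unsigned long long)io.num,
 *                  (unsigned long long)io.val64);
 */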

#if defined(CONFIG_COMPAT)
/**
 * genwqe_compat_ioctl() - Compatibility ioctl
 *
 * Called whenever a 32-bit process running under a 64-bit kernel
 * performs an ioctl on /dev/genwqe<n>_card.
 *
 * @filp:        file pointer.
 * @cmd:         command.
 * @arg:         user argument.
 * Return:       zero on success or negative number on failure.
 */
static long genwqe_compat_ioctl(struct file *filp, unsigned int cmd,
				unsigned long arg)
{
	return genwqe_ioctl(filp, cmd, arg);
}
#endif /* defined(CONFIG_COMPAT) */

static const struct file_operations genwqe_fops = {
	.owner		= THIS_MODULE,
	.open		= genwqe_open,
	.fasync		= genwqe_fasync,
	.mmap		= genwqe_mmap,
	.unlocked_ioctl	= genwqe_ioctl,
#if defined(CONFIG_COMPAT)
	.compat_ioctl   = genwqe_compat_ioctl,
#endif
	.release	= genwqe_release,
};

static int genwqe_device_initialized(struct genwqe_dev *cd)
{
	return cd->dev != NULL;
}

/**
 * genwqe_device_create() - Create and configure genwqe char device
 * @cd:      genwqe device descriptor
 *
 * This function must be called before we create any more genwqe
 * character devices, because it is allocating the major and minor
 * number which are supposed to be used by the client drivers.
 */
int genwqe_device_create(struct genwqe_dev *cd)
{
	int rc;
	struct pci_dev *pci_dev = cd->pci_dev;

	/*
	 * Here starts the individual setup per client. It must
	 * initialize its own cdev data structure with its own fops.
	 * The appropriate devnum needs to be created. The ranges must
	 * not overlap.
	 */
	rc = alloc_chrdev_region(&cd->devnum_genwqe, 0,
				 GENWQE_MAX_MINOR, GENWQE_DEVNAME);
	if (rc < 0) {
		dev_err(&pci_dev->dev, "err: alloc_chrdev_region failed\n");
		goto err_dev;
	}

	cdev_init(&cd->cdev_genwqe, &genwqe_fops);
	cd->cdev_genwqe.owner = THIS_MODULE;

	rc = cdev_add(&cd->cdev_genwqe, cd->devnum_genwqe, 1);
	if (rc < 0) {
		dev_err(&pci_dev->dev, "err: cdev_add failed\n");
		goto err_add;
	}

	/*
	 * Finally the device in /dev/... must be created. The rule is
	 * to use card%d_clientname for each created device.
	 */
	cd->dev = device_create_with_groups(cd->class_genwqe,
					    &cd->pci_dev->dev,
					    cd->devnum_genwqe, cd,
					    genwqe_attribute_groups,
					    GENWQE_DEVNAME "%u_card",
					    cd->card_idx);
	if (IS_ERR(cd->dev)) {
		rc = PTR_ERR(cd->dev);
		goto err_cdev;
	}

	genwqe_init_debugfs(cd);

	return 0;

 err_cdev:
	cdev_del(&cd->cdev_genwqe);
 err_add:
	unregister_chrdev_region(cd->devnum_genwqe, GENWQE_MAX_MINOR);
 err_dev:
	cd->dev = NULL;
	return rc;
}

static int genwqe_inform_and_stop_processes(struct genwqe_dev *cd)
{
	int rc;
	unsigned int i;
	struct pci_dev *pci_dev = cd->pci_dev;

	if (!genwqe_open_files(cd))
		return 0;

	dev_warn(&pci_dev->dev, "[%s] send SIGIO and wait ...\n", __func__);

	rc = genwqe_kill_fasync(cd, SIGIO);
	if (rc > 0) {
		/* give kill_timeout seconds to close file descriptors ... */
		for (i = 0; (i < GENWQE_KILL_TIMEOUT) &&
			     genwqe_open_files(cd); i++) {
			dev_info(&pci_dev->dev, "  %d sec ...", i);

			cond_resched();
			msleep(1000);
		}

		/* if no open files we can safely continue, else ... */
		if (!genwqe_open_files(cd))
			return 0;

		dev_warn(&pci_dev->dev,
			 "[%s] send SIGKILL and wait ...\n", __func__);

		rc = genwqe_terminate(cd);
		if (rc) {
			/* give kill_timeout more seconds to end processes */
			for (i = 0; (i < GENWQE_KILL_TIMEOUT) &&
				     genwqe_open_files(cd); i++) {
				dev_warn(&pci_dev->dev, "  %d sec ...", i);

				cond_resched();
				msleep(1000);
			}
		}
	}
	return 0;
}

/**
 * genwqe_device_remove() - Remove genwqe's char device
 * @cd:      genwqe device descriptor
 *
 * This function must be called after the client devices are removed
 * because it will free the major/minor number range for the genwqe
 * drivers.
 *
 * This function must be robust enough to be called twice.
 */
int genwqe_device_remove(struct genwqe_dev *cd)
{
	int rc;
	struct pci_dev *pci_dev = cd->pci_dev;

	if (!genwqe_device_initialized(cd))
		return 1;

	genwqe_inform_and_stop_processes(cd);

	/*
	 * We currently do wait until all file descriptors are
	 * closed. This leads to a problem when we abort the
	 * application which will decrease this reference from
	 * 1/unused to 0/illegal and not from 2/used 1/empty.
	 */
	rc = kref_read(&cd->cdev_genwqe.kobj.kref);
	if (rc != 1) {
		dev_err(&pci_dev->dev,
			"[%s] err: cdev_genwqe...refcount=%d\n", __func__, rc);
		panic("Fatal err: cannot free resources with pending references!");
	}

	genqwe_exit_debugfs(cd);
	device_destroy(cd->class_genwqe, cd->devnum_genwqe);
	cdev_del(&cd->cdev_genwqe);
	unregister_chrdev_region(cd->devnum_genwqe, GENWQE_MAX_MINOR);
	cd->dev = NULL;

	return 0;
}