v3.1
  1/* sunvdc.c: Sun LDOM Virtual Disk Client.
  2 *
  3 * Copyright (C) 2007, 2008 David S. Miller <davem@davemloft.net>
  4 */
  5
  6#include <linux/module.h>
  7#include <linux/kernel.h>
  8#include <linux/types.h>
  9#include <linux/blkdev.h>
 10#include <linux/hdreg.h>
 11#include <linux/genhd.h>
 12#include <linux/slab.h>
 13#include <linux/spinlock.h>
 14#include <linux/completion.h>
 15#include <linux/delay.h>
 16#include <linux/init.h>
 17#include <linux/list.h>
 18#include <linux/scatterlist.h>
 19
 20#include <asm/vio.h>
 21#include <asm/ldc.h>
 22
 23#define DRV_MODULE_NAME		"sunvdc"
 24#define PFX DRV_MODULE_NAME	": "
 25#define DRV_MODULE_VERSION	"1.0"
 26#define DRV_MODULE_RELDATE	"June 25, 2007"
 27
 28static char version[] __devinitdata =
 29	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
 30MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
 31MODULE_DESCRIPTION("Sun LDOM virtual disk client driver");
 32MODULE_LICENSE("GPL");
 33MODULE_VERSION(DRV_MODULE_VERSION);
 34
 35#define VDC_TX_RING_SIZE	256
 36
 37#define WAITING_FOR_LINK_UP	0x01
 38#define WAITING_FOR_TX_SPACE	0x02
 39#define WAITING_FOR_GEN_CMD	0x04
 40#define WAITING_FOR_ANY		-1
 41
 42struct vdc_req_entry {
 43	struct request		*req;
 44};
 45
 46struct vdc_port {
 47	struct vio_driver_state	vio;
 48
 49	struct gendisk		*disk;
 50
 51	struct vdc_completion	*cmp;
 52
 53	u64			req_id;
 54	u64			seq;
 55	struct vdc_req_entry	rq_arr[VDC_TX_RING_SIZE];
 56
 57	unsigned long		ring_cookies;
 58
 59	u64			max_xfer_size;
 60	u32			vdisk_block_size;
 61
 62	/* The server fills these in for us in the disk attribute
 63	 * ACK packet.
 64	 */
 65	u64			operations;
 66	u32			vdisk_size;
 67	u8			vdisk_type;
 68
 69	char			disk_name[32];
 70
 71	struct vio_disk_geom	geom;
 72	struct vio_disk_vtoc	label;
 73};
 74
 75static inline struct vdc_port *to_vdc_port(struct vio_driver_state *vio)
 76{
 77	return container_of(vio, struct vdc_port, vio);
 78}
 79
 80/* Ordered from largest major to lowest */
 81static struct vio_version vdc_versions[] = {
 82	{ .major = 1, .minor = 0 },
 83};
 84
 85#define VDCBLK_NAME	"vdisk"
 86static int vdc_major;
 87#define PARTITION_SHIFT	3
 88
 89static inline u32 vdc_tx_dring_avail(struct vio_dring_state *dr)
 90{
 91	return vio_dring_avail(dr, VDC_TX_RING_SIZE);
 92}
 93
 94static int vdc_getgeo(struct block_device *bdev, struct hd_geometry *geo)
 95{
 96	struct gendisk *disk = bdev->bd_disk;
 97	struct vdc_port *port = disk->private_data;
 98
 99	geo->heads = (u8) port->geom.num_hd;
100	geo->sectors = (u8) port->geom.num_sec;
101	geo->cylinders = port->geom.num_cyl;
102
103	return 0;
104}
105
106static const struct block_device_operations vdc_fops = {
107	.owner		= THIS_MODULE,
108	.getgeo		= vdc_getgeo,
109};
110
111static void vdc_finish(struct vio_driver_state *vio, int err, int waiting_for)
112{
113	if (vio->cmp &&
114	    (waiting_for == -1 ||
115	     vio->cmp->waiting_for == waiting_for)) {
116		vio->cmp->err = err;
117		complete(&vio->cmp->com);
118		vio->cmp = NULL;
119	}
120}
121
122static void vdc_handshake_complete(struct vio_driver_state *vio)
123{
124	vdc_finish(vio, 0, WAITING_FOR_LINK_UP);
125}
126
127static int vdc_handle_unknown(struct vdc_port *port, void *arg)
128{
129	struct vio_msg_tag *pkt = arg;
130
131	printk(KERN_ERR PFX "Received unknown msg [%02x:%02x:%04x:%08x]\n",
132	       pkt->type, pkt->stype, pkt->stype_env, pkt->sid);
133	printk(KERN_ERR PFX "Resetting connection.\n");
134
135	ldc_disconnect(port->vio.lp);
136
137	return -ECONNRESET;
138}
139
140static int vdc_send_attr(struct vio_driver_state *vio)
141{
142	struct vdc_port *port = to_vdc_port(vio);
143	struct vio_disk_attr_info pkt;
144
145	memset(&pkt, 0, sizeof(pkt));
146
147	pkt.tag.type = VIO_TYPE_CTRL;
148	pkt.tag.stype = VIO_SUBTYPE_INFO;
149	pkt.tag.stype_env = VIO_ATTR_INFO;
150	pkt.tag.sid = vio_send_sid(vio);
151
152	pkt.xfer_mode = VIO_DRING_MODE;
153	pkt.vdisk_block_size = port->vdisk_block_size;
154	pkt.max_xfer_size = port->max_xfer_size;
155
156	viodbg(HS, "SEND ATTR xfer_mode[0x%x] blksz[%u] max_xfer[%llu]\n",
157	       pkt.xfer_mode, pkt.vdisk_block_size, pkt.max_xfer_size);
158
159	return vio_ldc_send(&port->vio, &pkt, sizeof(pkt));
160}
161
162static int vdc_handle_attr(struct vio_driver_state *vio, void *arg)
163{
164	struct vdc_port *port = to_vdc_port(vio);
165	struct vio_disk_attr_info *pkt = arg;
166
167	viodbg(HS, "GOT ATTR stype[0x%x] ops[%llx] disk_size[%llu] disk_type[%x] "
168	       "xfer_mode[0x%x] blksz[%u] max_xfer[%llu]\n",
169	       pkt->tag.stype, pkt->operations,
170	       pkt->vdisk_size, pkt->vdisk_type,
171	       pkt->xfer_mode, pkt->vdisk_block_size,
172	       pkt->max_xfer_size);
173
174	if (pkt->tag.stype == VIO_SUBTYPE_ACK) {
175		switch (pkt->vdisk_type) {
176		case VD_DISK_TYPE_DISK:
177		case VD_DISK_TYPE_SLICE:
178			break;
179
180		default:
181			printk(KERN_ERR PFX "%s: Bogus vdisk_type 0x%x\n",
182			       vio->name, pkt->vdisk_type);
183			return -ECONNRESET;
184		}
185
186		if (pkt->vdisk_block_size > port->vdisk_block_size) {
187			printk(KERN_ERR PFX "%s: BLOCK size increased "
188			       "%u --> %u\n",
189			       vio->name,
190			       port->vdisk_block_size, pkt->vdisk_block_size);
191			return -ECONNRESET;
192		}
193
194		port->operations = pkt->operations;
195		port->vdisk_size = pkt->vdisk_size;
196		port->vdisk_type = pkt->vdisk_type;
197		if (pkt->max_xfer_size < port->max_xfer_size)
198			port->max_xfer_size = pkt->max_xfer_size;
199		port->vdisk_block_size = pkt->vdisk_block_size;
200		return 0;
201	} else {
202		printk(KERN_ERR PFX "%s: Attribute NACK\n", vio->name);
203
204		return -ECONNRESET;
205	}
206}
207
208static void vdc_end_special(struct vdc_port *port, struct vio_disk_desc *desc)
209{
210	int err = desc->status;
211
212	vdc_finish(&port->vio, -err, WAITING_FOR_GEN_CMD);
213}
214
215static void vdc_end_one(struct vdc_port *port, struct vio_dring_state *dr,
216			unsigned int index)
217{
218	struct vio_disk_desc *desc = vio_dring_entry(dr, index);
219	struct vdc_req_entry *rqe = &port->rq_arr[index];
220	struct request *req;
221
222	if (unlikely(desc->hdr.state != VIO_DESC_DONE))
223		return;
224
225	ldc_unmap(port->vio.lp, desc->cookies, desc->ncookies);
226	desc->hdr.state = VIO_DESC_FREE;
227	dr->cons = (index + 1) & (VDC_TX_RING_SIZE - 1);
228
229	req = rqe->req;
230	if (req == NULL) {
231		vdc_end_special(port, desc);
232		return;
233	}
234
235	rqe->req = NULL;
236
237	__blk_end_request(req, (desc->status ? -EIO : 0), desc->size);
238
239	if (blk_queue_stopped(port->disk->queue))
240		blk_start_queue(port->disk->queue);
241}
242
243static int vdc_ack(struct vdc_port *port, void *msgbuf)
244{
245	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
246	struct vio_dring_data *pkt = msgbuf;
247
248	if (unlikely(pkt->dring_ident != dr->ident ||
249		     pkt->start_idx != pkt->end_idx ||
250		     pkt->start_idx >= VDC_TX_RING_SIZE))
251		return 0;
252
253	vdc_end_one(port, dr, pkt->start_idx);
254
255	return 0;
256}
257
258static int vdc_nack(struct vdc_port *port, void *msgbuf)
259{
260	/* XXX Implement me XXX */
261	return 0;
262}
263
264static void vdc_event(void *arg, int event)
265{
266	struct vdc_port *port = arg;
267	struct vio_driver_state *vio = &port->vio;
268	unsigned long flags;
269	int err;
270
271	spin_lock_irqsave(&vio->lock, flags);
272
273	if (unlikely(event == LDC_EVENT_RESET ||
274		     event == LDC_EVENT_UP)) {
275		vio_link_state_change(vio, event);
276		spin_unlock_irqrestore(&vio->lock, flags);
277		return;
278	}
279
280	if (unlikely(event != LDC_EVENT_DATA_READY)) {
281		printk(KERN_WARNING PFX "Unexpected LDC event %d\n", event);
282		spin_unlock_irqrestore(&vio->lock, flags);
283		return;
284	}
285
286	err = 0;
287	while (1) {
288		union {
289			struct vio_msg_tag tag;
290			u64 raw[8];
291		} msgbuf;
292
293		err = ldc_read(vio->lp, &msgbuf, sizeof(msgbuf));
294		if (unlikely(err < 0)) {
295			if (err == -ECONNRESET)
296				vio_conn_reset(vio);
297			break;
298		}
299		if (err == 0)
300			break;
301		viodbg(DATA, "TAG [%02x:%02x:%04x:%08x]\n",
302		       msgbuf.tag.type,
303		       msgbuf.tag.stype,
304		       msgbuf.tag.stype_env,
305		       msgbuf.tag.sid);
306		err = vio_validate_sid(vio, &msgbuf.tag);
307		if (err < 0)
308			break;
309
310		if (likely(msgbuf.tag.type == VIO_TYPE_DATA)) {
311			if (msgbuf.tag.stype == VIO_SUBTYPE_ACK)
312				err = vdc_ack(port, &msgbuf);
313			else if (msgbuf.tag.stype == VIO_SUBTYPE_NACK)
314				err = vdc_nack(port, &msgbuf);
315			else
316				err = vdc_handle_unknown(port, &msgbuf);
317		} else if (msgbuf.tag.type == VIO_TYPE_CTRL) {
318			err = vio_control_pkt_engine(vio, &msgbuf);
319		} else {
320			err = vdc_handle_unknown(port, &msgbuf);
321		}
322		if (err < 0)
323			break;
324	}
325	if (err < 0)
326		vdc_finish(&port->vio, err, WAITING_FOR_ANY);
327	spin_unlock_irqrestore(&vio->lock, flags);
328}
329
330static int __vdc_tx_trigger(struct vdc_port *port)
331{
332	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
333	struct vio_dring_data hdr = {
334		.tag = {
335			.type		= VIO_TYPE_DATA,
336			.stype		= VIO_SUBTYPE_INFO,
337			.stype_env	= VIO_DRING_DATA,
338			.sid		= vio_send_sid(&port->vio),
339		},
340		.dring_ident		= dr->ident,
341		.start_idx		= dr->prod,
342		.end_idx		= dr->prod,
343	};
344	int err, delay;
345
346	hdr.seq = dr->snd_nxt;
347	delay = 1;
348	do {
349		err = vio_ldc_send(&port->vio, &hdr, sizeof(hdr));
350		if (err > 0) {
351			dr->snd_nxt++;
352			break;
353		}
354		udelay(delay);
355		if ((delay <<= 1) > 128)
356			delay = 128;
357	} while (err == -EAGAIN);
358
359	return err;
360}
361
362static int __send_request(struct request *req)
363{
364	struct vdc_port *port = req->rq_disk->private_data;
365	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
366	struct scatterlist sg[port->ring_cookies];
367	struct vdc_req_entry *rqe;
368	struct vio_disk_desc *desc;
369	unsigned int map_perm;
370	int nsg, err, i;
371	u64 len;
372	u8 op;
373
374	map_perm = LDC_MAP_SHADOW | LDC_MAP_DIRECT | LDC_MAP_IO;
375
376	if (rq_data_dir(req) == READ) {
377		map_perm |= LDC_MAP_W;
378		op = VD_OP_BREAD;
379	} else {
380		map_perm |= LDC_MAP_R;
381		op = VD_OP_BWRITE;
382	}
383
384	sg_init_table(sg, port->ring_cookies);
385	nsg = blk_rq_map_sg(req->q, req, sg);
386
387	len = 0;
388	for (i = 0; i < nsg; i++)
389		len += sg[i].length;
390
391	if (unlikely(vdc_tx_dring_avail(dr) < 1)) {
392		blk_stop_queue(port->disk->queue);
393		err = -ENOMEM;
394		goto out;
395	}
396
397	desc = vio_dring_cur(dr);
398
399	err = ldc_map_sg(port->vio.lp, sg, nsg,
400			 desc->cookies, port->ring_cookies,
401			 map_perm);
402	if (err < 0) {
403		printk(KERN_ERR PFX "ldc_map_sg() failure, err=%d.\n", err);
404		return err;
405	}
406
407	rqe = &port->rq_arr[dr->prod];
408	rqe->req = req;
409
410	desc->hdr.ack = VIO_ACK_ENABLE;
411	desc->req_id = port->req_id;
412	desc->operation = op;
413	if (port->vdisk_type == VD_DISK_TYPE_DISK) {
414		desc->slice = 0xff;
415	} else {
416		desc->slice = 0;
417	}
418	desc->status = ~0;
419	desc->offset = (blk_rq_pos(req) << 9) / port->vdisk_block_size;
420	desc->size = len;
421	desc->ncookies = err;
422
423	/* This has to be a non-SMP write barrier because we are writing
424	 * to memory which is shared with the peer LDOM.
425	 */
426	wmb();
427	desc->hdr.state = VIO_DESC_READY;
428
429	err = __vdc_tx_trigger(port);
430	if (err < 0) {
431		printk(KERN_ERR PFX "vdc_tx_trigger() failure, err=%d\n", err);
432	} else {
433		port->req_id++;
434		dr->prod = (dr->prod + 1) & (VDC_TX_RING_SIZE - 1);
435	}
436out:
437
438	return err;
439}
440
441static void do_vdc_request(struct request_queue *q)
442{
443	while (1) {
444		struct request *req = blk_fetch_request(q);
445
446		if (!req)
447			break;
448
449		if (__send_request(req) < 0)
450			__blk_end_request_all(req, -EIO);
451	}
452}
453
454static int generic_request(struct vdc_port *port, u8 op, void *buf, int len)
455{
456	struct vio_dring_state *dr;
457	struct vio_completion comp;
458	struct vio_disk_desc *desc;
459	unsigned int map_perm;
460	unsigned long flags;
461	int op_len, err;
462	void *req_buf;
463
464	if (!(((u64)1 << ((u64)op - 1)) & port->operations))
465		return -EOPNOTSUPP;
466
467	switch (op) {
468	case VD_OP_BREAD:
469	case VD_OP_BWRITE:
470	default:
471		return -EINVAL;
472
473	case VD_OP_FLUSH:
474		op_len = 0;
475		map_perm = 0;
476		break;
477
478	case VD_OP_GET_WCE:
479		op_len = sizeof(u32);
480		map_perm = LDC_MAP_W;
481		break;
482
483	case VD_OP_SET_WCE:
484		op_len = sizeof(u32);
485		map_perm = LDC_MAP_R;
486		break;
487
488	case VD_OP_GET_VTOC:
489		op_len = sizeof(struct vio_disk_vtoc);
490		map_perm = LDC_MAP_W;
491		break;
492
493	case VD_OP_SET_VTOC:
494		op_len = sizeof(struct vio_disk_vtoc);
495		map_perm = LDC_MAP_R;
496		break;
497
498	case VD_OP_GET_DISKGEOM:
499		op_len = sizeof(struct vio_disk_geom);
500		map_perm = LDC_MAP_W;
501		break;
502
503	case VD_OP_SET_DISKGEOM:
504		op_len = sizeof(struct vio_disk_geom);
505		map_perm = LDC_MAP_R;
506		break;
507
508	case VD_OP_SCSICMD:
509		op_len = 16;
510		map_perm = LDC_MAP_RW;
511		break;
512
513	case VD_OP_GET_DEVID:
514		op_len = sizeof(struct vio_disk_devid);
515		map_perm = LDC_MAP_W;
516		break;
517
518	case VD_OP_GET_EFI:
519	case VD_OP_SET_EFI:
520		return -EOPNOTSUPP;
521		break;
522	};
523
524	map_perm |= LDC_MAP_SHADOW | LDC_MAP_DIRECT | LDC_MAP_IO;
525
526	op_len = (op_len + 7) & ~7;
527	req_buf = kzalloc(op_len, GFP_KERNEL);
528	if (!req_buf)
529		return -ENOMEM;
530
531	if (len > op_len)
532		len = op_len;
533
534	if (map_perm & LDC_MAP_R)
535		memcpy(req_buf, buf, len);
536
537	spin_lock_irqsave(&port->vio.lock, flags);
538
539	dr = &port->vio.drings[VIO_DRIVER_TX_RING];
540
541	/* XXX If we want to use this code generically we have to
542	 * XXX handle TX ring exhaustion etc.
543	 */
544	desc = vio_dring_cur(dr);
545
546	err = ldc_map_single(port->vio.lp, req_buf, op_len,
547			     desc->cookies, port->ring_cookies,
548			     map_perm);
549	if (err < 0) {
550		spin_unlock_irqrestore(&port->vio.lock, flags);
551		kfree(req_buf);
552		return err;
553	}
554
555	init_completion(&comp.com);
556	comp.waiting_for = WAITING_FOR_GEN_CMD;
557	port->vio.cmp = &comp;
558
559	desc->hdr.ack = VIO_ACK_ENABLE;
560	desc->req_id = port->req_id;
561	desc->operation = op;
562	desc->slice = 0;
563	desc->status = ~0;
564	desc->offset = 0;
565	desc->size = op_len;
566	desc->ncookies = err;
567
568	/* This has to be a non-SMP write barrier because we are writing
569	 * to memory which is shared with the peer LDOM.
570	 */
571	wmb();
572	desc->hdr.state = VIO_DESC_READY;
573
574	err = __vdc_tx_trigger(port);
575	if (err >= 0) {
576		port->req_id++;
577		dr->prod = (dr->prod + 1) & (VDC_TX_RING_SIZE - 1);
578		spin_unlock_irqrestore(&port->vio.lock, flags);
579
580		wait_for_completion(&comp.com);
581		err = comp.err;
582	} else {
583		port->vio.cmp = NULL;
584		spin_unlock_irqrestore(&port->vio.lock, flags);
585	}
586
587	if (map_perm & LDC_MAP_W)
588		memcpy(buf, req_buf, len);
589
590	kfree(req_buf);
591
592	return err;
593}
594
595static int __devinit vdc_alloc_tx_ring(struct vdc_port *port)
596{
597	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
598	unsigned long len, entry_size;
599	int ncookies;
600	void *dring;
601
602	entry_size = sizeof(struct vio_disk_desc) +
603		(sizeof(struct ldc_trans_cookie) * port->ring_cookies);
604	len = (VDC_TX_RING_SIZE * entry_size);
605
606	ncookies = VIO_MAX_RING_COOKIES;
607	dring = ldc_alloc_exp_dring(port->vio.lp, len,
608				    dr->cookies, &ncookies,
609				    (LDC_MAP_SHADOW |
610				     LDC_MAP_DIRECT |
611				     LDC_MAP_RW));
612	if (IS_ERR(dring))
613		return PTR_ERR(dring);
614
615	dr->base = dring;
616	dr->entry_size = entry_size;
617	dr->num_entries = VDC_TX_RING_SIZE;
618	dr->prod = dr->cons = 0;
619	dr->pending = VDC_TX_RING_SIZE;
620	dr->ncookies = ncookies;
621
622	return 0;
623}
624
625static void vdc_free_tx_ring(struct vdc_port *port)
626{
627	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
628
629	if (dr->base) {
630		ldc_free_exp_dring(port->vio.lp, dr->base,
631				   (dr->entry_size * dr->num_entries),
632				   dr->cookies, dr->ncookies);
633		dr->base = NULL;
634		dr->entry_size = 0;
635		dr->num_entries = 0;
636		dr->pending = 0;
637		dr->ncookies = 0;
638	}
639}
640
641static int probe_disk(struct vdc_port *port)
642{
643	struct vio_completion comp;
644	struct request_queue *q;
645	struct gendisk *g;
646	int err;
647
648	init_completion(&comp.com);
649	comp.err = 0;
650	comp.waiting_for = WAITING_FOR_LINK_UP;
651	port->vio.cmp = &comp;
652
653	vio_port_up(&port->vio);
654
655	wait_for_completion(&comp.com);
656	if (comp.err)
657		return comp.err;
658
659	err = generic_request(port, VD_OP_GET_VTOC,
660			      &port->label, sizeof(port->label));
661	if (err < 0) {
662		printk(KERN_ERR PFX "VD_OP_GET_VTOC returns error %d\n", err);
663		return err;
664	}
665
666	err = generic_request(port, VD_OP_GET_DISKGEOM,
667			      &port->geom, sizeof(port->geom));
668	if (err < 0) {
669		printk(KERN_ERR PFX "VD_OP_GET_DISKGEOM returns "
670		       "error %d\n", err);
671		return err;
672	}
673
674	port->vdisk_size = ((u64)port->geom.num_cyl *
675			    (u64)port->geom.num_hd *
676			    (u64)port->geom.num_sec);
677
678	q = blk_init_queue(do_vdc_request, &port->vio.lock);
679	if (!q) {
680		printk(KERN_ERR PFX "%s: Could not allocate queue.\n",
681		       port->vio.name);
682		return -ENOMEM;
683	}
684	g = alloc_disk(1 << PARTITION_SHIFT);
685	if (!g) {
686		printk(KERN_ERR PFX "%s: Could not allocate gendisk.\n",
687		       port->vio.name);
688		blk_cleanup_queue(q);
689		return -ENOMEM;
690	}
691
692	port->disk = g;
693
694	blk_queue_max_segments(q, port->ring_cookies);
695	blk_queue_max_hw_sectors(q, port->max_xfer_size);
696	g->major = vdc_major;
697	g->first_minor = port->vio.vdev->dev_no << PARTITION_SHIFT;
698	strcpy(g->disk_name, port->disk_name);
699
700	g->fops = &vdc_fops;
701	g->queue = q;
702	g->private_data = port;
703	g->driverfs_dev = &port->vio.vdev->dev;
704
705	set_capacity(g, port->vdisk_size);
706
707	printk(KERN_INFO PFX "%s: %u sectors (%u MB)\n",
708	       g->disk_name,
709	       port->vdisk_size, (port->vdisk_size >> (20 - 9)));
710
711	add_disk(g);
712
713	return 0;
714}
715
716static struct ldc_channel_config vdc_ldc_cfg = {
717	.event		= vdc_event,
718	.mtu		= 64,
719	.mode		= LDC_MODE_UNRELIABLE,
720};
721
722static struct vio_driver_ops vdc_vio_ops = {
723	.send_attr		= vdc_send_attr,
724	.handle_attr		= vdc_handle_attr,
725	.handshake_complete	= vdc_handshake_complete,
726};
727
728static void __devinit print_version(void)
729{
730	static int version_printed;
731
732	if (version_printed++ == 0)
733		printk(KERN_INFO "%s", version);
734}
735
736static int __devinit vdc_port_probe(struct vio_dev *vdev,
737				    const struct vio_device_id *id)
738{
739	struct mdesc_handle *hp;
740	struct vdc_port *port;
741	int err;
742
743	print_version();
744
745	hp = mdesc_grab();
746
747	err = -ENODEV;
748	if ((vdev->dev_no << PARTITION_SHIFT) & ~(u64)MINORMASK) {
749		printk(KERN_ERR PFX "Port id [%llu] too large.\n",
750		       vdev->dev_no);
751		goto err_out_release_mdesc;
752	}
753
754	port = kzalloc(sizeof(*port), GFP_KERNEL);
755	err = -ENOMEM;
756	if (!port) {
757		printk(KERN_ERR PFX "Cannot allocate vdc_port.\n");
758		goto err_out_release_mdesc;
759	}
760
761	if (vdev->dev_no >= 26)
762		snprintf(port->disk_name, sizeof(port->disk_name),
763			 VDCBLK_NAME "%c%c",
764			 'a' + ((int)vdev->dev_no / 26) - 1,
765			 'a' + ((int)vdev->dev_no % 26));
766	else
767		snprintf(port->disk_name, sizeof(port->disk_name),
768			 VDCBLK_NAME "%c", 'a' + ((int)vdev->dev_no % 26));
769
770	err = vio_driver_init(&port->vio, vdev, VDEV_DISK,
771			      vdc_versions, ARRAY_SIZE(vdc_versions),
772			      &vdc_vio_ops, port->disk_name);
773	if (err)
774		goto err_out_free_port;
775
776	port->vdisk_block_size = 512;
777	port->max_xfer_size = ((128 * 1024) / port->vdisk_block_size);
778	port->ring_cookies = ((port->max_xfer_size *
779			       port->vdisk_block_size) / PAGE_SIZE) + 2;
780
781	err = vio_ldc_alloc(&port->vio, &vdc_ldc_cfg, port);
782	if (err)
783		goto err_out_free_port;
784
785	err = vdc_alloc_tx_ring(port);
786	if (err)
787		goto err_out_free_ldc;
788
789	err = probe_disk(port);
790	if (err)
791		goto err_out_free_tx_ring;
792
793	dev_set_drvdata(&vdev->dev, port);
794
795	mdesc_release(hp);
796
797	return 0;
798
799err_out_free_tx_ring:
800	vdc_free_tx_ring(port);
801
802err_out_free_ldc:
803	vio_ldc_free(&port->vio);
804
805err_out_free_port:
806	kfree(port);
807
808err_out_release_mdesc:
809	mdesc_release(hp);
810	return err;
811}
812
813static int vdc_port_remove(struct vio_dev *vdev)
814{
815	struct vdc_port *port = dev_get_drvdata(&vdev->dev);
816
817	if (port) {
818		del_timer_sync(&port->vio.timer);
819
820		vdc_free_tx_ring(port);
821		vio_ldc_free(&port->vio);
822
823		dev_set_drvdata(&vdev->dev, NULL);
824
825		kfree(port);
826	}
827	return 0;
828}
829
830static const struct vio_device_id vdc_port_match[] = {
831	{
832		.type = "vdc-port",
833	},
834	{},
835};
836MODULE_DEVICE_TABLE(vio, vdc_port_match);
837
838static struct vio_driver vdc_port_driver = {
839	.id_table	= vdc_port_match,
840	.probe		= vdc_port_probe,
841	.remove		= vdc_port_remove,
842	.driver		= {
843		.name	= "vdc_port",
844		.owner	= THIS_MODULE,
845	}
846};
847
848static int __init vdc_init(void)
849{
850	int err;
851
852	err = register_blkdev(0, VDCBLK_NAME);
853	if (err < 0)
854		goto out_err;
855
856	vdc_major = err;
857
858	err = vio_register_driver(&vdc_port_driver);
859	if (err)
860		goto out_unregister_blkdev;
861
862	return 0;
863
864out_unregister_blkdev:
865	unregister_blkdev(vdc_major, VDCBLK_NAME);
866	vdc_major = 0;
867
868out_err:
869	return err;
870}
871
872static void __exit vdc_exit(void)
873{
874	vio_unregister_driver(&vdc_port_driver);
875	unregister_blkdev(vdc_major, VDCBLK_NAME);
876}
877
878module_init(vdc_init);
879module_exit(vdc_exit);
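
Both listings advance their descriptor-ring indices by bit masking rather than a modulo: v3.1 open-codes dr->cons = (index + 1) & (VDC_TX_RING_SIZE - 1) in vdc_end_one() and the matching producer update in __send_request(), while the v5.9 version below folds the same step into vio_dring_next(). The trick is only valid because VDC_TX_RING_SIZE is a power of two (256 here, 512 in v5.9), so VDC_TX_RING_SIZE - 1 is an all-ones mask. A minimal standalone sketch of the wrap-around, with illustrative names (RING_SIZE, ring_next) that are not part of the driver:

#include <assert.h>

#define RING_SIZE 256	/* must be a power of two for the mask to work */

static unsigned int ring_next(unsigned int idx)
{
	/* Equivalent to (idx + 1) % RING_SIZE: RING_SIZE - 1 is an
	 * all-ones bit mask when RING_SIZE is a power of two.
	 */
	return (idx + 1) & (RING_SIZE - 1);
}

int main(void)
{
	assert(ring_next(0) == 1);
	assert(ring_next(254) == 255);
	assert(ring_next(255) == 0);	/* wraps back to the ring start */
	return 0;
}
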
v5.9
   1// SPDX-License-Identifier: GPL-2.0-only
   2/* sunvdc.c: Sun LDOM Virtual Disk Client.
   3 *
   4 * Copyright (C) 2007, 2008 David S. Miller <davem@davemloft.net>
   5 */
   6
   7#include <linux/module.h>
   8#include <linux/kernel.h>
   9#include <linux/types.h>
  10#include <linux/blk-mq.h>
  11#include <linux/hdreg.h>
  12#include <linux/genhd.h>
  13#include <linux/cdrom.h>
  14#include <linux/slab.h>
  15#include <linux/spinlock.h>
  16#include <linux/completion.h>
  17#include <linux/delay.h>
  18#include <linux/init.h>
  19#include <linux/list.h>
  20#include <linux/scatterlist.h>
  21
  22#include <asm/vio.h>
  23#include <asm/ldc.h>
  24
  25#define DRV_MODULE_NAME		"sunvdc"
  26#define PFX DRV_MODULE_NAME	": "
  27#define DRV_MODULE_VERSION	"1.2"
  28#define DRV_MODULE_RELDATE	"November 24, 2014"
  29
  30static char version[] =
  31	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
  32MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
  33MODULE_DESCRIPTION("Sun LDOM virtual disk client driver");
  34MODULE_LICENSE("GPL");
  35MODULE_VERSION(DRV_MODULE_VERSION);
  36
  37#define VDC_TX_RING_SIZE	512
  38#define VDC_DEFAULT_BLK_SIZE	512
  39
  40#define MAX_XFER_BLKS		(128 * 1024)
  41#define MAX_XFER_SIZE		(MAX_XFER_BLKS / VDC_DEFAULT_BLK_SIZE)
  42#define MAX_RING_COOKIES	((MAX_XFER_BLKS / PAGE_SIZE) + 2)
  43
  44#define WAITING_FOR_LINK_UP	0x01
  45#define WAITING_FOR_TX_SPACE	0x02
  46#define WAITING_FOR_GEN_CMD	0x04
  47#define WAITING_FOR_ANY		-1
  48
  49#define	VDC_MAX_RETRIES	10
  50
  51static struct workqueue_struct *sunvdc_wq;
  52
  53struct vdc_req_entry {
  54	struct request		*req;
  55};
  56
  57struct vdc_port {
  58	struct vio_driver_state	vio;
  59
  60	struct gendisk		*disk;
  61
  62	struct vdc_completion	*cmp;
  63
  64	u64			req_id;
  65	u64			seq;
  66	struct vdc_req_entry	rq_arr[VDC_TX_RING_SIZE];
  67
  68	unsigned long		ring_cookies;
  69
  70	u64			max_xfer_size;
  71	u32			vdisk_block_size;
  72	u32			drain;
  73
  74	u64			ldc_timeout;
  75	struct delayed_work	ldc_reset_timer_work;
  76	struct work_struct	ldc_reset_work;
  77
  78	/* The server fills these in for us in the disk attribute
  79	 * ACK packet.
  80	 */
  81	u64			operations;
  82	u32			vdisk_size;
  83	u8			vdisk_type;
  84	u8			vdisk_mtype;
  85	u32			vdisk_phys_blksz;
  86
  87	struct blk_mq_tag_set	tag_set;
  88
  89	char			disk_name[32];
  90};
  91
  92static void vdc_ldc_reset(struct vdc_port *port);
  93static void vdc_ldc_reset_work(struct work_struct *work);
  94static void vdc_ldc_reset_timer_work(struct work_struct *work);
  95
  96static inline struct vdc_port *to_vdc_port(struct vio_driver_state *vio)
  97{
  98	return container_of(vio, struct vdc_port, vio);
  99}
 100
 101/* Ordered from largest major to lowest */
 102static struct vio_version vdc_versions[] = {
 103	{ .major = 1, .minor = 2 },
 104	{ .major = 1, .minor = 1 },
 105	{ .major = 1, .minor = 0 },
 106};
 107
 108static inline int vdc_version_supported(struct vdc_port *port,
 109					u16 major, u16 minor)
 110{
 111	return port->vio.ver.major == major && port->vio.ver.minor >= minor;
 112}
 113
 114#define VDCBLK_NAME	"vdisk"
 115static int vdc_major;
 116#define PARTITION_SHIFT	3
 117
 118static inline u32 vdc_tx_dring_avail(struct vio_dring_state *dr)
 119{
 120	return vio_dring_avail(dr, VDC_TX_RING_SIZE);
 121}
 122
 123static int vdc_getgeo(struct block_device *bdev, struct hd_geometry *geo)
 124{
 125	struct gendisk *disk = bdev->bd_disk;
 126	sector_t nsect = get_capacity(disk);
 127	sector_t cylinders = nsect;
 128
 129	geo->heads = 0xff;
 130	geo->sectors = 0x3f;
 131	sector_div(cylinders, geo->heads * geo->sectors);
 132	geo->cylinders = cylinders;
 133	if ((sector_t)(geo->cylinders + 1) * geo->heads * geo->sectors < nsect)
 134		geo->cylinders = 0xffff;
 135
 136	return 0;
 137}
 138
 139/* Add ioctl/CDROM_GET_CAPABILITY to support cdrom_id in udev
 140 * when vdisk_mtype is VD_MEDIA_TYPE_CD or VD_MEDIA_TYPE_DVD.
 141 * Needed to be able to install inside an ldom from an iso image.
 142 */
 143static int vdc_ioctl(struct block_device *bdev, fmode_t mode,
 144		     unsigned command, unsigned long argument)
 145{
 146	int i;
 147	struct gendisk *disk;
 148
 149	switch (command) {
 150	case CDROMMULTISESSION:
 151		pr_debug(PFX "Multisession CDs not supported\n");
 152		for (i = 0; i < sizeof(struct cdrom_multisession); i++)
 153			if (put_user(0, (char __user *)(argument + i)))
 154				return -EFAULT;
 155		return 0;
 156
 157	case CDROM_GET_CAPABILITY:
 158		disk = bdev->bd_disk;
 159
 160		if (bdev->bd_disk && (disk->flags & GENHD_FL_CD))
 161			return 0;
 162		return -EINVAL;
 163
 164	default:
 165		pr_debug(PFX "ioctl %08x not supported\n", command);
 166		return -EINVAL;
 167	}
 168}
 169
 170static const struct block_device_operations vdc_fops = {
 171	.owner		= THIS_MODULE,
 172	.getgeo		= vdc_getgeo,
 173	.ioctl		= vdc_ioctl,
 174	.compat_ioctl	= blkdev_compat_ptr_ioctl,
 175};
 176
 177static void vdc_blk_queue_start(struct vdc_port *port)
 178{
 179	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
 180
 181	/* restart blk queue when ring is half emptied. also called after
 182	 * handshake completes, so check for initial handshake before we've
 183	 * allocated a disk.
 184	 */
 185	if (port->disk && vdc_tx_dring_avail(dr) * 100 / VDC_TX_RING_SIZE >= 50)
 186		blk_mq_start_stopped_hw_queues(port->disk->queue, true);
 187}
 188
 189static void vdc_finish(struct vio_driver_state *vio, int err, int waiting_for)
 190{
 191	if (vio->cmp &&
 192	    (waiting_for == -1 ||
 193	     vio->cmp->waiting_for == waiting_for)) {
 194		vio->cmp->err = err;
 195		complete(&vio->cmp->com);
 196		vio->cmp = NULL;
 197	}
 198}
 199
 200static void vdc_handshake_complete(struct vio_driver_state *vio)
 201{
 202	struct vdc_port *port = to_vdc_port(vio);
 203
 204	cancel_delayed_work(&port->ldc_reset_timer_work);
 205	vdc_finish(vio, 0, WAITING_FOR_LINK_UP);
 206	vdc_blk_queue_start(port);
 207}
 208
 209static int vdc_handle_unknown(struct vdc_port *port, void *arg)
 210{
 211	struct vio_msg_tag *pkt = arg;
 212
 213	printk(KERN_ERR PFX "Received unknown msg [%02x:%02x:%04x:%08x]\n",
 214	       pkt->type, pkt->stype, pkt->stype_env, pkt->sid);
 215	printk(KERN_ERR PFX "Resetting connection.\n");
 216
 217	ldc_disconnect(port->vio.lp);
 218
 219	return -ECONNRESET;
 220}
 221
 222static int vdc_send_attr(struct vio_driver_state *vio)
 223{
 224	struct vdc_port *port = to_vdc_port(vio);
 225	struct vio_disk_attr_info pkt;
 226
 227	memset(&pkt, 0, sizeof(pkt));
 228
 229	pkt.tag.type = VIO_TYPE_CTRL;
 230	pkt.tag.stype = VIO_SUBTYPE_INFO;
 231	pkt.tag.stype_env = VIO_ATTR_INFO;
 232	pkt.tag.sid = vio_send_sid(vio);
 233
 234	pkt.xfer_mode = VIO_DRING_MODE;
 235	pkt.vdisk_block_size = port->vdisk_block_size;
 236	pkt.max_xfer_size = port->max_xfer_size;
 237
 238	viodbg(HS, "SEND ATTR xfer_mode[0x%x] blksz[%u] max_xfer[%llu]\n",
 239	       pkt.xfer_mode, pkt.vdisk_block_size, pkt.max_xfer_size);
 240
 241	return vio_ldc_send(&port->vio, &pkt, sizeof(pkt));
 242}
 243
 244static int vdc_handle_attr(struct vio_driver_state *vio, void *arg)
 245{
 246	struct vdc_port *port = to_vdc_port(vio);
 247	struct vio_disk_attr_info *pkt = arg;
 248
 249	viodbg(HS, "GOT ATTR stype[0x%x] ops[%llx] disk_size[%llu] disk_type[%x] "
 250	       "mtype[0x%x] xfer_mode[0x%x] blksz[%u] max_xfer[%llu]\n",
 251	       pkt->tag.stype, pkt->operations,
 252	       pkt->vdisk_size, pkt->vdisk_type, pkt->vdisk_mtype,
 253	       pkt->xfer_mode, pkt->vdisk_block_size,
 254	       pkt->max_xfer_size);
 255
 256	if (pkt->tag.stype == VIO_SUBTYPE_ACK) {
 257		switch (pkt->vdisk_type) {
 258		case VD_DISK_TYPE_DISK:
 259		case VD_DISK_TYPE_SLICE:
 260			break;
 261
 262		default:
 263			printk(KERN_ERR PFX "%s: Bogus vdisk_type 0x%x\n",
 264			       vio->name, pkt->vdisk_type);
 265			return -ECONNRESET;
 266		}
 267
 268		if (pkt->vdisk_block_size > port->vdisk_block_size) {
 269			printk(KERN_ERR PFX "%s: BLOCK size increased "
 270			       "%u --> %u\n",
 271			       vio->name,
 272			       port->vdisk_block_size, pkt->vdisk_block_size);
 273			return -ECONNRESET;
 274		}
 275
 276		port->operations = pkt->operations;
 277		port->vdisk_type = pkt->vdisk_type;
 278		if (vdc_version_supported(port, 1, 1)) {
 279			port->vdisk_size = pkt->vdisk_size;
 280			port->vdisk_mtype = pkt->vdisk_mtype;
 281		}
 282		if (pkt->max_xfer_size < port->max_xfer_size)
 283			port->max_xfer_size = pkt->max_xfer_size;
 284		port->vdisk_block_size = pkt->vdisk_block_size;
 285
 286		port->vdisk_phys_blksz = VDC_DEFAULT_BLK_SIZE;
 287		if (vdc_version_supported(port, 1, 2))
 288			port->vdisk_phys_blksz = pkt->phys_block_size;
 289
 290		return 0;
 291	} else {
 292		printk(KERN_ERR PFX "%s: Attribute NACK\n", vio->name);
 293
 294		return -ECONNRESET;
 295	}
 296}
 297
 298static void vdc_end_special(struct vdc_port *port, struct vio_disk_desc *desc)
 299{
 300	int err = desc->status;
 301
 302	vdc_finish(&port->vio, -err, WAITING_FOR_GEN_CMD);
 303}
 304
 305static void vdc_end_one(struct vdc_port *port, struct vio_dring_state *dr,
 306			unsigned int index)
 307{
 308	struct vio_disk_desc *desc = vio_dring_entry(dr, index);
 309	struct vdc_req_entry *rqe = &port->rq_arr[index];
 310	struct request *req;
 311
 312	if (unlikely(desc->hdr.state != VIO_DESC_DONE))
 313		return;
 314
 315	ldc_unmap(port->vio.lp, desc->cookies, desc->ncookies);
 316	desc->hdr.state = VIO_DESC_FREE;
 317	dr->cons = vio_dring_next(dr, index);
 318
 319	req = rqe->req;
 320	if (req == NULL) {
 321		vdc_end_special(port, desc);
 322		return;
 323	}
 324
 325	rqe->req = NULL;
 326
 327	blk_mq_end_request(req, desc->status ? BLK_STS_IOERR : 0);
 328
 329	vdc_blk_queue_start(port);
 330}
 331
 332static int vdc_ack(struct vdc_port *port, void *msgbuf)
 333{
 334	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
 335	struct vio_dring_data *pkt = msgbuf;
 336
 337	if (unlikely(pkt->dring_ident != dr->ident ||
 338		     pkt->start_idx != pkt->end_idx ||
 339		     pkt->start_idx >= VDC_TX_RING_SIZE))
 340		return 0;
 341
 342	vdc_end_one(port, dr, pkt->start_idx);
 343
 344	return 0;
 345}
 346
 347static int vdc_nack(struct vdc_port *port, void *msgbuf)
 348{
 349	/* XXX Implement me XXX */
 350	return 0;
 351}
 352
 353static void vdc_event(void *arg, int event)
 354{
 355	struct vdc_port *port = arg;
 356	struct vio_driver_state *vio = &port->vio;
 357	unsigned long flags;
 358	int err;
 359
 360	spin_lock_irqsave(&vio->lock, flags);
 361
 362	if (unlikely(event == LDC_EVENT_RESET)) {
 363		vio_link_state_change(vio, event);
 364		queue_work(sunvdc_wq, &port->ldc_reset_work);
 365		goto out;
 366	}
 367
 368	if (unlikely(event == LDC_EVENT_UP)) {
 369		vio_link_state_change(vio, event);
 370		goto out;
 371	}
 372
 373	if (unlikely(event != LDC_EVENT_DATA_READY)) {
 374		pr_warn(PFX "Unexpected LDC event %d\n", event);
 375		goto out;
 376	}
 377
 378	err = 0;
 379	while (1) {
 380		union {
 381			struct vio_msg_tag tag;
 382			u64 raw[8];
 383		} msgbuf;
 384
 385		err = ldc_read(vio->lp, &msgbuf, sizeof(msgbuf));
 386		if (unlikely(err < 0)) {
 387			if (err == -ECONNRESET)
 388				vio_conn_reset(vio);
 389			break;
 390		}
 391		if (err == 0)
 392			break;
 393		viodbg(DATA, "TAG [%02x:%02x:%04x:%08x]\n",
 394		       msgbuf.tag.type,
 395		       msgbuf.tag.stype,
 396		       msgbuf.tag.stype_env,
 397		       msgbuf.tag.sid);
 398		err = vio_validate_sid(vio, &msgbuf.tag);
 399		if (err < 0)
 400			break;
 401
 402		if (likely(msgbuf.tag.type == VIO_TYPE_DATA)) {
 403			if (msgbuf.tag.stype == VIO_SUBTYPE_ACK)
 404				err = vdc_ack(port, &msgbuf);
 405			else if (msgbuf.tag.stype == VIO_SUBTYPE_NACK)
 406				err = vdc_nack(port, &msgbuf);
 407			else
 408				err = vdc_handle_unknown(port, &msgbuf);
 409		} else if (msgbuf.tag.type == VIO_TYPE_CTRL) {
 410			err = vio_control_pkt_engine(vio, &msgbuf);
 411		} else {
 412			err = vdc_handle_unknown(port, &msgbuf);
 413		}
 414		if (err < 0)
 415			break;
 416	}
 417	if (err < 0)
 418		vdc_finish(&port->vio, err, WAITING_FOR_ANY);
 419out:
 420	spin_unlock_irqrestore(&vio->lock, flags);
 421}
 422
 423static int __vdc_tx_trigger(struct vdc_port *port)
 424{
 425	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
 426	struct vio_dring_data hdr = {
 427		.tag = {
 428			.type		= VIO_TYPE_DATA,
 429			.stype		= VIO_SUBTYPE_INFO,
 430			.stype_env	= VIO_DRING_DATA,
 431			.sid		= vio_send_sid(&port->vio),
 432		},
 433		.dring_ident		= dr->ident,
 434		.start_idx		= dr->prod,
 435		.end_idx		= dr->prod,
 436	};
 437	int err, delay;
 438	int retries = 0;
 439
 440	hdr.seq = dr->snd_nxt;
 441	delay = 1;
 442	do {
 443		err = vio_ldc_send(&port->vio, &hdr, sizeof(hdr));
 444		if (err > 0) {
 445			dr->snd_nxt++;
 446			break;
 447		}
 448		udelay(delay);
 449		if ((delay <<= 1) > 128)
 450			delay = 128;
 451		if (retries++ > VDC_MAX_RETRIES)
 452			break;
 453	} while (err == -EAGAIN);
 454
 455	if (err == -ENOTCONN)
 456		vdc_ldc_reset(port);
 457	return err;
 458}
 459
 460static int __send_request(struct request *req)
 461{
 462	struct vdc_port *port = req->rq_disk->private_data;
 463	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
 464	struct scatterlist sg[MAX_RING_COOKIES];
 465	struct vdc_req_entry *rqe;
 466	struct vio_disk_desc *desc;
 467	unsigned int map_perm;
 468	int nsg, err, i;
 469	u64 len;
 470	u8 op;
 471
 472	if (WARN_ON(port->ring_cookies > MAX_RING_COOKIES))
 473		return -EINVAL;
 474
 475	map_perm = LDC_MAP_SHADOW | LDC_MAP_DIRECT | LDC_MAP_IO;
 476
 477	if (rq_data_dir(req) == READ) {
 478		map_perm |= LDC_MAP_W;
 479		op = VD_OP_BREAD;
 480	} else {
 481		map_perm |= LDC_MAP_R;
 482		op = VD_OP_BWRITE;
 483	}
 484
 485	sg_init_table(sg, port->ring_cookies);
 486	nsg = blk_rq_map_sg(req->q, req, sg);
 487
 488	len = 0;
 489	for (i = 0; i < nsg; i++)
 490		len += sg[i].length;
 491
 492	desc = vio_dring_cur(dr);
 493
 494	err = ldc_map_sg(port->vio.lp, sg, nsg,
 495			 desc->cookies, port->ring_cookies,
 496			 map_perm);
 497	if (err < 0) {
 498		printk(KERN_ERR PFX "ldc_map_sg() failure, err=%d.\n", err);
 499		return err;
 500	}
 501
 502	rqe = &port->rq_arr[dr->prod];
 503	rqe->req = req;
 504
 505	desc->hdr.ack = VIO_ACK_ENABLE;
 506	desc->req_id = port->req_id;
 507	desc->operation = op;
 508	if (port->vdisk_type == VD_DISK_TYPE_DISK) {
 509		desc->slice = 0xff;
 510	} else {
 511		desc->slice = 0;
 512	}
 513	desc->status = ~0;
 514	desc->offset = (blk_rq_pos(req) << 9) / port->vdisk_block_size;
 515	desc->size = len;
 516	desc->ncookies = err;
 517
 518	/* This has to be a non-SMP write barrier because we are writing
 519	 * to memory which is shared with the peer LDOM.
 520	 */
 521	wmb();
 522	desc->hdr.state = VIO_DESC_READY;
 523
 524	err = __vdc_tx_trigger(port);
 525	if (err < 0) {
 526		printk(KERN_ERR PFX "vdc_tx_trigger() failure, err=%d\n", err);
 527	} else {
 528		port->req_id++;
 529		dr->prod = vio_dring_next(dr, dr->prod);
 530	}
 531
 532	return err;
 533}
 534
 535static blk_status_t vdc_queue_rq(struct blk_mq_hw_ctx *hctx,
 536				 const struct blk_mq_queue_data *bd)
 537{
 538	struct vdc_port *port = hctx->queue->queuedata;
 539	struct vio_dring_state *dr;
 540	unsigned long flags;
 541
 542	dr = &port->vio.drings[VIO_DRIVER_TX_RING];
 543
 544	blk_mq_start_request(bd->rq);
 545
 546	spin_lock_irqsave(&port->vio.lock, flags);
 547
 548	/*
 549	 * Doing drain, just end the request in error
 550	 */
 551	if (unlikely(port->drain)) {
 552		spin_unlock_irqrestore(&port->vio.lock, flags);
 553		return BLK_STS_IOERR;
 554	}
 555
 556	if (unlikely(vdc_tx_dring_avail(dr) < 1)) {
 557		spin_unlock_irqrestore(&port->vio.lock, flags);
 558		blk_mq_stop_hw_queue(hctx);
 559		return BLK_STS_DEV_RESOURCE;
 560	}
 561
 562	if (__send_request(bd->rq) < 0) {
 563		spin_unlock_irqrestore(&port->vio.lock, flags);
 564		return BLK_STS_IOERR;
 565	}
 566
 567	spin_unlock_irqrestore(&port->vio.lock, flags);
 568	return BLK_STS_OK;
 569}
 570
 571static int generic_request(struct vdc_port *port, u8 op, void *buf, int len)
 572{
 573	struct vio_dring_state *dr;
 574	struct vio_completion comp;
 575	struct vio_disk_desc *desc;
 576	unsigned int map_perm;
 577	unsigned long flags;
 578	int op_len, err;
 579	void *req_buf;
 580
 581	if (!(((u64)1 << (u64)op) & port->operations))
 582		return -EOPNOTSUPP;
 583
 584	switch (op) {
 585	case VD_OP_BREAD:
 586	case VD_OP_BWRITE:
 587	default:
 588		return -EINVAL;
 589
 590	case VD_OP_FLUSH:
 591		op_len = 0;
 592		map_perm = 0;
 593		break;
 594
 595	case VD_OP_GET_WCE:
 596		op_len = sizeof(u32);
 597		map_perm = LDC_MAP_W;
 598		break;
 599
 600	case VD_OP_SET_WCE:
 601		op_len = sizeof(u32);
 602		map_perm = LDC_MAP_R;
 603		break;
 604
 605	case VD_OP_GET_VTOC:
 606		op_len = sizeof(struct vio_disk_vtoc);
 607		map_perm = LDC_MAP_W;
 608		break;
 609
 610	case VD_OP_SET_VTOC:
 611		op_len = sizeof(struct vio_disk_vtoc);
 612		map_perm = LDC_MAP_R;
 613		break;
 614
 615	case VD_OP_GET_DISKGEOM:
 616		op_len = sizeof(struct vio_disk_geom);
 617		map_perm = LDC_MAP_W;
 618		break;
 619
 620	case VD_OP_SET_DISKGEOM:
 621		op_len = sizeof(struct vio_disk_geom);
 622		map_perm = LDC_MAP_R;
 623		break;
 624
 625	case VD_OP_SCSICMD:
 626		op_len = 16;
 627		map_perm = LDC_MAP_RW;
 628		break;
 629
 630	case VD_OP_GET_DEVID:
 631		op_len = sizeof(struct vio_disk_devid);
 632		map_perm = LDC_MAP_W;
 633		break;
 634
 635	case VD_OP_GET_EFI:
 636	case VD_OP_SET_EFI:
 637		return -EOPNOTSUPP;
 638	}
 639
 640	map_perm |= LDC_MAP_SHADOW | LDC_MAP_DIRECT | LDC_MAP_IO;
 641
 642	op_len = (op_len + 7) & ~7;
 643	req_buf = kzalloc(op_len, GFP_KERNEL);
 644	if (!req_buf)
 645		return -ENOMEM;
 646
 647	if (len > op_len)
 648		len = op_len;
 649
 650	if (map_perm & LDC_MAP_R)
 651		memcpy(req_buf, buf, len);
 652
 653	spin_lock_irqsave(&port->vio.lock, flags);
 654
 655	dr = &port->vio.drings[VIO_DRIVER_TX_RING];
 656
 657	/* XXX If we want to use this code generically we have to
 658	 * XXX handle TX ring exhaustion etc.
 659	 */
 660	desc = vio_dring_cur(dr);
 661
 662	err = ldc_map_single(port->vio.lp, req_buf, op_len,
 663			     desc->cookies, port->ring_cookies,
 664			     map_perm);
 665	if (err < 0) {
 666		spin_unlock_irqrestore(&port->vio.lock, flags);
 667		kfree(req_buf);
 668		return err;
 669	}
 670
 671	init_completion(&comp.com);
 672	comp.waiting_for = WAITING_FOR_GEN_CMD;
 673	port->vio.cmp = &comp;
 674
 675	desc->hdr.ack = VIO_ACK_ENABLE;
 676	desc->req_id = port->req_id;
 677	desc->operation = op;
 678	desc->slice = 0;
 679	desc->status = ~0;
 680	desc->offset = 0;
 681	desc->size = op_len;
 682	desc->ncookies = err;
 683
 684	/* This has to be a non-SMP write barrier because we are writing
 685	 * to memory which is shared with the peer LDOM.
 686	 */
 687	wmb();
 688	desc->hdr.state = VIO_DESC_READY;
 689
 690	err = __vdc_tx_trigger(port);
 691	if (err >= 0) {
 692		port->req_id++;
 693		dr->prod = vio_dring_next(dr, dr->prod);
 694		spin_unlock_irqrestore(&port->vio.lock, flags);
 695
 696		wait_for_completion(&comp.com);
 697		err = comp.err;
 698	} else {
 699		port->vio.cmp = NULL;
 700		spin_unlock_irqrestore(&port->vio.lock, flags);
 701	}
 702
 703	if (map_perm & LDC_MAP_W)
 704		memcpy(buf, req_buf, len);
 705
 706	kfree(req_buf);
 707
 708	return err;
 709}
 710
 711static int vdc_alloc_tx_ring(struct vdc_port *port)
 712{
 713	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
 714	unsigned long len, entry_size;
 715	int ncookies;
 716	void *dring;
 717
 718	entry_size = sizeof(struct vio_disk_desc) +
 719		(sizeof(struct ldc_trans_cookie) * port->ring_cookies);
 720	len = (VDC_TX_RING_SIZE * entry_size);
 721
 722	ncookies = VIO_MAX_RING_COOKIES;
 723	dring = ldc_alloc_exp_dring(port->vio.lp, len,
 724				    dr->cookies, &ncookies,
 725				    (LDC_MAP_SHADOW |
 726				     LDC_MAP_DIRECT |
 727				     LDC_MAP_RW));
 728	if (IS_ERR(dring))
 729		return PTR_ERR(dring);
 730
 731	dr->base = dring;
 732	dr->entry_size = entry_size;
 733	dr->num_entries = VDC_TX_RING_SIZE;
 734	dr->prod = dr->cons = 0;
 735	dr->pending = VDC_TX_RING_SIZE;
 736	dr->ncookies = ncookies;
 737
 738	return 0;
 739}
 740
 741static void vdc_free_tx_ring(struct vdc_port *port)
 742{
 743	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
 744
 745	if (dr->base) {
 746		ldc_free_exp_dring(port->vio.lp, dr->base,
 747				   (dr->entry_size * dr->num_entries),
 748				   dr->cookies, dr->ncookies);
 749		dr->base = NULL;
 750		dr->entry_size = 0;
 751		dr->num_entries = 0;
 752		dr->pending = 0;
 753		dr->ncookies = 0;
 754	}
 755}
 756
 757static int vdc_port_up(struct vdc_port *port)
 758{
 759	struct vio_completion comp;
 760
 761	init_completion(&comp.com);
 762	comp.err = 0;
 763	comp.waiting_for = WAITING_FOR_LINK_UP;
 764	port->vio.cmp = &comp;
 765
 766	vio_port_up(&port->vio);
 767	wait_for_completion(&comp.com);
 768	return comp.err;
 769}
 770
 771static void vdc_port_down(struct vdc_port *port)
 772{
 773	ldc_disconnect(port->vio.lp);
 774	ldc_unbind(port->vio.lp);
 775	vdc_free_tx_ring(port);
 776	vio_ldc_free(&port->vio);
 777}
 778
 779static const struct blk_mq_ops vdc_mq_ops = {
 780	.queue_rq	= vdc_queue_rq,
 781};
 782
 783static void cleanup_queue(struct request_queue *q)
 784{
 785	struct vdc_port *port = q->queuedata;
 786
 787	blk_cleanup_queue(q);
 788	blk_mq_free_tag_set(&port->tag_set);
 789}
 790
 791static struct request_queue *init_queue(struct vdc_port *port)
 792{
 793	struct request_queue *q;
 794
 795	q = blk_mq_init_sq_queue(&port->tag_set, &vdc_mq_ops, VDC_TX_RING_SIZE,
 796					BLK_MQ_F_SHOULD_MERGE);
 797	if (IS_ERR(q))
 798		return q;
 799
 800	q->queuedata = port;
 801	return q;
 802}
 803
 804static int probe_disk(struct vdc_port *port)
 805{
 806	struct request_queue *q;
 807	struct gendisk *g;
 808	int err;
 809
 810	err = vdc_port_up(port);
 811	if (err)
 812		return err;
 813
 814	/* Using version 1.2 means vdisk_phys_blksz should be set unless the
 815	 * disk is reserved by another system.
 816	 */
 817	if (vdc_version_supported(port, 1, 2) && !port->vdisk_phys_blksz)
 818		return -ENODEV;
 819
 820	if (vdc_version_supported(port, 1, 1)) {
 821		/* vdisk_size should be set during the handshake, if it wasn't
 822		 * then the underlying disk is reserved by another system
 823		 */
 824		if (port->vdisk_size == -1)
 825			return -ENODEV;
 826	} else {
 827		struct vio_disk_geom geom;
 828
 829		err = generic_request(port, VD_OP_GET_DISKGEOM,
 830				      &geom, sizeof(geom));
 831		if (err < 0) {
 832			printk(KERN_ERR PFX "VD_OP_GET_DISKGEOM returns "
 833			       "error %d\n", err);
 834			return err;
 835		}
 836		port->vdisk_size = ((u64)geom.num_cyl *
 837				    (u64)geom.num_hd *
 838				    (u64)geom.num_sec);
 839	}
 840
 841	q = init_queue(port);
 842	if (IS_ERR(q)) {
 843		printk(KERN_ERR PFX "%s: Could not allocate queue.\n",
 844		       port->vio.name);
 845		return PTR_ERR(q);
 846	}
 847	g = alloc_disk(1 << PARTITION_SHIFT);
 848	if (!g) {
 849		printk(KERN_ERR PFX "%s: Could not allocate gendisk.\n",
 850		       port->vio.name);
 851		cleanup_queue(q);
 852		return -ENOMEM;
 853	}
 854
 855	port->disk = g;
 856
 857	/* Each segment in a request is up to an aligned page in size. */
 858	blk_queue_segment_boundary(q, PAGE_SIZE - 1);
 859	blk_queue_max_segment_size(q, PAGE_SIZE);
 860
 861	blk_queue_max_segments(q, port->ring_cookies);
 862	blk_queue_max_hw_sectors(q, port->max_xfer_size);
 863	g->major = vdc_major;
 864	g->first_minor = port->vio.vdev->dev_no << PARTITION_SHIFT;
 865	strcpy(g->disk_name, port->disk_name);
 866
 867	g->fops = &vdc_fops;
 868	g->queue = q;
 869	g->private_data = port;
 870
 871	set_capacity(g, port->vdisk_size);
 872
 873	if (vdc_version_supported(port, 1, 1)) {
 874		switch (port->vdisk_mtype) {
 875		case VD_MEDIA_TYPE_CD:
 876			pr_info(PFX "Virtual CDROM %s\n", port->disk_name);
 877			g->flags |= GENHD_FL_CD;
 878			g->flags |= GENHD_FL_REMOVABLE;
 879			set_disk_ro(g, 1);
 880			break;
 881
 882		case VD_MEDIA_TYPE_DVD:
 883			pr_info(PFX "Virtual DVD %s\n", port->disk_name);
 884			g->flags |= GENHD_FL_CD;
 885			g->flags |= GENHD_FL_REMOVABLE;
 886			set_disk_ro(g, 1);
 887			break;
 888
 889		case VD_MEDIA_TYPE_FIXED:
 890			pr_info(PFX "Virtual Hard disk %s\n", port->disk_name);
 891			break;
 892		}
 893	}
 894
 895	blk_queue_physical_block_size(q, port->vdisk_phys_blksz);
 896
 897	pr_info(PFX "%s: %u sectors (%u MB) protocol %d.%d\n",
 898	       g->disk_name,
 899	       port->vdisk_size, (port->vdisk_size >> (20 - 9)),
 900	       port->vio.ver.major, port->vio.ver.minor);
 901
 902	device_add_disk(&port->vio.vdev->dev, g, NULL);
 903
 904	return 0;
 905}
 906
 907static struct ldc_channel_config vdc_ldc_cfg = {
 908	.event		= vdc_event,
 909	.mtu		= 64,
 910	.mode		= LDC_MODE_UNRELIABLE,
 911};
 912
 913static struct vio_driver_ops vdc_vio_ops = {
 914	.send_attr		= vdc_send_attr,
 915	.handle_attr		= vdc_handle_attr,
 916	.handshake_complete	= vdc_handshake_complete,
 917};
 918
 919static void print_version(void)
 920{
 921	static int version_printed;
 922
 923	if (version_printed++ == 0)
 924		printk(KERN_INFO "%s", version);
 925}
 926
 927struct vdc_check_port_data {
 928	int	dev_no;
 929	char	*type;
 930};
 931
 932static int vdc_device_probed(struct device *dev, void *arg)
 933{
 934	struct vio_dev *vdev = to_vio_dev(dev);
 935	struct vdc_check_port_data *port_data;
 936
 937	port_data = (struct vdc_check_port_data *)arg;
 938
 939	if ((vdev->dev_no == port_data->dev_no) &&
 940	    (!(strcmp((char *)&vdev->type, port_data->type))) &&
 941		dev_get_drvdata(dev)) {
 942		/* This device has already been configured
 943		 * by vdc_port_probe()
 944		 */
 945		return 1;
 946	} else {
 947		return 0;
 948	}
 949}
 950
 951/* Determine whether the VIO device is part of an mpgroup
 952 * by locating all the virtual-device-port nodes associated
 953 * with the parent virtual-device node for the VIO device
 954 * and checking whether any of these nodes are vdc-ports
 955 * which have already been configured.
 956 *
 957 * Returns true if this device is part of an mpgroup and has
 958 * already been probed.
 959 */
 960static bool vdc_port_mpgroup_check(struct vio_dev *vdev)
 961{
 962	struct vdc_check_port_data port_data;
 963	struct device *dev;
 964
 965	port_data.dev_no = vdev->dev_no;
 966	port_data.type = (char *)&vdev->type;
 967
 968	dev = device_find_child(vdev->dev.parent, &port_data,
 969				vdc_device_probed);
 970
 971	if (dev)
 972		return true;
 973
 974	return false;
 975}
 976
 977static int vdc_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
 978{
 979	struct mdesc_handle *hp;
 980	struct vdc_port *port;
 981	int err;
 982	const u64 *ldc_timeout;
 983
 984	print_version();
 985
 986	hp = mdesc_grab();
 987
 988	err = -ENODEV;
 989	if ((vdev->dev_no << PARTITION_SHIFT) & ~(u64)MINORMASK) {
 990		printk(KERN_ERR PFX "Port id [%llu] too large.\n",
 991		       vdev->dev_no);
 992		goto err_out_release_mdesc;
 993	}
 994
 995	/* Check if this device is part of an mpgroup */
 996	if (vdc_port_mpgroup_check(vdev)) {
 997		printk(KERN_WARNING
 998			"VIO: Ignoring extra vdisk port %s",
 999			dev_name(&vdev->dev));
1000		goto err_out_release_mdesc;
1001	}
1002
1003	port = kzalloc(sizeof(*port), GFP_KERNEL);
1004	err = -ENOMEM;
1005	if (!port) {
1006		printk(KERN_ERR PFX "Cannot allocate vdc_port.\n");
1007		goto err_out_release_mdesc;
1008	}
1009
1010	if (vdev->dev_no >= 26)
1011		snprintf(port->disk_name, sizeof(port->disk_name),
1012			 VDCBLK_NAME "%c%c",
1013			 'a' + ((int)vdev->dev_no / 26) - 1,
1014			 'a' + ((int)vdev->dev_no % 26));
1015	else
1016		snprintf(port->disk_name, sizeof(port->disk_name),
1017			 VDCBLK_NAME "%c", 'a' + ((int)vdev->dev_no % 26));
1018	port->vdisk_size = -1;
1019
1020	/* Actual wall time may be double due to do_generic_file_read() doing
1021	 * a readahead I/O first, and once that fails it will try to read a
1022	 * single page.
1023	 */
1024	ldc_timeout = mdesc_get_property(hp, vdev->mp, "vdc-timeout", NULL);
1025	port->ldc_timeout = ldc_timeout ? *ldc_timeout : 0;
1026	INIT_DELAYED_WORK(&port->ldc_reset_timer_work, vdc_ldc_reset_timer_work);
1027	INIT_WORK(&port->ldc_reset_work, vdc_ldc_reset_work);
1028
1029	err = vio_driver_init(&port->vio, vdev, VDEV_DISK,
1030			      vdc_versions, ARRAY_SIZE(vdc_versions),
1031			      &vdc_vio_ops, port->disk_name);
1032	if (err)
1033		goto err_out_free_port;
1034
1035	port->vdisk_block_size = VDC_DEFAULT_BLK_SIZE;
1036	port->max_xfer_size = MAX_XFER_SIZE;
1037	port->ring_cookies = MAX_RING_COOKIES;
1038
1039	err = vio_ldc_alloc(&port->vio, &vdc_ldc_cfg, port);
1040	if (err)
1041		goto err_out_free_port;
1042
1043	err = vdc_alloc_tx_ring(port);
1044	if (err)
1045		goto err_out_free_ldc;
1046
1047	err = probe_disk(port);
1048	if (err)
1049		goto err_out_free_tx_ring;
1050
1051	/* Note that the device driver_data is used to determine
1052	 * whether the port has been probed.
1053	 */
1054	dev_set_drvdata(&vdev->dev, port);
1055
1056	mdesc_release(hp);
1057
1058	return 0;
1059
1060err_out_free_tx_ring:
1061	vdc_free_tx_ring(port);
1062
1063err_out_free_ldc:
1064	vio_ldc_free(&port->vio);
1065
1066err_out_free_port:
1067	kfree(port);
1068
1069err_out_release_mdesc:
1070	mdesc_release(hp);
1071	return err;
1072}
1073
1074static int vdc_port_remove(struct vio_dev *vdev)
1075{
1076	struct vdc_port *port = dev_get_drvdata(&vdev->dev);
1077
1078	if (port) {
1079		blk_mq_stop_hw_queues(port->disk->queue);
1080
1081		flush_work(&port->ldc_reset_work);
1082		cancel_delayed_work_sync(&port->ldc_reset_timer_work);
1083		del_timer_sync(&port->vio.timer);
1084
1085		del_gendisk(port->disk);
1086		cleanup_queue(port->disk->queue);
1087		put_disk(port->disk);
1088		port->disk = NULL;
1089
1090		vdc_free_tx_ring(port);
1091		vio_ldc_free(&port->vio);
1092
1093		dev_set_drvdata(&vdev->dev, NULL);
1094
1095		kfree(port);
1096	}
1097	return 0;
1098}
1099
1100static void vdc_requeue_inflight(struct vdc_port *port)
1101{
1102	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
1103	u32 idx;
1104
1105	for (idx = dr->cons; idx != dr->prod; idx = vio_dring_next(dr, idx)) {
1106		struct vio_disk_desc *desc = vio_dring_entry(dr, idx);
1107		struct vdc_req_entry *rqe = &port->rq_arr[idx];
1108		struct request *req;
1109
1110		ldc_unmap(port->vio.lp, desc->cookies, desc->ncookies);
1111		desc->hdr.state = VIO_DESC_FREE;
1112		dr->cons = vio_dring_next(dr, idx);
1113
1114		req = rqe->req;
1115		if (req == NULL) {
1116			vdc_end_special(port, desc);
1117			continue;
1118		}
1119
1120		rqe->req = NULL;
1121		blk_mq_requeue_request(req, false);
1122	}
1123}
1124
1125static void vdc_queue_drain(struct vdc_port *port)
1126{
1127	struct request_queue *q = port->disk->queue;
1128
1129	/*
1130	 * Mark the queue as draining, then freeze/quiesce to ensure
1131	 * that all existing requests are seen in ->queue_rq() and killed
1132	 */
1133	port->drain = 1;
1134	spin_unlock_irq(&port->vio.lock);
1135
1136	blk_mq_freeze_queue(q);
1137	blk_mq_quiesce_queue(q);
1138
1139	spin_lock_irq(&port->vio.lock);
1140	port->drain = 0;
1141	blk_mq_unquiesce_queue(q);
1142	blk_mq_unfreeze_queue(q);
1143}
1144
1145static void vdc_ldc_reset_timer_work(struct work_struct *work)
1146{
1147	struct vdc_port *port;
1148	struct vio_driver_state *vio;
1149
1150	port = container_of(work, struct vdc_port, ldc_reset_timer_work.work);
1151	vio = &port->vio;
1152
1153	spin_lock_irq(&vio->lock);
1154	if (!(port->vio.hs_state & VIO_HS_COMPLETE)) {
1155		pr_warn(PFX "%s ldc down %llu seconds, draining queue\n",
1156			port->disk_name, port->ldc_timeout);
1157		vdc_queue_drain(port);
1158		vdc_blk_queue_start(port);
1159	}
1160	spin_unlock_irq(&vio->lock);
1161}
1162
1163static void vdc_ldc_reset_work(struct work_struct *work)
1164{
1165	struct vdc_port *port;
1166	struct vio_driver_state *vio;
1167	unsigned long flags;
1168
1169	port = container_of(work, struct vdc_port, ldc_reset_work);
1170	vio = &port->vio;
1171
1172	spin_lock_irqsave(&vio->lock, flags);
1173	vdc_ldc_reset(port);
1174	spin_unlock_irqrestore(&vio->lock, flags);
1175}
1176
1177static void vdc_ldc_reset(struct vdc_port *port)
1178{
1179	int err;
1180
1181	assert_spin_locked(&port->vio.lock);
1182
1183	pr_warn(PFX "%s ldc link reset\n", port->disk_name);
1184	blk_mq_stop_hw_queues(port->disk->queue);
1185	vdc_requeue_inflight(port);
1186	vdc_port_down(port);
1187
1188	err = vio_ldc_alloc(&port->vio, &vdc_ldc_cfg, port);
1189	if (err) {
1190		pr_err(PFX "%s vio_ldc_alloc:%d\n", port->disk_name, err);
1191		return;
1192	}
1193
1194	err = vdc_alloc_tx_ring(port);
1195	if (err) {
1196		pr_err(PFX "%s vio_alloc_tx_ring:%d\n", port->disk_name, err);
1197		goto err_free_ldc;
1198	}
1199
1200	if (port->ldc_timeout)
1201		mod_delayed_work(system_wq, &port->ldc_reset_timer_work,
1202			  round_jiffies(jiffies + HZ * port->ldc_timeout));
1203	mod_timer(&port->vio.timer, round_jiffies(jiffies + HZ));
1204	return;
1205
1206err_free_ldc:
1207	vio_ldc_free(&port->vio);
1208}
1209
1210static const struct vio_device_id vdc_port_match[] = {
1211	{
1212		.type = "vdc-port",
1213	},
1214	{},
1215};
1216MODULE_DEVICE_TABLE(vio, vdc_port_match);
1217
1218static struct vio_driver vdc_port_driver = {
1219	.id_table	= vdc_port_match,
1220	.probe		= vdc_port_probe,
1221	.remove		= vdc_port_remove,
1222	.name		= "vdc_port",
1223};
1224
1225static int __init vdc_init(void)
1226{
1227	int err;
1228
1229	sunvdc_wq = alloc_workqueue("sunvdc", 0, 0);
1230	if (!sunvdc_wq)
1231		return -ENOMEM;
1232
1233	err = register_blkdev(0, VDCBLK_NAME);
1234	if (err < 0)
1235		goto out_free_wq;
1236
1237	vdc_major = err;
1238
1239	err = vio_register_driver(&vdc_port_driver);
1240	if (err)
1241		goto out_unregister_blkdev;
1242
1243	return 0;
1244
1245out_unregister_blkdev:
1246	unregister_blkdev(vdc_major, VDCBLK_NAME);
1247	vdc_major = 0;
1248
1249out_free_wq:
1250	destroy_workqueue(sunvdc_wq);
1251	return err;
1252}
1253
1254static void __exit vdc_exit(void)
1255{
1256	vio_unregister_driver(&vdc_port_driver);
1257	unregister_blkdev(vdc_major, VDCBLK_NAME);
1258	destroy_workqueue(sunvdc_wq);
1259}
1260
1261module_init(vdc_init);
1262module_exit(vdc_exit);
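
__vdc_tx_trigger() in both versions retries vio_ldc_send() for as long as it returns -EAGAIN, backing off exponentially: the per-attempt udelay() starts at 1 microsecond, doubles on each failure, and is clamped at 128; v5.9 additionally gives up after VDC_MAX_RETRIES attempts and escalates -ENOTCONN into a full LDC reset via vdc_ldc_reset(). A small userspace sketch of that delay schedule, with printf() standing in for udelay() and illustrative constant names (DELAY_CAP_US is the "> 128" clamp, MAX_RETRIES mirrors v5.9's VDC_MAX_RETRIES):

#include <stdio.h>

#define DELAY_CAP_US	128	/* clamp from __vdc_tx_trigger() */
#define MAX_RETRIES	10	/* v5.9's VDC_MAX_RETRIES */

int main(void)
{
	int delay = 1;
	int retries;

	for (retries = 0; retries <= MAX_RETRIES; retries++) {
		/* the driver does udelay(delay) here after a failed send */
		printf("attempt %2d: back off %3d us\n", retries, delay);
		if ((delay <<= 1) > DELAY_CAP_US)
			delay = DELAY_CAP_US;
	}
	return 0;
}
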