// SPDX-License-Identifier: GPL-2.0
/* viohs.c: LDOM Virtual I/O handshake helper layer.
 *
 * Copyright (C) 2007 David S. Miller <davem@davemloft.net>
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/slab.h>

#include <asm/ldc.h>
#include <asm/vio.h>

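/* Transmit a message over the LDC channel, retrying a bounded number
 * of times (with a 1us delay) while the write path reports -EAGAIN.
 */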
int vio_ldc_send(struct vio_driver_state *vio, void *data, int len)
{
	int err, limit = 1000;

	err = -EINVAL;
	while (limit-- > 0) {
		err = ldc_write(vio->lp, data, len);
		if (!err || (err != -EAGAIN))
			break;
		udelay(1);
	}

	return err;
}
EXPORT_SYMBOL(vio_ldc_send);

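/* Helpers for building control messages: send_ctrl() stamps the
 * outgoing session ID before transmission, init_tag() fills in the
 * common message tag fields.
 */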
static int send_ctrl(struct vio_driver_state *vio,
		     struct vio_msg_tag *tag, int len)
{
	tag->sid = vio_send_sid(vio);
	return vio_ldc_send(vio, tag, len);
}

static void init_tag(struct vio_msg_tag *tag, u8 type, u8 stype, u16 stype_env)
{
	tag->type = type;
	tag->stype = stype;
	tag->stype_env = stype_env;
}

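/* Propose a protocol version to the peer; a fresh local session ID is
 * generated for each version proposal.
 */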
static int send_version(struct vio_driver_state *vio, u16 major, u16 minor)
{
	struct vio_ver_info pkt;

	vio->_local_sid = (u32) sched_clock();

	memset(&pkt, 0, sizeof(pkt));
	init_tag(&pkt.tag, VIO_TYPE_CTRL, VIO_SUBTYPE_INFO, VIO_VER_INFO);
	pkt.major = major;
	pkt.minor = minor;
	pkt.dev_class = vio->dev_class;

	viodbg(HS, "SEND VERSION INFO maj[%u] min[%u] devclass[%u]\n",
	       major, minor, vio->dev_class);

	return send_ctrl(vio, &pkt.tag, sizeof(pkt));
}

static int start_handshake(struct vio_driver_state *vio)
{
	int err;

	viodbg(HS, "START HANDSHAKE\n");

	vio->hs_state = VIO_HS_INVALID;

	err = send_version(vio,
			   vio->ver_table[0].major,
			   vio->ver_table[0].minor);
	if (err < 0)
		return err;

	return 0;
}

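/* Discard RX descriptor ring state after a link reset, keeping only
 * the ring identifier.
 */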
static void flush_rx_dring(struct vio_driver_state *vio)
{
	struct vio_dring_state *dr;
	u64 ident;

	BUG_ON(!(vio->dr_state & VIO_DR_STATE_RXREG));

	dr = &vio->drings[VIO_DRIVER_RX_RING];
	ident = dr->ident;

	BUG_ON(!vio->desc_buf);
	kfree(vio->desc_buf);
	vio->desc_buf = NULL;

	memset(dr, 0, sizeof(*dr));
	dr->ident = ident;
}

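/* LDC link event handler. On LDC_EVENT_UP the required descriptor
 * rings are derived from the device class and the handshake is
 * started; on LDC_EVENT_RESET the handshake and ring state is torn
 * down and the channel is disconnected.
 */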
void vio_link_state_change(struct vio_driver_state *vio, int event)
{
	if (event == LDC_EVENT_UP) {
		vio->hs_state = VIO_HS_INVALID;

		switch (vio->dev_class) {
		case VDEV_NETWORK:
		case VDEV_NETWORK_SWITCH:
			vio->dr_state = (VIO_DR_STATE_TXREQ |
					 VIO_DR_STATE_RXREQ);
			break;

		case VDEV_DISK:
			vio->dr_state = VIO_DR_STATE_TXREQ;
			break;
		case VDEV_DISK_SERVER:
			vio->dr_state = VIO_DR_STATE_RXREQ;
			break;
		}
		start_handshake(vio);
	} else if (event == LDC_EVENT_RESET) {
		vio->hs_state = VIO_HS_INVALID;

		if (vio->dr_state & VIO_DR_STATE_RXREG)
			flush_rx_dring(vio);

		vio->dr_state = 0x00;
		memset(&vio->ver, 0, sizeof(vio->ver));

		ldc_disconnect(vio->lp);
	}
}
EXPORT_SYMBOL(vio_link_state_change);

static int handshake_failure(struct vio_driver_state *vio)
{
	struct vio_dring_state *dr;

	/* XXX Put policy here... Perhaps start a timer to fire
	 * XXX in 100 ms, which will bring the link up and retry
	 * XXX the handshake.
	 */

	viodbg(HS, "HANDSHAKE FAILURE\n");

	vio->dr_state &= ~(VIO_DR_STATE_TXREG |
			   VIO_DR_STATE_RXREG);

	dr = &vio->drings[VIO_DRIVER_RX_RING];
	memset(dr, 0, sizeof(*dr));

	kfree(vio->desc_buf);
	vio->desc_buf = NULL;
	vio->desc_buf_len = 0;

	vio->hs_state = VIO_HS_INVALID;

	return -ECONNRESET;
}

static int process_unknown(struct vio_driver_state *vio, void *arg)
{
	struct vio_msg_tag *pkt = arg;

	viodbg(HS, "UNKNOWN CONTROL [%02x:%02x:%04x:%08x]\n",
	       pkt->type, pkt->stype, pkt->stype_env, pkt->sid);

	printk(KERN_ERR "vio: ID[%lu] Resetting connection.\n",
	       vio->vdev->channel_id);

	ldc_disconnect(vio->lp);

	return -ECONNRESET;
}

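/* Register our TX descriptor ring with the peer. The packet carries a
 * variable number of LDC transfer cookies, so only the used portion of
 * the buffer is initialized and sent.
 */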
static int send_dreg(struct vio_driver_state *vio)
{
	struct vio_dring_state *dr = &vio->drings[VIO_DRIVER_TX_RING];
	union {
		struct vio_dring_register pkt;
		char all[sizeof(struct vio_dring_register) +
			 (sizeof(struct ldc_trans_cookie) *
			  VIO_MAX_RING_COOKIES)];
	} u;
	size_t bytes = sizeof(struct vio_dring_register) +
		       (sizeof(struct ldc_trans_cookie) *
			dr->ncookies);
	int i;

	if (WARN_ON(bytes > sizeof(u)))
		return -EINVAL;

	memset(&u, 0, bytes);
	init_tag(&u.pkt.tag, VIO_TYPE_CTRL, VIO_SUBTYPE_INFO, VIO_DRING_REG);
	u.pkt.dring_ident = 0;
	u.pkt.num_descr = dr->num_entries;
	u.pkt.descr_size = dr->entry_size;
	u.pkt.options = VIO_TX_DRING;
	u.pkt.num_cookies = dr->ncookies;

	viodbg(HS, "SEND DRING_REG INFO ndesc[%u] dsz[%u] opt[0x%x] "
	       "ncookies[%u]\n",
	       u.pkt.num_descr, u.pkt.descr_size, u.pkt.options,
	       u.pkt.num_cookies);

	for (i = 0; i < dr->ncookies; i++) {
		u.pkt.cookies[i] = dr->cookies[i];

		viodbg(HS, "DRING COOKIE(%d) [%016llx:%016llx]\n",
		       i,
		       (unsigned long long) u.pkt.cookies[i].cookie_addr,
		       (unsigned long long) u.pkt.cookies[i].cookie_size);
	}

	return send_ctrl(vio, &u.pkt.tag, bytes);
}

static int send_rdx(struct vio_driver_state *vio)
{
	struct vio_rdx pkt;

	memset(&pkt, 0, sizeof(pkt));

	init_tag(&pkt.tag, VIO_TYPE_CTRL, VIO_SUBTYPE_INFO, VIO_RDX);

	viodbg(HS, "SEND RDX INFO\n");

	return send_ctrl(vio, &pkt.tag, sizeof(pkt));
}

static int send_attr(struct vio_driver_state *vio)
{
	if (!vio->ops)
		return -EINVAL;

	return vio->ops->send_attr(vio);
}

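/* The version table is expected to be ordered from highest to lowest
 * major number; return the first entry whose major is <= the requested
 * one, or NULL if none qualifies.
 */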
static struct vio_version *find_by_major(struct vio_driver_state *vio,
					 u16 major)
{
	struct vio_version *ret = NULL;
	int i;

	for (i = 0; i < vio->ver_table_entries; i++) {
		struct vio_version *v = &vio->ver_table[i];
		if (v->major <= major) {
			ret = v;
			break;
		}
	}
	return ret;
}

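/* Peer proposed a version: NACK with 0.0 if nothing is acceptable,
 * NACK with our closest supported major if the majors differ, or ACK
 * (possibly lowering the minor) and record the negotiated version.
 */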
static int process_ver_info(struct vio_driver_state *vio,
			    struct vio_ver_info *pkt)
{
	struct vio_version *vap;
	int err;

	viodbg(HS, "GOT VERSION INFO maj[%u] min[%u] devclass[%u]\n",
	       pkt->major, pkt->minor, pkt->dev_class);

	if (vio->hs_state != VIO_HS_INVALID) {
		/* XXX Perhaps invoke start_handshake? XXX */
		memset(&vio->ver, 0, sizeof(vio->ver));
		vio->hs_state = VIO_HS_INVALID;
	}

	vap = find_by_major(vio, pkt->major);

	vio->_peer_sid = pkt->tag.sid;

	if (!vap) {
		pkt->tag.stype = VIO_SUBTYPE_NACK;
		pkt->major = 0;
		pkt->minor = 0;
		viodbg(HS, "SEND VERSION NACK maj[0] min[0]\n");
		err = send_ctrl(vio, &pkt->tag, sizeof(*pkt));
	} else if (vap->major != pkt->major) {
		pkt->tag.stype = VIO_SUBTYPE_NACK;
		pkt->major = vap->major;
		pkt->minor = vap->minor;
		viodbg(HS, "SEND VERSION NACK maj[%u] min[%u]\n",
		       pkt->major, pkt->minor);
		err = send_ctrl(vio, &pkt->tag, sizeof(*pkt));
	} else {
		struct vio_version ver = {
			.major = pkt->major,
			.minor = pkt->minor,
		};
		if (ver.minor > vap->minor)
			ver.minor = vap->minor;
		pkt->minor = ver.minor;
		pkt->tag.stype = VIO_SUBTYPE_ACK;
		pkt->dev_class = vio->dev_class;
		viodbg(HS, "SEND VERSION ACK maj[%u] min[%u]\n",
		       pkt->major, pkt->minor);
		err = send_ctrl(vio, &pkt->tag, sizeof(*pkt));
		if (err > 0) {
			vio->ver = ver;
			vio->hs_state = VIO_HS_GOTVERS;
		}
	}
	if (err < 0)
		return handshake_failure(vio);

	return 0;
}

static int process_ver_ack(struct vio_driver_state *vio,
			   struct vio_ver_info *pkt)
{
	viodbg(HS, "GOT VERSION ACK maj[%u] min[%u] devclass[%u]\n",
	       pkt->major, pkt->minor, pkt->dev_class);

	if (vio->hs_state & VIO_HS_GOTVERS) {
		if (vio->ver.major != pkt->major ||
		    vio->ver.minor != pkt->minor) {
			pkt->tag.stype = VIO_SUBTYPE_NACK;
			(void) send_ctrl(vio, &pkt->tag, sizeof(*pkt));
			return handshake_failure(vio);
		}
	} else {
		vio->ver.major = pkt->major;
		vio->ver.minor = pkt->minor;
		vio->hs_state = VIO_HS_GOTVERS;
	}

	switch (vio->dev_class) {
	case VDEV_NETWORK:
	case VDEV_DISK:
		if (send_attr(vio) < 0)
			return handshake_failure(vio);
		break;

	default:
		break;
	}

	return 0;
}

static int process_ver_nack(struct vio_driver_state *vio,
			    struct vio_ver_info *pkt)
{
	struct vio_version *nver;

	viodbg(HS, "GOT VERSION NACK maj[%u] min[%u] devclass[%u]\n",
	       pkt->major, pkt->minor, pkt->dev_class);

	if (pkt->major == 0 && pkt->minor == 0)
		return handshake_failure(vio);
	nver = find_by_major(vio, pkt->major);
	if (!nver)
		return handshake_failure(vio);

	if (send_version(vio, nver->major, nver->minor) < 0)
		return handshake_failure(vio);

	return 0;
}

static int process_ver(struct vio_driver_state *vio, struct vio_ver_info *pkt)
{
	switch (pkt->tag.stype) {
	case VIO_SUBTYPE_INFO:
		return process_ver_info(vio, pkt);

	case VIO_SUBTYPE_ACK:
		return process_ver_ack(vio, pkt);

	case VIO_SUBTYPE_NACK:
		return process_ver_nack(vio, pkt);

	default:
		return handshake_failure(vio);
	}
}

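/* Attribute exchange step. Once attributes are accepted, register our
 * TX ring if this device class requires one and it has not been
 * registered yet.
 */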
static int process_attr(struct vio_driver_state *vio, void *pkt)
{
	int err;

	if (!(vio->hs_state & VIO_HS_GOTVERS))
		return handshake_failure(vio);

	if (!vio->ops)
		return 0;

	err = vio->ops->handle_attr(vio, pkt);
	if (err < 0) {
		return handshake_failure(vio);
	} else {
		vio->hs_state |= VIO_HS_GOT_ATTR;

		if ((vio->dr_state & VIO_DR_STATE_TXREQ) &&
		    !(vio->hs_state & VIO_HS_SENT_DREG)) {
			if (send_dreg(vio) < 0)
				return handshake_failure(vio);

			vio->hs_state |= VIO_HS_SENT_DREG;
		}
	}

	return 0;
}

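/* True once every descriptor ring this device class requires has been
 * registered with the peer.
 */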
static int all_drings_registered(struct vio_driver_state *vio)
{
	int need_rx, need_tx;

	need_rx = (vio->dr_state & VIO_DR_STATE_RXREQ);
	need_tx = (vio->dr_state & VIO_DR_STATE_TXREQ);

	if (need_rx &&
	    !(vio->dr_state & VIO_DR_STATE_RXREG))
		return 0;

	if (need_tx &&
	    !(vio->dr_state & VIO_DR_STATE_TXREG))
		return 0;

	return 1;
}

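/* Peer is registering its descriptor ring (our RX ring): validate the
 * request, allocate a buffer sized for one descriptor, record the ring
 * geometry and cookies, then ACK with a newly assigned ident.
 */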
static int process_dreg_info(struct vio_driver_state *vio,
			     struct vio_dring_register *pkt)
{
	struct vio_dring_state *dr;
	int i, len;

	viodbg(HS, "GOT DRING_REG INFO ident[%llx] "
	       "ndesc[%u] dsz[%u] opt[0x%x] ncookies[%u]\n",
	       (unsigned long long) pkt->dring_ident,
	       pkt->num_descr, pkt->descr_size, pkt->options,
	       pkt->num_cookies);

	if (!(vio->dr_state & VIO_DR_STATE_RXREQ))
		goto send_nack;

	if (vio->dr_state & VIO_DR_STATE_RXREG)
		goto send_nack;

	/* v1.6 and higher, ACK with desired, supported mode, or NACK */
	if (vio_version_after_eq(vio, 1, 6)) {
		if (!(pkt->options & VIO_TX_DRING))
			goto send_nack;
		pkt->options = VIO_TX_DRING;
	}

	BUG_ON(vio->desc_buf);

	vio->desc_buf = kzalloc(pkt->descr_size, GFP_ATOMIC);
	if (!vio->desc_buf)
		goto send_nack;

	vio->desc_buf_len = pkt->descr_size;

	dr = &vio->drings[VIO_DRIVER_RX_RING];

	dr->num_entries = pkt->num_descr;
	dr->entry_size = pkt->descr_size;
	dr->ncookies = pkt->num_cookies;
	for (i = 0; i < dr->ncookies; i++) {
		dr->cookies[i] = pkt->cookies[i];

		viodbg(HS, "DRING COOKIE(%d) [%016llx:%016llx]\n",
		       i,
		       (unsigned long long)
		       pkt->cookies[i].cookie_addr,
		       (unsigned long long)
		       pkt->cookies[i].cookie_size);
	}

	pkt->tag.stype = VIO_SUBTYPE_ACK;
	pkt->dring_ident = ++dr->ident;

	viodbg(HS, "SEND DRING_REG ACK ident[%llx] "
	       "ndesc[%u] dsz[%u] opt[0x%x] ncookies[%u]\n",
	       (unsigned long long) pkt->dring_ident,
	       pkt->num_descr, pkt->descr_size, pkt->options,
	       pkt->num_cookies);

	len = (sizeof(*pkt) +
	       (dr->ncookies * sizeof(struct ldc_trans_cookie)));
	if (send_ctrl(vio, &pkt->tag, len) < 0)
		goto send_nack;

	vio->dr_state |= VIO_DR_STATE_RXREG;

	return 0;

send_nack:
	pkt->tag.stype = VIO_SUBTYPE_NACK;
	viodbg(HS, "SEND DRING_REG NACK\n");
	(void) send_ctrl(vio, &pkt->tag, sizeof(*pkt));

	return handshake_failure(vio);
}

static int process_dreg_ack(struct vio_driver_state *vio,
			    struct vio_dring_register *pkt)
{
	struct vio_dring_state *dr;

	viodbg(HS, "GOT DRING_REG ACK ident[%llx] "
	       "ndesc[%u] dsz[%u] opt[0x%x] ncookies[%u]\n",
	       (unsigned long long) pkt->dring_ident,
	       pkt->num_descr, pkt->descr_size, pkt->options,
	       pkt->num_cookies);

	dr = &vio->drings[VIO_DRIVER_TX_RING];

	if (!(vio->dr_state & VIO_DR_STATE_TXREQ))
		return handshake_failure(vio);

	dr->ident = pkt->dring_ident;
	vio->dr_state |= VIO_DR_STATE_TXREG;

	if (all_drings_registered(vio)) {
		if (send_rdx(vio) < 0)
			return handshake_failure(vio);
		vio->hs_state = VIO_HS_SENT_RDX;
	}
	return 0;
}

static int process_dreg_nack(struct vio_driver_state *vio,
			     struct vio_dring_register *pkt)
{
	viodbg(HS, "GOT DRING_REG NACK ident[%llx] "
	       "ndesc[%u] dsz[%u] opt[0x%x] ncookies[%u]\n",
	       (unsigned long long) pkt->dring_ident,
	       pkt->num_descr, pkt->descr_size, pkt->options,
	       pkt->num_cookies);

	return handshake_failure(vio);
}

static int process_dreg(struct vio_driver_state *vio,
			struct vio_dring_register *pkt)
{
	if (!(vio->hs_state & VIO_HS_GOTVERS))
		return handshake_failure(vio);

	switch (pkt->tag.stype) {
	case VIO_SUBTYPE_INFO:
		return process_dreg_info(vio, pkt);

	case VIO_SUBTYPE_ACK:
		return process_dreg_ack(vio, pkt);

	case VIO_SUBTYPE_NACK:
		return process_dreg_nack(vio, pkt);

	default:
		return handshake_failure(vio);
	}
}

static int process_dunreg(struct vio_driver_state *vio,
			  struct vio_dring_unregister *pkt)
{
	struct vio_dring_state *dr = &vio->drings[VIO_DRIVER_RX_RING];

	viodbg(HS, "GOT DRING_UNREG\n");

	if (pkt->dring_ident != dr->ident)
		return 0;

	vio->dr_state &= ~VIO_DR_STATE_RXREG;

	memset(dr, 0, sizeof(*dr));

	kfree(vio->desc_buf);
	vio->desc_buf = NULL;
	vio->desc_buf_len = 0;

	return 0;
}

static int process_rdx_info(struct vio_driver_state *vio, struct vio_rdx *pkt)
{
	viodbg(HS, "GOT RDX INFO\n");

	pkt->tag.stype = VIO_SUBTYPE_ACK;
	viodbg(HS, "SEND RDX ACK\n");
	if (send_ctrl(vio, &pkt->tag, sizeof(*pkt)) < 0)
		return handshake_failure(vio);

	vio->hs_state |= VIO_HS_SENT_RDX_ACK;
	return 0;
}

static int process_rdx_ack(struct vio_driver_state *vio, struct vio_rdx *pkt)
{
	viodbg(HS, "GOT RDX ACK\n");

	if (!(vio->hs_state & VIO_HS_SENT_RDX))
		return handshake_failure(vio);

	vio->hs_state |= VIO_HS_GOT_RDX_ACK;
	return 0;
}

static int process_rdx_nack(struct vio_driver_state *vio, struct vio_rdx *pkt)
{
	viodbg(HS, "GOT RDX NACK\n");

	return handshake_failure(vio);
}

static int process_rdx(struct vio_driver_state *vio, struct vio_rdx *pkt)
{
	if (!all_drings_registered(vio))
		handshake_failure(vio);

	switch (pkt->tag.stype) {
	case VIO_SUBTYPE_INFO:
		return process_rdx_info(vio, pkt);

	case VIO_SUBTYPE_ACK:
		return process_rdx_ack(vio, pkt);

	case VIO_SUBTYPE_NACK:
		return process_rdx_nack(vio, pkt);

	default:
		return handshake_failure(vio);
	}
}

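/* Top-level dispatcher for handshake control packets. When a packet
 * moves the handshake into the COMPLETE state, the driver's
 * handshake_complete() callback (if ops are provided) is invoked.
 */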
int vio_control_pkt_engine(struct vio_driver_state *vio, void *pkt)
{
	struct vio_msg_tag *tag = pkt;
	u8 prev_state = vio->hs_state;
	int err;

	switch (tag->stype_env) {
	case VIO_VER_INFO:
		err = process_ver(vio, pkt);
		break;

	case VIO_ATTR_INFO:
		err = process_attr(vio, pkt);
		break;

	case VIO_DRING_REG:
		err = process_dreg(vio, pkt);
		break;

	case VIO_DRING_UNREG:
		err = process_dunreg(vio, pkt);
		break;

	case VIO_RDX:
		err = process_rdx(vio, pkt);
		break;

	default:
		err = process_unknown(vio, pkt);
		break;
	}

	if (!err &&
	    vio->hs_state != prev_state &&
	    (vio->hs_state & VIO_HS_COMPLETE)) {
		if (vio->ops)
			vio->ops->handshake_complete(vio);
	}

	return err;
}
EXPORT_SYMBOL(vio_control_pkt_engine);

void vio_conn_reset(struct vio_driver_state *vio)
{
}
EXPORT_SYMBOL(vio_conn_reset);

/* The issue is that the Solaris virtual disk server just mirrors the
 * SID values it gets from the client peer. So we work around that
 * here in vio_{validate,send}_sid() so that the drivers don't need
 * to be aware of this crap.
 */
int vio_validate_sid(struct vio_driver_state *vio, struct vio_msg_tag *tp)
{
	u32 sid;

	/* Always let VERSION+INFO packets through unchecked, they
	 * define the new SID.
	 */
	if (tp->type == VIO_TYPE_CTRL &&
	    tp->stype == VIO_SUBTYPE_INFO &&
	    tp->stype_env == VIO_VER_INFO)
		return 0;

	/* Ok, now figure out which SID to use. */
	switch (vio->dev_class) {
	case VDEV_NETWORK:
	case VDEV_NETWORK_SWITCH:
	case VDEV_DISK_SERVER:
	default:
		sid = vio->_peer_sid;
		break;

	case VDEV_DISK:
		sid = vio->_local_sid;
		break;
	}

	if (sid == tp->sid)
		return 0;
	viodbg(DATA, "BAD SID tag->sid[%08x] peer_sid[%08x] local_sid[%08x]\n",
	       tp->sid, vio->_peer_sid, vio->_local_sid);
	return -EINVAL;
}
EXPORT_SYMBOL(vio_validate_sid);

u32 vio_send_sid(struct vio_driver_state *vio)
{
	switch (vio->dev_class) {
	case VDEV_NETWORK:
	case VDEV_NETWORK_SWITCH:
	case VDEV_DISK:
	default:
		return vio->_local_sid;

	case VDEV_DISK_SERVER:
		return vio->_peer_sid;
	}
}
EXPORT_SYMBOL(vio_send_sid);

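/* Allocate the LDC channel for this device, using the caller's base
 * configuration with the IRQs and channel ID taken from the vio_dev.
 */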
int vio_ldc_alloc(struct vio_driver_state *vio,
		  struct ldc_channel_config *base_cfg,
		  void *event_arg)
{
	struct ldc_channel_config cfg = *base_cfg;
	struct ldc_channel *lp;

	cfg.tx_irq = vio->vdev->tx_irq;
	cfg.rx_irq = vio->vdev->rx_irq;

	lp = ldc_alloc(vio->vdev->channel_id, &cfg, event_arg, vio->name);
	if (IS_ERR(lp))
		return PTR_ERR(lp);

	vio->lp = lp;

	return 0;
}
EXPORT_SYMBOL(vio_ldc_alloc);

void vio_ldc_free(struct vio_driver_state *vio)
{
	ldc_free(vio->lp);
	vio->lp = NULL;

	kfree(vio->desc_buf);
	vio->desc_buf = NULL;
	vio->desc_buf_len = 0;
}
EXPORT_SYMBOL(vio_ldc_free);

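/* Bring the port up: bind the channel if necessary and initiate the
 * connection. On failure the port timer is re-armed to retry roughly
 * one second later.
 */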
void vio_port_up(struct vio_driver_state *vio)
{
	unsigned long flags;
	int err, state;

	spin_lock_irqsave(&vio->lock, flags);

	state = ldc_state(vio->lp);

	err = 0;
	if (state == LDC_STATE_INIT) {
		err = ldc_bind(vio->lp);
		if (err)
			printk(KERN_WARNING "%s: Port %lu bind failed, "
			       "err=%d\n",
			       vio->name, vio->vdev->channel_id, err);
	}

	if (!err) {
		if (ldc_mode(vio->lp) == LDC_MODE_RAW)
			ldc_set_state(vio->lp, LDC_STATE_CONNECTED);
		else
			err = ldc_connect(vio->lp);

		if (err)
			printk(KERN_WARNING "%s: Port %lu connect failed, "
			       "err=%d\n",
			       vio->name, vio->vdev->channel_id, err);
	}
	if (err) {
		unsigned long expires = jiffies + HZ;

		expires = round_jiffies(expires);
		mod_timer(&vio->timer, expires);
	}

	spin_unlock_irqrestore(&vio->lock, flags);
}
EXPORT_SYMBOL(vio_port_up);

static void vio_port_timer(struct timer_list *t)
{
	struct vio_driver_state *vio = from_timer(vio, t, timer);

	vio_port_up(vio);
}

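/* Validate the device class, driver ops and version table, then
 * initialize the common driver state and the retry timer.
 */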
int vio_driver_init(struct vio_driver_state *vio, struct vio_dev *vdev,
		    u8 dev_class, struct vio_version *ver_table,
		    int ver_table_size, struct vio_driver_ops *ops,
		    char *name)
{
	switch (dev_class) {
	case VDEV_NETWORK:
	case VDEV_NETWORK_SWITCH:
	case VDEV_DISK:
	case VDEV_DISK_SERVER:
	case VDEV_CONSOLE_CON:
		break;

	default:
		return -EINVAL;
	}

	if (dev_class == VDEV_NETWORK ||
	    dev_class == VDEV_NETWORK_SWITCH ||
	    dev_class == VDEV_DISK ||
	    dev_class == VDEV_DISK_SERVER) {
		if (!ops || !ops->send_attr || !ops->handle_attr ||
		    !ops->handshake_complete)
			return -EINVAL;
	}

	if (!ver_table || ver_table_size < 0)
		return -EINVAL;

	if (!name)
		return -EINVAL;

	spin_lock_init(&vio->lock);

	vio->name = name;

	vio->dev_class = dev_class;
	vio->vdev = vdev;

	vio->ver_table = ver_table;
	vio->ver_table_entries = ver_table_size;

	vio->ops = ops;

	timer_setup(&vio->timer, vio_port_timer, 0);

	return 0;
}
EXPORT_SYMBOL(vio_driver_init);