/*
 * drivers/s390/net/claw.c
 *    ESCON CLAW network driver
 *
 * Linux for zSeries version
 *    Copyright IBM Corp. 2002, 2009
 * Author(s): Original code written by:
 *		Kazuo Iimura <iimura@jp.ibm.com>
 *	Rewritten by:
 *		Andy Richter <richtera@us.ibm.com>
 *		Marc Price <mwprice@us.ibm.com>
 *
 * sysfs parms:
 *	group		x.x.rrrr,x.x.wwww
 *	read_buffer	nnnnnnn
 *	write_buffer	nnnnnn
 *	host_name	aaaaaaaa
 *	adapter_name	aaaaaaaa
 *	api_type	aaaaaaaa
 *
 * eg.
 *	group		0.0.0200 0.0.0201
 *	read_buffer	25
 *	write_buffer	20
 *	host_name	LINUX390
 *	adapter_name	RS6K
 *	api_type	TCPIP
 *
 * where
 *
 *	The device id is decided by the order in which entries are added
 *	to the group: the first is claw0, the second claw1, and so on up
 *	to CLAW_MAX_DEV.
 *
 *	rrrr - the first of 2 consecutive device addresses used for the
 *	       CLAW protocol.
 *	       The specified address is always used as the input (Read)
 *	       channel and the next address is used as the output channel.
 *
 *	wwww - the second of 2 consecutive device addresses used for
 *	       the CLAW protocol.
 *	       The specified address is always used as the output
 *	       channel and the previous address is used as the input channel.
 *
 *	read_buffer	- specifies the number of input buffers to allocate.
 *	write_buffer	- specifies the number of output buffers to allocate.
 *	host_name	- host name
 *	adapter_name	- adapter name
 *	api_type	- API type; TCPIP or API will be sent and expected
 *			  as ws_name
 *
 *	Note the following requirements:
 *	1) host_name must match the configured adapter_name on the remote side
 *	2) adapter_name must match the configured host name on the remote side
 *
 * Change History
 *	1.00  Initial release shipped
 *	1.10  Changes for buffer allocation
 *	1.15  Changed for the 2.6 kernel; no longer compiles on 2.4 or lower
 *	1.25  Added packing support
 *	1.5
 */
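/*
 * Illustrative configuration sketch (not part of the driver): once a claw
 * group device has been created via the driver's "group" attribute (see
 * claw_driver_group_store() below), the per-device sysfs attributes listed
 * above could be set from the shell roughly as follows, assuming the usual
 * /sys/bus/ccwgroup layout; the exact paths depend on the bus IDs in use.
 *
 *	echo LINUX390 > /sys/bus/ccwgroup/devices/0.0.0200/host_name
 *	echo RS6K     > /sys/bus/ccwgroup/devices/0.0.0200/adapter_name
 *	echo TCPIP    > /sys/bus/ccwgroup/devices/0.0.0200/api_type
 *	echo 25       > /sys/bus/ccwgroup/devices/0.0.0200/read_buffer
 *	echo 20       > /sys/bus/ccwgroup/devices/0.0.0200/write_buffer
 *	echo 1        > /sys/bus/ccwgroup/devices/0.0.0200/online
 */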

#define KMSG_COMPONENT "claw"

#include <linux/kernel_stat.h>
#include <asm/ccwdev.h>
#include <asm/ccwgroup.h>
#include <asm/debug.h>
#include <asm/idals.h>
#include <asm/io.h>
#include <linux/bitops.h>
#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/if_arp.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/proc_fs.h>
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/tcp.h>
#include <linux/timer.h>
#include <linux/types.h>

#include "claw.h"

/*
 * CLAW uses the s390dbf file system; see claw_trace and claw_setup.
 */

static char version[] __initdata = "CLAW driver";
static char debug_buffer[255];
/**
 * Debug Facility Stuff
 */
static debug_info_t *claw_dbf_setup;
static debug_info_t *claw_dbf_trace;

/**
 * CLAW Debug Facility functions
 */
static void
claw_unregister_debug_facility(void)
{
	if (claw_dbf_setup)
		debug_unregister(claw_dbf_setup);
	if (claw_dbf_trace)
		debug_unregister(claw_dbf_trace);
}

static int
claw_register_debug_facility(void)
{
	claw_dbf_setup = debug_register("claw_setup", 2, 1, 8);
	claw_dbf_trace = debug_register("claw_trace", 2, 2, 8);
	if (claw_dbf_setup == NULL || claw_dbf_trace == NULL) {
		claw_unregister_debug_facility();
		return -ENOMEM;
	}
	debug_register_view(claw_dbf_setup, &debug_hex_ascii_view);
	debug_set_level(claw_dbf_setup, 2);
	debug_register_view(claw_dbf_trace, &debug_hex_ascii_view);
	debug_set_level(claw_dbf_trace, 2);
	return 0;
}

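/*
 * Helpers for the driver-private tbusy word in struct claw_privbk.
 * claw_set_busy()/claw_clear_busy() drive the overall "transmit busy"
 * flag (bit 0), while the *bit_busy() variants set, clear and test
 * individual condition bits (TB_TX, TB_RETRY, TB_STOP, TB_NOBUFFER as
 * used below) and stop or wake the netdev queue accordingly.
 */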
static inline void
claw_set_busy(struct net_device *dev)
{
	((struct claw_privbk *)dev->ml_priv)->tbusy = 1;
	eieio();
}

static inline void
claw_clear_busy(struct net_device *dev)
{
	clear_bit(0, &(((struct claw_privbk *) dev->ml_priv)->tbusy));
	netif_wake_queue(dev);
	eieio();
}

static inline int
claw_check_busy(struct net_device *dev)
{
	eieio();
	return ((struct claw_privbk *) dev->ml_priv)->tbusy;
}

static inline void
claw_setbit_busy(int nr, struct net_device *dev)
{
	netif_stop_queue(dev);
	set_bit(nr, (void *)&(((struct claw_privbk *)dev->ml_priv)->tbusy));
}

static inline void
claw_clearbit_busy(int nr, struct net_device *dev)
{
	clear_bit(nr, (void *)&(((struct claw_privbk *)dev->ml_priv)->tbusy));
	netif_wake_queue(dev);
}

static inline int
claw_test_and_setbit_busy(int nr, struct net_device *dev)
{
	netif_stop_queue(dev);
	return test_and_set_bit(nr,
		(void *)&(((struct claw_privbk *) dev->ml_priv)->tbusy));
}


/* Functions for the DEV methods */

static int claw_probe(struct ccwgroup_device *cgdev);
static void claw_remove_device(struct ccwgroup_device *cgdev);
static void claw_purge_skb_queue(struct sk_buff_head *q);
static int claw_new_device(struct ccwgroup_device *cgdev);
static int claw_shutdown_device(struct ccwgroup_device *cgdev);
static int claw_tx(struct sk_buff *skb, struct net_device *dev);
static int claw_change_mtu(struct net_device *dev, int new_mtu);
static int claw_open(struct net_device *dev);
static void claw_irq_handler(struct ccw_device *cdev,
	unsigned long intparm, struct irb *irb);
static void claw_irq_tasklet(unsigned long data);
static int claw_release(struct net_device *dev);
static void claw_write_retry(struct chbk *p_ch);
static void claw_write_next(struct chbk *p_ch);
static void claw_timer(struct chbk *p_ch);

/* Functions */
static int add_claw_reads(struct net_device *dev,
	struct ccwbk *p_first, struct ccwbk *p_last);
static void ccw_check_return_code(struct ccw_device *cdev, int return_code);
static void ccw_check_unit_check(struct chbk *p_ch, unsigned char sense);
static int find_link(struct net_device *dev, char *host_name, char *ws_name);
static int claw_hw_tx(struct sk_buff *skb, struct net_device *dev, long linkid);
static int init_ccw_bk(struct net_device *dev);
static void probe_error(struct ccwgroup_device *cgdev);
static struct net_device_stats *claw_stats(struct net_device *dev);
static int pages_to_order_of_mag(int num_of_pages);
static struct sk_buff *claw_pack_skb(struct claw_privbk *privptr);
/* sysfs Functions */
static ssize_t claw_hname_show(struct device *dev,
	struct device_attribute *attr, char *buf);
static ssize_t claw_hname_write(struct device *dev,
	struct device_attribute *attr,
	const char *buf, size_t count);
static ssize_t claw_adname_show(struct device *dev,
	struct device_attribute *attr, char *buf);
static ssize_t claw_adname_write(struct device *dev,
	struct device_attribute *attr,
	const char *buf, size_t count);
static ssize_t claw_apname_show(struct device *dev,
	struct device_attribute *attr, char *buf);
static ssize_t claw_apname_write(struct device *dev,
	struct device_attribute *attr,
	const char *buf, size_t count);
static ssize_t claw_wbuff_show(struct device *dev,
	struct device_attribute *attr, char *buf);
static ssize_t claw_wbuff_write(struct device *dev,
	struct device_attribute *attr,
	const char *buf, size_t count);
static ssize_t claw_rbuff_show(struct device *dev,
	struct device_attribute *attr, char *buf);
static ssize_t claw_rbuff_write(struct device *dev,
	struct device_attribute *attr,
	const char *buf, size_t count);
static int claw_add_files(struct device *dev);
static void claw_remove_files(struct device *dev);

/* Functions for System Validate */
static int claw_process_control(struct net_device *dev, struct ccwbk *p_ccw);
static int claw_send_control(struct net_device *dev, __u8 type, __u8 link,
	__u8 correlator, __u8 rc, char *local_name, char *remote_name);
static int claw_snd_conn_req(struct net_device *dev, __u8 link);
static int claw_snd_disc(struct net_device *dev, struct clawctl *p_ctl);
static int claw_snd_sys_validate_rsp(struct net_device *dev,
	struct clawctl *p_ctl, __u32 return_code);
static int claw_strt_conn_req(struct net_device *dev);
static void claw_strt_read(struct net_device *dev, int lock);
static void claw_strt_out_IO(struct net_device *dev);
static void claw_free_wrt_buf(struct net_device *dev);

/* Functions for unpack reads */
static void unpack_read(struct net_device *dev);

static int claw_pm_prepare(struct ccwgroup_device *gdev)
{
	return -EPERM;
}

/* the root device for claw group devices */
static struct device *claw_root_dev;

/* ccwgroup table */

static struct ccwgroup_driver claw_group_driver = {
	.driver = {
		.owner	= THIS_MODULE,
		.name	= "claw",
	},
	.max_slaves	= 2,
	.driver_id	= 0xC3D3C1E6,
	.probe		= claw_probe,
	.remove		= claw_remove_device,
	.set_online	= claw_new_device,
	.set_offline	= claw_shutdown_device,
	.prepare	= claw_pm_prepare,
};

static struct ccw_device_id claw_ids[] = {
	{CCW_DEVICE(0x3088, 0x61), .driver_info = claw_channel_type_claw},
	{},
};
MODULE_DEVICE_TABLE(ccw, claw_ids);

static struct ccw_driver claw_ccw_driver = {
	.driver = {
		.owner	= THIS_MODULE,
		.name	= "claw",
	},
	.ids	= claw_ids,
	.probe	= ccwgroup_probe_ccwdev,
	.remove	= ccwgroup_remove_ccwdev,
};

static ssize_t
claw_driver_group_store(struct device_driver *ddrv, const char *buf,
			size_t count)
{
	int err;
	err = ccwgroup_create_from_string(claw_root_dev,
					  claw_group_driver.driver_id,
					  &claw_ccw_driver, 2, buf);
	return err ? err : count;
}

static DRIVER_ATTR(group, 0200, NULL, claw_driver_group_store);

static struct attribute *claw_group_attrs[] = {
	&driver_attr_group.attr,
	NULL,
};

static struct attribute_group claw_group_attr_group = {
	.attrs = claw_group_attrs,
};

static const struct attribute_group *claw_group_attr_groups[] = {
	&claw_group_attr_group,
	NULL,
};
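/*
 * A CLAW interface pair is created by writing the two bus IDs to the
 * driver's "group" attribute defined above; claw_driver_group_store()
 * passes the string to ccwgroup_create_from_string() with exactly two
 * slave devices. A minimal sketch, assuming the standard sysfs location
 * for ccwgroup driver attributes:
 *
 *	echo 0.0.0200,0.0.0201 > /sys/bus/ccwgroup/drivers/claw/group
 */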

/*
 *	Key functions
 */

/*----------------------------------------------------------------*
 *   claw_probe                                                    *
 *	this function is called for each CLAW device.              *
 *----------------------------------------------------------------*/
static int
claw_probe(struct ccwgroup_device *cgdev)
{
	int rc;
	struct claw_privbk *privptr = NULL;

	CLAW_DBF_TEXT(2, setup, "probe");
	if (!get_device(&cgdev->dev))
		return -ENODEV;
	privptr = kzalloc(sizeof(struct claw_privbk), GFP_KERNEL);
	dev_set_drvdata(&cgdev->dev, privptr);
	if (privptr == NULL) {
		probe_error(cgdev);
		put_device(&cgdev->dev);
		CLAW_DBF_TEXT_(2, setup, "probex%d", -ENOMEM);
		return -ENOMEM;
	}
	privptr->p_mtc_envelope = kzalloc(MAX_ENVELOPE_SIZE, GFP_KERNEL);
	privptr->p_env = kzalloc(sizeof(struct claw_env), GFP_KERNEL);
	if ((privptr->p_mtc_envelope == NULL) || (privptr->p_env == NULL)) {
		probe_error(cgdev);
		put_device(&cgdev->dev);
		CLAW_DBF_TEXT_(2, setup, "probex%d", -ENOMEM);
		return -ENOMEM;
	}
	memcpy(privptr->p_env->adapter_name, WS_NAME_NOT_DEF, 8);
	memcpy(privptr->p_env->host_name, WS_NAME_NOT_DEF, 8);
	memcpy(privptr->p_env->api_type, WS_NAME_NOT_DEF, 8);
	privptr->p_env->packing = 0;
	privptr->p_env->write_buffers = 5;
	privptr->p_env->read_buffers = 5;
	privptr->p_env->read_size = CLAW_FRAME_SIZE;
	privptr->p_env->write_size = CLAW_FRAME_SIZE;
	rc = claw_add_files(&cgdev->dev);
	if (rc) {
		probe_error(cgdev);
		put_device(&cgdev->dev);
		dev_err(&cgdev->dev, "Creating the /proc files for a new"
			" CLAW device failed\n");
		CLAW_DBF_TEXT_(2, setup, "probex%d", rc);
		return rc;
	}
	privptr->p_env->p_priv = privptr;
	cgdev->cdev[0]->handler = claw_irq_handler;
	cgdev->cdev[1]->handler = claw_irq_handler;
	CLAW_DBF_TEXT(2, setup, "prbext 0");

	return 0;
}  /* end of claw_probe */

/*-------------------------------------------------------------------*
 *   claw_tx                                                          *
 *-------------------------------------------------------------------*/

static int
claw_tx(struct sk_buff *skb, struct net_device *dev)
{
	int rc;
	struct claw_privbk *privptr = dev->ml_priv;
	unsigned long saveflags;
	struct chbk *p_ch;

	CLAW_DBF_TEXT(4, trace, "claw_tx");
	p_ch = &privptr->channel[WRITE_CHANNEL];
	spin_lock_irqsave(get_ccwdev_lock(p_ch->cdev), saveflags);
	rc = claw_hw_tx(skb, dev, 1);
	spin_unlock_irqrestore(get_ccwdev_lock(p_ch->cdev), saveflags);
	CLAW_DBF_TEXT_(4, trace, "clawtx%d", rc);
	if (rc)
		rc = NETDEV_TX_BUSY;
	else
		rc = NETDEV_TX_OK;
	return rc;
}  /* end of claw_tx */

/*------------------------------------------------------------------*
 *  pack the collect queue into an skb and return it                 *
 *  If not packing just return the top skb from the queue            *
 *------------------------------------------------------------------*/
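/*
 * Note on the packed format (derived from claw_hw_tx() below): when packing
 * is active each sub-frame copied into the outbound skb starts with a
 * struct clawph header carrying the payload length, link number and a flag
 * byte, with the payload padded to a 4-byte boundary; skb->cb[1] == 'P'
 * marks skbs that already carry such a header.
 */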

static struct sk_buff *
claw_pack_skb(struct claw_privbk *privptr)
{
	struct sk_buff *new_skb, *held_skb;
	struct chbk *p_ch = &privptr->channel[WRITE_CHANNEL];
	struct claw_env *p_env = privptr->p_env;
	int pkt_cnt, pk_ind, so_far;

	new_skb = NULL;		/* assume no dice */
	pkt_cnt = 0;
	CLAW_DBF_TEXT(4, trace, "PackSKBe");
	if (!skb_queue_empty(&p_ch->collect_queue)) {
		/* some data */
		held_skb = skb_dequeue(&p_ch->collect_queue);
		if (held_skb)
			dev_kfree_skb_any(held_skb);
		else
			return NULL;
		if (p_env->packing != DO_PACKED)
			return held_skb;
		/* get a new SKB we will pack at least one */
		new_skb = dev_alloc_skb(p_env->write_size);
		if (new_skb == NULL) {
			atomic_inc(&held_skb->users);
			skb_queue_head(&p_ch->collect_queue, held_skb);
			return NULL;
		}
		/* we have packed packet and a place to put it */
		pk_ind = 1;
		so_far = 0;
		new_skb->cb[1] = 'P'; /* every skb on queue has pack header */
		while ((pk_ind) && (held_skb != NULL)) {
			if (held_skb->len + so_far <= p_env->write_size - 8) {
				memcpy(skb_put(new_skb, held_skb->len),
					held_skb->data, held_skb->len);
				privptr->stats.tx_packets++;
				so_far += held_skb->len;
				pkt_cnt++;
				dev_kfree_skb_any(held_skb);
				held_skb = skb_dequeue(&p_ch->collect_queue);
				if (held_skb)
					atomic_dec(&held_skb->users);
			} else {
				pk_ind = 0;
				atomic_inc(&held_skb->users);
				skb_queue_head(&p_ch->collect_queue, held_skb);
			}
		}
	}
	CLAW_DBF_TEXT(4, trace, "PackSKBx");
	return new_skb;
}

/*-------------------------------------------------------------------*
 *   claw_change_mtu                                                  *
 *                                                                    *
 *-------------------------------------------------------------------*/

static int
claw_change_mtu(struct net_device *dev, int new_mtu)
{
	struct claw_privbk *privptr = dev->ml_priv;
	int buff_size;
	CLAW_DBF_TEXT(4, trace, "setmtu");
	buff_size = privptr->p_env->write_size;
	if ((new_mtu < 60) || (new_mtu > buff_size)) {
		return -EINVAL;
	}
	dev->mtu = new_mtu;
	return 0;
}  /* end of claw_change_mtu */


/*-------------------------------------------------------------------*
 *   claw_open                                                        *
 *                                                                    *
 *-------------------------------------------------------------------*/
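/*
 * Outline of the open sequence implemented below: allocate the CCW and
 * buffer chains if needed (init_ccw_bk), pick frame sizes based on the
 * configured api_type, then issue a halt I/O on both channels with a 15
 * second guard timer and sleep until the interrupt handler (or the timer)
 * wakes us. If either channel reports bad device status or the timer
 * fired, all buffers are released again and -EIO is returned.
 */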
static int
claw_open(struct net_device *dev)
{

	int rc;
	int i;
	unsigned long saveflags = 0;
	unsigned long parm;
	struct claw_privbk *privptr;
	DECLARE_WAITQUEUE(wait, current);
	struct timer_list timer;
	struct ccwbk *p_buf;

	CLAW_DBF_TEXT(4, trace, "open");
	privptr = (struct claw_privbk *)dev->ml_priv;
	/* allocate and initialize CCW blocks */
	if (privptr->buffs_alloc == 0) {
		rc = init_ccw_bk(dev);
		if (rc) {
			CLAW_DBF_TEXT(2, trace, "openmem");
			return -ENOMEM;
		}
	}
	privptr->system_validate_comp = 0;
	privptr->release_pend = 0;
	if (strncmp(privptr->p_env->api_type, WS_APPL_NAME_PACKED, 6) == 0) {
		privptr->p_env->read_size = DEF_PACK_BUFSIZE;
		privptr->p_env->write_size = DEF_PACK_BUFSIZE;
		privptr->p_env->packing = PACKING_ASK;
	} else {
		privptr->p_env->packing = 0;
		privptr->p_env->read_size = CLAW_FRAME_SIZE;
		privptr->p_env->write_size = CLAW_FRAME_SIZE;
	}
	claw_set_busy(dev);
	tasklet_init(&privptr->channel[READ_CHANNEL].tasklet, claw_irq_tasklet,
		(unsigned long) &privptr->channel[READ_CHANNEL]);
	for (i = 0; i < 2; i++) {
		CLAW_DBF_TEXT_(2, trace, "opn_ch%d", i);
		init_waitqueue_head(&privptr->channel[i].wait);
		/* skb_queue_head_init(&p_ch->io_queue); */
		if (i == WRITE_CHANNEL)
			skb_queue_head_init(
				&privptr->channel[WRITE_CHANNEL].collect_queue);
		privptr->channel[i].flag_a = 0;
		privptr->channel[i].IO_active = 0;
		privptr->channel[i].flag &= ~CLAW_TIMER;
		init_timer(&timer);
		timer.function = (void *)claw_timer;
		timer.data = (unsigned long)(&privptr->channel[i]);
		timer.expires = jiffies + 15*HZ;
		add_timer(&timer);
		spin_lock_irqsave(get_ccwdev_lock(
			privptr->channel[i].cdev), saveflags);
		parm = (unsigned long) &privptr->channel[i];
		privptr->channel[i].claw_state = CLAW_START_HALT_IO;
		rc = 0;
		add_wait_queue(&privptr->channel[i].wait, &wait);
		rc = ccw_device_halt(
			(struct ccw_device *)privptr->channel[i].cdev, parm);
		set_current_state(TASK_INTERRUPTIBLE);
		spin_unlock_irqrestore(
			get_ccwdev_lock(privptr->channel[i].cdev), saveflags);
		schedule();
		set_current_state(TASK_RUNNING);
		remove_wait_queue(&privptr->channel[i].wait, &wait);
		if (rc != 0)
			ccw_check_return_code(privptr->channel[i].cdev, rc);
		if ((privptr->channel[i].flag & CLAW_TIMER) == 0x00)
			del_timer(&timer);
	}
	if ((((privptr->channel[READ_CHANNEL].last_dstat |
		privptr->channel[WRITE_CHANNEL].last_dstat) &
		~(DEV_STAT_CHN_END | DEV_STAT_DEV_END)) != 0x00) ||
	   (((privptr->channel[READ_CHANNEL].flag |
		privptr->channel[WRITE_CHANNEL].flag) & CLAW_TIMER) != 0x00)) {
		dev_info(&privptr->channel[READ_CHANNEL].cdev->dev,
			"%s: remote side is not ready\n", dev->name);
		CLAW_DBF_TEXT(2, trace, "notrdy");

		for (i = 0; i < 2; i++) {
			spin_lock_irqsave(
				get_ccwdev_lock(privptr->channel[i].cdev),
				saveflags);
			parm = (unsigned long) &privptr->channel[i];
			privptr->channel[i].claw_state = CLAW_STOP;
			rc = ccw_device_halt(
				privptr->channel[i].cdev, parm);
			spin_unlock_irqrestore(
				get_ccwdev_lock(privptr->channel[i].cdev),
				saveflags);
			if (rc != 0) {
				ccw_check_return_code(
					privptr->channel[i].cdev, rc);
			}
		}
		free_pages((unsigned long)privptr->p_buff_ccw,
			(int)pages_to_order_of_mag(privptr->p_buff_ccw_num));
		if (privptr->p_env->read_size < PAGE_SIZE) {
			free_pages((unsigned long)privptr->p_buff_read,
				(int)pages_to_order_of_mag(
					privptr->p_buff_read_num));
		} else {
			p_buf = privptr->p_read_active_first;
			while (p_buf != NULL) {
				free_pages((unsigned long)p_buf->p_buffer,
					(int)pages_to_order_of_mag(
						privptr->p_buff_pages_perread));
				p_buf = p_buf->next;
			}
		}
		if (privptr->p_env->write_size < PAGE_SIZE) {
			free_pages((unsigned long)privptr->p_buff_write,
				(int)pages_to_order_of_mag(
					privptr->p_buff_write_num));
		} else {
			p_buf = privptr->p_write_active_first;
			while (p_buf != NULL) {
				free_pages((unsigned long)p_buf->p_buffer,
					(int)pages_to_order_of_mag(
						privptr->p_buff_pages_perwrite));
				p_buf = p_buf->next;
			}
		}
		privptr->buffs_alloc = 0;
		privptr->channel[READ_CHANNEL].flag = 0x00;
		privptr->channel[WRITE_CHANNEL].flag = 0x00;
		privptr->p_buff_ccw = NULL;
		privptr->p_buff_read = NULL;
		privptr->p_buff_write = NULL;
		claw_clear_busy(dev);
		CLAW_DBF_TEXT(2, trace, "open EIO");
		return -EIO;
	}

	/* Send SystemValidate command */

	claw_clear_busy(dev);
	CLAW_DBF_TEXT(4, trace, "openok");
	return 0;
}  /* end of claw_open */

/*-------------------------------------------------------------------*
*                                                                     *
*       claw_irq_handler                                              *
*                                                                     *
*--------------------------------------------------------------------*/
static void
claw_irq_handler(struct ccw_device *cdev,
	unsigned long intparm, struct irb *irb)
{
	struct chbk *p_ch = NULL;
	struct claw_privbk *privptr = NULL;
	struct net_device *dev = NULL;
	struct claw_env *p_env;
	struct chbk *p_ch_r = NULL;

	kstat_cpu(smp_processor_id()).irqs[IOINT_CLW]++;
	CLAW_DBF_TEXT(4, trace, "clawirq");
	/* Bypass all 'unsolicited interrupts' */
	privptr = dev_get_drvdata(&cdev->dev);
	if (!privptr) {
		dev_warn(&cdev->dev, "An uninitialized CLAW device received an"
			" IRQ, c-%02x d-%02x\n",
			irb->scsw.cmd.cstat, irb->scsw.cmd.dstat);
		CLAW_DBF_TEXT(2, trace, "badirq");
		return;
	}

	/* Try to extract channel from driver data. */
	if (privptr->channel[READ_CHANNEL].cdev == cdev)
		p_ch = &privptr->channel[READ_CHANNEL];
	else if (privptr->channel[WRITE_CHANNEL].cdev == cdev)
		p_ch = &privptr->channel[WRITE_CHANNEL];
	else {
		dev_warn(&cdev->dev, "The device is not a CLAW device\n");
		CLAW_DBF_TEXT(2, trace, "badchan");
		return;
	}
	CLAW_DBF_TEXT_(4, trace, "IRQCH=%d", p_ch->flag);

	dev = (struct net_device *) (p_ch->ndev);
	p_env = privptr->p_env;

	/* Copy interruption response block. */
	memcpy(p_ch->irb, irb, sizeof(struct irb));

	/* Check for good subchannel return code, otherwise info message */
	if (irb->scsw.cmd.cstat && !(irb->scsw.cmd.cstat & SCHN_STAT_PCI)) {
		dev_info(&cdev->dev,
			"%s: subchannel check for device: %04x -"
			" Sch Stat %02x Dev Stat %02x CPA - %04x\n",
			dev->name, p_ch->devno,
			irb->scsw.cmd.cstat, irb->scsw.cmd.dstat,
			irb->scsw.cmd.cpa);
		CLAW_DBF_TEXT(2, trace, "chanchk");
		/* return; */
	}

	/* Check the reason-code of a unit check */
	if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK)
		ccw_check_unit_check(p_ch, irb->ecw[0]);

	/* State machine to bring the connection up, down and to restart */
	p_ch->last_dstat = irb->scsw.cmd.dstat;

	switch (p_ch->claw_state) {
	case CLAW_STOP:	/* HALT_IO by claw_release (halt sequence) */
		if (!((p_ch->irb->scsw.cmd.stctl & SCSW_STCTL_SEC_STATUS) ||
		    (p_ch->irb->scsw.cmd.stctl == SCSW_STCTL_STATUS_PEND) ||
		    (p_ch->irb->scsw.cmd.stctl ==
		    (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND))))
			return;
		wake_up(&p_ch->wait);	/* wake up claw_release */
		CLAW_DBF_TEXT(4, trace, "stop");
		return;
	case CLAW_START_HALT_IO: /* HALT_IO issued by claw_open */
		if (!((p_ch->irb->scsw.cmd.stctl & SCSW_STCTL_SEC_STATUS) ||
		    (p_ch->irb->scsw.cmd.stctl == SCSW_STCTL_STATUS_PEND) ||
		    (p_ch->irb->scsw.cmd.stctl ==
		    (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))) {
			CLAW_DBF_TEXT(4, trace, "haltio");
			return;
		}
		if (p_ch->flag == CLAW_READ) {
			p_ch->claw_state = CLAW_START_READ;
			wake_up(&p_ch->wait); /* wake claw_open (READ) */
		} else if (p_ch->flag == CLAW_WRITE) {
			p_ch->claw_state = CLAW_START_WRITE;
			/* send SYSTEM_VALIDATE */
			claw_strt_read(dev, LOCK_NO);
			claw_send_control(dev,
				SYSTEM_VALIDATE_REQUEST,
				0, 0, 0,
				p_env->host_name,
				p_env->adapter_name);
		} else {
			dev_warn(&cdev->dev, "The CLAW device received"
				" an unexpected IRQ, "
				"c-%02x d-%02x\n",
				irb->scsw.cmd.cstat,
				irb->scsw.cmd.dstat);
			return;
		}
		CLAW_DBF_TEXT(4, trace, "haltio");
		return;
	case CLAW_START_READ:
		CLAW_DBF_TEXT(4, trace, "ReadIRQ");
		if (p_ch->irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) {
			clear_bit(0, (void *)&p_ch->IO_active);
			if ((p_ch->irb->ecw[0] & 0x41) == 0x41 ||
			    (p_ch->irb->ecw[0] & 0x40) == 0x40 ||
			    (p_ch->irb->ecw[0]) == 0) {
				privptr->stats.rx_errors++;
				dev_info(&cdev->dev,
					"%s: Restart is required after remote "
					"side recovers \n",
					dev->name);
			}
			CLAW_DBF_TEXT(4, trace, "notrdy");
			return;
		}
		if ((p_ch->irb->scsw.cmd.cstat & SCHN_STAT_PCI) &&
		    (p_ch->irb->scsw.cmd.dstat == 0)) {
			if (test_and_set_bit(CLAW_BH_ACTIVE,
				(void *)&p_ch->flag_a) == 0)
				tasklet_schedule(&p_ch->tasklet);
			else
				CLAW_DBF_TEXT(4, trace, "PCINoBH");
			CLAW_DBF_TEXT(4, trace, "PCI_read");
			return;
		}
		if (!((p_ch->irb->scsw.cmd.stctl & SCSW_STCTL_SEC_STATUS) ||
		    (p_ch->irb->scsw.cmd.stctl == SCSW_STCTL_STATUS_PEND) ||
		    (p_ch->irb->scsw.cmd.stctl ==
		    (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))) {
			CLAW_DBF_TEXT(4, trace, "SPend_rd");
			return;
		}
		clear_bit(0, (void *)&p_ch->IO_active);
		claw_clearbit_busy(TB_RETRY, dev);
		if (test_and_set_bit(CLAW_BH_ACTIVE,
			(void *)&p_ch->flag_a) == 0)
			tasklet_schedule(&p_ch->tasklet);
		else
			CLAW_DBF_TEXT(4, trace, "RdBHAct");
		CLAW_DBF_TEXT(4, trace, "RdIRQXit");
		return;
	case CLAW_START_WRITE:
		if (p_ch->irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) {
			dev_info(&cdev->dev,
				"%s: Unit Check Occurred in "
				"write channel\n", dev->name);
			clear_bit(0, (void *)&p_ch->IO_active);
			if (p_ch->irb->ecw[0] & 0x80) {
				dev_info(&cdev->dev,
					"%s: Resetting Event "
					"occurred:\n", dev->name);
				init_timer(&p_ch->timer);
				p_ch->timer.function =
					(void *)claw_write_retry;
				p_ch->timer.data = (unsigned long)p_ch;
				p_ch->timer.expires = jiffies + 10*HZ;
				add_timer(&p_ch->timer);
				dev_info(&cdev->dev,
					"%s: write connection "
					"restarting\n", dev->name);
			}
			CLAW_DBF_TEXT(4, trace, "rstrtwrt");
			return;
		}
		if (p_ch->irb->scsw.cmd.dstat & DEV_STAT_UNIT_EXCEP) {
			clear_bit(0, (void *)&p_ch->IO_active);
			dev_info(&cdev->dev,
				"%s: Unit Exception "
				"occurred in write channel\n",
				dev->name);
		}
		if (!((p_ch->irb->scsw.cmd.stctl & SCSW_STCTL_SEC_STATUS) ||
		    (p_ch->irb->scsw.cmd.stctl == SCSW_STCTL_STATUS_PEND) ||
		    (p_ch->irb->scsw.cmd.stctl ==
		    (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))) {
			CLAW_DBF_TEXT(4, trace, "writeUE");
			return;
		}
		clear_bit(0, (void *)&p_ch->IO_active);
		if (claw_test_and_setbit_busy(TB_TX, dev) == 0) {
			claw_write_next(p_ch);
			claw_clearbit_busy(TB_TX, dev);
			claw_clear_busy(dev);
		}
		p_ch_r = (struct chbk *)&privptr->channel[READ_CHANNEL];
		if (test_and_set_bit(CLAW_BH_ACTIVE,
			(void *)&p_ch_r->flag_a) == 0)
			tasklet_schedule(&p_ch_r->tasklet);
		CLAW_DBF_TEXT(4, trace, "StWtExit");
		return;
	default:
		dev_warn(&cdev->dev,
			"The CLAW device for %s received an unexpected IRQ\n",
			dev->name);
		CLAW_DBF_TEXT(2, trace, "badIRQ");
		return;
	}

}  /* end of claw_irq_handler */


/*-------------------------------------------------------------------*
*       claw_irq_tasklet                                              *
*                                                                     *
*--------------------------------------------------------------------*/
static void
claw_irq_tasklet(unsigned long data)
{
	struct chbk *p_ch;
	struct net_device *dev;

	p_ch = (struct chbk *) data;
	dev = (struct net_device *)p_ch->ndev;
	CLAW_DBF_TEXT(4, trace, "IRQtask");
	unpack_read(dev);
	clear_bit(CLAW_BH_ACTIVE, (void *)&p_ch->flag_a);
	CLAW_DBF_TEXT(4, trace, "TskletXt");
	return;
}  /* end of claw_irq_tasklet */

/*-------------------------------------------------------------------*
*       claw_release                                                  *
*                                                                     *
*--------------------------------------------------------------------*/
static int
claw_release(struct net_device *dev)
{
	int rc;
	int i;
	unsigned long saveflags;
	unsigned long parm;
	struct claw_privbk *privptr;
	DECLARE_WAITQUEUE(wait, current);
	struct ccwbk *p_this_ccw;
	struct ccwbk *p_buf;

	if (!dev)
		return 0;
	privptr = (struct claw_privbk *)dev->ml_priv;
	if (!privptr)
		return 0;
	CLAW_DBF_TEXT(4, trace, "release");
	privptr->release_pend = 1;
	claw_setbit_busy(TB_STOP, dev);
	for (i = 1; i >= 0; i--) {
		spin_lock_irqsave(
			get_ccwdev_lock(privptr->channel[i].cdev), saveflags);
		/* del_timer(&privptr->channel[READ_CHANNEL].timer); */
		privptr->channel[i].claw_state = CLAW_STOP;
		privptr->channel[i].IO_active = 0;
		parm = (unsigned long) &privptr->channel[i];
		if (i == WRITE_CHANNEL)
			claw_purge_skb_queue(
				&privptr->channel[WRITE_CHANNEL].collect_queue);
		rc = ccw_device_halt(privptr->channel[i].cdev, parm);
		if (privptr->system_validate_comp == 0x00)  /* never opened? */
			init_waitqueue_head(&privptr->channel[i].wait);
		add_wait_queue(&privptr->channel[i].wait, &wait);
		set_current_state(TASK_INTERRUPTIBLE);
		spin_unlock_irqrestore(
			get_ccwdev_lock(privptr->channel[i].cdev), saveflags);
		schedule();
		set_current_state(TASK_RUNNING);
		remove_wait_queue(&privptr->channel[i].wait, &wait);
		if (rc != 0) {
			ccw_check_return_code(privptr->channel[i].cdev, rc);
		}
	}
	if (privptr->pk_skb != NULL) {
		dev_kfree_skb_any(privptr->pk_skb);
		privptr->pk_skb = NULL;
	}
	if (privptr->buffs_alloc != 1) {
		CLAW_DBF_TEXT(4, trace, "none2fre");
		return 0;
	}
	CLAW_DBF_TEXT(4, trace, "freebufs");
	if (privptr->p_buff_ccw != NULL) {
		free_pages((unsigned long)privptr->p_buff_ccw,
			(int)pages_to_order_of_mag(privptr->p_buff_ccw_num));
	}
	CLAW_DBF_TEXT(4, trace, "freeread");
	if (privptr->p_env->read_size < PAGE_SIZE) {
		if (privptr->p_buff_read != NULL) {
			free_pages((unsigned long)privptr->p_buff_read,
				(int)pages_to_order_of_mag(
					privptr->p_buff_read_num));
		}
	} else {
		p_buf = privptr->p_read_active_first;
		while (p_buf != NULL) {
			free_pages((unsigned long)p_buf->p_buffer,
				(int)pages_to_order_of_mag(
					privptr->p_buff_pages_perread));
			p_buf = p_buf->next;
		}
	}
	CLAW_DBF_TEXT(4, trace, "freewrit");
	if (privptr->p_env->write_size < PAGE_SIZE) {
		free_pages((unsigned long)privptr->p_buff_write,
			(int)pages_to_order_of_mag(privptr->p_buff_write_num));
	} else {
		p_buf = privptr->p_write_active_first;
		while (p_buf != NULL) {
			free_pages((unsigned long)p_buf->p_buffer,
				(int)pages_to_order_of_mag(
					privptr->p_buff_pages_perwrite));
			p_buf = p_buf->next;
		}
	}
	CLAW_DBF_TEXT(4, trace, "clearptr");
	privptr->buffs_alloc = 0;
	privptr->p_buff_ccw = NULL;
	privptr->p_buff_read = NULL;
	privptr->p_buff_write = NULL;
	privptr->system_validate_comp = 0;
	privptr->release_pend = 0;
	/* Remove any writes that were pending and reset all reads */
	p_this_ccw = privptr->p_read_active_first;
	while (p_this_ccw != NULL) {
		p_this_ccw->header.length = 0xffff;
		p_this_ccw->header.opcode = 0xff;
		p_this_ccw->header.flag = 0x00;
		p_this_ccw = p_this_ccw->next;
	}

	while (privptr->p_write_active_first != NULL) {
		p_this_ccw = privptr->p_write_active_first;
		p_this_ccw->header.flag = CLAW_PENDING;
		privptr->p_write_active_first = p_this_ccw->next;
		p_this_ccw->next = privptr->p_write_free_chain;
		privptr->p_write_free_chain = p_this_ccw;
		++privptr->write_free_count;
	}
	privptr->p_write_active_last = NULL;
	privptr->mtc_logical_link = -1;
	privptr->mtc_skipping = 1;
	privptr->mtc_offset = 0;

	if (((privptr->channel[READ_CHANNEL].last_dstat |
		privptr->channel[WRITE_CHANNEL].last_dstat) &
		~(DEV_STAT_CHN_END | DEV_STAT_DEV_END)) != 0x00) {
		dev_warn(&privptr->channel[READ_CHANNEL].cdev->dev,
			"Deactivating %s completed with incorrect"
			" subchannel status "
			"(read %02x, write %02x)\n",
			dev->name,
			privptr->channel[READ_CHANNEL].last_dstat,
			privptr->channel[WRITE_CHANNEL].last_dstat);
		CLAW_DBF_TEXT(2, trace, "badclose");
	}
	CLAW_DBF_TEXT(4, trace, "rlsexit");
	return 0;
}  /* end of claw_release */

/*-------------------------------------------------------------------*
*       claw_write_retry                                              *
*                                                                     *
*--------------------------------------------------------------------*/

static void
claw_write_retry(struct chbk *p_ch)
{

	struct net_device *dev = p_ch->ndev;

	CLAW_DBF_TEXT(4, trace, "w_retry");
	if (p_ch->claw_state == CLAW_STOP) {
		return;
	}
	claw_strt_out_IO(dev);
	CLAW_DBF_TEXT(4, trace, "rtry_xit");
	return;
}  /* end of claw_write_retry */


/*-------------------------------------------------------------------*
*       claw_write_next                                               *
*                                                                     *
*--------------------------------------------------------------------*/

static void
claw_write_next(struct chbk *p_ch)
{

	struct net_device *dev;
	struct claw_privbk *privptr = NULL;
	struct sk_buff *pk_skb;

	CLAW_DBF_TEXT(4, trace, "claw_wrt");
	if (p_ch->claw_state == CLAW_STOP)
		return;
	dev = (struct net_device *) p_ch->ndev;
	privptr = (struct claw_privbk *) dev->ml_priv;
	claw_free_wrt_buf(dev);
	if ((privptr->write_free_count > 0) &&
	    !skb_queue_empty(&p_ch->collect_queue)) {
		pk_skb = claw_pack_skb(privptr);
		while (pk_skb != NULL) {
			claw_hw_tx(pk_skb, dev, 1);
			if (privptr->write_free_count > 0) {
				pk_skb = claw_pack_skb(privptr);
			} else
				pk_skb = NULL;
		}
	}
	if (privptr->p_write_active_first != NULL) {
		claw_strt_out_IO(dev);
	}
	return;
}  /* end of claw_write_next */

/*-------------------------------------------------------------------*
*                                                                     *
*       claw_timer                                                    *
*--------------------------------------------------------------------*/

static void
claw_timer(struct chbk *p_ch)
{
	CLAW_DBF_TEXT(4, trace, "timer");
	p_ch->flag |= CLAW_TIMER;
	wake_up(&p_ch->wait);
	return;
}  /* end of claw_timer */

/*
*
*       functions
*/


/*-------------------------------------------------------------------*
*                                                                     *
*     pages_to_order_of_mag                                           *
*                                                                     *
*  takes a number of pages from 1 to 512 and returns the base-2       *
*  order of magnitude (ceil(log2(num_of_pages))) that                 *
*  get_free_pages() expects; get_free_pages() has an upper            *
*  order of 9.                                                        *
*--------------------------------------------------------------------*/

static int
pages_to_order_of_mag(int num_of_pages)
{
	int order_of_mag = 1;		/* assume 2 pages */
	int nump;

	CLAW_DBF_TEXT_(5, trace, "pages%d", num_of_pages);
	if (num_of_pages == 1)		/* magnitude of 0 = 1 page */
		return 0;
	/* 512 pages = 2Meg on 4k page systems */
	if (num_of_pages >= 512)
		return 9;
	/* we have two or more pages, order is at least 1 */
	for (nump = 2; nump <= 512; nump *= 2) {
		if (num_of_pages <= nump)
			break;
		order_of_mag += 1;
	}
	if (order_of_mag > 9)
		order_of_mag = 9;	/* I know it's paranoid */
	CLAW_DBF_TEXT_(5, trace, "mag%d", order_of_mag);
	return order_of_mag;
}
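/*
 * Example: pages_to_order_of_mag(1) == 0 (one page), 5 -> 3 (2^3 = 8
 * pages), 25 -> 5 (32 pages), and anything from 512 pages up is capped
 * at order 9.
 */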

/*-------------------------------------------------------------------*
*                                                                     *
*     add_claw_reads                                                  *
*                                                                     *
*--------------------------------------------------------------------*/
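/*
 * Both add_claw_reads() and the write path in claw_hw_tx() use the same
 * trick to extend a channel program that may already be running: struct
 * endccw holds two alternating ending sequences (read1/read2 and
 * write1/write2), and the currently inactive one is rewritten into a TIC
 * to the new chain with a single memcpy so the channel never fetches a
 * half-updated CCW.
 */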
static int
add_claw_reads(struct net_device *dev, struct ccwbk *p_first,
	struct ccwbk *p_last)
{
	struct claw_privbk *privptr;
	struct ccw1 temp_ccw;
	struct endccw *p_end;
	CLAW_DBF_TEXT(4, trace, "addreads");
	privptr = dev->ml_priv;
	p_end = privptr->p_end_ccw;

	/* first CCW and last CCW contain a new set of read channel programs
	 * to append to the running channel program
	 */
	if (p_first == NULL) {
		CLAW_DBF_TEXT(4, trace, "addexit");
		return 0;
	}

	/* set up ending CCW sequence for this segment */
	if (p_end->read1) {
		p_end->read1 = 0x00;	/* second ending CCW is now active */
		/* reset ending CCWs and setup TIC CCWs */
		p_end->read2_nop2.cmd_code = CCW_CLAW_CMD_READFF;
		p_end->read2_nop2.flags = CCW_FLAG_SLI | CCW_FLAG_SKIP;
		p_last->r_TIC_1.cda = (__u32)__pa(&p_end->read2_nop1);
		p_last->r_TIC_2.cda = (__u32)__pa(&p_end->read2_nop1);
		p_end->read2_nop2.cda = 0;
		p_end->read2_nop2.count = 1;
	} else {
		p_end->read1 = 0x01;	/* first ending CCW is now active */
		/* reset ending CCWs and setup TIC CCWs */
		p_end->read1_nop2.cmd_code = CCW_CLAW_CMD_READFF;
		p_end->read1_nop2.flags = CCW_FLAG_SLI | CCW_FLAG_SKIP;
		p_last->r_TIC_1.cda = (__u32)__pa(&p_end->read1_nop1);
		p_last->r_TIC_2.cda = (__u32)__pa(&p_end->read1_nop1);
		p_end->read1_nop2.cda = 0;
		p_end->read1_nop2.count = 1;
	}

	if (privptr->p_read_active_first == NULL) {
		privptr->p_read_active_first = p_first;	/* set new first */
		privptr->p_read_active_last = p_last;	/* set new last */
	} else {

		/* set up TIC ccw */
		temp_ccw.cda = (__u32)__pa(&p_first->read);
		temp_ccw.count = 0;
		temp_ccw.flags = 0;
		temp_ccw.cmd_code = CCW_CLAW_CMD_TIC;


		if (p_end->read1) {

			/* first set of CCW's is chained to the new read
			 * chain, so the second set is chained to the active
			 * chain. Therefore modify the second set to point to
			 * the new read chain and set up the TIC CCWs.
			 * Make sure we update the CCW so the channel doesn't
			 * fetch it when it's only half done.
			 */
			memcpy(&p_end->read2_nop2, &temp_ccw,
				sizeof(struct ccw1));
			privptr->p_read_active_last->r_TIC_1.cda =
				(__u32)__pa(&p_first->read);
			privptr->p_read_active_last->r_TIC_2.cda =
				(__u32)__pa(&p_first->read);
		} else {
			/* make sure we update the CCW so the channel doesn't
			 * fetch it when it is only half done
			 */
			memcpy(&p_end->read1_nop2, &temp_ccw,
				sizeof(struct ccw1));
			privptr->p_read_active_last->r_TIC_1.cda =
				(__u32)__pa(&p_first->read);
			privptr->p_read_active_last->r_TIC_2.cda =
				(__u32)__pa(&p_first->read);
		}
		/* chain in new set of blocks */
		privptr->p_read_active_last->next = p_first;
		privptr->p_read_active_last = p_last;
	}  /* end of if (privptr->p_read_active_first == NULL) */
	CLAW_DBF_TEXT(4, trace, "addexit");
	return 0;
}  /* end of add_claw_reads */

/*-------------------------------------------------------------------*
 *   ccw_check_return_code                                            *
 *                                                                    *
 *-------------------------------------------------------------------*/

static void
ccw_check_return_code(struct ccw_device *cdev, int return_code)
{
	CLAW_DBF_TEXT(4, trace, "ccwret");
	if (return_code != 0) {
		switch (return_code) {
		case -EBUSY: /* BUSY is a transient state, no action needed */
			break;
		case -ENODEV:
			dev_err(&cdev->dev, "The remote channel adapter is not"
				" available\n");
			break;
		case -EINVAL:
			dev_err(&cdev->dev,
				"The status of the remote channel adapter"
				" is not valid\n");
			break;
		default:
			dev_err(&cdev->dev, "The common device layer"
				" returned error code %d\n",
				return_code);
		}
	}
	CLAW_DBF_TEXT(4, trace, "ccwret");
}  /* end of ccw_check_return_code */

/*-------------------------------------------------------------------*
*	ccw_check_unit_check                                           *
*--------------------------------------------------------------------*/

static void
ccw_check_unit_check(struct chbk *p_ch, unsigned char sense)
{
	struct net_device *ndev = p_ch->ndev;
	struct device *dev = &p_ch->cdev->dev;

	CLAW_DBF_TEXT(4, trace, "unitchek");
	dev_warn(dev, "The communication peer of %s disconnected\n",
		ndev->name);

	if (sense & 0x40) {
		if (sense & 0x01) {
			dev_warn(dev, "The remote channel adapter for"
				" %s has been reset\n",
				ndev->name);
		}
	} else if (sense & 0x20) {
		if (sense & 0x04) {
			dev_warn(dev, "A data streaming timeout occurred"
				" for %s\n",
				ndev->name);
		} else if (sense & 0x10) {
			dev_warn(dev, "The remote channel adapter for %s"
				" is faulty\n",
				ndev->name);
		} else {
			dev_warn(dev, "A data transfer parity error occurred"
				" for %s\n",
				ndev->name);
		}
	} else if (sense & 0x10) {
		dev_warn(dev, "A read data parity error occurred"
			" for %s\n",
			ndev->name);
	}

}  /* end of ccw_check_unit_check */

/*-------------------------------------------------------------------*
*	find_link                                                      *
*--------------------------------------------------------------------*/
static int
find_link(struct net_device *dev, char *host_name, char *ws_name)
{
	struct claw_privbk *privptr;
	struct claw_env *p_env;
	int rc = 0;

	CLAW_DBF_TEXT(2, setup, "findlink");
	privptr = dev->ml_priv;
	p_env = privptr->p_env;
	switch (p_env->packing) {
	case PACKING_ASK:
		if ((memcmp(WS_APPL_NAME_PACKED, host_name, 8) != 0) ||
		    (memcmp(WS_APPL_NAME_PACKED, ws_name, 8) != 0))
			rc = EINVAL;
		break;
	case DO_PACKED:
	case PACK_SEND:
		if ((memcmp(WS_APPL_NAME_IP_NAME, host_name, 8) != 0) ||
		    (memcmp(WS_APPL_NAME_IP_NAME, ws_name, 8) != 0))
			rc = EINVAL;
		break;
	default:
		if ((memcmp(HOST_APPL_NAME, host_name, 8) != 0) ||
		    (memcmp(p_env->api_type, ws_name, 8) != 0))
			rc = EINVAL;
		break;
	}

	return rc;
}  /* end of find_link */

/*-------------------------------------------------------------------*
 *   claw_hw_tx                                                       *
 *                                                                    *
 *                                                                    *
 *-------------------------------------------------------------------*/

static int
claw_hw_tx(struct sk_buff *skb, struct net_device *dev, long linkid)
{
	int rc = 0;
	struct claw_privbk *privptr;
	struct ccwbk *p_this_ccw;
	struct ccwbk *p_first_ccw;
	struct ccwbk *p_last_ccw;
	__u32 numBuffers;
	signed long len_of_data;
	unsigned long bytesInThisBuffer;
	unsigned char *pDataAddress;
	struct endccw *pEnd;
	struct ccw1 tempCCW;
	struct claw_env *p_env;
	struct clawph *pk_head;
	struct chbk *ch;

	CLAW_DBF_TEXT(4, trace, "hw_tx");
	privptr = (struct claw_privbk *)(dev->ml_priv);
	p_env = privptr->p_env;
	claw_free_wrt_buf(dev);	/* Clean up free chain if possible */
	/* scan the write queue to free any completed write packets */
	p_first_ccw = NULL;
	p_last_ccw = NULL;
	if ((p_env->packing >= PACK_SEND) &&
	    (skb->cb[1] != 'P')) {
		skb_push(skb, sizeof(struct clawph));
		pk_head = (struct clawph *)skb->data;
		pk_head->len = skb->len - sizeof(struct clawph);
		if (pk_head->len % 4) {
			pk_head->len += 4 - (pk_head->len % 4);
			skb_pad(skb, 4 - (pk_head->len % 4));
			skb_put(skb, 4 - (pk_head->len % 4));
		}
		if (p_env->packing == DO_PACKED)
			pk_head->link_num = linkid;
		else
			pk_head->link_num = 0;
		pk_head->flag = 0x00;
		skb_pad(skb, 4);
		skb->cb[1] = 'P';
	}
	if (linkid == 0) {
		if (claw_check_busy(dev)) {
			if (privptr->write_free_count != 0) {
				claw_clear_busy(dev);
			} else {
				claw_strt_out_IO(dev);
				claw_free_wrt_buf(dev);
				if (privptr->write_free_count == 0) {
					ch = &privptr->channel[WRITE_CHANNEL];
					atomic_inc(&skb->users);
					skb_queue_tail(&ch->collect_queue, skb);
					goto Done;
				} else {
					claw_clear_busy(dev);
				}
			}
		}
		/* tx lock */
		if (claw_test_and_setbit_busy(TB_TX, dev)) { /* set to busy */
			ch = &privptr->channel[WRITE_CHANNEL];
			atomic_inc(&skb->users);
			skb_queue_tail(&ch->collect_queue, skb);
			claw_strt_out_IO(dev);
			rc = -EBUSY;
			goto Done2;
		}
	}
	/* See how many write buffers are required to hold this data */
	numBuffers = DIV_ROUND_UP(skb->len, privptr->p_env->write_size);

	/* If that number of buffers isn't available, give up for now */
	if (privptr->write_free_count < numBuffers ||
	    privptr->p_write_free_chain == NULL) {

		claw_setbit_busy(TB_NOBUFFER, dev);
		ch = &privptr->channel[WRITE_CHANNEL];
		atomic_inc(&skb->users);
		skb_queue_tail(&ch->collect_queue, skb);
		CLAW_DBF_TEXT(2, trace, "clawbusy");
		goto Done2;
	}
	pDataAddress = skb->data;
	len_of_data = skb->len;

	while (len_of_data > 0) {
		p_this_ccw = privptr->p_write_free_chain;	/* get a block */
		if (p_this_ccw == NULL) {	/* lost the race */
			ch = &privptr->channel[WRITE_CHANNEL];
			atomic_inc(&skb->users);
			skb_queue_tail(&ch->collect_queue, skb);
			goto Done2;
		}
		privptr->p_write_free_chain = p_this_ccw->next;
		p_this_ccw->next = NULL;
		--privptr->write_free_count;	/* -1 */
		if (len_of_data >= privptr->p_env->write_size)
			bytesInThisBuffer = privptr->p_env->write_size;
		else
			bytesInThisBuffer = len_of_data;
		memcpy(p_this_ccw->p_buffer, pDataAddress, bytesInThisBuffer);
		len_of_data -= bytesInThisBuffer;
		pDataAddress += (unsigned long)bytesInThisBuffer;
		/* setup write CCW */
		p_this_ccw->write.cmd_code = (linkid * 8) + 1;
		if (len_of_data > 0) {
			p_this_ccw->write.cmd_code += MORE_to_COME_FLAG;
		}
		p_this_ccw->write.count = bytesInThisBuffer;
		/* now add to end of this chain */
		if (p_first_ccw == NULL) {
			p_first_ccw = p_this_ccw;
		}
		if (p_last_ccw != NULL) {
			p_last_ccw->next = p_this_ccw;
			/* set up TIC ccws */
			p_last_ccw->w_TIC_1.cda =
				(__u32)__pa(&p_this_ccw->write);
		}
		p_last_ccw = p_this_ccw;	/* save new last block */
	}

	/* FirstCCW and LastCCW now contain a new set of write channel
	 * programs to append to the running channel program
	 */

	if (p_first_ccw != NULL) {
		/* setup ending ccw sequence for this segment */
		pEnd = privptr->p_end_ccw;
		if (pEnd->write1) {
			pEnd->write1 = 0x00;	/* second end ccw is now active */
			/* set up Tic CCWs */
			p_last_ccw->w_TIC_1.cda =
				(__u32)__pa(&pEnd->write2_nop1);
			pEnd->write2_nop2.cmd_code = CCW_CLAW_CMD_READFF;
			pEnd->write2_nop2.flags =
				CCW_FLAG_SLI | CCW_FLAG_SKIP;
			pEnd->write2_nop2.cda = 0;
			pEnd->write2_nop2.count = 1;
		} else {	/* end of if (pEnd->write1) */
			pEnd->write1 = 0x01;	/* first end ccw is now active */
			/* set up Tic CCWs */
			p_last_ccw->w_TIC_1.cda =
				(__u32)__pa(&pEnd->write1_nop1);
			pEnd->write1_nop2.cmd_code = CCW_CLAW_CMD_READFF;
			pEnd->write1_nop2.flags =
				CCW_FLAG_SLI | CCW_FLAG_SKIP;
			pEnd->write1_nop2.cda = 0;
			pEnd->write1_nop2.count = 1;
		}  /* end if (pEnd->write1) */

		if (privptr->p_write_active_first == NULL) {
			privptr->p_write_active_first = p_first_ccw;
			privptr->p_write_active_last = p_last_ccw;
		} else {
			/* set up Tic CCWs */

			tempCCW.cda = (__u32)__pa(&p_first_ccw->write);
			tempCCW.count = 0;
			tempCCW.flags = 0;
			tempCCW.cmd_code = CCW_CLAW_CMD_TIC;

			if (pEnd->write1) {

			/*
			 * first set of ending CCW's is chained to the new
			 * write chain, so the second set is chained to the
			 * active chain. Therefore modify the second set to
			 * point to the new write chain. Make sure we update
			 * the CCW atomically so the channel does not fetch
			 * it when it's only half done.
			 */
				memcpy(&pEnd->write2_nop2, &tempCCW,
					sizeof(struct ccw1));
				privptr->p_write_active_last->w_TIC_1.cda =
					(__u32)__pa(&p_first_ccw->write);
			} else {

			/*
			 * make sure we update the CCW atomically so the
			 * channel does not fetch it when it's only half done
			 */
				memcpy(&pEnd->write1_nop2, &tempCCW,
					sizeof(struct ccw1));
				privptr->p_write_active_last->w_TIC_1.cda =
					(__u32)__pa(&p_first_ccw->write);

			}  /* end if (pEnd->write1) */

			privptr->p_write_active_last->next = p_first_ccw;
			privptr->p_write_active_last = p_last_ccw;
		}

	}  /* endif (p_first_ccw != NULL) */
	dev_kfree_skb_any(skb);
	claw_strt_out_IO(dev);
	/* if write free count is zero, set NOBUFFER */
	if (privptr->write_free_count == 0) {
		claw_setbit_busy(TB_NOBUFFER, dev);
	}
Done2:
	claw_clearbit_busy(TB_TX, dev);
Done:
	return rc;
}  /* end of claw_hw_tx */

/*-------------------------------------------------------------------*
*                                                                     *
*     init_ccw_bk                                                     *
*                                                                     *
*--------------------------------------------------------------------*/
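/*
 * init_ccw_bk() builds everything claw_open() needs: a pool of CCW blocks
 * (one per read buffer, one per write buffer, plus one for the CLAW signal
 * block), the ending CCW block with its two alternating NOP/READFF
 * sequences, and the read and write data buffers themselves, allocated
 * either several-per-page or several-pages-per-buffer depending on the
 * configured read/write sizes relative to PAGE_SIZE.
 */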

static int
init_ccw_bk(struct net_device *dev)
{

	__u32 ccw_blocks_required;
	__u32 ccw_blocks_perpage;
	__u32 ccw_pages_required;
	__u32 claw_reads_perpage = 1;
	__u32 claw_read_pages;
	__u32 claw_writes_perpage = 1;
	__u32 claw_write_pages;
	void *p_buff = NULL;
	struct ccwbk *p_free_chain;
	struct ccwbk *p_buf;
	struct ccwbk *p_last_CCWB;
	struct ccwbk *p_first_CCWB;
	struct endccw *p_endccw = NULL;
	addr_t real_address;
	struct claw_privbk *privptr = dev->ml_priv;
	struct clawh *pClawH = NULL;
	addr_t real_TIC_address;
	int i, j;
	CLAW_DBF_TEXT(4, trace, "init_ccw");

	/* initialize statistics field */
	privptr->active_link_ID = 0;
	/* initialize ccwbk pointers */
	privptr->p_write_free_chain = NULL;	/* pointer to free ccw chain */
	privptr->p_write_active_first = NULL;	/* pointer to the first write ccw */
	privptr->p_write_active_last = NULL;	/* pointer to the last write ccw */
	privptr->p_read_active_first = NULL;	/* pointer to the first read ccw */
	privptr->p_read_active_last = NULL;	/* pointer to the last read ccw */
	privptr->p_end_ccw = NULL;		/* pointer to ending ccw */
	privptr->p_claw_signal_blk = NULL;	/* pointer to signal block */
	privptr->buffs_alloc = 0;
	memset(&privptr->end_ccw, 0x00, sizeof(struct endccw));
	memset(&privptr->ctl_bk, 0x00, sizeof(struct clawctl));
	/* initialize free write ccwbk counter */
	privptr->write_free_count = 0;	/* number of free bufs on write chain */
	p_last_CCWB = NULL;
	p_first_CCWB = NULL;
	/*
	 * We need 1 CCW block for each read buffer, 1 for each
	 * write buffer, plus 1 for ClawSignalBlock
	 */
	ccw_blocks_required =
		privptr->p_env->read_buffers + privptr->p_env->write_buffers + 1;
	/*
	 * compute number of CCW blocks that will fit in a page
	 */
	ccw_blocks_perpage = PAGE_SIZE / CCWBK_SIZE;
	ccw_pages_required =
		DIV_ROUND_UP(ccw_blocks_required, ccw_blocks_perpage);

	/*
	 * read and write sizes are set by 2 constants in claw.h
	 * 4k and 32k. Unpacked values other than 4k are not going to
	 * provide good performance. With packing buffers support 32k
	 * buffers are used.
	 */
	if (privptr->p_env->read_size < PAGE_SIZE) {
		claw_reads_perpage = PAGE_SIZE / privptr->p_env->read_size;
		claw_read_pages = DIV_ROUND_UP(privptr->p_env->read_buffers,
						claw_reads_perpage);
	} else {		/* > or equal */
		privptr->p_buff_pages_perread =
			DIV_ROUND_UP(privptr->p_env->read_size, PAGE_SIZE);
		claw_read_pages = privptr->p_env->read_buffers *
					privptr->p_buff_pages_perread;
	}
	if (privptr->p_env->write_size < PAGE_SIZE) {
		claw_writes_perpage =
			PAGE_SIZE / privptr->p_env->write_size;
		claw_write_pages = DIV_ROUND_UP(privptr->p_env->write_buffers,
						claw_writes_perpage);

	} else {		/* > or equal */
		privptr->p_buff_pages_perwrite =
			DIV_ROUND_UP(privptr->p_env->write_size, PAGE_SIZE);
		claw_write_pages = privptr->p_env->write_buffers *
					privptr->p_buff_pages_perwrite;
	}
1609 /*
1610 * allocate ccw_pages_required
1611 */
1612 if (privptr->p_buff_ccw==NULL) {
1613 privptr->p_buff_ccw=
1614 (void *)__get_free_pages(__GFP_DMA,
1615 (int)pages_to_order_of_mag(ccw_pages_required ));
1616 if (privptr->p_buff_ccw==NULL) {
1617 return -ENOMEM;
1618 }
1619 privptr->p_buff_ccw_num=ccw_pages_required;
1620 }
1621 memset(privptr->p_buff_ccw, 0x00,
1622 privptr->p_buff_ccw_num * PAGE_SIZE);
1623
1624 /*
1625 * obtain ending ccw block address
1626 *
1627 */
1628 privptr->p_end_ccw = (struct endccw *)&privptr->end_ccw;
1629 real_address = (__u32)__pa(privptr->p_end_ccw);
1630 /* Initialize ending CCW block */
1631 p_endccw=privptr->p_end_ccw;
1632 p_endccw->real=real_address;
1633 p_endccw->write1=0x00;
1634 p_endccw->read1=0x00;
1635
1636 /* write1_nop1 */
1637 p_endccw->write1_nop1.cmd_code = CCW_CLAW_CMD_NOP;
1638 p_endccw->write1_nop1.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1639 p_endccw->write1_nop1.count = 1;
1640 p_endccw->write1_nop1.cda = 0;
1641
1642 /* write1_nop2 */
1643 p_endccw->write1_nop2.cmd_code = CCW_CLAW_CMD_READFF;
1644 p_endccw->write1_nop2.flags = CCW_FLAG_SLI | CCW_FLAG_SKIP;
1645 p_endccw->write1_nop2.count = 1;
1646 p_endccw->write1_nop2.cda = 0;
1647
1648 /* write2_nop1 */
1649 p_endccw->write2_nop1.cmd_code = CCW_CLAW_CMD_NOP;
1650 p_endccw->write2_nop1.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1651 p_endccw->write2_nop1.count = 1;
1652 p_endccw->write2_nop1.cda = 0;
1653
1654 /* write2_nop2 */
1655 p_endccw->write2_nop2.cmd_code = CCW_CLAW_CMD_READFF;
1656 p_endccw->write2_nop2.flags = CCW_FLAG_SLI | CCW_FLAG_SKIP;
1657 p_endccw->write2_nop2.count = 1;
1658 p_endccw->write2_nop2.cda = 0;
1659
1660 /* read1_nop1 */
1661 p_endccw->read1_nop1.cmd_code = CCW_CLAW_CMD_NOP;
1662 p_endccw->read1_nop1.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1663 p_endccw->read1_nop1.count = 1;
1664 p_endccw->read1_nop1.cda = 0;
1665
1666 /* read1_nop2 */
1667 p_endccw->read1_nop2.cmd_code = CCW_CLAW_CMD_READFF;
1668 p_endccw->read1_nop2.flags = CCW_FLAG_SLI | CCW_FLAG_SKIP;
1669 p_endccw->read1_nop2.count = 1;
1670 p_endccw->read1_nop2.cda = 0;
1671
1672 /* read2_nop1 */
1673 p_endccw->read2_nop1.cmd_code = CCW_CLAW_CMD_NOP;
1674 p_endccw->read2_nop1.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1675 p_endccw->read2_nop1.count = 1;
1676 p_endccw->read2_nop1.cda = 0;
1677
1678 /* read2_nop2 */
1679 p_endccw->read2_nop2.cmd_code = CCW_CLAW_CMD_READFF;
1680 p_endccw->read2_nop2.flags = CCW_FLAG_SLI | CCW_FLAG_SKIP;
1681 p_endccw->read2_nop2.count = 1;
1682 p_endccw->read2_nop2.cda = 0;
1683
1684 /*
1685 * Build a chain of CCWs
1686 *
1687 */
1688 p_buff=privptr->p_buff_ccw;
1689
1690 p_free_chain=NULL;
1691 for (i=0 ; i < ccw_pages_required; i++ ) {
1692 real_address = (__u32)__pa(p_buff);
1693 p_buf=p_buff;
1694 for (j=0 ; j < ccw_blocks_perpage ; j++) {
1695 p_buf->next = p_free_chain;
1696 p_free_chain = p_buf;
1697 p_buf->real=(__u32)__pa(p_buf);
1698 ++p_buf;
1699 }
1700 p_buff+=PAGE_SIZE;
1701 }
1702 /*
1703 * Initialize ClawSignalBlock
1704 *
1705 */
1706 if (privptr->p_claw_signal_blk==NULL) {
1707 privptr->p_claw_signal_blk=p_free_chain;
1708 p_free_chain=p_free_chain->next;
1709 pClawH=(struct clawh *)privptr->p_claw_signal_blk;
1710 pClawH->length=0xffff;
1711 pClawH->opcode=0xff;
1712 pClawH->flag=CLAW_BUSY;
1713 }
1714
1715 /*
1716 * allocate write_pages_required and add to free chain
1717 */
1718 if (privptr->p_buff_write==NULL) {
1719 if (privptr->p_env->write_size < PAGE_SIZE) {
1720 privptr->p_buff_write=
1721 (void *)__get_free_pages(__GFP_DMA,
1722 (int)pages_to_order_of_mag(claw_write_pages ));
1723 if (privptr->p_buff_write==NULL) {
1724 privptr->p_buff_ccw=NULL;
1725 return -ENOMEM;
1726 }
1727 /*
1728 * Build CLAW write free chain
1729 *
1730 */
1731
1732 memset(privptr->p_buff_write, 0x00,
1733 ccw_pages_required * PAGE_SIZE);
1734 privptr->p_write_free_chain=NULL;
1735
1736 p_buff=privptr->p_buff_write;
1737
1738 for (i=0 ; i< privptr->p_env->write_buffers ; i++) {
1739 p_buf = p_free_chain; /* get a CCW */
1740 p_free_chain = p_buf->next;
1741 p_buf->next =privptr->p_write_free_chain;
1742 privptr->p_write_free_chain = p_buf;
1743 p_buf-> p_buffer = (struct clawbuf *)p_buff;
1744 p_buf-> write.cda = (__u32)__pa(p_buff);
1745 p_buf-> write.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1746 p_buf-> w_read_FF.cmd_code = CCW_CLAW_CMD_READFF;
1747 p_buf-> w_read_FF.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1748 p_buf-> w_read_FF.count = 1;
1749 p_buf-> w_read_FF.cda =
1750 (__u32)__pa(&p_buf-> header.flag);
1751 p_buf-> w_TIC_1.cmd_code = CCW_CLAW_CMD_TIC;
1752 p_buf-> w_TIC_1.flags = 0;
1753 p_buf-> w_TIC_1.count = 0;
1754
1755 if (((unsigned long)p_buff +
1756 privptr->p_env->write_size) >=
1757 ((unsigned long)(p_buff+2*
1758 (privptr->p_env->write_size) - 1) & PAGE_MASK)) {
1759 p_buff = p_buff+privptr->p_env->write_size;
1760 }
1761 }
1762 }
1763 else /* Buffers are => PAGE_SIZE. 1 buff per get_free_pages */
1764 {
1765 privptr->p_write_free_chain=NULL;
1766 for (i = 0; i< privptr->p_env->write_buffers ; i++) {
1767 p_buff=(void *)__get_free_pages(__GFP_DMA,
1768 (int)pages_to_order_of_mag(
1769 privptr->p_buff_pages_perwrite) );
1770 if (p_buff==NULL) {
1771 free_pages((unsigned long)privptr->p_buff_ccw,
1772 (int)pages_to_order_of_mag(
1773 privptr->p_buff_ccw_num));
1774 privptr->p_buff_ccw=NULL;
1775 p_buf=privptr->p_buff_write;
1776 while (p_buf!=NULL) {
1777 free_pages((unsigned long)
1778 p_buf->p_buffer,
1779 (int)pages_to_order_of_mag(
1780 privptr->p_buff_pages_perwrite));
1781 p_buf=p_buf->next;
1782 }
1783 return -ENOMEM;
1784 } /* Error on get_pages */
1785 memset(p_buff, 0x00, privptr->p_env->write_size );
1786 p_buf = p_free_chain;
1787 p_free_chain = p_buf->next;
1788 p_buf->next = privptr->p_write_free_chain;
1789 privptr->p_write_free_chain = p_buf;
1790 privptr->p_buff_write = p_buf;
1791 p_buf->p_buffer=(struct clawbuf *)p_buff;
1792 p_buf-> write.cda = (__u32)__pa(p_buff);
1793 p_buf-> write.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1794 p_buf-> w_read_FF.cmd_code = CCW_CLAW_CMD_READFF;
1795 p_buf-> w_read_FF.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1796 p_buf-> w_read_FF.count = 1;
1797 p_buf-> w_read_FF.cda =
1798 (__u32)__pa(&p_buf-> header.flag);
1799 p_buf-> w_TIC_1.cmd_code = CCW_CLAW_CMD_TIC;
1800 p_buf-> w_TIC_1.flags = 0;
1801 p_buf-> w_TIC_1.count = 0;
1802 } /* for all write_buffers */
1803
1804 } /* else buffers are PAGE_SIZE or bigger */
1805
1806 }
1807 privptr->p_buff_write_num=claw_write_pages;
1808 privptr->write_free_count=privptr->p_env->write_buffers;
1809
1810
1811 /*
1812 * allocate read_pages_required and chain to free chain
1813 */
1814 if (privptr->p_buff_read==NULL) {
1815 if (privptr->p_env->read_size < PAGE_SIZE) {
1816 privptr->p_buff_read=
1817 (void *)__get_free_pages(__GFP_DMA,
1818 (int)pages_to_order_of_mag(claw_read_pages) );
1819 if (privptr->p_buff_read==NULL) {
1820 free_pages((unsigned long)privptr->p_buff_ccw,
1821 (int)pages_to_order_of_mag(
1822 privptr->p_buff_ccw_num));
1823 /* free the write pages; allocated as one block (write_size < PAGE_SIZE) */
1824 free_pages((unsigned long)privptr->p_buff_write,
1825 (int)pages_to_order_of_mag(
1826 privptr->p_buff_write_num));
1827 privptr->p_buff_ccw=NULL;
1828 privptr->p_buff_write=NULL;
1829 return -ENOMEM;
1830 }
1831 memset(privptr->p_buff_read, 0x00, claw_read_pages * PAGE_SIZE);
1832 privptr->p_buff_read_num=claw_read_pages;
1833 /*
1834 * Build CLAW read free chain
1835 *
1836 */
1837 p_buff=privptr->p_buff_read;
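 /* The read blocks are built in reverse order: each block's TIC CCW */
 /* points at the read CCW of the block built just before it, so the */
 /* channel program chains through every read buffer. */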
1838 for (i=0 ; i< privptr->p_env->read_buffers ; i++) {
1839 p_buf = p_free_chain;
1840 p_free_chain = p_buf->next;
1841
1842 if (p_last_CCWB==NULL) {
1843 p_buf->next=NULL;
1844 real_TIC_address=0;
1845 p_last_CCWB=p_buf;
1846 }
1847 else {
1848 p_buf->next=p_first_CCWB;
1849 real_TIC_address=
1850 (__u32)__pa(&p_first_CCWB -> read );
1851 }
1852
1853 p_first_CCWB=p_buf;
1854
1855 p_buf->p_buffer=(struct clawbuf *)p_buff;
1856 /* initialize read command */
1857 p_buf-> read.cmd_code = CCW_CLAW_CMD_READ;
1858 p_buf-> read.cda = (__u32)__pa(p_buff);
1859 p_buf-> read.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1860 p_buf-> read.count = privptr->p_env->read_size;
1861
1862 /* initialize read_h command */
1863 p_buf-> read_h.cmd_code = CCW_CLAW_CMD_READHEADER;
1864 p_buf-> read_h.cda =
1865 (__u32)__pa(&(p_buf->header));
1866 p_buf-> read_h.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1867 p_buf-> read_h.count = sizeof(struct clawh);
1868
1869 /* initialize Signal command */
1870 p_buf-> signal.cmd_code = CCW_CLAW_CMD_SIGNAL_SMOD;
1871 p_buf-> signal.cda =
1872 (__u32)__pa(&(pClawH->flag));
1873 p_buf-> signal.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1874 p_buf-> signal.count = 1;
1875
1876 /* initialize r_TIC_1 command */
1877 p_buf-> r_TIC_1.cmd_code = CCW_CLAW_CMD_TIC;
1878 p_buf-> r_TIC_1.cda = (__u32)real_TIC_address;
1879 p_buf-> r_TIC_1.flags = 0;
1880 p_buf-> r_TIC_1.count = 0;
1881
1882 /* initialize r_read_FF command */
1883 p_buf-> r_read_FF.cmd_code = CCW_CLAW_CMD_READFF;
1884 p_buf-> r_read_FF.cda =
1885 (__u32)__pa(&(pClawH->flag));
1886 p_buf-> r_read_FF.flags =
1887 CCW_FLAG_SLI | CCW_FLAG_CC | CCW_FLAG_PCI;
1888 p_buf-> r_read_FF.count = 1;
1889
1890 /* initialize r_TIC_2 */
1891 memcpy(&p_buf->r_TIC_2,
1892 &p_buf->r_TIC_1, sizeof(struct ccw1));
1893
1894 /* initialize Header */
1895 p_buf->header.length=0xffff;
1896 p_buf->header.opcode=0xff;
1897 p_buf->header.flag=CLAW_PENDING;
1898
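 /* Keep each read buffer within a single page: advance by read_size */
 /* unless the next buffer would straddle a page boundary, in which */
 /* case start it on the next page. */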
1899 if (((unsigned long)p_buff+privptr->p_env->read_size) >=
1900 ((unsigned long)(p_buff+2*(privptr->p_env->read_size)
1901 -1)
1902 & PAGE_MASK)) {
1903 p_buff= p_buff+privptr->p_env->read_size;
1904 }
1905 else {
1906 p_buff=
1907 (void *)((unsigned long)
1908 (p_buff+2*(privptr->p_env->read_size)-1)
1909 & PAGE_MASK) ;
1910 }
1911 } /* for read_buffers */
1912 } /* read_size < PAGE_SIZE */
1913 else { /* read Size >= PAGE_SIZE */
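 /* One __get_free_pages allocation per read buffer; the CCW blocks */
 /* are reverse-chained exactly as in the read_size < PAGE_SIZE case. */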
1914 for (i=0 ; i< privptr->p_env->read_buffers ; i++) {
1915 p_buff = (void *)__get_free_pages(__GFP_DMA,
1916 (int)pages_to_order_of_mag(
1917 privptr->p_buff_pages_perread));
1918 if (p_buff==NULL) {
1919 free_pages((unsigned long)privptr->p_buff_ccw,
1920 (int)pages_to_order_of_mag(privptr->
1921 p_buff_ccw_num));
1922 /* free the write pages */
1923 p_buf=privptr->p_buff_write;
1924 while (p_buf!=NULL) {
1925 free_pages(
1926 (unsigned long)p_buf->p_buffer,
1927 (int)pages_to_order_of_mag(
1928 privptr->p_buff_pages_perwrite));
1929 p_buf=p_buf->next;
1930 }
1931 /* free any read pages already alloc */
1932 p_buf=privptr->p_buff_read;
1933 while (p_buf!=NULL) {
1934 free_pages(
1935 (unsigned long)p_buf->p_buffer,
1936 (int)pages_to_order_of_mag(
1937 privptr->p_buff_pages_perread));
1938 p_buf=p_buf->next;
1939 }
1940 privptr->p_buff_ccw=NULL;
1941 privptr->p_buff_write=NULL;
1942 return -ENOMEM;
1943 }
1944 memset(p_buff, 0x00, privptr->p_env->read_size);
1945 p_buf = p_free_chain;
1946 privptr->p_buff_read = p_buf;
1947 p_free_chain = p_buf->next;
1948
1949 if (p_last_CCWB==NULL) {
1950 p_buf->next=NULL;
1951 real_TIC_address=0;
1952 p_last_CCWB=p_buf;
1953 }
1954 else {
1955 p_buf->next=p_first_CCWB;
1956 real_TIC_address=
1957 (addr_t)__pa(
1958 &p_first_CCWB -> read );
1959 }
1960
1961 p_first_CCWB=p_buf;
1962 /* save buff address */
1963 p_buf->p_buffer=(struct clawbuf *)p_buff;
1964 /* initialize read command */
1965 p_buf-> read.cmd_code = CCW_CLAW_CMD_READ;
1966 p_buf-> read.cda = (__u32)__pa(p_buff);
1967 p_buf-> read.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1968 p_buf-> read.count = privptr->p_env->read_size;
1969
1970 /* initialize read_h command */
1971 p_buf-> read_h.cmd_code = CCW_CLAW_CMD_READHEADER;
1972 p_buf-> read_h.cda =
1973 (__u32)__pa(&(p_buf->header));
1974 p_buf-> read_h.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1975 p_buf-> read_h.count = sizeof(struct clawh);
1976
1977 /* initialize Signal command */
1978 p_buf-> signal.cmd_code = CCW_CLAW_CMD_SIGNAL_SMOD;
1979 p_buf-> signal.cda =
1980 (__u32)__pa(&(pClawH->flag));
1981 p_buf-> signal.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1982 p_buf-> signal.count = 1;
1983
1984 /* initialize r_TIC_1 command */
1985 p_buf-> r_TIC_1.cmd_code = CCW_CLAW_CMD_TIC;
1986 p_buf-> r_TIC_1.cda = (__u32)real_TIC_address;
1987 p_buf-> r_TIC_1.flags = 0;
1988 p_buf-> r_TIC_1.count = 0;
1989
1990 /* initialize r_read_FF command */
1991 p_buf-> r_read_FF.cmd_code = CCW_CLAW_CMD_READFF;
1992 p_buf-> r_read_FF.cda =
1993 (__u32)__pa(&(pClawH->flag));
1994 p_buf-> r_read_FF.flags =
1995 CCW_FLAG_SLI | CCW_FLAG_CC | CCW_FLAG_PCI;
1996 p_buf-> r_read_FF.count = 1;
1997
1998 /* initialize r_TIC_2 */
1999 memcpy(&p_buf->r_TIC_2, &p_buf->r_TIC_1,
2000 sizeof(struct ccw1));
2001
2002 /* initialize Header */
2003 p_buf->header.length=0xffff;
2004 p_buf->header.opcode=0xff;
2005 p_buf->header.flag=CLAW_PENDING;
2006
2007 } /* For read_buffers */
2008 } /* read_size >= PAGE_SIZE */
2009 } /* pBuffread = NULL */
2010 add_claw_reads( dev ,p_first_CCWB , p_last_CCWB);
2011 privptr->buffs_alloc = 1;
2012
2013 return 0;
2014} /* end of init_ccw_bk */
2015
2016/*-------------------------------------------------------------------*
2017* *
2018* probe_error *
2019* *
2020*--------------------------------------------------------------------*/
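/* Release the per-device private block, its environment and the MTC */
/* envelope, and clear the driver data pointer. */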
2021
2022static void
2023probe_error( struct ccwgroup_device *cgdev)
2024{
2025 struct claw_privbk *privptr;
2026
2027 CLAW_DBF_TEXT(4, trace, "proberr");
2028 privptr = dev_get_drvdata(&cgdev->dev);
2029 if (privptr != NULL) {
2030 dev_set_drvdata(&cgdev->dev, NULL);
2031 kfree(privptr->p_env);
2032 kfree(privptr->p_mtc_envelope);
2033 kfree(privptr);
2034 }
2035} /* probe_error */
2036
2037/*-------------------------------------------------------------------*
2038* claw_process_control *
2039* *
2040* *
2041*--------------------------------------------------------------------*/
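/* Parse a received control block and dispatch on its command code: */
/* system validate request/response, connection request/response/ */
/* confirm, disconnect or error. Called for frames on logical link 0. */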
2042
2043static int
2044claw_process_control( struct net_device *dev, struct ccwbk * p_ccw)
2045{
2046
2047 struct clawbuf *p_buf;
2048 struct clawctl ctlbk;
2049 struct clawctl *p_ctlbk;
2050 char temp_host_name[8];
2051 char temp_ws_name[8];
2052 struct claw_privbk *privptr;
2053 struct claw_env *p_env;
2054 struct sysval *p_sysval;
2055 struct conncmd *p_connect=NULL;
2056 int rc;
2057 struct chbk *p_ch = NULL;
2058 struct device *tdev;
2059 CLAW_DBF_TEXT(2, setup, "clw_cntl");
2060 udelay(1000); /* Wait 1 ms for the control packets to
2061 * catch up to each other */
2062 privptr = dev->ml_priv;
2063 p_env=privptr->p_env;
2064 tdev = &privptr->channel[READ_CHANNEL].cdev->dev;
2065 memcpy( &temp_host_name, p_env->host_name, 8);
2066 memcpy( &temp_ws_name, p_env->adapter_name , 8);
2067 dev_info(tdev, "%s: CLAW device %.8s: "
2068 "Received Control Packet\n",
2069 dev->name, temp_ws_name);
2070 if (privptr->release_pend==1) {
2071 return 0;
2072 }
2073 p_buf=p_ccw->p_buffer;
2074 p_ctlbk=&ctlbk;
2075 if (p_env->packing == DO_PACKED) { /* packing in progress?*/
2076 memcpy(p_ctlbk, &p_buf->buffer[4], sizeof(struct clawctl));
2077 } else {
2078 memcpy(p_ctlbk, p_buf, sizeof(struct clawctl));
2079 }
2080 switch (p_ctlbk->command)
2081 {
2082 case SYSTEM_VALIDATE_REQUEST:
2083 if (p_ctlbk->version != CLAW_VERSION_ID) {
2084 claw_snd_sys_validate_rsp(dev, p_ctlbk,
2085 CLAW_RC_WRONG_VERSION);
2086 dev_warn(tdev, "The communication peer of %s"
2087 " uses an incorrect API version %d\n",
2088 dev->name, p_ctlbk->version);
2089 }
2090 p_sysval = (struct sysval *)&(p_ctlbk->data);
2091 dev_info(tdev, "%s: Recv Sys Validate Request: "
2092 "Vers=%d,link_id=%d,Corr=%d,WS name=%.8s,"
2093 "Host name=%.8s\n",
2094 dev->name, p_ctlbk->version,
2095 p_ctlbk->linkid,
2096 p_ctlbk->correlator,
2097 p_sysval->WS_name,
2098 p_sysval->host_name);
2099 if (memcmp(temp_host_name, p_sysval->host_name, 8)) {
2100 claw_snd_sys_validate_rsp(dev, p_ctlbk,
2101 CLAW_RC_NAME_MISMATCH);
2102 CLAW_DBF_TEXT(2, setup, "HSTBAD");
2103 CLAW_DBF_TEXT_(2, setup, "%s", p_sysval->host_name);
2104 CLAW_DBF_TEXT_(2, setup, "%s", temp_host_name);
2105 dev_warn(tdev,
2106 "Host name %s for %s does not match the"
2107 " remote adapter name %s\n",
2108 p_sysval->host_name,
2109 dev->name,
2110 temp_host_name);
2111 }
2112 if (memcmp(temp_ws_name, p_sysval->WS_name, 8)) {
2113 claw_snd_sys_validate_rsp(dev, p_ctlbk,
2114 CLAW_RC_NAME_MISMATCH);
2115 CLAW_DBF_TEXT(2, setup, "WSNBAD");
2116 CLAW_DBF_TEXT_(2, setup, "%s", p_sysval->WS_name);
2117 CLAW_DBF_TEXT_(2, setup, "%s", temp_ws_name);
2118 dev_warn(tdev, "Adapter name %s for %s does not match"
2119 " the remote host name %s\n",
2120 p_sysval->WS_name,
2121 dev->name,
2122 temp_ws_name);
2123 }
2124 if ((p_sysval->write_frame_size < p_env->write_size) &&
2125 (p_env->packing == 0)) {
2126 claw_snd_sys_validate_rsp(dev, p_ctlbk,
2127 CLAW_RC_HOST_RCV_TOO_SMALL);
2128 dev_warn(tdev,
2129 "The local write buffer is smaller than the"
2130 " remote read buffer\n");
2131 CLAW_DBF_TEXT(2, setup, "wrtszbad");
2132 }
2133 if ((p_sysval->read_frame_size < p_env->read_size) &&
2134 (p_env->packing == 0)) {
2135 claw_snd_sys_validate_rsp(dev, p_ctlbk,
2136 CLAW_RC_HOST_RCV_TOO_SMALL);
2137 dev_warn(tdev,
2138 "The local read buffer is smaller than the"
2139 " remote write buffer\n");
2140 CLAW_DBF_TEXT(2, setup, "rdsizbad");
2141 }
2142 claw_snd_sys_validate_rsp(dev, p_ctlbk, 0);
2143 dev_info(tdev,
2144 "CLAW device %.8s: System validate"
2145 " completed.\n", temp_ws_name);
2146 dev_info(tdev,
2147 "%s: sys Validate Rsize:%d Wsize:%d\n",
2148 dev->name, p_sysval->read_frame_size,
2149 p_sysval->write_frame_size);
2150 privptr->system_validate_comp = 1;
2151 if (strncmp(p_env->api_type, WS_APPL_NAME_PACKED, 6) == 0)
2152 p_env->packing = PACKING_ASK;
2153 claw_strt_conn_req(dev);
2154 break;
2155 case SYSTEM_VALIDATE_RESPONSE:
2156 p_sysval = (struct sysval *)&(p_ctlbk->data);
2157 dev_info(tdev,
2158 "Settings for %s validated (version=%d, "
2159 "remote device=%d, rc=%d, adapter name=%.8s, "
2160 "host name=%.8s)\n",
2161 dev->name,
2162 p_ctlbk->version,
2163 p_ctlbk->correlator,
2164 p_ctlbk->rc,
2165 p_sysval->WS_name,
2166 p_sysval->host_name);
2167 switch (p_ctlbk->rc) {
2168 case 0:
2169 dev_info(tdev, "%s: CLAW device "
2170 "%.8s: System validate completed.\n",
2171 dev->name, temp_ws_name);
2172 if (privptr->system_validate_comp == 0)
2173 claw_strt_conn_req(dev);
2174 privptr->system_validate_comp = 1;
2175 break;
2176 case CLAW_RC_NAME_MISMATCH:
2177 dev_warn(tdev, "Validating %s failed because of"
2178 " a host or adapter name mismatch\n",
2179 dev->name);
2180 break;
2181 case CLAW_RC_WRONG_VERSION:
2182 dev_warn(tdev, "Validating %s failed because of a"
2183 " version conflict\n",
2184 dev->name);
2185 break;
2186 case CLAW_RC_HOST_RCV_TOO_SMALL:
2187 dev_warn(tdev, "Validating %s failed because of a"
2188 " frame size conflict\n",
2189 dev->name);
2190 break;
2191 default:
2192 dev_warn(tdev, "The communication peer of %s rejected"
2193 " the connection\n",
2194 dev->name);
2195 break;
2196 }
2197 break;
2198
2199 case CONNECTION_REQUEST:
2200 p_connect = (struct conncmd *)&(p_ctlbk->data);
2201 dev_info(tdev, "%s: Recv Conn Req: Vers=%d,link_id=%d,"
2202 "Corr=%d,HOST appl=%.8s,WS appl=%.8s\n",
2203 dev->name,
2204 p_ctlbk->version,
2205 p_ctlbk->linkid,
2206 p_ctlbk->correlator,
2207 p_connect->host_name,
2208 p_connect->WS_name);
2209 if (privptr->active_link_ID != 0) {
2210 claw_snd_disc(dev, p_ctlbk);
2211 dev_info(tdev, "%s rejected a connection request"
2212 " because it is already active\n",
2213 dev->name);
2214 }
2215 if (p_ctlbk->linkid != 1) {
2216 claw_snd_disc(dev, p_ctlbk);
2217 dev_info(tdev, "%s rejected a request to open multiple"
2218 " connections\n",
2219 dev->name);
2220 }
2221 rc = find_link(dev, p_connect->host_name, p_connect->WS_name);
2222 if (rc != 0) {
2223 claw_snd_disc(dev, p_ctlbk);
2224 dev_info(tdev, "%s rejected a connection request"
2225 " because of a type mismatch\n",
2226 dev->name);
2227 }
2228 claw_send_control(dev,
2229 CONNECTION_CONFIRM, p_ctlbk->linkid,
2230 p_ctlbk->correlator,
2231 0, p_connect->host_name,
2232 p_connect->WS_name);
2233 if (p_env->packing == PACKING_ASK) {
2234 p_env->packing = PACK_SEND;
2235 claw_snd_conn_req(dev, 0);
2236 }
2237 dev_info(tdev, "%s: CLAW device %.8s: Connection "
2238 "completed link_id=%d.\n",
2239 dev->name, temp_ws_name,
2240 p_ctlbk->linkid);
2241 privptr->active_link_ID = p_ctlbk->linkid;
2242 p_ch = &privptr->channel[WRITE_CHANNEL];
2243 wake_up(&p_ch->wait); /* wake up claw_open ( WRITE) */
2244 break;
2245 case CONNECTION_RESPONSE:
2246 p_connect = (struct conncmd *)&(p_ctlbk->data);
2247 dev_info(tdev, "%s: Recv Conn Resp: Vers=%d,link_id=%d,"
2248 "Corr=%d,RC=%d,Host appl=%.8s, WS appl=%.8s\n",
2249 dev->name,
2250 p_ctlbk->version,
2251 p_ctlbk->linkid,
2252 p_ctlbk->correlator,
2253 p_ctlbk->rc,
2254 p_connect->host_name,
2255 p_connect->WS_name);
2256
2257 if (p_ctlbk->rc != 0) {
2258 dev_warn(tdev, "The communication peer of %s rejected"
2259 " a connection request\n",
2260 dev->name);
2261 return 1;
2262 }
2263 rc = find_link(dev,
2264 p_connect->host_name, p_connect->WS_name);
2265 if (rc != 0) {
2266 claw_snd_disc(dev, p_ctlbk);
2267 dev_warn(tdev, "The communication peer of %s"
2268 " rejected a connection "
2269 "request because of a type mismatch\n",
2270 dev->name);
2271 }
2272 /* negative link ID marks the link as pending until CONNECTION_CONFIRM */
2273 privptr->active_link_ID = -(p_ctlbk->linkid);
2274 break;
2275 case CONNECTION_CONFIRM:
2276 p_connect = (struct conncmd *)&(p_ctlbk->data);
2277 dev_info(tdev,
2278 "%s: Recv Conn Confirm:Vers=%d,link_id=%d,"
2279 "Corr=%d,Host appl=%.8s,WS appl=%.8s\n",
2280 dev->name,
2281 p_ctlbk->version,
2282 p_ctlbk->linkid,
2283 p_ctlbk->correlator,
2284 p_connect->host_name,
2285 p_connect->WS_name);
2286 if (p_ctlbk->linkid == -(privptr->active_link_ID)) {
2287 privptr->active_link_ID = p_ctlbk->linkid;
2288 if (p_env->packing > PACKING_ASK) {
2289 dev_info(tdev,
2290 "%s: Confirmed Now packing\n", dev->name);
2291 p_env->packing = DO_PACKED;
2292 }
2293 p_ch = &privptr->channel[WRITE_CHANNEL];
2294 wake_up(&p_ch->wait);
2295 } else {
2296 dev_warn(tdev, "Activating %s failed because of"
2297 " an incorrect link ID=%d\n",
2298 dev->name, p_ctlbk->linkid);
2299 claw_snd_disc(dev, p_ctlbk);
2300 }
2301 break;
2302 case DISCONNECT:
2303 dev_info(tdev, "%s: Disconnect: "
2304 "Vers=%d,link_id=%d,Corr=%d\n",
2305 dev->name, p_ctlbk->version,
2306 p_ctlbk->linkid, p_ctlbk->correlator);
2307 if ((p_ctlbk->linkid == 2) &&
2308 (p_env->packing == PACK_SEND)) {
2309 privptr->active_link_ID = 1;
2310 p_env->packing = DO_PACKED;
2311 } else
2312 privptr->active_link_ID = 0;
2313 break;
2314 case CLAW_ERROR:
2315 dev_warn(tdev, "The communication peer of %s failed\n",
2316 dev->name);
2317 break;
2318 default:
2319 dev_warn(tdev, "The communication peer of %s sent"
2320 " an unknown command code\n",
2321 dev->name);
2322 break;
2323 }
2324
2325 return 0;
2326} /* end of claw_process_control */
2327
2328
2329/*-------------------------------------------------------------------*
2330* claw_send_control *
2331* *
2332*--------------------------------------------------------------------*/
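/* Build a clawctl record of the given type in privptr->ctl_bk, fill */
/* in the sysval or conncmd payload, copy it into a new skb and send */
/* it via claw_hw_tx (link 1 when packing is active, link 0 otherwise). */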
2333
2334static int
2335claw_send_control(struct net_device *dev, __u8 type, __u8 link,
2336 __u8 correlator, __u8 rc, char *local_name, char *remote_name)
2337{
2338 struct claw_privbk *privptr;
2339 struct clawctl *p_ctl;
2340 struct sysval *p_sysval;
2341 struct conncmd *p_connect;
2342 struct sk_buff *skb;
2343
2344 CLAW_DBF_TEXT(2, setup, "sndcntl");
2345 privptr = dev->ml_priv;
2346 p_ctl=(struct clawctl *)&privptr->ctl_bk;
2347
2348 p_ctl->command=type;
2349 p_ctl->version=CLAW_VERSION_ID;
2350 p_ctl->linkid=link;
2351 p_ctl->correlator=correlator;
2352 p_ctl->rc=rc;
2353
2354 p_sysval=(struct sysval *)&p_ctl->data;
2355 p_connect=(struct conncmd *)&p_ctl->data;
2356
2357 switch (p_ctl->command) {
2358 case SYSTEM_VALIDATE_REQUEST:
2359 case SYSTEM_VALIDATE_RESPONSE:
2360 memcpy(&p_sysval->host_name, local_name, 8);
2361 memcpy(&p_sysval->WS_name, remote_name, 8);
2362 if (privptr->p_env->packing > 0) {
2363 p_sysval->read_frame_size = DEF_PACK_BUFSIZE;
2364 p_sysval->write_frame_size = DEF_PACK_BUFSIZE;
2365 } else {
2366 /* not packing: advertise the configured read/write buffer sizes */
2367 p_sysval->read_frame_size =
2368 privptr->p_env->read_size;
2369 p_sysval->write_frame_size =
2370 privptr->p_env->write_size;
2371 }
2372 memset(&p_sysval->reserved, 0x00, 4);
2373 break;
2374 case CONNECTION_REQUEST:
2375 case CONNECTION_RESPONSE:
2376 case CONNECTION_CONFIRM:
2377 case DISCONNECT:
2378 memcpy(&p_sysval->host_name, local_name, 8);
2379 memcpy(&p_sysval->WS_name, remote_name, 8);
2380 if (privptr->p_env->packing > 0) {
2381 /* How big is the biggest packet */
2382 p_connect->reserved1[0]=CLAW_FRAME_SIZE;
2383 p_connect->reserved1[1]=CLAW_FRAME_SIZE;
2384 } else {
2385 memset(&p_connect->reserved1, 0x00, 4);
2386 memset(&p_connect->reserved2, 0x00, 4);
2387 }
2388 break;
2389 default:
2390 break;
2391 }
2392
2393 /* write Control Record to the device */
2394
2395
2396 skb = dev_alloc_skb(sizeof(struct clawctl));
2397 if (!skb) {
2398 return -ENOMEM;
2399 }
2400 memcpy(skb_put(skb, sizeof(struct clawctl)),
2401 p_ctl, sizeof(struct clawctl));
2402 if (privptr->p_env->packing >= PACK_SEND)
2403 claw_hw_tx(skb, dev, 1);
2404 else
2405 claw_hw_tx(skb, dev, 0);
2406 return 0;
2407} /* end of claw_send_control */
2408
2409/*-------------------------------------------------------------------*
2410* claw_snd_conn_req *
2411* *
2412*--------------------------------------------------------------------*/
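/* Send a CONNECTION_REQUEST on the given link once system validation */
/* has completed; the application name pair depends on the negotiated */
/* packing mode. */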
2413static int
2414claw_snd_conn_req(struct net_device *dev, __u8 link)
2415{
2416 int rc;
2417 struct claw_privbk *privptr = dev->ml_priv;
2418 struct clawctl *p_ctl;
2419
2420 CLAW_DBF_TEXT(2, setup, "snd_conn");
2421 rc = 1;
2422 p_ctl=(struct clawctl *)&privptr->ctl_bk;
2423 p_ctl->linkid = link;
2424 if ( privptr->system_validate_comp==0x00 ) {
2425 return rc;
2426 }
2427 if (privptr->p_env->packing == PACKING_ASK )
2428 rc=claw_send_control(dev, CONNECTION_REQUEST,0,0,0,
2429 WS_APPL_NAME_PACKED, WS_APPL_NAME_PACKED);
2430 if (privptr->p_env->packing == PACK_SEND) {
2431 rc=claw_send_control(dev, CONNECTION_REQUEST,0,0,0,
2432 WS_APPL_NAME_IP_NAME, WS_APPL_NAME_IP_NAME);
2433 }
2434 if (privptr->p_env->packing == 0)
2435 rc=claw_send_control(dev, CONNECTION_REQUEST,0,0,0,
2436 HOST_APPL_NAME, privptr->p_env->api_type);
2437 return rc;
2438
2439} /* end of claw_snd_conn_req */
2440
2441
2442/*-------------------------------------------------------------------*
2443* claw_snd_disc *
2444* *
2445*--------------------------------------------------------------------*/
2446
2447static int
2448claw_snd_disc(struct net_device *dev, struct clawctl * p_ctl)
2449{
2450 int rc;
2451 struct conncmd * p_connect;
2452
2453 CLAW_DBF_TEXT(2, setup, "snd_dsc");
2454 p_connect=(struct conncmd *)&p_ctl->data;
2455
2456 rc=claw_send_control(dev, DISCONNECT, p_ctl->linkid,
2457 p_ctl->correlator, 0,
2458 p_connect->host_name, p_connect->WS_name);
2459 return rc;
2460} /* end of claw_snd_disc */
2461
2462
2463/*-------------------------------------------------------------------*
2464* claw_snd_sys_validate_rsp *
2465* *
2466*--------------------------------------------------------------------*/
2467
2468static int
2469claw_snd_sys_validate_rsp(struct net_device *dev,
2470 struct clawctl *p_ctl, __u32 return_code)
2471{
2472 struct claw_env * p_env;
2473 struct claw_privbk *privptr;
2474 int rc;
2475
2476 CLAW_DBF_TEXT(2, setup, "chkresp");
2477 privptr = dev->ml_priv;
2478 p_env=privptr->p_env;
2479 rc=claw_send_control(dev, SYSTEM_VALIDATE_RESPONSE,
2480 p_ctl->linkid,
2481 p_ctl->correlator,
2482 return_code,
2483 p_env->host_name,
2484 p_env->adapter_name );
2485 return rc;
2486} /* end of claw_snd_sys_validate_rsp */
2487
2488/*-------------------------------------------------------------------*
2489* claw_strt_conn_req *
2490* *
2491*--------------------------------------------------------------------*/
2492
2493static int
2494claw_strt_conn_req(struct net_device *dev )
2495{
2496 int rc;
2497
2498 CLAW_DBF_TEXT(2, setup, "conn_req");
2499 rc=claw_snd_conn_req(dev, 1);
2500 return rc;
2501} /* end of claw_strt_conn_req */
2502
2503
2504
2505/*-------------------------------------------------------------------*
2506 * claw_stats *
2507 *-------------------------------------------------------------------*/
2508
2509static struct net_device_stats *
2510claw_stats(struct net_device *dev)
2511{
2512 struct claw_privbk *privptr;
2513
2514 CLAW_DBF_TEXT(4, trace, "stats");
2515 privptr = dev->ml_priv;
2516 return &privptr->stats;
2517} /* end of claw_stats */
2518
2519
2520/*-------------------------------------------------------------------*
2521* unpack_read *
2522* *
2523*--------------------------------------------------------------------*/
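/* Walk the completed read CCWs: reassemble packed and multi-frame */
/* data into the MTC envelope, hand finished frames to the stack via */
/* netif_rx, then return the blocks to the free read chain and */
/* restart the read channel program. */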
2524static void
2525unpack_read(struct net_device *dev )
2526{
2527 struct sk_buff *skb;
2528 struct claw_privbk *privptr;
2529 struct claw_env *p_env;
2530 struct ccwbk *p_this_ccw;
2531 struct ccwbk *p_first_ccw;
2532 struct ccwbk *p_last_ccw;
2533 struct clawph *p_packh;
2534 void *p_packd;
2535 struct clawctl *p_ctlrec=NULL;
2536 struct device *p_dev;
2537
2538 __u32 len_of_data;
2539 __u32 pack_off;
2540 __u8 link_num;
2541 __u8 mtc_this_frm=0;
2542 __u32 bytes_to_mov;
2543 int i=0;
2544 int p=0;
2545
2546 CLAW_DBF_TEXT(4, trace, "unpkread");
2547 p_first_ccw=NULL;
2548 p_last_ccw=NULL;
2549 p_packh=NULL;
2550 p_packd=NULL;
2551 privptr = dev->ml_priv;
2552
2553 p_dev = &privptr->channel[READ_CHANNEL].cdev->dev;
2554 p_env = privptr->p_env;
2555 p_this_ccw=privptr->p_read_active_first;
2556 while (p_this_ccw!=NULL && p_this_ccw->header.flag!=CLAW_PENDING) {
2557 pack_off = 0;
2558 p = 0;
2559 p_this_ccw->header.flag=CLAW_PENDING;
2560 privptr->p_read_active_first=p_this_ccw->next;
2561 p_this_ccw->next=NULL;
2562 p_packh = (struct clawph *)p_this_ccw->p_buffer;
2563 if ((p_env->packing == PACK_SEND) &&
2564 (p_packh->len == 32) &&
2565 (p_packh->link_num == 0)) { /* is it a packed ctl rec? */
2566 p_packh++; /* peek past pack header */
2567 p_ctlrec = (struct clawctl *)p_packh;
2568 p_packh--; /* un peek */
2569 if ((p_ctlrec->command == CONNECTION_RESPONSE) ||
2570 (p_ctlrec->command == CONNECTION_CONFIRM))
2571 p_env->packing = DO_PACKED;
2572 }
2573 if (p_env->packing == DO_PACKED)
2574 link_num=p_packh->link_num;
2575 else
2576 link_num=p_this_ccw->header.opcode / 8;
2577 if ((p_this_ccw->header.opcode & MORE_to_COME_FLAG)!=0) {
2578 mtc_this_frm=1;
2579 if (p_this_ccw->header.length!=
2580 privptr->p_env->read_size ) {
2581 dev_warn(p_dev,
2582 "The communication peer of %s"
2583 " sent a faulty"
2584 " frame of length %02x\n",
2585 dev->name, p_this_ccw->header.length);
2586 }
2587 }
2588
2589 if (privptr->mtc_skipping) {
2590 /*
2591 * We're in the mode of skipping past a
2592 * multi-frame message
2593 * that we can't process for some reason or other.
2594 * The first frame without the More-To-Come flag is
2595 * the last frame of the skipped message.
2596 */
2597 /* in case of More-To-Come not set in this frame */
2598 if (mtc_this_frm==0) {
2599 privptr->mtc_skipping=0; /* Ok, the end */
2600 privptr->mtc_logical_link=-1;
2601 }
2602 goto NextFrame;
2603 }
2604
2605 if (link_num==0) {
2606 claw_process_control(dev, p_this_ccw);
2607 CLAW_DBF_TEXT(4, trace, "UnpkCntl");
2608 goto NextFrame;
2609 }
2610unpack_next:
2611 if (p_env->packing == DO_PACKED) {
2612 if (pack_off > p_env->read_size)
2613 goto NextFrame;
2614 p_packd = p_this_ccw->p_buffer+pack_off;
2615 p_packh = (struct clawph *) p_packd;
2616 if ((p_packh->len == 0) || /* done with this frame? */
2617 (p_packh->flag != 0))
2618 goto NextFrame;
2619 bytes_to_mov = p_packh->len;
2620 pack_off += bytes_to_mov+sizeof(struct clawph);
2621 p++;
2622 } else {
2623 bytes_to_mov=p_this_ccw->header.length;
2624 }
2625 if (privptr->mtc_logical_link<0) {
2626
2627 /*
2628 * if More-To-Come is set in this frame then we don't know
2629 * length of entire message, and hence have to allocate
2630 * large buffer */
2631
2632 /* We are starting a new envelope */
2633 privptr->mtc_offset=0;
2634 privptr->mtc_logical_link=link_num;
2635 }
2636
2637 if (bytes_to_mov > (MAX_ENVELOPE_SIZE- privptr->mtc_offset) ) {
2638 /* error */
2639 privptr->stats.rx_frame_errors++;
2640 goto NextFrame;
2641 }
2642 if (p_env->packing == DO_PACKED) {
2643 memcpy( privptr->p_mtc_envelope+ privptr->mtc_offset,
2644 p_packd+sizeof(struct clawph), bytes_to_mov);
2645
2646 } else {
2647 memcpy( privptr->p_mtc_envelope+ privptr->mtc_offset,
2648 p_this_ccw->p_buffer, bytes_to_mov);
2649 }
2650 if (mtc_this_frm==0) {
2651 len_of_data=privptr->mtc_offset+bytes_to_mov;
2652 skb=dev_alloc_skb(len_of_data);
2653 if (skb) {
2654 memcpy(skb_put(skb,len_of_data),
2655 privptr->p_mtc_envelope,
2656 len_of_data);
2657 skb->dev=dev;
2658 skb_reset_mac_header(skb);
2659 skb->protocol=htons(ETH_P_IP);
2660 skb->ip_summed=CHECKSUM_UNNECESSARY;
2661 privptr->stats.rx_packets++;
2662 privptr->stats.rx_bytes+=len_of_data;
2663 netif_rx(skb);
2664 }
2665 else {
2666 dev_info(p_dev, "Allocating a buffer for"
2667 " incoming data failed\n");
2668 privptr->stats.rx_dropped++;
2669 }
2670 privptr->mtc_offset=0;
2671 privptr->mtc_logical_link=-1;
2672 }
2673 else {
2674 privptr->mtc_offset+=bytes_to_mov;
2675 }
2676 if (p_env->packing == DO_PACKED)
2677 goto unpack_next;
2678NextFrame:
2679 /*
2680 * Remove ThisCCWblock from active read queue, and add it
2681 * to queue of free blocks to be reused.
2682 */
2683 i++;
2684 p_this_ccw->header.length=0xffff;
2685 p_this_ccw->header.opcode=0xff;
2686 /*
2687 * add this one to the free queue for later reuse
2688 */
2689 if (p_first_ccw==NULL) {
2690 p_first_ccw = p_this_ccw;
2691 }
2692 else {
2693 p_last_ccw->next = p_this_ccw;
2694 }
2695 p_last_ccw = p_this_ccw;
2696 /*
2697 * chain to next block on active read queue
2698 */
2699 p_this_ccw = privptr->p_read_active_first;
2700 CLAW_DBF_TEXT_(4, trace, "rxpkt %d", p);
2701 } /* end of while */
2702
2703 /* check validity */
2704
2705 CLAW_DBF_TEXT_(4, trace, "rxfrm %d", i);
2706 add_claw_reads(dev, p_first_ccw, p_last_ccw);
2707 claw_strt_read(dev, LOCK_YES);
2708 return;
2709} /* end of unpack_read */
2710
2711/*-------------------------------------------------------------------*
2712* claw_strt_read *
2713* *
2714*--------------------------------------------------------------------*/
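/* Set the shared signal-block flag (CLAW_BUSY or CLAW_IDLE) and, if */
/* no read I/O is in flight, start the read channel program at the */
/* first active read CCW; 'lock' selects whether the ccw device lock */
/* is taken here. */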
2715static void
2716claw_strt_read (struct net_device *dev, int lock )
2717{
2718 int rc = 0;
2719 __u32 parm;
2720 unsigned long saveflags = 0;
2721 struct claw_privbk *privptr = dev->ml_priv;
2722 struct ccwbk*p_ccwbk;
2723 struct chbk *p_ch;
2724 struct clawh *p_clawh;
2725 p_ch = &privptr->channel[READ_CHANNEL];
2726
2727 CLAW_DBF_TEXT(4, trace, "StRdNter");
2728 p_clawh=(struct clawh *)privptr->p_claw_signal_blk;
2729 p_clawh->flag=CLAW_IDLE; /* 0x00 */
2730
2731 if ((privptr->p_write_active_first!=NULL &&
2732 privptr->p_write_active_first->header.flag!=CLAW_PENDING) ||
2733 (privptr->p_read_active_first!=NULL &&
2734 privptr->p_read_active_first->header.flag!=CLAW_PENDING )) {
2735 p_clawh->flag=CLAW_BUSY; /* 0xff */
2736 }
2737 if (lock==LOCK_YES) {
2738 spin_lock_irqsave(get_ccwdev_lock(p_ch->cdev), saveflags);
2739 }
2740 if (test_and_set_bit(0, (void *)&p_ch->IO_active) == 0) {
2741 CLAW_DBF_TEXT(4, trace, "HotRead");
2742 p_ccwbk=privptr->p_read_active_first;
2743 parm = (unsigned long) p_ch;
2744 rc = ccw_device_start (p_ch->cdev, &p_ccwbk->read, parm,
2745 0xff, 0);
2746 if (rc != 0) {
2747 ccw_check_return_code(p_ch->cdev, rc);
2748 }
2749 }
2750 else {
2751 CLAW_DBF_TEXT(2, trace, "ReadAct");
2752 }
2753
2754 if (lock==LOCK_YES) {
2755 spin_unlock_irqrestore(get_ccwdev_lock(p_ch->cdev), saveflags);
2756 }
2757 CLAW_DBF_TEXT(4, trace, "StRdExit");
2758 return;
2759} /* end of claw_strt_read */
2760
2761/*-------------------------------------------------------------------*
2762* claw_strt_out_IO *
2763* *
2764*--------------------------------------------------------------------*/
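/* Start the write channel program at the first queued write CCW, */
/* unless the channel is stopped or a write I/O is already active. */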
2765
2766static void
2767claw_strt_out_IO( struct net_device *dev )
2768{
2769 int rc = 0;
2770 unsigned long parm;
2771 struct claw_privbk *privptr;
2772 struct chbk *p_ch;
2773 struct ccwbk *p_first_ccw;
2774
2775 if (!dev) {
2776 return;
2777 }
2778 privptr = (struct claw_privbk *)dev->ml_priv;
2779 p_ch = &privptr->channel[WRITE_CHANNEL];
2780
2781 CLAW_DBF_TEXT(4, trace, "strt_io");
2782 p_first_ccw=privptr->p_write_active_first;
2783
2784 if (p_ch->claw_state == CLAW_STOP)
2785 return;
2786 if (p_first_ccw == NULL) {
2787 return;
2788 }
2789 if (test_and_set_bit(0, (void *)&p_ch->IO_active) == 0) {
2790 parm = (unsigned long) p_ch;
2791 CLAW_DBF_TEXT(2, trace, "StWrtIO");
2792 rc = ccw_device_start(p_ch->cdev, &p_first_ccw->write, parm,
2793 0xff, 0);
2794 if (rc != 0) {
2795 ccw_check_return_code(p_ch->cdev, rc);
2796 }
2797 }
2798 dev->trans_start = jiffies;
2799 return;
2800} /* end of claw_strt_out_IO */
2801
2802/*-------------------------------------------------------------------*
2803* Free write buffers *
2804* *
2805*--------------------------------------------------------------------*/
2806
2807static void
2808claw_free_wrt_buf( struct net_device *dev )
2809{
2810
2811 struct claw_privbk *privptr = (struct claw_privbk *)dev->ml_priv;
2812 struct ccwbk*p_this_ccw;
2813 struct ccwbk*p_next_ccw;
2814
2815 CLAW_DBF_TEXT(4, trace, "freewrtb");
2816 /* scan the write queue to free any completed write packets */
2817 p_this_ccw=privptr->p_write_active_first;
2818 while ( (p_this_ccw!=NULL) && (p_this_ccw->header.flag!=CLAW_PENDING))
2819 {
2820 p_next_ccw = p_this_ccw->next;
2821 if (((p_next_ccw!=NULL) &&
2822 (p_next_ccw->header.flag!=CLAW_PENDING)) ||
2823 ((p_this_ccw == privptr->p_write_active_last) &&
2824 (p_this_ccw->header.flag!=CLAW_PENDING))) {
2825 /* the next CCW has completed, or this is */
2826 /* the last active CCW: free this one */
2827 privptr->p_write_active_first=p_this_ccw->next;
2828 p_this_ccw->header.flag=CLAW_PENDING;
2829 p_this_ccw->next=privptr->p_write_free_chain;
2830 privptr->p_write_free_chain=p_this_ccw;
2831 ++privptr->write_free_count;
2832 privptr->stats.tx_bytes+= p_this_ccw->write.count;
2833 p_this_ccw=privptr->p_write_active_first;
2834 privptr->stats.tx_packets++;
2835 }
2836 else {
2837 break;
2838 }
2839 }
2840 if (privptr->write_free_count!=0) {
2841 claw_clearbit_busy(TB_NOBUFFER,dev);
2842 }
2843 /* whole chain removed? */
2844 if (privptr->p_write_active_first==NULL) {
2845 privptr->p_write_active_last=NULL;
2846 }
2847 CLAW_DBF_TEXT_(4, trace, "FWC=%d", privptr->write_free_count);
2848 return;
2849}
2850
2851/*-------------------------------------------------------------------*
2852* claw free netdevice *
2853* *
2854*--------------------------------------------------------------------*/
2855static void
2856claw_free_netdevice(struct net_device * dev, int free_dev)
2857{
2858 struct claw_privbk *privptr;
2859
2860 CLAW_DBF_TEXT(2, setup, "free_dev");
2861 if (!dev)
2862 return;
2863 CLAW_DBF_TEXT_(2, setup, "%s", dev->name);
2864 privptr = dev->ml_priv;
2865 if (dev->flags & IFF_RUNNING)
2866 claw_release(dev);
2867 if (privptr) {
2868 privptr->channel[READ_CHANNEL].ndev = NULL; /* say it's free */
2869 }
2870 dev->ml_priv = NULL;
2871#ifdef MODULE
2872 if (free_dev) {
2873 free_netdev(dev);
2874 }
2875#endif
2876 CLAW_DBF_TEXT(2, setup, "free_ok");
2877}
2878
2879/**
2880 * Claw init netdevice
2881 * Initialize everything of the net device except the name and the
2882 * channel structs.
2883 */
2884static const struct net_device_ops claw_netdev_ops = {
2885 .ndo_open = claw_open,
2886 .ndo_stop = claw_release,
2887 .ndo_get_stats = claw_stats,
2888 .ndo_start_xmit = claw_tx,
2889 .ndo_change_mtu = claw_change_mtu,
2890};
2891
2892static void
2893claw_init_netdevice(struct net_device * dev)
2894{
2895 CLAW_DBF_TEXT(2, setup, "init_dev");
2896 CLAW_DBF_TEXT_(2, setup, "%s", dev->name);
2897 dev->mtu = CLAW_DEFAULT_MTU_SIZE;
2898 dev->hard_header_len = 0;
2899 dev->addr_len = 0;
2900 dev->type = ARPHRD_SLIP;
2901 dev->tx_queue_len = 1300;
2902 dev->flags = IFF_POINTOPOINT | IFF_NOARP;
2903 dev->netdev_ops = &claw_netdev_ops;
2904 CLAW_DBF_TEXT(2, setup, "initok");
2905 return;
2906}
2907
2908/**
2909 * Init a new channel in the privptr->channel[i].
2910 *
2911 * @param cdev The ccw_device to be added.
2912 *
2913 * @return 0 on success, !0 on error.
2914 */
2915static int
2916add_channel(struct ccw_device *cdev,int i,struct claw_privbk *privptr)
2917{
2918 struct chbk *p_ch;
2919 struct ccw_dev_id dev_id;
2920
2921 CLAW_DBF_TEXT_(2, setup, "%s", dev_name(&cdev->dev));
2922 privptr->channel[i].flag = i+1; /* Read is 1 Write is 2 */
2923 p_ch = &privptr->channel[i];
2924 p_ch->cdev = cdev;
2925 snprintf(p_ch->id, CLAW_ID_SIZE, "cl-%s", dev_name(&cdev->dev));
2926 ccw_device_get_id(cdev, &dev_id);
2927 p_ch->devno = dev_id.devno;
2928 if ((p_ch->irb = kzalloc(sizeof (struct irb),GFP_KERNEL)) == NULL) {
2929 return -ENOMEM;
2930 }
2931 return 0;
2932}
2933
2934
2935/**
2936 *
2937 * Setup an interface.
2938 *
2939 * @param cgdev Device to be setup.
2940 *
2941 * @returns 0 on success, !0 on failure.
2942 */
2943static int
2944claw_new_device(struct ccwgroup_device *cgdev)
2945{
2946 struct claw_privbk *privptr;
2947 struct claw_env *p_env;
2948 struct net_device *dev;
2949 int ret;
2950 struct ccw_dev_id dev_id;
2951
2952 dev_info(&cgdev->dev, "add for %s\n",
2953 dev_name(&cgdev->cdev[READ_CHANNEL]->dev));
2954 CLAW_DBF_TEXT(2, setup, "new_dev");
2955 privptr = dev_get_drvdata(&cgdev->dev);
2956 dev_set_drvdata(&cgdev->cdev[READ_CHANNEL]->dev, privptr);
2957 dev_set_drvdata(&cgdev->cdev[WRITE_CHANNEL]->dev, privptr);
2958 if (!privptr)
2959 return -ENODEV;
2960 p_env = privptr->p_env;
2961 ccw_device_get_id(cgdev->cdev[READ_CHANNEL], &dev_id);
2962 p_env->devno[READ_CHANNEL] = dev_id.devno;
2963 ccw_device_get_id(cgdev->cdev[WRITE_CHANNEL], &dev_id);
2964 p_env->devno[WRITE_CHANNEL] = dev_id.devno;
2965 ret = add_channel(cgdev->cdev[0],0,privptr);
2966 if (ret == 0)
2967 ret = add_channel(cgdev->cdev[1],1,privptr);
2968 if (ret != 0) {
2969 dev_warn(&cgdev->dev, "Creating a CLAW group device"
2970 " failed with error code %d\n", ret);
2971 goto out;
2972 }
2973 ret = ccw_device_set_online(cgdev->cdev[READ_CHANNEL]);
2974 if (ret != 0) {
2975 dev_warn(&cgdev->dev,
2976 "Setting the read subchannel online"
2977 " failed with error code %d\n", ret);
2978 goto out;
2979 }
2980 ret = ccw_device_set_online(cgdev->cdev[WRITE_CHANNEL]);
2981 if (ret != 0) {
2982 dev_warn(&cgdev->dev,
2983 "Setting the write subchannel online "
2984 "failed with error code %d\n", ret);
2985 goto out;
2986 }
2987 dev = alloc_netdev(0,"claw%d",claw_init_netdevice);
2988 if (!dev) {
2989 dev_warn(&cgdev->dev,
2990 "Activating the CLAW device failed\n");
2991 goto out;
2992 }
2993 dev->ml_priv = privptr;
2994 dev_set_drvdata(&cgdev->dev, privptr);
2995 dev_set_drvdata(&cgdev->cdev[READ_CHANNEL]->dev, privptr);
2996 dev_set_drvdata(&cgdev->cdev[WRITE_CHANNEL]->dev, privptr);
2997 /* sysfs magic */
2998 SET_NETDEV_DEV(dev, &cgdev->dev);
2999 if (register_netdev(dev) != 0) {
3000 claw_free_netdevice(dev, 1);
3001 CLAW_DBF_TEXT(2, trace, "regfail");
3002 goto out;
3003 }
3004 dev->flags &=~IFF_RUNNING;
3005 if (privptr->buffs_alloc == 0) {
3006 ret=init_ccw_bk(dev);
3007 if (ret !=0) {
3008 unregister_netdev(dev);
3009 claw_free_netdevice(dev,1);
3010 CLAW_DBF_TEXT(2, trace, "ccwmem");
3011 goto out;
3012 }
3013 }
3014 privptr->channel[READ_CHANNEL].ndev = dev;
3015 privptr->channel[WRITE_CHANNEL].ndev = dev;
3016 privptr->p_env->ndev = dev;
3017
3018 dev_info(&cgdev->dev, "%s:readsize=%d writesize=%d "
3019 "readbuffer=%d writebuffer=%d read=0x%04x write=0x%04x\n",
3020 dev->name, p_env->read_size,
3021 p_env->write_size, p_env->read_buffers,
3022 p_env->write_buffers, p_env->devno[READ_CHANNEL],
3023 p_env->devno[WRITE_CHANNEL]);
3024 dev_info(&cgdev->dev, "%s:host_name:%.8s, adapter_name "
3025 ":%.8s api_type: %.8s\n",
3026 dev->name, p_env->host_name,
3027 p_env->adapter_name , p_env->api_type);
3028 return 0;
3029out:
3030 ccw_device_set_offline(cgdev->cdev[1]);
3031 ccw_device_set_offline(cgdev->cdev[0]);
3032 return -ENODEV;
3033}
3034
3035static void
3036claw_purge_skb_queue(struct sk_buff_head *q)
3037{
3038 struct sk_buff *skb;
3039
3040 CLAW_DBF_TEXT(4, trace, "purgque");
3041 while ((skb = skb_dequeue(q))) {
3042 atomic_dec(&skb->users);
3043 dev_kfree_skb_any(skb);
3044 }
3045}
3046
3047/**
3048 * Shutdown an interface.
3049 *
3050 * @param cgdev Device to be shut down.
3051 *
3052 * @returns 0 on success, !0 on failure.
3053 */
3054static int
3055claw_shutdown_device(struct ccwgroup_device *cgdev)
3056{
3057 struct claw_privbk *priv;
3058 struct net_device *ndev;
3059 int ret = 0;
3060
3061 CLAW_DBF_TEXT_(2, setup, "%s", dev_name(&cgdev->dev));
3062 priv = dev_get_drvdata(&cgdev->dev);
3063 if (!priv)
3064 return -ENODEV;
3065 ndev = priv->channel[READ_CHANNEL].ndev;
3066 if (ndev) {
3067 /* Close the device */
3068 dev_info(&cgdev->dev, "%s: shutting down\n",
3069 ndev->name);
3070 if (ndev->flags & IFF_RUNNING)
3071 ret = claw_release(ndev);
3072 ndev->flags &=~IFF_RUNNING;
3073 unregister_netdev(ndev);
3074 ndev->ml_priv = NULL; /* cgdev data, not ndev's to free */
3075 claw_free_netdevice(ndev, 1);
3076 priv->channel[READ_CHANNEL].ndev = NULL;
3077 priv->channel[WRITE_CHANNEL].ndev = NULL;
3078 priv->p_env->ndev = NULL;
3079 }
3080 ccw_device_set_offline(cgdev->cdev[1]);
3081 ccw_device_set_offline(cgdev->cdev[0]);
3082 return ret;
3083}
3084
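/* Remove a CLAW group device: shut it down if it is online, remove */
/* the sysfs files and free all per-device storage. */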
3085static void
3086claw_remove_device(struct ccwgroup_device *cgdev)
3087{
3088 struct claw_privbk *priv;
3089
3090 BUG_ON(!cgdev);
3091 CLAW_DBF_TEXT_(2, setup, "%s", dev_name(&cgdev->dev));
3092 priv = dev_get_drvdata(&cgdev->dev);
3093 BUG_ON(!priv);
3094 dev_info(&cgdev->dev, " will be removed.\n");
3095 if (cgdev->state == CCWGROUP_ONLINE)
3096 claw_shutdown_device(cgdev);
3097 claw_remove_files(&cgdev->dev);
3098 kfree(priv->p_mtc_envelope);
3099 priv->p_mtc_envelope=NULL;
3100 kfree(priv->p_env);
3101 priv->p_env=NULL;
3102 kfree(priv->channel[0].irb);
3103 priv->channel[0].irb=NULL;
3104 kfree(priv->channel[1].irb);
3105 priv->channel[1].irb=NULL;
3106 kfree(priv);
3107 dev_set_drvdata(&cgdev->dev, NULL);
3108 dev_set_drvdata(&cgdev->cdev[READ_CHANNEL]->dev, NULL);
3109 dev_set_drvdata(&cgdev->cdev[WRITE_CHANNEL]->dev, NULL);
3110 put_device(&cgdev->dev);
3111
3112 return;
3113}
3114
3115
3116/*
3117 * sysfs attributes
3118 */
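/* The name store routines (host_name, adapter_name, api_type) */
/* blank-pad (0x20) the value and overwrite the trailing newline so */
/* it matches the fixed-width name fields in CLAW control records. */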
3119static ssize_t
3120claw_hname_show(struct device *dev, struct device_attribute *attr, char *buf)
3121{
3122 struct claw_privbk *priv;
3123 struct claw_env * p_env;
3124
3125 priv = dev_get_drvdata(dev);
3126 if (!priv)
3127 return -ENODEV;
3128 p_env = priv->p_env;
3129 return sprintf(buf, "%s\n",p_env->host_name);
3130}
3131
3132static ssize_t
3133claw_hname_write(struct device *dev, struct device_attribute *attr,
3134 const char *buf, size_t count)
3135{
3136 struct claw_privbk *priv;
3137 struct claw_env * p_env;
3138
3139 priv = dev_get_drvdata(dev);
3140 if (!priv)
3141 return -ENODEV;
3142 p_env = priv->p_env;
3143 if (count > MAX_NAME_LEN+1)
3144 return -EINVAL;
3145 memset(p_env->host_name, 0x20, MAX_NAME_LEN);
3146 strncpy(p_env->host_name,buf, count);
3147 p_env->host_name[count-1] = 0x20; /* clear extra 0x0a */
3148 p_env->host_name[MAX_NAME_LEN] = 0x00;
3149 CLAW_DBF_TEXT(2, setup, "HstnSet");
3150 CLAW_DBF_TEXT_(2, setup, "%s", p_env->host_name);
3151
3152 return count;
3153}
3154
3155static DEVICE_ATTR(host_name, 0644, claw_hname_show, claw_hname_write);
3156
3157static ssize_t
3158claw_adname_show(struct device *dev, struct device_attribute *attr, char *buf)
3159{
3160 struct claw_privbk *priv;
3161 struct claw_env * p_env;
3162
3163 priv = dev_get_drvdata(dev);
3164 if (!priv)
3165 return -ENODEV;
3166 p_env = priv->p_env;
3167 return sprintf(buf, "%s\n", p_env->adapter_name);
3168}
3169
3170static ssize_t
3171claw_adname_write(struct device *dev, struct device_attribute *attr,
3172 const char *buf, size_t count)
3173{
3174 struct claw_privbk *priv;
3175 struct claw_env * p_env;
3176
3177 priv = dev_get_drvdata(dev);
3178 if (!priv)
3179 return -ENODEV;
3180 p_env = priv->p_env;
3181 if (count > MAX_NAME_LEN+1)
3182 return -EINVAL;
3183 memset(p_env->adapter_name, 0x20, MAX_NAME_LEN);
3184 strncpy(p_env->adapter_name,buf, count);
3185 p_env->adapter_name[count-1] = 0x20; /* clear extra 0x0a */
3186 p_env->adapter_name[MAX_NAME_LEN] = 0x00;
3187 CLAW_DBF_TEXT(2, setup, "AdnSet");
3188 CLAW_DBF_TEXT_(2, setup, "%s", p_env->adapter_name);
3189
3190 return count;
3191}
3192
3193static DEVICE_ATTR(adapter_name, 0644, claw_adname_show, claw_adname_write);
3194
3195static ssize_t
3196claw_apname_show(struct device *dev, struct device_attribute *attr, char *buf)
3197{
3198 struct claw_privbk *priv;
3199 struct claw_env * p_env;
3200
3201 priv = dev_get_drvdata(dev);
3202 if (!priv)
3203 return -ENODEV;
3204 p_env = priv->p_env;
3205 return sprintf(buf, "%s\n",
3206 p_env->api_type);
3207}
3208
3209static ssize_t
3210claw_apname_write(struct device *dev, struct device_attribute *attr,
3211 const char *buf, size_t count)
3212{
3213 struct claw_privbk *priv;
3214 struct claw_env * p_env;
3215
3216 priv = dev_get_drvdata(dev);
3217 if (!priv)
3218 return -ENODEV;
3219 p_env = priv->p_env;
3220 if (count > MAX_NAME_LEN+1)
3221 return -EINVAL;
3222 memset(p_env->api_type, 0x20, MAX_NAME_LEN);
3223 strncpy(p_env->api_type,buf, count);
3224 p_env->api_type[count-1] = 0x20; /* clear extra 0x0a */
3225 p_env->api_type[MAX_NAME_LEN] = 0x00;
3226 if(strncmp(p_env->api_type,WS_APPL_NAME_PACKED,6) == 0) {
3227 p_env->read_size=DEF_PACK_BUFSIZE;
3228 p_env->write_size=DEF_PACK_BUFSIZE;
3229 p_env->packing=PACKING_ASK;
3230 CLAW_DBF_TEXT(2, setup, "PACKING");
3231 }
3232 else {
3233 p_env->packing=0;
3234 p_env->read_size=CLAW_FRAME_SIZE;
3235 p_env->write_size=CLAW_FRAME_SIZE;
3236 CLAW_DBF_TEXT(2, setup, "ApiSet");
3237 }
3238 CLAW_DBF_TEXT_(2, setup, "%s", p_env->api_type);
3239 return count;
3240}
3241
3242static DEVICE_ATTR(api_type, 0644, claw_apname_show, claw_apname_write);
3243
3244static ssize_t
3245claw_wbuff_show(struct device *dev, struct device_attribute *attr, char *buf)
3246{
3247 struct claw_privbk *priv;
3248 struct claw_env * p_env;
3249
3250 priv = dev_get_drvdata(dev);
3251 if (!priv)
3252 return -ENODEV;
3253 p_env = priv->p_env;
3254 return sprintf(buf, "%d\n", p_env->write_buffers);
3255}
3256
3257static ssize_t
3258claw_wbuff_write(struct device *dev, struct device_attribute *attr,
3259 const char *buf, size_t count)
3260{
3261 struct claw_privbk *priv;
3262 struct claw_env * p_env;
3263 int nnn,max;
3264
3265 priv = dev_get_drvdata(dev);
3266 if (!priv)
3267 return -ENODEV;
3268 p_env = priv->p_env;
3269 sscanf(buf, "%i", &nnn);
3270 if (p_env->packing) {
3271 max = 64;
3272 }
3273 else {
3274 max = 512;
3275 }
3276 if ((nnn > max ) || (nnn < 2))
3277 return -EINVAL;
3278 p_env->write_buffers = nnn;
3279 CLAW_DBF_TEXT(2, setup, "Wbufset");
3280 CLAW_DBF_TEXT_(2, setup, "WB=%d", p_env->write_buffers);
3281 return count;
3282}
3283
3284static DEVICE_ATTR(write_buffer, 0644, claw_wbuff_show, claw_wbuff_write);
3285
3286static ssize_t
3287claw_rbuff_show(struct device *dev, struct device_attribute *attr, char *buf)
3288{
3289 struct claw_privbk *priv;
3290 struct claw_env * p_env;
3291
3292 priv = dev_get_drvdata(dev);
3293 if (!priv)
3294 return -ENODEV;
3295 p_env = priv->p_env;
3296 return sprintf(buf, "%d\n", p_env->read_buffers);
3297}
3298
3299static ssize_t
3300claw_rbuff_write(struct device *dev, struct device_attribute *attr,
3301 const char *buf, size_t count)
3302{
3303 struct claw_privbk *priv;
3304 struct claw_env *p_env;
3305 int nnn,max;
3306
3307 priv = dev_get_drvdata(dev);
3308 if (!priv)
3309 return -ENODEV;
3310 p_env = priv->p_env;
3311 sscanf(buf, "%i", &nnn);
3312 if (p_env->packing) {
3313 max = 64;
3314 }
3315 else {
3316 max = 512;
3317 }
3318 if ((nnn > max ) || (nnn < 2))
3319 return -EINVAL;
3320 p_env->read_buffers = nnn;
3321 CLAW_DBF_TEXT(2, setup, "Rbufset");
3322 CLAW_DBF_TEXT_(2, setup, "RB=%d", p_env->read_buffers);
3323 return count;
3324}
3325
3326static DEVICE_ATTR(read_buffer, 0644, claw_rbuff_show, claw_rbuff_write);
3327
3328static struct attribute *claw_attr[] = {
3329 &dev_attr_read_buffer.attr,
3330 &dev_attr_write_buffer.attr,
3331 &dev_attr_adapter_name.attr,
3332 &dev_attr_api_type.attr,
3333 &dev_attr_host_name.attr,
3334 NULL,
3335};
3336
3337static struct attribute_group claw_attr_group = {
3338 .attrs = claw_attr,
3339};
3340
3341static int
3342claw_add_files(struct device *dev)
3343{
3344 CLAW_DBF_TEXT(2, setup, "add_file");
3345 return sysfs_create_group(&dev->kobj, &claw_attr_group);
3346}
3347
3348static void
3349claw_remove_files(struct device *dev)
3350{
3351 CLAW_DBF_TEXT(2, setup, "rem_file");
3352 sysfs_remove_group(&dev->kobj, &claw_attr_group);
3353}
3354
3355/*--------------------------------------------------------------------*
3356* claw_init and cleanup *
3357*---------------------------------------------------------------------*/
3358
3359static void __exit
3360claw_cleanup(void)
3361{
3362 driver_remove_file(&claw_group_driver.driver,
3363 &driver_attr_group);
3364 ccwgroup_driver_unregister(&claw_group_driver);
3365 ccw_driver_unregister(&claw_ccw_driver);
3366 root_device_unregister(claw_root_dev);
3367 claw_unregister_debug_facility();
3368 pr_info("Driver unloaded\n");
3369
3370}
3371
3372/**
3373 * Initialize module.
3374 * This is called just after the module is loaded.
3375 *
3376 * @return 0 on success, !0 on error.
3377 */
3378static int __init
3379claw_init(void)
3380{
3381 int ret = 0;
3382
3383 pr_info("Loading %s\n", version);
3384 ret = claw_register_debug_facility();
3385 if (ret) {
3386 pr_err("Registering with the S/390 debug feature"
3387 " failed with error code %d\n", ret);
3388 goto out_err;
3389 }
3390 CLAW_DBF_TEXT(2, setup, "init_mod");
3391 claw_root_dev = root_device_register("claw");
3392 ret = IS_ERR(claw_root_dev) ? PTR_ERR(claw_root_dev) : 0;
3393 if (ret)
3394 goto register_err;
3395 ret = ccw_driver_register(&claw_ccw_driver);
3396 if (ret)
3397 goto ccw_err;
3398 claw_group_driver.driver.groups = claw_group_attr_groups;
3399 ret = ccwgroup_driver_register(&claw_group_driver);
3400 if (ret)
3401 goto ccwgroup_err;
3402 return 0;
3403
3404ccwgroup_err:
3405 ccw_driver_unregister(&claw_ccw_driver);
3406ccw_err:
3407 root_device_unregister(claw_root_dev);
3408register_err:
3409 CLAW_DBF_TEXT(2, setup, "init_bad");
3410 claw_unregister_debug_facility();
3411out_err:
3412 pr_err("Initializing the claw device driver failed\n");
3413 return ret;
3414}
3415
3416module_init(claw_init);
3417module_exit(claw_cleanup);
3418
3419MODULE_AUTHOR("Andy Richter <richtera@us.ibm.com>");
3420MODULE_DESCRIPTION("Linux for System z CLAW Driver\n" \
3421 "Copyright 2000,2008 IBM Corporation\n");
3422MODULE_LICENSE("GPL");
1/*
2 * ESCON CLAW network driver
3 *
4 * Linux for zSeries version
5 * Copyright IBM Corp. 2002, 2009
6 * Author(s) Original code written by:
7 * Kazuo Iimura <iimura@jp.ibm.com>
8 * Rewritten by
9 * Andy Richter <richtera@us.ibm.com>
10 * Marc Price <mwprice@us.ibm.com>
11 *
12 * sysfs parms:
13 * group x.x.rrrr,x.x.wwww
14 * read_buffer nnnnnnn
15 * write_buffer nnnnnn
16 * host_name aaaaaaaa
17 * adapter_name aaaaaaaa
18 * api_type aaaaaaaa
19 *
20 * eg.
21 * group 0.0.0200 0.0.0201
22 * read_buffer 25
23 * write_buffer 20
24 * host_name LINUX390
25 * adapter_name RS6K
26 * api_type TCPIP
27 *
28 * where
29 *
30 * The device id is decided by the order entries
31 * are added to the group the first is claw0 the second claw1
32 * up to CLAW_MAX_DEV
33 *
34 * rrrr - the first of 2 consecutive device addresses used for the
35 * CLAW protocol.
36 * The specified address is always used as the input (Read)
37 * channel and the next address is used as the output channel.
38 *
39 * wwww - the second of 2 consecutive device addresses used for
40 * the CLAW protocol.
41 * The specified address is always used as the output
42 * channel and the previous address is used as the input channel.
43 *
44 * read_buffer - specifies number of input buffers to allocate.
45 * write_buffer - specifies number of output buffers to allocate.
46 * host_name - host name
47 * adaptor_name - adaptor name
48 * api_type - API type TCPIP or API will be sent and expected
49 * as ws_name
50 *
51 * Note the following requirements:
52 * 1) host_name must match the configured adapter_name on the remote side
53 * 2) adaptor_name must match the configured host name on the remote side
54 *
55 * Change History
56 * 1.00 Initial release shipped
57 * 1.10 Changes for Buffer allocation
58 * 1.15 Changed for 2.6 Kernel No longer compiles on 2.4 or lower
59 * 1.25 Added Packing support
60 * 1.5
61 */
62
63#define KMSG_COMPONENT "claw"
64
65#include <asm/ccwdev.h>
66#include <asm/ccwgroup.h>
67#include <asm/debug.h>
68#include <asm/idals.h>
69#include <asm/io.h>
70#include <linux/bitops.h>
71#include <linux/ctype.h>
72#include <linux/delay.h>
73#include <linux/errno.h>
74#include <linux/if_arp.h>
75#include <linux/init.h>
76#include <linux/interrupt.h>
77#include <linux/ip.h>
78#include <linux/kernel.h>
79#include <linux/module.h>
80#include <linux/netdevice.h>
81#include <linux/etherdevice.h>
82#include <linux/proc_fs.h>
83#include <linux/sched.h>
84#include <linux/signal.h>
85#include <linux/skbuff.h>
86#include <linux/slab.h>
87#include <linux/string.h>
88#include <linux/tcp.h>
89#include <linux/timer.h>
90#include <linux/types.h>
91
92#include "claw.h"
93
94/*
95 CLAW uses the s390dbf file system see claw_trace and claw_setup
96*/
97
98static char version[] __initdata = "CLAW driver";
99static char debug_buffer[255];
100/**
101 * Debug Facility Stuff
102 */
103static debug_info_t *claw_dbf_setup;
104static debug_info_t *claw_dbf_trace;
105
106/**
107 * CLAW Debug Facility functions
108 */
109static void
110claw_unregister_debug_facility(void)
111{
112 if (claw_dbf_setup)
113 debug_unregister(claw_dbf_setup);
114 if (claw_dbf_trace)
115 debug_unregister(claw_dbf_trace);
116}
117
118static int
119claw_register_debug_facility(void)
120{
121 claw_dbf_setup = debug_register("claw_setup", 2, 1, 8);
122 claw_dbf_trace = debug_register("claw_trace", 2, 2, 8);
123 if (claw_dbf_setup == NULL || claw_dbf_trace == NULL) {
124 claw_unregister_debug_facility();
125 return -ENOMEM;
126 }
127 debug_register_view(claw_dbf_setup, &debug_hex_ascii_view);
128 debug_set_level(claw_dbf_setup, 2);
129 debug_register_view(claw_dbf_trace, &debug_hex_ascii_view);
130 debug_set_level(claw_dbf_trace, 2);
131 return 0;
132}
133
134static inline void
135claw_set_busy(struct net_device *dev)
136{
137 ((struct claw_privbk *)dev->ml_priv)->tbusy = 1;
138}
139
140static inline void
141claw_clear_busy(struct net_device *dev)
142{
143 clear_bit(0, &(((struct claw_privbk *) dev->ml_priv)->tbusy));
144 netif_wake_queue(dev);
145}
146
147static inline int
148claw_check_busy(struct net_device *dev)
149{
150 return ((struct claw_privbk *) dev->ml_priv)->tbusy;
151}
152
153static inline void
154claw_setbit_busy(int nr,struct net_device *dev)
155{
156 netif_stop_queue(dev);
157 set_bit(nr, (void *)&(((struct claw_privbk *)dev->ml_priv)->tbusy));
158}
159
160static inline void
161claw_clearbit_busy(int nr,struct net_device *dev)
162{
163 clear_bit(nr, (void *)&(((struct claw_privbk *)dev->ml_priv)->tbusy));
164 netif_wake_queue(dev);
165}
166
167static inline int
168claw_test_and_setbit_busy(int nr,struct net_device *dev)
169{
170 netif_stop_queue(dev);
171 return test_and_set_bit(nr,
172 (void *)&(((struct claw_privbk *) dev->ml_priv)->tbusy));
173}
174
175
176/* Functions for the DEV methods */
177
178static int claw_probe(struct ccwgroup_device *cgdev);
179static void claw_remove_device(struct ccwgroup_device *cgdev);
180static void claw_purge_skb_queue(struct sk_buff_head *q);
181static int claw_new_device(struct ccwgroup_device *cgdev);
182static int claw_shutdown_device(struct ccwgroup_device *cgdev);
183static int claw_tx(struct sk_buff *skb, struct net_device *dev);
184static int claw_change_mtu( struct net_device *dev, int new_mtu);
185static int claw_open(struct net_device *dev);
186static void claw_irq_handler(struct ccw_device *cdev,
187 unsigned long intparm, struct irb *irb);
188static void claw_irq_tasklet ( unsigned long data );
189static int claw_release(struct net_device *dev);
190static void claw_write_retry ( struct chbk * p_ch );
191static void claw_write_next ( struct chbk * p_ch );
192static void claw_timer ( struct chbk * p_ch );
193
194/* Functions */
195static int add_claw_reads(struct net_device *dev,
196 struct ccwbk* p_first, struct ccwbk* p_last);
197static void ccw_check_return_code (struct ccw_device *cdev, int return_code);
198static void ccw_check_unit_check (struct chbk * p_ch, unsigned char sense );
199static int find_link(struct net_device *dev, char *host_name, char *ws_name );
200static int claw_hw_tx(struct sk_buff *skb, struct net_device *dev, long linkid);
201static int init_ccw_bk(struct net_device *dev);
202static void probe_error( struct ccwgroup_device *cgdev);
203static struct net_device_stats *claw_stats(struct net_device *dev);
204static int pages_to_order_of_mag(int num_of_pages);
205static struct sk_buff *claw_pack_skb(struct claw_privbk *privptr);
206/* sysfs Functions */
207static ssize_t claw_hname_show(struct device *dev,
208 struct device_attribute *attr, char *buf);
209static ssize_t claw_hname_write(struct device *dev,
210 struct device_attribute *attr,
211 const char *buf, size_t count);
212static ssize_t claw_adname_show(struct device *dev,
213 struct device_attribute *attr, char *buf);
214static ssize_t claw_adname_write(struct device *dev,
215 struct device_attribute *attr,
216 const char *buf, size_t count);
217static ssize_t claw_apname_show(struct device *dev,
218 struct device_attribute *attr, char *buf);
219static ssize_t claw_apname_write(struct device *dev,
220 struct device_attribute *attr,
221 const char *buf, size_t count);
222static ssize_t claw_wbuff_show(struct device *dev,
223 struct device_attribute *attr, char *buf);
224static ssize_t claw_wbuff_write(struct device *dev,
225 struct device_attribute *attr,
226 const char *buf, size_t count);
227static ssize_t claw_rbuff_show(struct device *dev,
228 struct device_attribute *attr, char *buf);
229static ssize_t claw_rbuff_write(struct device *dev,
230 struct device_attribute *attr,
231 const char *buf, size_t count);
232
233/* Functions for System Validate */
234static int claw_process_control( struct net_device *dev, struct ccwbk * p_ccw);
235static int claw_send_control(struct net_device *dev, __u8 type, __u8 link,
236 __u8 correlator, __u8 rc , char *local_name, char *remote_name);
237static int claw_snd_conn_req(struct net_device *dev, __u8 link);
238static int claw_snd_disc(struct net_device *dev, struct clawctl * p_ctl);
239static int claw_snd_sys_validate_rsp(struct net_device *dev,
240 struct clawctl * p_ctl, __u32 return_code);
241static int claw_strt_conn_req(struct net_device *dev );
242static void claw_strt_read(struct net_device *dev, int lock);
243static void claw_strt_out_IO(struct net_device *dev);
244static void claw_free_wrt_buf(struct net_device *dev);
245
246/* Functions for unpack reads */
247static void unpack_read(struct net_device *dev);
248
249static int claw_pm_prepare(struct ccwgroup_device *gdev)
250{
251 return -EPERM;
252}
253
254/* the root device for claw group devices */
255static struct device *claw_root_dev;
256
257/* ccwgroup table */
258
259static struct ccwgroup_driver claw_group_driver = {
260 .driver = {
261 .owner = THIS_MODULE,
262 .name = "claw",
263 },
264 .setup = claw_probe,
265 .remove = claw_remove_device,
266 .set_online = claw_new_device,
267 .set_offline = claw_shutdown_device,
268 .prepare = claw_pm_prepare,
269};
270
271static struct ccw_device_id claw_ids[] = {
272 {CCW_DEVICE(0x3088, 0x61), .driver_info = claw_channel_type_claw},
273 {},
274};
275MODULE_DEVICE_TABLE(ccw, claw_ids);
276
277static struct ccw_driver claw_ccw_driver = {
278 .driver = {
279 .owner = THIS_MODULE,
280 .name = "claw",
281 },
282 .ids = claw_ids,
283 .probe = ccwgroup_probe_ccwdev,
284 .remove = ccwgroup_remove_ccwdev,
285 .int_class = IRQIO_CLW,
286};
287
288static ssize_t claw_driver_group_store(struct device_driver *ddrv,
289 const char *buf, size_t count)
290{
291 int err;
292 err = ccwgroup_create_dev(claw_root_dev, &claw_group_driver, 2, buf);
293 return err ? err : count;
294}
295static DRIVER_ATTR(group, 0200, NULL, claw_driver_group_store);
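/*
 * claw_driver_group_store() backs the write-only "group" driver attribute:
 * ccwgroup_create_dev() parses the written string as the two comma-separated
 * ccw bus IDs of the channel pair and creates the group device.  A sketch of
 * typical usage from a shell (the bus IDs below are placeholders):
 *
 *	echo 0.0.1000,0.0.1001 > /sys/bus/ccwgroup/drivers/claw/group
 */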
296
297static struct attribute *claw_drv_attrs[] = {
298 &driver_attr_group.attr,
299 NULL,
300};
301static struct attribute_group claw_drv_attr_group = {
302 .attrs = claw_drv_attrs,
303};
304static const struct attribute_group *claw_drv_attr_groups[] = {
305 &claw_drv_attr_group,
306 NULL,
307};
308
309/*
310* Key functions
311*/
312
313/*-------------------------------------------------------------------*
314 * claw_tx *
315 *-------------------------------------------------------------------*/
316
317static int
318claw_tx(struct sk_buff *skb, struct net_device *dev)
319{
320 int rc;
321 struct claw_privbk *privptr = dev->ml_priv;
322 unsigned long saveflags;
323 struct chbk *p_ch;
324
325 CLAW_DBF_TEXT(4, trace, "claw_tx");
326 p_ch = &privptr->channel[WRITE_CHANNEL];
327 spin_lock_irqsave(get_ccwdev_lock(p_ch->cdev), saveflags);
328 rc=claw_hw_tx( skb, dev, 1 );
329 spin_unlock_irqrestore(get_ccwdev_lock(p_ch->cdev), saveflags);
330 CLAW_DBF_TEXT_(4, trace, "clawtx%d", rc);
331 if (rc)
332 rc = NETDEV_TX_BUSY;
333 else
334 rc = NETDEV_TX_OK;
335 return rc;
336} /* end of claw_tx */
337
338/*------------------------------------------------------------------*
339 * pack the collect queue into an skb and return it *
340 * If not packing just return the top skb from the queue *
341 *------------------------------------------------------------------*/
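/*
 * Reference counting note: skbs are queued on collect_queue with an extra
 * reference (atomic_inc(&skb->users) in claw_hw_tx()).  The first
 * dev_kfree_skb_any() below and the atomic_dec() after each later dequeue
 * only drop that extra reference; the dev_kfree_skb_any() inside the copy
 * loop then releases the skb for real once its data has been packed, and an
 * skb that does not fit is re-queued with its extra reference restored.
 */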
342
343static struct sk_buff *
344claw_pack_skb(struct claw_privbk *privptr)
345{
346 struct sk_buff *new_skb,*held_skb;
347 struct chbk *p_ch = &privptr->channel[WRITE_CHANNEL];
348 struct claw_env *p_env = privptr->p_env;
349 int pkt_cnt,pk_ind,so_far;
350
351 new_skb = NULL; /* assume no dice */
352 pkt_cnt = 0;
353 CLAW_DBF_TEXT(4, trace, "PackSKBe");
354 if (!skb_queue_empty(&p_ch->collect_queue)) {
355 /* some data */
356 held_skb = skb_dequeue(&p_ch->collect_queue);
357 if (held_skb)
358 dev_kfree_skb_any(held_skb);
359 else
360 return NULL;
361 if (p_env->packing != DO_PACKED)
362 return held_skb;
363 /* get a new SKB; we will pack at least one */
364 new_skb = dev_alloc_skb(p_env->write_size);
365 if (new_skb == NULL) {
366 atomic_inc(&held_skb->users);
367 skb_queue_head(&p_ch->collect_queue,held_skb);
368 return NULL;
369 }
370 /* we have a packed packet and a place to put it */
371 pk_ind = 1;
372 so_far = 0;
373 new_skb->cb[1] = 'P'; /* every skb on queue has pack header */
374 while ((pk_ind) && (held_skb != NULL)) {
375 if (held_skb->len+so_far <= p_env->write_size-8) {
376 memcpy(skb_put(new_skb,held_skb->len),
377 held_skb->data,held_skb->len);
378 privptr->stats.tx_packets++;
379 so_far += held_skb->len;
380 pkt_cnt++;
381 dev_kfree_skb_any(held_skb);
382 held_skb = skb_dequeue(&p_ch->collect_queue);
383 if (held_skb)
384 atomic_dec(&held_skb->users);
385 } else {
386 pk_ind = 0;
387 atomic_inc(&held_skb->users);
388 skb_queue_head(&p_ch->collect_queue,held_skb);
389 }
390 }
391 }
392 CLAW_DBF_TEXT(4, trace, "PackSKBx");
393 return new_skb;
394}
395
396/*-------------------------------------------------------------------*
397 * claw_change_mtu *
398 * *
399 *-------------------------------------------------------------------*/
400
401static int
402claw_change_mtu(struct net_device *dev, int new_mtu)
403{
404 struct claw_privbk *privptr = dev->ml_priv;
405 int buff_size;
406 CLAW_DBF_TEXT(4, trace, "setmtu");
407 buff_size = privptr->p_env->write_size;
408 if ((new_mtu < 60) || (new_mtu > buff_size)) {
409 return -EINVAL;
410 }
411 dev->mtu = new_mtu;
412 return 0;
413} /* end of claw_change_mtu */
414
415
416/*-------------------------------------------------------------------*
417 * claw_open *
418 * *
419 *-------------------------------------------------------------------*/
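/*
 * claw_open() builds the CCW chains on first use (init_ccw_bk), then issues
 * a HALT_IO on both channels to bring them to a known state and waits for
 * the resulting interrupt, guarded by a 15 second claw_timer.  If either
 * channel reports bad ending status or the timer fires, all buffers are
 * released again and the open fails with -EIO.
 */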
420static int
421claw_open(struct net_device *dev)
422{
423
424 int rc;
425 int i;
426 unsigned long saveflags=0;
427 unsigned long parm;
428 struct claw_privbk *privptr;
429 DECLARE_WAITQUEUE(wait, current);
430 struct timer_list timer;
431 struct ccwbk *p_buf;
432
433 CLAW_DBF_TEXT(4, trace, "open");
434 privptr = (struct claw_privbk *)dev->ml_priv;
435 /* allocate and initialize CCW blocks */
436 if (privptr->buffs_alloc == 0) {
437 rc=init_ccw_bk(dev);
438 if (rc) {
439 CLAW_DBF_TEXT(2, trace, "openmem");
440 return -ENOMEM;
441 }
442 }
443 privptr->system_validate_comp=0;
444 privptr->release_pend=0;
445 if(strncmp(privptr->p_env->api_type,WS_APPL_NAME_PACKED,6) == 0) {
446 privptr->p_env->read_size=DEF_PACK_BUFSIZE;
447 privptr->p_env->write_size=DEF_PACK_BUFSIZE;
448 privptr->p_env->packing=PACKING_ASK;
449 } else {
450 privptr->p_env->packing=0;
451 privptr->p_env->read_size=CLAW_FRAME_SIZE;
452 privptr->p_env->write_size=CLAW_FRAME_SIZE;
453 }
454 claw_set_busy(dev);
455 tasklet_init(&privptr->channel[READ_CHANNEL].tasklet, claw_irq_tasklet,
456 (unsigned long) &privptr->channel[READ_CHANNEL]);
457 for ( i = 0; i < 2; i++) {
458 CLAW_DBF_TEXT_(2, trace, "opn_ch%d", i);
459 init_waitqueue_head(&privptr->channel[i].wait);
460 /* skb_queue_head_init(&p_ch->io_queue); */
461 if (i == WRITE_CHANNEL)
462 skb_queue_head_init(
463 &privptr->channel[WRITE_CHANNEL].collect_queue);
464 privptr->channel[i].flag_a = 0;
465 privptr->channel[i].IO_active = 0;
466 privptr->channel[i].flag &= ~CLAW_TIMER;
467 init_timer(&timer);
468 timer.function = (void *)claw_timer;
469 timer.data = (unsigned long)(&privptr->channel[i]);
470 timer.expires = jiffies + 15*HZ;
471 add_timer(&timer);
472 spin_lock_irqsave(get_ccwdev_lock(
473 privptr->channel[i].cdev), saveflags);
474 parm = (unsigned long) &privptr->channel[i];
475 privptr->channel[i].claw_state = CLAW_START_HALT_IO;
476 rc = 0;
477 add_wait_queue(&privptr->channel[i].wait, &wait);
478 rc = ccw_device_halt(
479 (struct ccw_device *)privptr->channel[i].cdev,parm);
480 set_current_state(TASK_INTERRUPTIBLE);
481 spin_unlock_irqrestore(
482 get_ccwdev_lock(privptr->channel[i].cdev), saveflags);
483 schedule();
484 set_current_state(TASK_RUNNING);
485 remove_wait_queue(&privptr->channel[i].wait, &wait);
486 if(rc != 0)
487 ccw_check_return_code(privptr->channel[i].cdev, rc);
488 if((privptr->channel[i].flag & CLAW_TIMER) == 0x00)
489 del_timer(&timer);
490 }
491 if ((((privptr->channel[READ_CHANNEL].last_dstat |
492 privptr->channel[WRITE_CHANNEL].last_dstat) &
493 ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END)) != 0x00) ||
494 (((privptr->channel[READ_CHANNEL].flag |
495 privptr->channel[WRITE_CHANNEL].flag) & CLAW_TIMER) != 0x00)) {
496 dev_info(&privptr->channel[READ_CHANNEL].cdev->dev,
497 "%s: remote side is not ready\n", dev->name);
498 CLAW_DBF_TEXT(2, trace, "notrdy");
499
500 for ( i = 0; i < 2; i++) {
501 spin_lock_irqsave(
502 get_ccwdev_lock(privptr->channel[i].cdev),
503 saveflags);
504 parm = (unsigned long) &privptr->channel[i];
505 privptr->channel[i].claw_state = CLAW_STOP;
506 rc = ccw_device_halt(
507 (struct ccw_device *)privptr->channel[i].cdev,
508 parm);
509 spin_unlock_irqrestore(
510 get_ccwdev_lock(privptr->channel[i].cdev),
511 saveflags);
512 if (rc != 0) {
513 ccw_check_return_code(
514 privptr->channel[i].cdev, rc);
515 }
516 }
517 free_pages((unsigned long)privptr->p_buff_ccw,
518 (int)pages_to_order_of_mag(privptr->p_buff_ccw_num));
519 if (privptr->p_env->read_size < PAGE_SIZE) {
520 free_pages((unsigned long)privptr->p_buff_read,
521 (int)pages_to_order_of_mag(
522 privptr->p_buff_read_num));
523 }
524 else {
525 p_buf=privptr->p_read_active_first;
526 while (p_buf!=NULL) {
527 free_pages((unsigned long)p_buf->p_buffer,
528 (int)pages_to_order_of_mag(
529 privptr->p_buff_pages_perread ));
530 p_buf=p_buf->next;
531 }
532 }
533 if (privptr->p_env->write_size < PAGE_SIZE ) {
534 free_pages((unsigned long)privptr->p_buff_write,
535 (int)pages_to_order_of_mag(
536 privptr->p_buff_write_num));
537 }
538 else {
539 p_buf=privptr->p_write_active_first;
540 while (p_buf!=NULL) {
541 free_pages((unsigned long)p_buf->p_buffer,
542 (int)pages_to_order_of_mag(
543 privptr->p_buff_pages_perwrite ));
544 p_buf=p_buf->next;
545 }
546 }
547 privptr->buffs_alloc = 0;
548 privptr->channel[READ_CHANNEL].flag = 0x00;
549 privptr->channel[WRITE_CHANNEL].flag = 0x00;
550 privptr->p_buff_ccw=NULL;
551 privptr->p_buff_read=NULL;
552 privptr->p_buff_write=NULL;
553 claw_clear_busy(dev);
554 CLAW_DBF_TEXT(2, trace, "open EIO");
555 return -EIO;
556 }
557
558 /* Send SystemValidate command */
559
560 claw_clear_busy(dev);
561 CLAW_DBF_TEXT(4, trace, "openok");
562 return 0;
563} /* end of claw_open */
564
565/*-------------------------------------------------------------------*
566* *
567* claw_irq_handler *
568* *
569*--------------------------------------------------------------------*/
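/*
 * Interrupt entry point for both channels.  The handler identifies the
 * channel block by comparing cdev pointers, copies the irb for later
 * inspection, reports subchannel and unit checks, and then drives the
 * per-channel state machine (CLAW_STOP, CLAW_START_HALT_IO, CLAW_START_READ,
 * CLAW_START_WRITE); completed reads, and reads triggered by a write
 * completion, are handed to the tasklet guarded by the CLAW_BH_ACTIVE flag.
 */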
570static void
571claw_irq_handler(struct ccw_device *cdev,
572 unsigned long intparm, struct irb *irb)
573{
574 struct chbk *p_ch = NULL;
575 struct claw_privbk *privptr = NULL;
576 struct net_device *dev = NULL;
577 struct claw_env *p_env;
578 struct chbk *p_ch_r=NULL;
579
580 CLAW_DBF_TEXT(4, trace, "clawirq");
581 /* Bypass all 'unsolicited interrupts' */
582 privptr = dev_get_drvdata(&cdev->dev);
583 if (!privptr) {
584 dev_warn(&cdev->dev, "An uninitialized CLAW device received an"
585 " IRQ, c-%02x d-%02x\n",
586 irb->scsw.cmd.cstat, irb->scsw.cmd.dstat);
587 CLAW_DBF_TEXT(2, trace, "badirq");
588 return;
589 }
590
591 /* Try to extract channel from driver data. */
592 if (privptr->channel[READ_CHANNEL].cdev == cdev)
593 p_ch = &privptr->channel[READ_CHANNEL];
594 else if (privptr->channel[WRITE_CHANNEL].cdev == cdev)
595 p_ch = &privptr->channel[WRITE_CHANNEL];
596 else {
597 dev_warn(&cdev->dev, "The device is not a CLAW device\n");
598 CLAW_DBF_TEXT(2, trace, "badchan");
599 return;
600 }
601 CLAW_DBF_TEXT_(4, trace, "IRQCH=%d", p_ch->flag);
602
603 dev = (struct net_device *) (p_ch->ndev);
604 p_env=privptr->p_env;
605
606 /* Copy interruption response block. */
607 memcpy(p_ch->irb, irb, sizeof(struct irb));
608
609 /* Check for good subchannel return code, otherwise info message */
610 if (irb->scsw.cmd.cstat && !(irb->scsw.cmd.cstat & SCHN_STAT_PCI)) {
611 dev_info(&cdev->dev,
612 "%s: subchannel check for device: %04x -"
613 " Sch Stat %02x Dev Stat %02x CPA - %04x\n",
614 dev->name, p_ch->devno,
615 irb->scsw.cmd.cstat, irb->scsw.cmd.dstat,
616 irb->scsw.cmd.cpa);
617 CLAW_DBF_TEXT(2, trace, "chanchk");
618 /* return; */
619 }
620
621 /* Check the reason-code of a unit check */
622 if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK)
623 ccw_check_unit_check(p_ch, irb->ecw[0]);
624
625 /* State machine to bring the connection up, down and to restart */
626 p_ch->last_dstat = irb->scsw.cmd.dstat;
627
628 switch (p_ch->claw_state) {
629 case CLAW_STOP:/* HALT_IO by claw_release (halt sequence) */
630 if (!((p_ch->irb->scsw.cmd.stctl & SCSW_STCTL_SEC_STATUS) ||
631 (p_ch->irb->scsw.cmd.stctl == SCSW_STCTL_STATUS_PEND) ||
632 (p_ch->irb->scsw.cmd.stctl ==
633 (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND))))
634 return;
635 wake_up(&p_ch->wait); /* wake up claw_release */
636 CLAW_DBF_TEXT(4, trace, "stop");
637 return;
638 case CLAW_START_HALT_IO: /* HALT_IO issued by claw_open */
639 if (!((p_ch->irb->scsw.cmd.stctl & SCSW_STCTL_SEC_STATUS) ||
640 (p_ch->irb->scsw.cmd.stctl == SCSW_STCTL_STATUS_PEND) ||
641 (p_ch->irb->scsw.cmd.stctl ==
642 (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))) {
643 CLAW_DBF_TEXT(4, trace, "haltio");
644 return;
645 }
646 if (p_ch->flag == CLAW_READ) {
647 p_ch->claw_state = CLAW_START_READ;
648 wake_up(&p_ch->wait); /* wake claw_open (READ)*/
649 } else if (p_ch->flag == CLAW_WRITE) {
650 p_ch->claw_state = CLAW_START_WRITE;
651 /* send SYSTEM_VALIDATE */
652 claw_strt_read(dev, LOCK_NO);
653 claw_send_control(dev,
654 SYSTEM_VALIDATE_REQUEST,
655 0, 0, 0,
656 p_env->host_name,
657 p_env->adapter_name);
658 } else {
659 dev_warn(&cdev->dev, "The CLAW device received"
660 " an unexpected IRQ, "
661 "c-%02x d-%02x\n",
662 irb->scsw.cmd.cstat,
663 irb->scsw.cmd.dstat);
664 return;
665 }
666 CLAW_DBF_TEXT(4, trace, "haltio");
667 return;
668 case CLAW_START_READ:
669 CLAW_DBF_TEXT(4, trace, "ReadIRQ");
670 if (p_ch->irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) {
671 clear_bit(0, (void *)&p_ch->IO_active);
672 if ((p_ch->irb->ecw[0] & 0x41) == 0x41 ||
673 (p_ch->irb->ecw[0] & 0x40) == 0x40 ||
674 (p_ch->irb->ecw[0]) == 0) {
675 privptr->stats.rx_errors++;
676 dev_info(&cdev->dev,
677 "%s: Restart is required after remote "
678 "side recovers\n",
679 dev->name);
680 }
681 CLAW_DBF_TEXT(4, trace, "notrdy");
682 return;
683 }
684 if ((p_ch->irb->scsw.cmd.cstat & SCHN_STAT_PCI) &&
685 (p_ch->irb->scsw.cmd.dstat == 0)) {
686 if (test_and_set_bit(CLAW_BH_ACTIVE,
687 (void *)&p_ch->flag_a) == 0)
688 tasklet_schedule(&p_ch->tasklet);
689 else
690 CLAW_DBF_TEXT(4, trace, "PCINoBH");
691 CLAW_DBF_TEXT(4, trace, "PCI_read");
692 return;
693 }
694 if (!((p_ch->irb->scsw.cmd.stctl & SCSW_STCTL_SEC_STATUS) ||
695 (p_ch->irb->scsw.cmd.stctl == SCSW_STCTL_STATUS_PEND) ||
696 (p_ch->irb->scsw.cmd.stctl ==
697 (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))) {
698 CLAW_DBF_TEXT(4, trace, "SPend_rd");
699 return;
700 }
701 clear_bit(0, (void *)&p_ch->IO_active);
702 claw_clearbit_busy(TB_RETRY, dev);
703 if (test_and_set_bit(CLAW_BH_ACTIVE,
704 (void *)&p_ch->flag_a) == 0)
705 tasklet_schedule(&p_ch->tasklet);
706 else
707 CLAW_DBF_TEXT(4, trace, "RdBHAct");
708 CLAW_DBF_TEXT(4, trace, "RdIRQXit");
709 return;
710 case CLAW_START_WRITE:
711 if (p_ch->irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) {
712 dev_info(&cdev->dev,
713 "%s: Unit Check Occurred in "
714 "write channel\n", dev->name);
715 clear_bit(0, (void *)&p_ch->IO_active);
716 if (p_ch->irb->ecw[0] & 0x80) {
717 dev_info(&cdev->dev,
718 "%s: Resetting Event "
719 "occurred:\n", dev->name);
720 init_timer(&p_ch->timer);
721 p_ch->timer.function =
722 (void *)claw_write_retry;
723 p_ch->timer.data = (unsigned long)p_ch;
724 p_ch->timer.expires = jiffies + 10*HZ;
725 add_timer(&p_ch->timer);
726 dev_info(&cdev->dev,
727 "%s: write connection "
728 "restarting\n", dev->name);
729 }
730 CLAW_DBF_TEXT(4, trace, "rstrtwrt");
731 return;
732 }
733 if (p_ch->irb->scsw.cmd.dstat & DEV_STAT_UNIT_EXCEP) {
734 clear_bit(0, (void *)&p_ch->IO_active);
735 dev_info(&cdev->dev,
736 "%s: Unit Exception "
737 "occurred in write channel\n",
738 dev->name);
739 }
740 if (!((p_ch->irb->scsw.cmd.stctl & SCSW_STCTL_SEC_STATUS) ||
741 (p_ch->irb->scsw.cmd.stctl == SCSW_STCTL_STATUS_PEND) ||
742 (p_ch->irb->scsw.cmd.stctl ==
743 (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))) {
744 CLAW_DBF_TEXT(4, trace, "writeUE");
745 return;
746 }
747 clear_bit(0, (void *)&p_ch->IO_active);
748 if (claw_test_and_setbit_busy(TB_TX, dev) == 0) {
749 claw_write_next(p_ch);
750 claw_clearbit_busy(TB_TX, dev);
751 claw_clear_busy(dev);
752 }
753 p_ch_r = (struct chbk *)&privptr->channel[READ_CHANNEL];
754 if (test_and_set_bit(CLAW_BH_ACTIVE,
755 (void *)&p_ch_r->flag_a) == 0)
756 tasklet_schedule(&p_ch_r->tasklet);
757 CLAW_DBF_TEXT(4, trace, "StWtExit");
758 return;
759 default:
760 dev_warn(&cdev->dev,
761 "The CLAW device for %s received an unexpected IRQ\n",
762 dev->name);
763 CLAW_DBF_TEXT(2, trace, "badIRQ");
764 return;
765 }
766
767} /* end of claw_irq_handler */
768
769
770/*-------------------------------------------------------------------*
771* claw_irq_tasklet *
772* *
773*--------------------------------------------------------------------*/
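/*
 * Bottom half for the read channel: runs unpack_read() outside interrupt
 * context and then clears CLAW_BH_ACTIVE, which the interrupt handler uses
 * to avoid scheduling the tasklet while it is already running.
 */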
774static void
775claw_irq_tasklet ( unsigned long data )
776{
777 struct chbk * p_ch;
778 struct net_device *dev;
779
780 p_ch = (struct chbk *) data;
781 dev = (struct net_device *)p_ch->ndev;
782 CLAW_DBF_TEXT(4, trace, "IRQtask");
783 unpack_read(dev);
784 clear_bit(CLAW_BH_ACTIVE, (void *)&p_ch->flag_a);
785 CLAW_DBF_TEXT(4, trace, "TskletXt");
786 return;
787} /* end of claw_irq_bh */
788
789/*-------------------------------------------------------------------*
790* claw_release *
791* *
792*--------------------------------------------------------------------*/
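/*
 * claw_release() stops the device: both channels are halted, any queued or
 * partially packed skbs are dropped, all CCW and data buffer pages are
 * freed, and the read/write CCW chains are reset so that a later open
 * starts from a clean state.
 */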
793static int
794claw_release(struct net_device *dev)
795{
796 int rc;
797 int i;
798 unsigned long saveflags;
799 unsigned long parm;
800 struct claw_privbk *privptr;
801 DECLARE_WAITQUEUE(wait, current);
802 struct ccwbk* p_this_ccw;
803 struct ccwbk* p_buf;
804
805 if (!dev)
806 return 0;
807 privptr = (struct claw_privbk *)dev->ml_priv;
808 if (!privptr)
809 return 0;
810 CLAW_DBF_TEXT(4, trace, "release");
811 privptr->release_pend=1;
812 claw_setbit_busy(TB_STOP,dev);
813 for ( i = 1; i >=0 ; i--) {
814 spin_lock_irqsave(
815 get_ccwdev_lock(privptr->channel[i].cdev), saveflags);
816 /* del_timer(&privptr->channel[READ_CHANNEL].timer); */
817 privptr->channel[i].claw_state = CLAW_STOP;
818 privptr->channel[i].IO_active = 0;
819 parm = (unsigned long) &privptr->channel[i];
820 if (i == WRITE_CHANNEL)
821 claw_purge_skb_queue(
822 &privptr->channel[WRITE_CHANNEL].collect_queue);
823 rc = ccw_device_halt (privptr->channel[i].cdev, parm);
824 if (privptr->system_validate_comp==0x00) /* never opened? */
825 init_waitqueue_head(&privptr->channel[i].wait);
826 add_wait_queue(&privptr->channel[i].wait, &wait);
827 set_current_state(TASK_INTERRUPTIBLE);
828 spin_unlock_irqrestore(
829 get_ccwdev_lock(privptr->channel[i].cdev), saveflags);
830 schedule();
831 set_current_state(TASK_RUNNING);
832 remove_wait_queue(&privptr->channel[i].wait, &wait);
833 if (rc != 0) {
834 ccw_check_return_code(privptr->channel[i].cdev, rc);
835 }
836 }
837 if (privptr->pk_skb != NULL) {
838 dev_kfree_skb_any(privptr->pk_skb);
839 privptr->pk_skb = NULL;
840 }
841 if(privptr->buffs_alloc != 1) {
842 CLAW_DBF_TEXT(4, trace, "none2fre");
843 return 0;
844 }
845 CLAW_DBF_TEXT(4, trace, "freebufs");
846 if (privptr->p_buff_ccw != NULL) {
847 free_pages((unsigned long)privptr->p_buff_ccw,
848 (int)pages_to_order_of_mag(privptr->p_buff_ccw_num));
849 }
850 CLAW_DBF_TEXT(4, trace, "freeread");
851 if (privptr->p_env->read_size < PAGE_SIZE) {
852 if (privptr->p_buff_read != NULL) {
853 free_pages((unsigned long)privptr->p_buff_read,
854 (int)pages_to_order_of_mag(privptr->p_buff_read_num));
855 }
856 }
857 else {
858 p_buf=privptr->p_read_active_first;
859 while (p_buf!=NULL) {
860 free_pages((unsigned long)p_buf->p_buffer,
861 (int)pages_to_order_of_mag(
862 privptr->p_buff_pages_perread ));
863 p_buf=p_buf->next;
864 }
865 }
866 CLAW_DBF_TEXT(4, trace, "freewrit");
867 if (privptr->p_env->write_size < PAGE_SIZE ) {
868 free_pages((unsigned long)privptr->p_buff_write,
869 (int)pages_to_order_of_mag(privptr->p_buff_write_num));
870 }
871 else {
872 p_buf=privptr->p_write_active_first;
873 while (p_buf!=NULL) {
874 free_pages((unsigned long)p_buf->p_buffer,
875 (int)pages_to_order_of_mag(
876 privptr->p_buff_pages_perwrite ));
877 p_buf=p_buf->next;
878 }
879 }
880 CLAW_DBF_TEXT(4, trace, "clearptr");
881 privptr->buffs_alloc = 0;
882 privptr->p_buff_ccw=NULL;
883 privptr->p_buff_read=NULL;
884 privptr->p_buff_write=NULL;
885 privptr->system_validate_comp=0;
886 privptr->release_pend=0;
887 /* Remove any writes that were pending and reset all reads */
888 p_this_ccw=privptr->p_read_active_first;
889 while (p_this_ccw!=NULL) {
890 p_this_ccw->header.length=0xffff;
891 p_this_ccw->header.opcode=0xff;
892 p_this_ccw->header.flag=0x00;
893 p_this_ccw=p_this_ccw->next;
894 }
895
896 while (privptr->p_write_active_first!=NULL) {
897 p_this_ccw=privptr->p_write_active_first;
898 p_this_ccw->header.flag=CLAW_PENDING;
899 privptr->p_write_active_first=p_this_ccw->next;
900 p_this_ccw->next=privptr->p_write_free_chain;
901 privptr->p_write_free_chain=p_this_ccw;
902 ++privptr->write_free_count;
903 }
904 privptr->p_write_active_last=NULL;
905 privptr->mtc_logical_link = -1;
906 privptr->mtc_skipping = 1;
907 privptr->mtc_offset=0;
908
909 if (((privptr->channel[READ_CHANNEL].last_dstat |
910 privptr->channel[WRITE_CHANNEL].last_dstat) &
911 ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END)) != 0x00) {
912 dev_warn(&privptr->channel[READ_CHANNEL].cdev->dev,
913 "Deactivating %s completed with incorrect"
914 " subchannel status "
915 "(read %02x, write %02x)\n",
916 dev->name,
917 privptr->channel[READ_CHANNEL].last_dstat,
918 privptr->channel[WRITE_CHANNEL].last_dstat);
919 CLAW_DBF_TEXT(2, trace, "badclose");
920 }
921 CLAW_DBF_TEXT(4, trace, "rlsexit");
922 return 0;
923} /* end of claw_release */
924
925/*-------------------------------------------------------------------*
926* claw_write_retry *
927* *
928*--------------------------------------------------------------------*/
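/*
 * Timer callback armed by the interrupt handler after a resetting-event
 * unit check on the write channel; it simply restarts output I/O unless the
 * channel is already being stopped.
 */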
929
930static void
931claw_write_retry ( struct chbk *p_ch )
932{
933
934 struct net_device *dev=p_ch->ndev;
935
936 CLAW_DBF_TEXT(4, trace, "w_retry");
937 if (p_ch->claw_state == CLAW_STOP) {
938 return;
939 }
940 claw_strt_out_IO( dev );
941 CLAW_DBF_TEXT(4, trace, "rtry_xit");
942 return;
943} /* end of claw_write_retry */
944
945
946/*-------------------------------------------------------------------*
947* claw_write_next *
948* *
949*--------------------------------------------------------------------*/
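/*
 * Called from the write-channel interrupt path once a write completes:
 * frees finished write buffers, drains collect_queue through
 * claw_pack_skb()/claw_hw_tx() while free buffers remain, and restarts
 * output I/O if active write CCWs are still pending.
 */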
950
951static void
952claw_write_next ( struct chbk * p_ch )
953{
954
955 struct net_device *dev;
956 struct claw_privbk *privptr=NULL;
957 struct sk_buff *pk_skb;
958
959 CLAW_DBF_TEXT(4, trace, "claw_wrt");
960 if (p_ch->claw_state == CLAW_STOP)
961 return;
962 dev = (struct net_device *) p_ch->ndev;
963 privptr = (struct claw_privbk *) dev->ml_priv;
964 claw_free_wrt_buf( dev );
965 if ((privptr->write_free_count > 0) &&
966 !skb_queue_empty(&p_ch->collect_queue)) {
967 pk_skb = claw_pack_skb(privptr);
968 while (pk_skb != NULL) {
969 claw_hw_tx(pk_skb, dev, 1);
970 if (privptr->write_free_count > 0) {
971 pk_skb = claw_pack_skb(privptr);
972 } else
973 pk_skb = NULL;
974 }
975 }
976 if (privptr->p_write_active_first!=NULL) {
977 claw_strt_out_IO(dev);
978 }
979 return;
980} /* end of claw_write_next */
981
982/*-------------------------------------------------------------------*
983* *
984* claw_timer *
985*--------------------------------------------------------------------*/
986
987static void
988claw_timer ( struct chbk * p_ch )
989{
990 CLAW_DBF_TEXT(4, trace, "timer");
991 p_ch->flag |= CLAW_TIMER;
992 wake_up(&p_ch->wait);
993 return;
994} /* end of claw_timer */
995
996/*
997*
998* functions
999*/
1000
1001
1002/*-------------------------------------------------------------------*
1003* *
1004* pages_to_order_of_mag *
1005* *
1006*   takes a number of pages from 1 to 512 and returns the base 2    *
1007*   order of magnitude (log(num_pages)/log(2)) needed by            *
1008*   get_free_pages(); get_free_pages() has an upper order of 9      *
1009*--------------------------------------------------------------------*/
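/*
 * Worked examples: 1 page -> order 0, 2 pages -> order 1, 3 pages ->
 * order 2 (rounded up to 4 pages), 512 pages -> order 9.  Callers always
 * pair this with __get_free_pages()/free_pages() using the same count.
 */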
1010
1011static int
1012pages_to_order_of_mag(int num_of_pages)
1013{
1014 int order_of_mag=1; /* assume 2 pages */
1015 int nump;
1016
1017 CLAW_DBF_TEXT_(5, trace, "pages%d", num_of_pages);
1018 if (num_of_pages == 1) {return 0; } /* magnitude of 0 = 1 page */
1019 /* 512 pages = 2Meg on 4k page systems */
1020 if (num_of_pages >= 512) {return 9; }
1021 /* we have two or more pages order is at least 1 */
1022 for (nump=2 ;nump <= 512;nump*=2) {
1023 if (num_of_pages <= nump)
1024 break;
1025 order_of_mag +=1;
1026 }
1027 if (order_of_mag > 9) { order_of_mag = 9; } /* I know it's paranoid */
1028 CLAW_DBF_TEXT_(5, trace, "mag%d", order_of_mag);
1029 return order_of_mag;
1030}
1031
1032/*-------------------------------------------------------------------*
1033* *
1034* add_claw_reads *
1035* *
1036*--------------------------------------------------------------------*/
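/*
 * Appends a freshly built chain of read CCW blocks to the running read
 * channel program.  Two alternating ending sequences (read1/read2 NOPs) are
 * used so that the TIC CCWs of the active chain can be redirected to the
 * new chain with a single memcpy of a complete CCW, ensuring the channel
 * never fetches a half-updated CCW.
 */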
1037static int
1038add_claw_reads(struct net_device *dev, struct ccwbk* p_first,
1039 struct ccwbk* p_last)
1040{
1041 struct claw_privbk *privptr;
1042 struct ccw1 temp_ccw;
1043 struct endccw * p_end;
1044 CLAW_DBF_TEXT(4, trace, "addreads");
1045 privptr = dev->ml_priv;
1046 p_end = privptr->p_end_ccw;
1047
1048 /* first CCW and last CCW contain a new set of read channel programs
1049 * to append to the running channel programs
1050 */
1051 if ( p_first==NULL) {
1052 CLAW_DBF_TEXT(4, trace, "addexit");
1053 return 0;
1054 }
1055
1056 /* set up ending CCW sequence for this segment */
1057 if (p_end->read1) {
1058 p_end->read1=0x00; /* second ending CCW is now active */
1059 /* reset ending CCWs and setup TIC CCWs */
1060 p_end->read2_nop2.cmd_code = CCW_CLAW_CMD_READFF;
1061 p_end->read2_nop2.flags = CCW_FLAG_SLI | CCW_FLAG_SKIP;
1062 p_last->r_TIC_1.cda =(__u32)__pa(&p_end->read2_nop1);
1063 p_last->r_TIC_2.cda =(__u32)__pa(&p_end->read2_nop1);
1064 p_end->read2_nop2.cda=0;
1065 p_end->read2_nop2.count=1;
1066 }
1067 else {
1068 p_end->read1=0x01; /* first ending CCW is now active */
1069 /* reset ending CCWs and setup TIC CCWs */
1070 p_end->read1_nop2.cmd_code = CCW_CLAW_CMD_READFF;
1071 p_end->read1_nop2.flags = CCW_FLAG_SLI | CCW_FLAG_SKIP;
1072 p_last->r_TIC_1.cda = (__u32)__pa(&p_end->read1_nop1);
1073 p_last->r_TIC_2.cda = (__u32)__pa(&p_end->read1_nop1);
1074 p_end->read1_nop2.cda=0;
1075 p_end->read1_nop2.count=1;
1076 }
1077
1078 if ( privptr-> p_read_active_first ==NULL ) {
1079 privptr->p_read_active_first = p_first; /* set new first */
1080 privptr->p_read_active_last = p_last; /* set new last */
1081 }
1082 else {
1083
1084 /* set up TIC ccw */
1085 temp_ccw.cda= (__u32)__pa(&p_first->read);
1086 temp_ccw.count=0;
1087 temp_ccw.flags=0;
1088 temp_ccw.cmd_code = CCW_CLAW_CMD_TIC;
1089
1090
1091 if (p_end->read1) {
1092
1093 /* first set of CCW's is chained to the new read */
1094 /* chain, so the second set is chained to the active chain. */
1095 /* Therefore modify the second set to point to the new */
1096 /* read chain (set up TIC CCWs). */
1097 /* make sure we update the CCW so channel doesn't fetch it */
1098 /* when it's only half done */
1099 memcpy( &p_end->read2_nop2, &temp_ccw ,
1100 sizeof(struct ccw1));
1101 privptr->p_read_active_last->r_TIC_1.cda=
1102 (__u32)__pa(&p_first->read);
1103 privptr->p_read_active_last->r_TIC_2.cda=
1104 (__u32)__pa(&p_first->read);
1105 }
1106 else {
1107 /* make sure we update the CCW so channel doesn't */
1108 /* fetch it when it is only half done */
1109 memcpy( &p_end->read1_nop2, &temp_ccw ,
1110 sizeof(struct ccw1));
1111 privptr->p_read_active_last->r_TIC_1.cda=
1112 (__u32)__pa(&p_first->read);
1113 privptr->p_read_active_last->r_TIC_2.cda=
1114 (__u32)__pa(&p_first->read);
1115 }
1116 /* chain in new set of blocks */
1117 privptr->p_read_active_last->next = p_first;
1118 privptr->p_read_active_last=p_last;
1119 } /* end of if ( privptr-> p_read_active_first ==NULL) */
1120 CLAW_DBF_TEXT(4, trace, "addexit");
1121 return 0;
1122} /* end of add_claw_reads */
1123
1124/*-------------------------------------------------------------------*
1125 * ccw_check_return_code *
1126 * *
1127 *-------------------------------------------------------------------*/
1128
1129static void
1130ccw_check_return_code(struct ccw_device *cdev, int return_code)
1131{
1132 CLAW_DBF_TEXT(4, trace, "ccwret");
1133 if (return_code != 0) {
1134 switch (return_code) {
1135 case -EBUSY: /* BUSY is a transient state no action needed */
1136 break;
1137 case -ENODEV:
1138 dev_err(&cdev->dev, "The remote channel adapter is not"
1139 " available\n");
1140 break;
1141 case -EINVAL:
1142 dev_err(&cdev->dev,
1143 "The status of the remote channel adapter"
1144 " is not valid\n");
1145 break;
1146 default:
1147 dev_err(&cdev->dev, "The common device layer"
1148 " returned error code %d\n",
1149 return_code);
1150 }
1151 }
1152 CLAW_DBF_TEXT(4, trace, "ccwret");
1153} /* end of ccw_check_return_code */
1154
1155/*-------------------------------------------------------------------*
1156* ccw_check_unit_check *
1157*--------------------------------------------------------------------*/
1158
1159static void
1160ccw_check_unit_check(struct chbk * p_ch, unsigned char sense )
1161{
1162 struct net_device *ndev = p_ch->ndev;
1163 struct device *dev = &p_ch->cdev->dev;
1164
1165 CLAW_DBF_TEXT(4, trace, "unitchek");
1166 dev_warn(dev, "The communication peer of %s disconnected\n",
1167 ndev->name);
1168
1169 if (sense & 0x40) {
1170 if (sense & 0x01) {
1171 dev_warn(dev, "The remote channel adapter for"
1172 " %s has been reset\n",
1173 ndev->name);
1174 }
1175 } else if (sense & 0x20) {
1176 if (sense & 0x04) {
1177 dev_warn(dev, "A data streaming timeout occurred"
1178 " for %s\n",
1179 ndev->name);
1180 } else if (sense & 0x10) {
1181 dev_warn(dev, "The remote channel adapter for %s"
1182 " is faulty\n",
1183 ndev->name);
1184 } else {
1185 dev_warn(dev, "A data transfer parity error occurred"
1186 " for %s\n",
1187 ndev->name);
1188 }
1189 } else if (sense & 0x10) {
1190 dev_warn(dev, "A read data parity error occurred"
1191 " for %s\n",
1192 ndev->name);
1193 }
1194
1195} /* end of ccw_check_unit_check */
1196
1197/*-------------------------------------------------------------------*
1198* find_link *
1199*--------------------------------------------------------------------*/
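/*
 * Validates the host/workstation name pair received during connection setup
 * against the names expected for the current packing mode.  Note that a
 * mismatch returns the positive value EINVAL; callers only check for a
 * non-zero result.
 */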
1200static int
1201find_link(struct net_device *dev, char *host_name, char *ws_name )
1202{
1203 struct claw_privbk *privptr;
1204 struct claw_env *p_env;
1205 int rc=0;
1206
1207 CLAW_DBF_TEXT(2, setup, "findlink");
1208 privptr = dev->ml_priv;
1209 p_env=privptr->p_env;
1210 switch (p_env->packing)
1211 {
1212 case PACKING_ASK:
1213 if ((memcmp(WS_APPL_NAME_PACKED, host_name, 8)!=0) ||
1214 (memcmp(WS_APPL_NAME_PACKED, ws_name, 8)!=0 ))
1215 rc = EINVAL;
1216 break;
1217 case DO_PACKED:
1218 case PACK_SEND:
1219 if ((memcmp(WS_APPL_NAME_IP_NAME, host_name, 8)!=0) ||
1220 (memcmp(WS_APPL_NAME_IP_NAME, ws_name, 8)!=0 ))
1221 rc = EINVAL;
1222 break;
1223 default:
1224 if ((memcmp(HOST_APPL_NAME, host_name, 8)!=0) ||
1225 (memcmp(p_env->api_type , ws_name, 8)!=0))
1226 rc = EINVAL;
1227 break;
1228 }
1229
1230 return rc;
1231} /* end of find_link */
1232
1233/*-------------------------------------------------------------------*
1234 * claw_hw_tx *
1235 * *
1236 * *
1237 *-------------------------------------------------------------------*/
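/*
 * Transmit path: when packing is negotiated a clawph packing header is
 * prepended and the frame is padded to a multiple of four bytes.  If no
 * write buffers are free the skb is parked on collect_queue (with an extra
 * reference) and transmitted later from claw_write_next(); otherwise the
 * data is copied into one or more write buffers, the matching write CCWs
 * are chained and appended to the running write channel program, and
 * output I/O is (re)started.
 */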
1238
1239static int
1240claw_hw_tx(struct sk_buff *skb, struct net_device *dev, long linkid)
1241{
1242 int rc=0;
1243 struct claw_privbk *privptr;
1244 struct ccwbk *p_this_ccw;
1245 struct ccwbk *p_first_ccw;
1246 struct ccwbk *p_last_ccw;
1247 __u32 numBuffers;
1248 signed long len_of_data;
1249 unsigned long bytesInThisBuffer;
1250 unsigned char *pDataAddress;
1251 struct endccw *pEnd;
1252 struct ccw1 tempCCW;
1253 struct claw_env *p_env;
1254 struct clawph *pk_head;
1255 struct chbk *ch;
1256
1257 CLAW_DBF_TEXT(4, trace, "hw_tx");
1258 privptr = (struct claw_privbk *)(dev->ml_priv);
1259 p_env =privptr->p_env;
1260 claw_free_wrt_buf(dev); /* Clean up free chain if possible */
1261 /* scan the write queue to free any completed write packets */
1262 p_first_ccw=NULL;
1263 p_last_ccw=NULL;
1264 if ((p_env->packing >= PACK_SEND) &&
1265 (skb->cb[1] != 'P')) {
1266 skb_push(skb,sizeof(struct clawph));
1267 pk_head=(struct clawph *)skb->data;
1268 pk_head->len=skb->len-sizeof(struct clawph);
1269 if (pk_head->len%4) {
1270 skb_pad(skb,4-(pk_head->len%4));
1271 skb_put(skb,4-(pk_head->len%4));
1272 pk_head->len+= 4-(pk_head->len%4);
1273 }
1274 if (p_env->packing == DO_PACKED)
1275 pk_head->link_num = linkid;
1276 else
1277 pk_head->link_num = 0;
1278 pk_head->flag = 0x00;
1279 skb_pad(skb,4);
1280 skb->cb[1] = 'P';
1281 }
1282 if (linkid == 0) {
1283 if (claw_check_busy(dev)) {
1284 if (privptr->write_free_count!=0) {
1285 claw_clear_busy(dev);
1286 }
1287 else {
1288 claw_strt_out_IO(dev );
1289 claw_free_wrt_buf( dev );
1290 if (privptr->write_free_count==0) {
1291 ch = &privptr->channel[WRITE_CHANNEL];
1292 atomic_inc(&skb->users);
1293 skb_queue_tail(&ch->collect_queue, skb);
1294 goto Done;
1295 }
1296 else {
1297 claw_clear_busy(dev);
1298 }
1299 }
1300 }
1301 /* tx lock */
1302 if (claw_test_and_setbit_busy(TB_TX,dev)) { /* set to busy */
1303 ch = &privptr->channel[WRITE_CHANNEL];
1304 atomic_inc(&skb->users);
1305 skb_queue_tail(&ch->collect_queue, skb);
1306 claw_strt_out_IO(dev );
1307 rc=-EBUSY;
1308 goto Done2;
1309 }
1310 }
1311 /* See how many write buffers are required to hold this data */
1312 numBuffers = DIV_ROUND_UP(skb->len, privptr->p_env->write_size);
1313
1314 /* If that number of buffers isn't available, give up for now */
1315 if (privptr->write_free_count < numBuffers ||
1316 privptr->p_write_free_chain == NULL ) {
1317
1318 claw_setbit_busy(TB_NOBUFFER,dev);
1319 ch = &privptr->channel[WRITE_CHANNEL];
1320 atomic_inc(&skb->users);
1321 skb_queue_tail(&ch->collect_queue, skb);
1322 CLAW_DBF_TEXT(2, trace, "clawbusy");
1323 goto Done2;
1324 }
1325 pDataAddress=skb->data;
1326 len_of_data=skb->len;
1327
1328 while (len_of_data > 0) {
1329 p_this_ccw=privptr->p_write_free_chain; /* get a block */
1330 if (p_this_ccw == NULL) { /* lost the race */
1331 ch = &privptr->channel[WRITE_CHANNEL];
1332 atomic_inc(&skb->users);
1333 skb_queue_tail(&ch->collect_queue, skb);
1334 goto Done2;
1335 }
1336 privptr->p_write_free_chain=p_this_ccw->next;
1337 p_this_ccw->next=NULL;
1338 --privptr->write_free_count; /* -1 */
1339 if (len_of_data >= privptr->p_env->write_size)
1340 bytesInThisBuffer = privptr->p_env->write_size;
1341 else
1342 bytesInThisBuffer = len_of_data;
1343 memcpy( p_this_ccw->p_buffer,pDataAddress, bytesInThisBuffer);
1344 len_of_data-=bytesInThisBuffer;
1345 pDataAddress+=(unsigned long)bytesInThisBuffer;
1346 /* setup write CCW */
1347 p_this_ccw->write.cmd_code = (linkid * 8) +1;
1348 if (len_of_data>0) {
1349 p_this_ccw->write.cmd_code+=MORE_to_COME_FLAG;
1350 }
1351 p_this_ccw->write.count=bytesInThisBuffer;
1352 /* now add to end of this chain */
1353 if (p_first_ccw==NULL) {
1354 p_first_ccw=p_this_ccw;
1355 }
1356 if (p_last_ccw!=NULL) {
1357 p_last_ccw->next=p_this_ccw;
1358 /* set up TIC ccws */
1359 p_last_ccw->w_TIC_1.cda=
1360 (__u32)__pa(&p_this_ccw->write);
1361 }
1362 p_last_ccw=p_this_ccw; /* save new last block */
1363 }
1364
1365 /* FirstCCW and LastCCW now contain a new set of write channel
1366 * programs to append to the running channel program
1367 */
1368
1369 if (p_first_ccw!=NULL) {
1370 /* setup ending ccw sequence for this segment */
1371 pEnd=privptr->p_end_ccw;
1372 if (pEnd->write1) {
1373 pEnd->write1=0x00; /* second end ccw is now active */
1374 /* set up Tic CCWs */
1375 p_last_ccw->w_TIC_1.cda=
1376 (__u32)__pa(&pEnd->write2_nop1);
1377 pEnd->write2_nop2.cmd_code = CCW_CLAW_CMD_READFF;
1378 pEnd->write2_nop2.flags =
1379 CCW_FLAG_SLI | CCW_FLAG_SKIP;
1380 pEnd->write2_nop2.cda=0;
1381 pEnd->write2_nop2.count=1;
1382 }
1383 else { /* end of if (pEnd->write1)*/
1384 pEnd->write1=0x01; /* first end ccw is now active */
1385 /* set up Tic CCWs */
1386 p_last_ccw->w_TIC_1.cda=
1387 (__u32)__pa(&pEnd->write1_nop1);
1388 pEnd->write1_nop2.cmd_code = CCW_CLAW_CMD_READFF;
1389 pEnd->write1_nop2.flags =
1390 CCW_FLAG_SLI | CCW_FLAG_SKIP;
1391 pEnd->write1_nop2.cda=0;
1392 pEnd->write1_nop2.count=1;
1393 } /* end if if (pEnd->write1) */
1394
1395 if (privptr->p_write_active_first==NULL ) {
1396 privptr->p_write_active_first=p_first_ccw;
1397 privptr->p_write_active_last=p_last_ccw;
1398 }
1399 else {
1400 /* set up Tic CCWs */
1401
1402 tempCCW.cda=(__u32)__pa(&p_first_ccw->write);
1403 tempCCW.count=0;
1404 tempCCW.flags=0;
1405 tempCCW.cmd_code=CCW_CLAW_CMD_TIC;
1406
1407 if (pEnd->write1) {
1408
1409 /*
1410 * first set of ending CCW's is chained to the new write
1411 * chain, so the second set is chained to the active chain
1412 * Therefore modify the second set to point to the new write chain.
1413 * make sure we update the CCW atomically
1414 * so channel does not fetch it when it's only half done
1415 */
1416 memcpy( &pEnd->write2_nop2, &tempCCW ,
1417 sizeof(struct ccw1));
1418 privptr->p_write_active_last->w_TIC_1.cda=
1419 (__u32)__pa(&p_first_ccw->write);
1420 }
1421 else {
1422
1423 /*make sure we update the CCW atomically
1424 *so channel does not fetch it when it's only half done
1425 */
1426 memcpy(&pEnd->write1_nop2, &tempCCW ,
1427 sizeof(struct ccw1));
1428 privptr->p_write_active_last->w_TIC_1.cda=
1429 (__u32)__pa(&p_first_ccw->write);
1430
1431 } /* end if if (pEnd->write1) */
1432
1433 privptr->p_write_active_last->next=p_first_ccw;
1434 privptr->p_write_active_last=p_last_ccw;
1435 }
1436
1437 } /* endif (p_first_ccw!=NULL) */
1438 dev_kfree_skb_any(skb);
1439 claw_strt_out_IO(dev );
1440 /* if write free count is zero, set NOBUFFER */
1441 if (privptr->write_free_count==0) {
1442 claw_setbit_busy(TB_NOBUFFER,dev);
1443 }
1444Done2:
1445 claw_clearbit_busy(TB_TX,dev);
1446Done:
1447 return(rc);
1448} /* end of claw_hw_tx */
1449
1450/*-------------------------------------------------------------------*
1451* *
1452* init_ccw_bk *
1453* *
1454*--------------------------------------------------------------------*/
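/*
 * Sets up all channel program storage for one device: one ccwbk block per
 * read buffer, one per write buffer, plus one for the CLAW signal block,
 * followed by the read and write data buffers themselves.  The allocations
 * use __GFP_DMA because the CCW cda fields are stored as 32-bit values
 * (__u32 __pa(...)) and must reference low, directly addressable memory.
 */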
1455
1456static int
1457init_ccw_bk(struct net_device *dev)
1458{
1459
1460 __u32 ccw_blocks_required;
1461 __u32 ccw_blocks_perpage;
1462 __u32 ccw_pages_required;
1463 __u32 claw_reads_perpage=1;
1464 __u32 claw_read_pages;
1465 __u32 claw_writes_perpage=1;
1466 __u32 claw_write_pages;
1467 void *p_buff=NULL;
1468 struct ccwbk*p_free_chain;
1469 struct ccwbk*p_buf;
1470 struct ccwbk*p_last_CCWB;
1471 struct ccwbk*p_first_CCWB;
1472 struct endccw *p_endccw=NULL;
1473 addr_t real_address;
1474 struct claw_privbk *privptr = dev->ml_priv;
1475 struct clawh *pClawH=NULL;
1476 addr_t real_TIC_address;
1477 int i,j;
1478 CLAW_DBF_TEXT(4, trace, "init_ccw");
1479
1480 /* initialize statistics field */
1481 privptr->active_link_ID=0;
1482 /* initialize ccwbk pointers */
1483 privptr->p_write_free_chain=NULL; /* pointer to free ccw chain*/
1484 privptr->p_write_active_first=NULL; /* pointer to the first write ccw*/
1485 privptr->p_write_active_last=NULL; /* pointer to the last write ccw*/
1486 privptr->p_read_active_first=NULL; /* pointer to the first read ccw*/
1487 privptr->p_read_active_last=NULL; /* pointer to the last read ccw */
1488 privptr->p_end_ccw=NULL; /* pointer to ending ccw */
1489 privptr->p_claw_signal_blk=NULL; /* pointer to signal block */
1490 privptr->buffs_alloc = 0;
1491 memset(&privptr->end_ccw, 0x00, sizeof(struct endccw));
1492 memset(&privptr->ctl_bk, 0x00, sizeof(struct clawctl));
1493 /* initialize free write ccwbk counter */
1494 privptr->write_free_count=0; /* number of free bufs on write chain */
1495 p_last_CCWB = NULL;
1496 p_first_CCWB= NULL;
1497 /*
1498 * We need 1 CCW block for each read buffer, 1 for each
1499 * write buffer, plus 1 for ClawSignalBlock
1500 */
1501 ccw_blocks_required =
1502 privptr->p_env->read_buffers+privptr->p_env->write_buffers+1;
1503 /*
1504 * compute number of CCW blocks that will fit in a page
1505 */
1506 ccw_blocks_perpage= PAGE_SIZE / CCWBK_SIZE;
1507 ccw_pages_required=
1508 DIV_ROUND_UP(ccw_blocks_required, ccw_blocks_perpage);
1509
1510 /*
1511 * read and write sizes are set by 2 constants in claw.h
1512 * 4k and 32k. Unpacked values other than 4k are not going to
1513 * provide good performance. When packing is supported, 32k
1514 * buffers are used.
1515 */
1516 if (privptr->p_env->read_size < PAGE_SIZE) {
1517 claw_reads_perpage = PAGE_SIZE / privptr->p_env->read_size;
1518 claw_read_pages = DIV_ROUND_UP(privptr->p_env->read_buffers,
1519 claw_reads_perpage);
1520 }
1521 else { /* > or equal */
1522 privptr->p_buff_pages_perread =
1523 DIV_ROUND_UP(privptr->p_env->read_size, PAGE_SIZE);
1524 claw_read_pages = privptr->p_env->read_buffers *
1525 privptr->p_buff_pages_perread;
1526 }
1527 if (privptr->p_env->write_size < PAGE_SIZE) {
1528 claw_writes_perpage =
1529 PAGE_SIZE / privptr->p_env->write_size;
1530 claw_write_pages = DIV_ROUND_UP(privptr->p_env->write_buffers,
1531 claw_writes_perpage);
1532
1533 }
1534 else { /* > or equal */
1535 privptr->p_buff_pages_perwrite =
1536 DIV_ROUND_UP(privptr->p_env->write_size, PAGE_SIZE);
1537 claw_write_pages = privptr->p_env->write_buffers *
1538 privptr->p_buff_pages_perwrite;
1539 }
1540 /*
1541 * allocate ccw_pages_required
1542 */
1543 if (privptr->p_buff_ccw==NULL) {
1544 privptr->p_buff_ccw=
1545 (void *)__get_free_pages(__GFP_DMA,
1546 (int)pages_to_order_of_mag(ccw_pages_required ));
1547 if (privptr->p_buff_ccw==NULL) {
1548 return -ENOMEM;
1549 }
1550 privptr->p_buff_ccw_num=ccw_pages_required;
1551 }
1552 memset(privptr->p_buff_ccw, 0x00,
1553 privptr->p_buff_ccw_num * PAGE_SIZE);
1554
1555 /*
1556 * obtain ending ccw block address
1557 *
1558 */
1559 privptr->p_end_ccw = (struct endccw *)&privptr->end_ccw;
1560 real_address = (__u32)__pa(privptr->p_end_ccw);
1561 /* Initialize ending CCW block */
1562 p_endccw=privptr->p_end_ccw;
1563 p_endccw->real=real_address;
1564 p_endccw->write1=0x00;
1565 p_endccw->read1=0x00;
1566
1567 /* write1_nop1 */
1568 p_endccw->write1_nop1.cmd_code = CCW_CLAW_CMD_NOP;
1569 p_endccw->write1_nop1.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1570 p_endccw->write1_nop1.count = 1;
1571 p_endccw->write1_nop1.cda = 0;
1572
1573 /* write1_nop2 */
1574 p_endccw->write1_nop2.cmd_code = CCW_CLAW_CMD_READFF;
1575 p_endccw->write1_nop2.flags = CCW_FLAG_SLI | CCW_FLAG_SKIP;
1576 p_endccw->write1_nop2.count = 1;
1577 p_endccw->write1_nop2.cda = 0;
1578
1579 /* write2_nop1 */
1580 p_endccw->write2_nop1.cmd_code = CCW_CLAW_CMD_NOP;
1581 p_endccw->write2_nop1.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1582 p_endccw->write2_nop1.count = 1;
1583 p_endccw->write2_nop1.cda = 0;
1584
1585 /* write2_nop2 */
1586 p_endccw->write2_nop2.cmd_code = CCW_CLAW_CMD_READFF;
1587 p_endccw->write2_nop2.flags = CCW_FLAG_SLI | CCW_FLAG_SKIP;
1588 p_endccw->write2_nop2.count = 1;
1589 p_endccw->write2_nop2.cda = 0;
1590
1591 /* read1_nop1 */
1592 p_endccw->read1_nop1.cmd_code = CCW_CLAW_CMD_NOP;
1593 p_endccw->read1_nop1.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1594 p_endccw->read1_nop1.count = 1;
1595 p_endccw->read1_nop1.cda = 0;
1596
1597 /* read1_nop2 */
1598 p_endccw->read1_nop2.cmd_code = CCW_CLAW_CMD_READFF;
1599 p_endccw->read1_nop2.flags = CCW_FLAG_SLI | CCW_FLAG_SKIP;
1600 p_endccw->read1_nop2.count = 1;
1601 p_endccw->read1_nop2.cda = 0;
1602
1603 /* read2_nop1 */
1604 p_endccw->read2_nop1.cmd_code = CCW_CLAW_CMD_NOP;
1605 p_endccw->read2_nop1.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1606 p_endccw->read2_nop1.count = 1;
1607 p_endccw->read2_nop1.cda = 0;
1608
1609 /* read2_nop2 */
1610 p_endccw->read2_nop2.cmd_code = CCW_CLAW_CMD_READFF;
1611 p_endccw->read2_nop2.flags = CCW_FLAG_SLI | CCW_FLAG_SKIP;
1612 p_endccw->read2_nop2.count = 1;
1613 p_endccw->read2_nop2.cda = 0;
1614
1615 /*
1616 * Build a chain of CCWs
1617 *
1618 */
1619 p_buff=privptr->p_buff_ccw;
1620
1621 p_free_chain=NULL;
1622 for (i=0 ; i < ccw_pages_required; i++ ) {
1623 real_address = (__u32)__pa(p_buff);
1624 p_buf=p_buff;
1625 for (j=0 ; j < ccw_blocks_perpage ; j++) {
1626 p_buf->next = p_free_chain;
1627 p_free_chain = p_buf;
1628 p_buf->real=(__u32)__pa(p_buf);
1629 ++p_buf;
1630 }
1631 p_buff+=PAGE_SIZE;
1632 }
1633 /*
1634 * Initialize ClawSignalBlock
1635 *
1636 */
1637 if (privptr->p_claw_signal_blk==NULL) {
1638 privptr->p_claw_signal_blk=p_free_chain;
1639 p_free_chain=p_free_chain->next;
1640 pClawH=(struct clawh *)privptr->p_claw_signal_blk;
1641 pClawH->length=0xffff;
1642 pClawH->opcode=0xff;
1643 pClawH->flag=CLAW_BUSY;
1644 }
1645
1646 /*
1647 * allocate write_pages_required and add to free chain
1648 */
1649 if (privptr->p_buff_write==NULL) {
1650 if (privptr->p_env->write_size < PAGE_SIZE) {
1651 privptr->p_buff_write=
1652 (void *)__get_free_pages(__GFP_DMA,
1653 (int)pages_to_order_of_mag(claw_write_pages ));
1654 if (privptr->p_buff_write==NULL) {
1655 privptr->p_buff_ccw=NULL;
1656 return -ENOMEM;
1657 }
1658 /*
1659 * Build CLAW write free chain
1660 *
1661 */
1662
1663 memset(privptr->p_buff_write, 0x00,
1664 claw_write_pages * PAGE_SIZE);
1665 privptr->p_write_free_chain=NULL;
1666
1667 p_buff=privptr->p_buff_write;
1668
1669 for (i=0 ; i< privptr->p_env->write_buffers ; i++) {
1670 p_buf = p_free_chain; /* get a CCW */
1671 p_free_chain = p_buf->next;
1672 p_buf->next =privptr->p_write_free_chain;
1673 privptr->p_write_free_chain = p_buf;
1674 p_buf-> p_buffer = (struct clawbuf *)p_buff;
1675 p_buf-> write.cda = (__u32)__pa(p_buff);
1676 p_buf-> write.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1677 p_buf-> w_read_FF.cmd_code = CCW_CLAW_CMD_READFF;
1678 p_buf-> w_read_FF.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1679 p_buf-> w_read_FF.count = 1;
1680 p_buf-> w_read_FF.cda =
1681 (__u32)__pa(&p_buf-> header.flag);
1682 p_buf-> w_TIC_1.cmd_code = CCW_CLAW_CMD_TIC;
1683 p_buf-> w_TIC_1.flags = 0;
1684 p_buf-> w_TIC_1.count = 0;
1685
1686 if (((unsigned long)p_buff +
1687 privptr->p_env->write_size) >=
1688 ((unsigned long)(p_buff+2*
1689 (privptr->p_env->write_size) - 1) & PAGE_MASK)) {
1690 p_buff = p_buff+privptr->p_env->write_size;
1691 }
1692 }
1693 }
1694 else /* Buffers are >= PAGE_SIZE. 1 buff per get_free_pages */
1695 {
1696 privptr->p_write_free_chain=NULL;
1697 for (i = 0; i< privptr->p_env->write_buffers ; i++) {
1698 p_buff=(void *)__get_free_pages(__GFP_DMA,
1699 (int)pages_to_order_of_mag(
1700 privptr->p_buff_pages_perwrite) );
1701 if (p_buff==NULL) {
1702 free_pages((unsigned long)privptr->p_buff_ccw,
1703 (int)pages_to_order_of_mag(
1704 privptr->p_buff_ccw_num));
1705 privptr->p_buff_ccw=NULL;
1706 p_buf=privptr->p_buff_write;
1707 while (p_buf!=NULL) {
1708 free_pages((unsigned long)
1709 p_buf->p_buffer,
1710 (int)pages_to_order_of_mag(
1711 privptr->p_buff_pages_perwrite));
1712 p_buf=p_buf->next;
1713 }
1714 return -ENOMEM;
1715 } /* Error on get_pages */
1716 memset(p_buff, 0x00, privptr->p_env->write_size );
1717 p_buf = p_free_chain;
1718 p_free_chain = p_buf->next;
1719 p_buf->next = privptr->p_write_free_chain;
1720 privptr->p_write_free_chain = p_buf;
1721 privptr->p_buff_write = p_buf;
1722 p_buf->p_buffer=(struct clawbuf *)p_buff;
1723 p_buf-> write.cda = (__u32)__pa(p_buff);
1724 p_buf-> write.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1725 p_buf-> w_read_FF.cmd_code = CCW_CLAW_CMD_READFF;
1726 p_buf-> w_read_FF.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1727 p_buf-> w_read_FF.count = 1;
1728 p_buf-> w_read_FF.cda =
1729 (__u32)__pa(&p_buf-> header.flag);
1730 p_buf-> w_TIC_1.cmd_code = CCW_CLAW_CMD_TIC;
1731 p_buf-> w_TIC_1.flags = 0;
1732 p_buf-> w_TIC_1.count = 0;
1733 } /* for all write_buffers */
1734
1735 } /* else buffers are PAGE_SIZE or bigger */
1736
1737 }
1738 privptr->p_buff_write_num=claw_write_pages;
1739 privptr->write_free_count=privptr->p_env->write_buffers;
1740
1741
1742 /*
1743 * allocate read_pages_required and chain to free chain
1744 */
1745 if (privptr->p_buff_read==NULL) {
1746 if (privptr->p_env->read_size < PAGE_SIZE) {
1747 privptr->p_buff_read=
1748 (void *)__get_free_pages(__GFP_DMA,
1749 (int)pages_to_order_of_mag(claw_read_pages) );
1750 if (privptr->p_buff_read==NULL) {
1751 free_pages((unsigned long)privptr->p_buff_ccw,
1752 (int)pages_to_order_of_mag(
1753 privptr->p_buff_ccw_num));
1754 /* free the write pages; size is < page size */
1755 free_pages((unsigned long)privptr->p_buff_write,
1756 (int)pages_to_order_of_mag(
1757 privptr->p_buff_write_num));
1758 privptr->p_buff_ccw=NULL;
1759 privptr->p_buff_write=NULL;
1760 return -ENOMEM;
1761 }
1762 memset(privptr->p_buff_read, 0x00, claw_read_pages * PAGE_SIZE);
1763 privptr->p_buff_read_num=claw_read_pages;
1764 /*
1765 * Build CLAW read free chain
1766 *
1767 */
1768 p_buff=privptr->p_buff_read;
1769 for (i=0 ; i< privptr->p_env->read_buffers ; i++) {
1770 p_buf = p_free_chain;
1771 p_free_chain = p_buf->next;
1772
1773 if (p_last_CCWB==NULL) {
1774 p_buf->next=NULL;
1775 real_TIC_address=0;
1776 p_last_CCWB=p_buf;
1777 }
1778 else {
1779 p_buf->next=p_first_CCWB;
1780 real_TIC_address=
1781 (__u32)__pa(&p_first_CCWB -> read );
1782 }
1783
1784 p_first_CCWB=p_buf;
1785
1786 p_buf->p_buffer=(struct clawbuf *)p_buff;
1787 /* initialize read command */
1788 p_buf-> read.cmd_code = CCW_CLAW_CMD_READ;
1789 p_buf-> read.cda = (__u32)__pa(p_buff);
1790 p_buf-> read.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1791 p_buf-> read.count = privptr->p_env->read_size;
1792
1793 /* initialize read_h command */
1794 p_buf-> read_h.cmd_code = CCW_CLAW_CMD_READHEADER;
1795 p_buf-> read_h.cda =
1796 (__u32)__pa(&(p_buf->header));
1797 p_buf-> read_h.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1798 p_buf-> read_h.count = sizeof(struct clawh);
1799
1800 /* initialize Signal command */
1801 p_buf-> signal.cmd_code = CCW_CLAW_CMD_SIGNAL_SMOD;
1802 p_buf-> signal.cda =
1803 (__u32)__pa(&(pClawH->flag));
1804 p_buf-> signal.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1805 p_buf-> signal.count = 1;
1806
1807 /* initialize r_TIC_1 command */
1808 p_buf-> r_TIC_1.cmd_code = CCW_CLAW_CMD_TIC;
1809 p_buf-> r_TIC_1.cda = (__u32)real_TIC_address;
1810 p_buf-> r_TIC_1.flags = 0;
1811 p_buf-> r_TIC_1.count = 0;
1812
1813 /* initialize r_read_FF command */
1814 p_buf-> r_read_FF.cmd_code = CCW_CLAW_CMD_READFF;
1815 p_buf-> r_read_FF.cda =
1816 (__u32)__pa(&(pClawH->flag));
1817 p_buf-> r_read_FF.flags =
1818 CCW_FLAG_SLI | CCW_FLAG_CC | CCW_FLAG_PCI;
1819 p_buf-> r_read_FF.count = 1;
1820
1821 /* initialize r_TIC_2 */
1822 memcpy(&p_buf->r_TIC_2,
1823 &p_buf->r_TIC_1, sizeof(struct ccw1));
1824
1825 /* initialize Header */
1826 p_buf->header.length=0xffff;
1827 p_buf->header.opcode=0xff;
1828 p_buf->header.flag=CLAW_PENDING;
1829
1830 if (((unsigned long)p_buff+privptr->p_env->read_size) >=
1831 ((unsigned long)(p_buff+2*(privptr->p_env->read_size)
1832 -1)
1833 & PAGE_MASK)) {
1834 p_buff= p_buff+privptr->p_env->read_size;
1835 }
1836 else {
1837 p_buff=
1838 (void *)((unsigned long)
1839 (p_buff+2*(privptr->p_env->read_size)-1)
1840 & PAGE_MASK) ;
1841 }
1842 } /* for read_buffers */
1843 } /* read_size < PAGE_SIZE */
1844 else { /* read Size >= PAGE_SIZE */
1845 for (i=0 ; i< privptr->p_env->read_buffers ; i++) {
1846 p_buff = (void *)__get_free_pages(__GFP_DMA,
1847 (int)pages_to_order_of_mag(
1848 privptr->p_buff_pages_perread));
1849 if (p_buff==NULL) {
1850 free_pages((unsigned long)privptr->p_buff_ccw,
1851 (int)pages_to_order_of_mag(privptr->
1852 p_buff_ccw_num));
1853 /* free the write pages */
1854 p_buf=privptr->p_buff_write;
1855 while (p_buf!=NULL) {
1856 free_pages(
1857 (unsigned long)p_buf->p_buffer,
1858 (int)pages_to_order_of_mag(
1859 privptr->p_buff_pages_perwrite));
1860 p_buf=p_buf->next;
1861 }
1862 /* free any read pages already alloc */
1863 p_buf=privptr->p_buff_read;
1864 while (p_buf!=NULL) {
1865 free_pages(
1866 (unsigned long)p_buf->p_buffer,
1867 (int)pages_to_order_of_mag(
1868 privptr->p_buff_pages_perread));
1869 p_buf=p_buf->next;
1870 }
1871 privptr->p_buff_ccw=NULL;
1872 privptr->p_buff_write=NULL;
1873 return -ENOMEM;
1874 }
1875 memset(p_buff, 0x00, privptr->p_env->read_size);
1876 p_buf = p_free_chain;
1877 privptr->p_buff_read = p_buf;
1878 p_free_chain = p_buf->next;
1879
1880 if (p_last_CCWB==NULL) {
1881 p_buf->next=NULL;
1882 real_TIC_address=0;
1883 p_last_CCWB=p_buf;
1884 }
1885 else {
1886 p_buf->next=p_first_CCWB;
1887 real_TIC_address=
1888 (addr_t)__pa(
1889 &p_first_CCWB -> read );
1890 }
1891
1892 p_first_CCWB=p_buf;
1893 /* save buff address */
1894 p_buf->p_buffer=(struct clawbuf *)p_buff;
1895 /* initialize read command */
1896 p_buf-> read.cmd_code = CCW_CLAW_CMD_READ;
1897 p_buf-> read.cda = (__u32)__pa(p_buff);
1898 p_buf-> read.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1899 p_buf-> read.count = privptr->p_env->read_size;
1900
1901 /* initialize read_h command */
1902 p_buf-> read_h.cmd_code = CCW_CLAW_CMD_READHEADER;
1903 p_buf-> read_h.cda =
1904 (__u32)__pa(&(p_buf->header));
1905 p_buf-> read_h.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1906 p_buf-> read_h.count = sizeof(struct clawh);
1907
1908 /* initialize Signal command */
1909 p_buf-> signal.cmd_code = CCW_CLAW_CMD_SIGNAL_SMOD;
1910 p_buf-> signal.cda =
1911 (__u32)__pa(&(pClawH->flag));
1912 p_buf-> signal.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1913 p_buf-> signal.count = 1;
1914
1915 /* initialize r_TIC_1 command */
1916 p_buf-> r_TIC_1.cmd_code = CCW_CLAW_CMD_TIC;
1917 p_buf-> r_TIC_1.cda = (__u32)real_TIC_address;
1918 p_buf-> r_TIC_1.flags = 0;
1919 p_buf-> r_TIC_1.count = 0;
1920
1921 /* initialize r_read_FF command */
1922 p_buf-> r_read_FF.cmd_code = CCW_CLAW_CMD_READFF;
1923 p_buf-> r_read_FF.cda =
1924 (__u32)__pa(&(pClawH->flag));
1925 p_buf-> r_read_FF.flags =
1926 CCW_FLAG_SLI | CCW_FLAG_CC | CCW_FLAG_PCI;
1927 p_buf-> r_read_FF.count = 1;
1928
1929 /* initialize r_TIC_2 */
1930 memcpy(&p_buf->r_TIC_2, &p_buf->r_TIC_1,
1931 sizeof(struct ccw1));
1932
1933 /* initialize Header */
1934 p_buf->header.length=0xffff;
1935 p_buf->header.opcode=0xff;
1936 p_buf->header.flag=CLAW_PENDING;
1937
1938 } /* For read_buffers */
1939 } /* read_size >= PAGE_SIZE */
1940 } /* pBuffread = NULL */
1941 add_claw_reads( dev ,p_first_CCWB , p_last_CCWB);
1942 privptr->buffs_alloc = 1;
1943
1944 return 0;
1945} /* end of init_ccw_bk */
1946
1947/*-------------------------------------------------------------------*
1948* *
1949* probe_error *
1950* *
1951*--------------------------------------------------------------------*/
1952
1953static void
1954probe_error( struct ccwgroup_device *cgdev)
1955{
1956 struct claw_privbk *privptr;
1957
1958 CLAW_DBF_TEXT(4, trace, "proberr");
1959 privptr = dev_get_drvdata(&cgdev->dev);
1960 if (privptr != NULL) {
1961 dev_set_drvdata(&cgdev->dev, NULL);
1962 kfree(privptr->p_env);
1963 kfree(privptr->p_mtc_envelope);
1964 kfree(privptr);
1965 }
1966} /* probe_error */
1967
1968/*-------------------------------------------------------------------*
1969* claw_process_control *
1970* *
1971* *
1972*--------------------------------------------------------------------*/
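/*
 * Handles an inbound CLAW control packet.  When packing is active the
 * control block sits 4 bytes into the buffer, behind the packing header,
 * hence the &p_buf->buffer[4] copy below; the switch statement then drives
 * the system-validate/connection handshake for the command it contains.
 */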
1973
1974static int
1975claw_process_control( struct net_device *dev, struct ccwbk * p_ccw)
1976{
1977
1978 struct clawbuf *p_buf;
1979 struct clawctl ctlbk;
1980 struct clawctl *p_ctlbk;
1981 char temp_host_name[8];
1982 char temp_ws_name[8];
1983 struct claw_privbk *privptr;
1984 struct claw_env *p_env;
1985 struct sysval *p_sysval;
1986 struct conncmd *p_connect=NULL;
1987 int rc;
1988 struct chbk *p_ch = NULL;
1989 struct device *tdev;
1990 CLAW_DBF_TEXT(2, setup, "clw_cntl");
1991 udelay(1000); /* Wait a ms for the control packets to
1992 * catch up to each other */
1993 privptr = dev->ml_priv;
1994 p_env=privptr->p_env;
1995 tdev = &privptr->channel[READ_CHANNEL].cdev->dev;
1996 memcpy( &temp_host_name, p_env->host_name, 8);
1997 memcpy( &temp_ws_name, p_env->adapter_name , 8);
1998 dev_info(tdev, "%s: CLAW device %.8s: "
1999 "Received Control Packet\n",
2000 dev->name, temp_ws_name);
2001 if (privptr->release_pend==1) {
2002 return 0;
2003 }
2004 p_buf=p_ccw->p_buffer;
2005 p_ctlbk=&ctlbk;
2006 if (p_env->packing == DO_PACKED) { /* packing in progress?*/
2007 memcpy(p_ctlbk, &p_buf->buffer[4], sizeof(struct clawctl));
2008 } else {
2009 memcpy(p_ctlbk, p_buf, sizeof(struct clawctl));
2010 }
2011 switch (p_ctlbk->command)
2012 {
2013 case SYSTEM_VALIDATE_REQUEST:
2014 if (p_ctlbk->version != CLAW_VERSION_ID) {
2015 claw_snd_sys_validate_rsp(dev, p_ctlbk,
2016 CLAW_RC_WRONG_VERSION);
2017 dev_warn(tdev, "The communication peer of %s"
2018 " uses an incorrect API version %d\n",
2019 dev->name, p_ctlbk->version);
2020 }
2021 p_sysval = (struct sysval *)&(p_ctlbk->data);
2022 dev_info(tdev, "%s: Recv Sys Validate Request: "
2023 "Vers=%d,link_id=%d,Corr=%d,WS name=%.8s,"
2024 "Host name=%.8s\n",
2025 dev->name, p_ctlbk->version,
2026 p_ctlbk->linkid,
2027 p_ctlbk->correlator,
2028 p_sysval->WS_name,
2029 p_sysval->host_name);
2030 if (memcmp(temp_host_name, p_sysval->host_name, 8)) {
2031 claw_snd_sys_validate_rsp(dev, p_ctlbk,
2032 CLAW_RC_NAME_MISMATCH);
2033 CLAW_DBF_TEXT(2, setup, "HSTBAD");
2034 CLAW_DBF_TEXT_(2, setup, "%s", p_sysval->host_name);
2035 CLAW_DBF_TEXT_(2, setup, "%s", temp_host_name);
2036 dev_warn(tdev,
2037 "Host name %s for %s does not match the"
2038 " remote adapter name %s\n",
2039 p_sysval->host_name,
2040 dev->name,
2041 temp_host_name);
2042 }
2043 if (memcmp(temp_ws_name, p_sysval->WS_name, 8)) {
2044 claw_snd_sys_validate_rsp(dev, p_ctlbk,
2045 CLAW_RC_NAME_MISMATCH);
2046 CLAW_DBF_TEXT(2, setup, "WSNBAD");
2047 CLAW_DBF_TEXT_(2, setup, "%s", p_sysval->WS_name);
2048 CLAW_DBF_TEXT_(2, setup, "%s", temp_ws_name);
2049 dev_warn(tdev, "Adapter name %s for %s does not match"
2050 " the remote host name %s\n",
2051 p_sysval->WS_name,
2052 dev->name,
2053 temp_ws_name);
2054 }
2055 if ((p_sysval->write_frame_size < p_env->write_size) &&
2056 (p_env->packing == 0)) {
2057 claw_snd_sys_validate_rsp(dev, p_ctlbk,
2058 CLAW_RC_HOST_RCV_TOO_SMALL);
2059 dev_warn(tdev,
2060 "The local write buffer is smaller than the"
2061 " remote read buffer\n");
2062 CLAW_DBF_TEXT(2, setup, "wrtszbad");
2063 }
2064 if ((p_sysval->read_frame_size < p_env->read_size) &&
2065 (p_env->packing == 0)) {
2066 claw_snd_sys_validate_rsp(dev, p_ctlbk,
2067 CLAW_RC_HOST_RCV_TOO_SMALL);
2068 dev_warn(tdev,
2069 "The local read buffer is smaller than the"
2070 " remote write buffer\n");
2071 CLAW_DBF_TEXT(2, setup, "rdsizbad");
2072 }
2073 claw_snd_sys_validate_rsp(dev, p_ctlbk, 0);
2074 dev_info(tdev,
2075 "CLAW device %.8s: System validate"
2076 " completed.\n", temp_ws_name);
2077 dev_info(tdev,
2078 "%s: System validate: read size=%d write size=%d\n",
2079 dev->name, p_sysval->read_frame_size,
2080 p_sysval->write_frame_size);
2081 privptr->system_validate_comp = 1;
2082 if (strncmp(p_env->api_type, WS_APPL_NAME_PACKED, 6) == 0)
2083 p_env->packing = PACKING_ASK;
2084 claw_strt_conn_req(dev);
2085 break;
2086 case SYSTEM_VALIDATE_RESPONSE:
2087 p_sysval = (struct sysval *)&(p_ctlbk->data);
2088 dev_info(tdev,
2089 "Settings for %s validated (version=%d, "
2090 "remote device=%d, rc=%d, adapter name=%.8s, "
2091 "host name=%.8s)\n",
2092 dev->name,
2093 p_ctlbk->version,
2094 p_ctlbk->correlator,
2095 p_ctlbk->rc,
2096 p_sysval->WS_name,
2097 p_sysval->host_name);
2098 switch (p_ctlbk->rc) {
2099 case 0:
2100 dev_info(tdev, "%s: CLAW device "
2101 "%.8s: System validate completed.\n",
2102 dev->name, temp_ws_name);
2103 if (privptr->system_validate_comp == 0)
2104 claw_strt_conn_req(dev);
2105 privptr->system_validate_comp = 1;
2106 break;
2107 case CLAW_RC_NAME_MISMATCH:
2108 dev_warn(tdev, "Validating %s failed because of"
2109 " a host or adapter name mismatch\n",
2110 dev->name);
2111 break;
2112 case CLAW_RC_WRONG_VERSION:
2113 dev_warn(tdev, "Validating %s failed because of a"
2114 " version conflict\n",
2115 dev->name);
2116 break;
2117 case CLAW_RC_HOST_RCV_TOO_SMALL:
2118 dev_warn(tdev, "Validating %s failed because of a"
2119 " frame size conflict\n",
2120 dev->name);
2121 break;
2122 default:
2123 dev_warn(tdev, "The communication peer of %s rejected"
2124 " the connection\n",
2125 dev->name);
2126 break;
2127 }
2128 break;
2129
2130 case CONNECTION_REQUEST:
2131 p_connect = (struct conncmd *)&(p_ctlbk->data);
2132 dev_info(tdev, "%s: Recv Conn Req: Vers=%d,link_id=%d,"
2133 "Corr=%d,HOST appl=%.8s,WS appl=%.8s\n",
2134 dev->name,
2135 p_ctlbk->version,
2136 p_ctlbk->linkid,
2137 p_ctlbk->correlator,
2138 p_connect->host_name,
2139 p_connect->WS_name);
2140 if (privptr->active_link_ID != 0) {
2141 claw_snd_disc(dev, p_ctlbk);
2142 dev_info(tdev, "%s rejected a connection request"
2143 " because it is already active\n",
2144 dev->name);
2145 }
2146 if (p_ctlbk->linkid != 1) {
2147 claw_snd_disc(dev, p_ctlbk);
2148 dev_info(tdev, "%s rejected a request to open multiple"
2149 " connections\n",
2150 dev->name);
2151 }
2152 rc = find_link(dev, p_connect->host_name, p_connect->WS_name);
2153 if (rc != 0) {
2154 claw_snd_disc(dev, p_ctlbk);
2155 dev_info(tdev, "%s rejected a connection request"
2156 " because of a type mismatch\n",
2157 dev->name);
2158 }
2159 claw_send_control(dev,
2160 CONNECTION_CONFIRM, p_ctlbk->linkid,
2161 p_ctlbk->correlator,
2162 0, p_connect->host_name,
2163 p_connect->WS_name);
2164 if (p_env->packing == PACKING_ASK) {
2165 p_env->packing = PACK_SEND;
2166 claw_snd_conn_req(dev, 0);
2167 }
2168 dev_info(tdev, "%s: CLAW device %.8s: Connection "
2169 "completed link_id=%d.\n",
2170 dev->name, temp_ws_name,
2171 p_ctlbk->linkid);
2172 privptr->active_link_ID = p_ctlbk->linkid;
2173 p_ch = &privptr->channel[WRITE_CHANNEL];
2174 wake_up(&p_ch->wait); /* wake up claw_open ( WRITE) */
2175 break;
2176 case CONNECTION_RESPONSE:
2177 p_connect = (struct conncmd *)&(p_ctlbk->data);
2178 dev_info(tdev, "%s: Recv Conn Resp: Vers=%d,link_id=%d,"
2179 "Corr=%d,RC=%d,Host appl=%.8s, WS appl=%.8s\n",
2180 dev->name,
2181 p_ctlbk->version,
2182 p_ctlbk->linkid,
2183 p_ctlbk->correlator,
2184 p_ctlbk->rc,
2185 p_connect->host_name,
2186 p_connect->WS_name);
2187
2188 if (p_ctlbk->rc != 0) {
2189 dev_warn(tdev, "The communication peer of %s rejected"
2190 " a connection request\n",
2191 dev->name);
2192 return 1;
2193 }
2194 rc = find_link(dev,
2195 p_connect->host_name, p_connect->WS_name);
2196 if (rc != 0) {
2197 claw_snd_disc(dev, p_ctlbk);
2198 dev_warn(tdev, "The communication peer of %s"
2199 " rejected a connection "
2200 "request because of a type mismatch\n",
2201 dev->name);
2202 }
2203 /* should be until CONNECTION_CONFIRM */
2204 privptr->active_link_ID = -(p_ctlbk->linkid);
2205 break;
2206 case CONNECTION_CONFIRM:
2207 p_connect = (struct conncmd *)&(p_ctlbk->data);
2208 dev_info(tdev,
2209 "%s: Recv Conn Confirm:Vers=%d,link_id=%d,"
2210 "Corr=%d,Host appl=%.8s,WS appl=%.8s\n",
2211 dev->name,
2212 p_ctlbk->version,
2213 p_ctlbk->linkid,
2214 p_ctlbk->correlator,
2215 p_connect->host_name,
2216 p_connect->WS_name);
2217 if (p_ctlbk->linkid == -(privptr->active_link_ID)) {
2218 privptr->active_link_ID = p_ctlbk->linkid;
2219 if (p_env->packing > PACKING_ASK) {
2220 dev_info(tdev,
2221 "%s: Confirmed Now packing\n", dev->name);
2222 p_env->packing = DO_PACKED;
2223 }
2224 p_ch = &privptr->channel[WRITE_CHANNEL];
2225 wake_up(&p_ch->wait);
2226 } else {
2227 dev_warn(tdev, "Activating %s failed because of"
2228 " an incorrect link ID=%d\n",
2229 dev->name, p_ctlbk->linkid);
2230 claw_snd_disc(dev, p_ctlbk);
2231 }
2232 break;
2233 case DISCONNECT:
2234 dev_info(tdev, "%s: Disconnect: "
2235 "Vers=%d,link_id=%d,Corr=%d\n",
2236 dev->name, p_ctlbk->version,
2237 p_ctlbk->linkid, p_ctlbk->correlator);
2238 if ((p_ctlbk->linkid == 2) &&
2239 (p_env->packing == PACK_SEND)) {
2240 privptr->active_link_ID = 1;
2241 p_env->packing = DO_PACKED;
2242 } else
2243 privptr->active_link_ID = 0;
2244 break;
2245 case CLAW_ERROR:
2246 dev_warn(tdev, "The communication peer of %s failed\n",
2247 dev->name);
2248 break;
2249 default:
2250 dev_warn(tdev, "The communication peer of %s sent"
2251 " an unknown command code\n",
2252 dev->name);
2253 break;
2254 }
2255
2256 return 0;
2257} /* end of claw_process_control */
2258
2259
2260/*-------------------------------------------------------------------*
2261* claw_send_control *
2262* *
2263*--------------------------------------------------------------------*/
2264
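/*
 * Build a clawctl record of the given type in privptr->ctl_bk, fill in
 * the system-validate or connection specific fields, copy the record
 * into a freshly allocated skb and pass it to claw_hw_tx() for output
 * on the write channel.
 */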
2265static int
2266claw_send_control(struct net_device *dev, __u8 type, __u8 link,
2267 __u8 correlator, __u8 rc, char *local_name, char *remote_name)
2268{
2269 struct claw_privbk *privptr;
2270 struct clawctl *p_ctl;
2271 struct sysval *p_sysval;
2272 struct conncmd *p_connect;
2273 struct sk_buff *skb;
2274
2275 CLAW_DBF_TEXT(2, setup, "sndcntl");
2276 privptr = dev->ml_priv;
2277 p_ctl=(struct clawctl *)&privptr->ctl_bk;
2278
2279 p_ctl->command=type;
2280 p_ctl->version=CLAW_VERSION_ID;
2281 p_ctl->linkid=link;
2282 p_ctl->correlator=correlator;
2283 p_ctl->rc=rc;
2284
2285 p_sysval=(struct sysval *)&p_ctl->data;
2286 p_connect=(struct conncmd *)&p_ctl->data;
2287
2288 switch (p_ctl->command) {
2289 case SYSTEM_VALIDATE_REQUEST:
2290 case SYSTEM_VALIDATE_RESPONSE:
2291 memcpy(&p_sysval->host_name, local_name, 8);
2292 memcpy(&p_sysval->WS_name, remote_name, 8);
2293 if (privptr->p_env->packing > 0) {
2294 p_sysval->read_frame_size = DEF_PACK_BUFSIZE;
2295 p_sysval->write_frame_size = DEF_PACK_BUFSIZE;
2296 } else {
2297 /* how big is the biggest group of packets */
2298 p_sysval->read_frame_size =
2299 privptr->p_env->read_size;
2300 p_sysval->write_frame_size =
2301 privptr->p_env->write_size;
2302 }
2303 memset(&p_sysval->reserved, 0x00, 4);
2304 break;
2305 case CONNECTION_REQUEST:
2306 case CONNECTION_RESPONSE:
2307 case CONNECTION_CONFIRM:
2308 case DISCONNECT:
2309 memcpy(&p_sysval->host_name, local_name, 8);
2310 memcpy(&p_sysval->WS_name, remote_name, 8);
2311 if (privptr->p_env->packing > 0) {
2312 /* How big is the biggest packet */
2313 p_connect->reserved1[0]=CLAW_FRAME_SIZE;
2314 p_connect->reserved1[1]=CLAW_FRAME_SIZE;
2315 } else {
2316 memset(&p_connect->reserved1, 0x00, 4);
2317 memset(&p_connect->reserved2, 0x00, 4);
2318 }
2319 break;
2320 default:
2321 break;
2322 }
2323
2324 /* write Control Record to the device */
2325
2326
2327 skb = dev_alloc_skb(sizeof(struct clawctl));
2328 if (!skb) {
2329 return -ENOMEM;
2330 }
2331 memcpy(skb_put(skb, sizeof(struct clawctl)),
2332 p_ctl, sizeof(struct clawctl));
2333 if (privptr->p_env->packing >= PACK_SEND)
2334 claw_hw_tx(skb, dev, 1);
2335 else
2336 claw_hw_tx(skb, dev, 0);
2337 return 0;
2338} /* end of claw_send_control */
2339
2340/*-------------------------------------------------------------------*
2341* claw_snd_conn_req *
2342* *
2343*--------------------------------------------------------------------*/
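/*
 * Send a CONNECTION_REQUEST once system validation has completed. The
 * application names used depend on the negotiated mode:
 * WS_APPL_NAME_PACKED while packing is being asked for,
 * WS_APPL_NAME_IP_NAME once packing is to be used, and HOST_APPL_NAME
 * with the configured api_type for unpacked operation.
 */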
2344static int
2345claw_snd_conn_req(struct net_device *dev, __u8 link)
2346{
2347 int rc;
2348 struct claw_privbk *privptr = dev->ml_priv;
2349 struct clawctl *p_ctl;
2350
2351 CLAW_DBF_TEXT(2, setup, "snd_conn");
2352 rc = 1;
2353 p_ctl=(struct clawctl *)&privptr->ctl_bk;
2354 p_ctl->linkid = link;
2355 if ( privptr->system_validate_comp==0x00 ) {
2356 return rc;
2357 }
2358 if (privptr->p_env->packing == PACKING_ASK )
2359 rc=claw_send_control(dev, CONNECTION_REQUEST,0,0,0,
2360 WS_APPL_NAME_PACKED, WS_APPL_NAME_PACKED);
2361 if (privptr->p_env->packing == PACK_SEND) {
2362 rc=claw_send_control(dev, CONNECTION_REQUEST,0,0,0,
2363 WS_APPL_NAME_IP_NAME, WS_APPL_NAME_IP_NAME);
2364 }
2365 if (privptr->p_env->packing == 0)
2366 rc=claw_send_control(dev, CONNECTION_REQUEST,0,0,0,
2367 HOST_APPL_NAME, privptr->p_env->api_type);
2368 return rc;
2369
2370} /* end of claw_snd_conn_req */
2371
2372
2373/*-------------------------------------------------------------------*
2374* claw_snd_disc *
2375* *
2376*--------------------------------------------------------------------*/
2377
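/* Send a DISCONNECT control record for the link identified in p_ctl. */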
2378static int
2379claw_snd_disc(struct net_device *dev, struct clawctl * p_ctl)
2380{
2381 int rc;
2382 struct conncmd * p_connect;
2383
2384 CLAW_DBF_TEXT(2, setup, "snd_dsc");
2385 p_connect=(struct conncmd *)&p_ctl->data;
2386
2387 rc=claw_send_control(dev, DISCONNECT, p_ctl->linkid,
2388 p_ctl->correlator, 0,
2389 p_connect->host_name, p_connect->WS_name);
2390 return rc;
2391} /* end of claw_snd_disc */
2392
2393
2394/*-------------------------------------------------------------------*
2395* claw_snd_sys_validate_rsp *
2396* *
2397*--------------------------------------------------------------------*/
2398
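/*
 * Answer a system validate request with the locally configured host and
 * adapter names and the given return code.
 */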
2399static int
2400claw_snd_sys_validate_rsp(struct net_device *dev,
2401 struct clawctl *p_ctl, __u32 return_code)
2402{
2403 struct claw_env * p_env;
2404 struct claw_privbk *privptr;
2405 int rc;
2406
2407 CLAW_DBF_TEXT(2, setup, "chkresp");
2408 privptr = dev->ml_priv;
2409 p_env=privptr->p_env;
2410 rc=claw_send_control(dev, SYSTEM_VALIDATE_RESPONSE,
2411 p_ctl->linkid,
2412 p_ctl->correlator,
2413 return_code,
2414 p_env->host_name,
2415 p_env->adapter_name );
2416 return rc;
2417} /* end of claw_snd_sys_validate_rsp */
2418
2419/*-------------------------------------------------------------------*
2420* claw_strt_conn_req *
2421* *
2422*--------------------------------------------------------------------*/
2423
2424static int
2425claw_strt_conn_req(struct net_device *dev )
2426{
2427 int rc;
2428
2429 CLAW_DBF_TEXT(2, setup, "conn_req");
2430 rc=claw_snd_conn_req(dev, 1);
2431 return rc;
2432} /* end of claw_strt_conn_req */
2433
2434
2435
2436/*-------------------------------------------------------------------*
2437 * claw_stats *
2438 *-------------------------------------------------------------------*/
2439
2440static struct
2441net_device_stats *claw_stats(struct net_device *dev)
2442{
2443 struct claw_privbk *privptr;
2444
2445 CLAW_DBF_TEXT(4, trace, "stats");
2446 privptr = dev->ml_priv;
2447 return &privptr->stats;
2448} /* end of claw_stats */
2449
2450
2451/*-------------------------------------------------------------------*
2452* unpack_read *
2453* *
2454*--------------------------------------------------------------------*/
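/*
 * Walk the chain of completed read CCWs. Control records (link 0) go to
 * claw_process_control(); data frames are copied into the MTC envelope
 * until a frame arrives without the More-To-Come flag, at which point
 * the assembled message is wrapped in an skb and handed to netif_rx().
 * Processed CCWs are returned to the free read chain and the read
 * channel is restarted.
 *
 * When packing is active a single read buffer may carry several records
 * back to back; a rough sketch of the layout as this routine consumes
 * it (field names as used below, full definition in claw.h):
 *
 *   | struct clawph (len, flag, link_num, ...) | len bytes of data |
 *   | struct clawph ...                        | ...               |
 *
 * A record with len == 0 or a non-zero flag ends the buffer.
 */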
2455static void
2456unpack_read(struct net_device *dev )
2457{
2458 struct sk_buff *skb;
2459 struct claw_privbk *privptr;
2460 struct claw_env *p_env;
2461 struct ccwbk *p_this_ccw;
2462 struct ccwbk *p_first_ccw;
2463 struct ccwbk *p_last_ccw;
2464 struct clawph *p_packh;
2465 void *p_packd;
2466 struct clawctl *p_ctlrec=NULL;
2467 struct device *p_dev;
2468
2469 __u32 len_of_data;
2470 __u32 pack_off;
2471 __u8 link_num;
2472 __u8 mtc_this_frm=0;
2473 __u32 bytes_to_mov;
2474 int i=0;
2475 int p=0;
2476
2477 CLAW_DBF_TEXT(4, trace, "unpkread");
2478 p_first_ccw=NULL;
2479 p_last_ccw=NULL;
2480 p_packh=NULL;
2481 p_packd=NULL;
2482 privptr = dev->ml_priv;
2483
2484 p_dev = &privptr->channel[READ_CHANNEL].cdev->dev;
2485 p_env = privptr->p_env;
2486 p_this_ccw=privptr->p_read_active_first;
2487 while (p_this_ccw!=NULL && p_this_ccw->header.flag!=CLAW_PENDING) {
2488 pack_off = 0;
2489 p = 0;
2490 p_this_ccw->header.flag=CLAW_PENDING;
2491 privptr->p_read_active_first=p_this_ccw->next;
2492 p_this_ccw->next=NULL;
2493 p_packh = (struct clawph *)p_this_ccw->p_buffer;
2494 if ((p_env->packing == PACK_SEND) &&
2495 (p_packh->len == 32) &&
2496 (p_packh->link_num == 0)) { /* is it a packed ctl rec? */
2497 p_packh++; /* peek past pack header */
2498 p_ctlrec = (struct clawctl *)p_packh;
2499 p_packh--; /* un peek */
2500 if ((p_ctlrec->command == CONNECTION_RESPONSE) ||
2501 (p_ctlrec->command == CONNECTION_CONFIRM))
2502 p_env->packing = DO_PACKED;
2503 }
2504 if (p_env->packing == DO_PACKED)
2505 link_num=p_packh->link_num;
2506 else
2507 link_num=p_this_ccw->header.opcode / 8;
2508 if ((p_this_ccw->header.opcode & MORE_to_COME_FLAG)!=0) {
2509 mtc_this_frm=1;
2510 if (p_this_ccw->header.length!=
2511 privptr->p_env->read_size ) {
2512 dev_warn(p_dev,
2513 "The communication peer of %s"
2514 " sent a faulty"
2515 " frame of length %02x\n",
2516 dev->name, p_this_ccw->header.length);
2517 }
2518 }
2519
2520 if (privptr->mtc_skipping) {
2521 /*
2522 * We are skipping past a multi-frame message that we
2523 * could not process for some reason.
2524 * The first frame without the More-To-Come flag is
2525 * the last frame of the skipped message; once it is
2526 * seen, normal processing resumes.
2527 */
2528 /* in case of More-To-Come not set in this frame */
2529 if (mtc_this_frm==0) {
2530 privptr->mtc_skipping=0; /* Ok, the end */
2531 privptr->mtc_logical_link=-1;
2532 }
2533 goto NextFrame;
2534 }
2535
2536 if (link_num==0) {
2537 claw_process_control(dev, p_this_ccw);
2538 CLAW_DBF_TEXT(4, trace, "UnpkCntl");
2539 goto NextFrame;
2540 }
2541unpack_next:
2542 if (p_env->packing == DO_PACKED) {
2543 if (pack_off > p_env->read_size)
2544 goto NextFrame;
2545 p_packd = p_this_ccw->p_buffer+pack_off;
2546 p_packh = (struct clawph *) p_packd;
2547 if ((p_packh->len == 0) || /* done with this frame? */
2548 (p_packh->flag != 0))
2549 goto NextFrame;
2550 bytes_to_mov = p_packh->len;
2551 pack_off += bytes_to_mov+sizeof(struct clawph);
2552 p++;
2553 } else {
2554 bytes_to_mov=p_this_ccw->header.length;
2555 }
2556 if (privptr->mtc_logical_link<0) {
2557
2558 /*
2559 * We are starting a new envelope. If More-To-Come is set
2560 * in this frame we do not yet know the length of the
2561 * entire message, so it is assembled piecewise in the
2562 * large MTC envelope buffer until the final frame arrives.
2563 */
2564 privptr->mtc_offset=0;
2565 privptr->mtc_logical_link=link_num;
2566 }
2567
2568 if (bytes_to_mov > (MAX_ENVELOPE_SIZE- privptr->mtc_offset) ) {
2569 /* error */
2570 privptr->stats.rx_frame_errors++;
2571 goto NextFrame;
2572 }
2573 if (p_env->packing == DO_PACKED) {
2574 memcpy( privptr->p_mtc_envelope+ privptr->mtc_offset,
2575 p_packd+sizeof(struct clawph), bytes_to_mov);
2576
2577 } else {
2578 memcpy( privptr->p_mtc_envelope+ privptr->mtc_offset,
2579 p_this_ccw->p_buffer, bytes_to_mov);
2580 }
2581 if (mtc_this_frm==0) {
2582 len_of_data=privptr->mtc_offset+bytes_to_mov;
2583 skb=dev_alloc_skb(len_of_data);
2584 if (skb) {
2585 memcpy(skb_put(skb,len_of_data),
2586 privptr->p_mtc_envelope,
2587 len_of_data);
2588 skb->dev=dev;
2589 skb_reset_mac_header(skb);
2590 skb->protocol=htons(ETH_P_IP);
2591 skb->ip_summed=CHECKSUM_UNNECESSARY;
2592 privptr->stats.rx_packets++;
2593 privptr->stats.rx_bytes+=len_of_data;
2594 netif_rx(skb);
2595 }
2596 else {
2597 dev_info(p_dev, "Allocating a buffer for"
2598 " incoming data failed\n");
2599 privptr->stats.rx_dropped++;
2600 }
2601 privptr->mtc_offset=0;
2602 privptr->mtc_logical_link=-1;
2603 }
2604 else {
2605 privptr->mtc_offset+=bytes_to_mov;
2606 }
2607 if (p_env->packing == DO_PACKED)
2608 goto unpack_next;
2609NextFrame:
2610 /*
2611 * Remove ThisCCWblock from active read queue, and add it
2612 * to queue of free blocks to be reused.
2613 */
2614 i++;
2615 p_this_ccw->header.length=0xffff;
2616 p_this_ccw->header.opcode=0xff;
2617 /*
2618 * add this one to the free queue for later reuse
2619 */
2620 if (p_first_ccw==NULL) {
2621 p_first_ccw = p_this_ccw;
2622 }
2623 else {
2624 p_last_ccw->next = p_this_ccw;
2625 }
2626 p_last_ccw = p_this_ccw;
2627 /*
2628 * chain to next block on active read queue
2629 */
2630 p_this_ccw = privptr->p_read_active_first;
2631 CLAW_DBF_TEXT_(4, trace, "rxpkt %d", p);
2632 } /* end of while */
2633
2634 /* check validity */
2635
2636 CLAW_DBF_TEXT_(4, trace, "rxfrm %d", i);
2637 add_claw_reads(dev, p_first_ccw, p_last_ccw);
2638 claw_strt_read(dev, LOCK_YES);
2639 return;
2640} /* end of unpack_read */
2641
2642/*-------------------------------------------------------------------*
2643* claw_strt_read *
2644* *
2645*--------------------------------------------------------------------*/
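/*
 * Restart I/O on the read subchannel if it is not already active: set
 * the CLAW signal flag to CLAW_IDLE or CLAW_BUSY depending on whether
 * read/write work is still outstanding, then issue the first read CCW
 * with ccw_device_start(), taking the ccw device lock when the caller
 * requests it (LOCK_YES).
 */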
2646static void
2647claw_strt_read (struct net_device *dev, int lock )
2648{
2649 int rc = 0;
2650 __u32 parm;
2651 unsigned long saveflags = 0;
2652 struct claw_privbk *privptr = dev->ml_priv;
2653 struct ccwbk*p_ccwbk;
2654 struct chbk *p_ch;
2655 struct clawh *p_clawh;
2656 p_ch = &privptr->channel[READ_CHANNEL];
2657
2658 CLAW_DBF_TEXT(4, trace, "StRdNter");
2659 p_clawh=(struct clawh *)privptr->p_claw_signal_blk;
2660 p_clawh->flag=CLAW_IDLE; /* 0x00 */
2661
2662 if ((privptr->p_write_active_first!=NULL &&
2663 privptr->p_write_active_first->header.flag!=CLAW_PENDING) ||
2664 (privptr->p_read_active_first!=NULL &&
2665 privptr->p_read_active_first->header.flag!=CLAW_PENDING )) {
2666 p_clawh->flag=CLAW_BUSY; /* 0xff */
2667 }
2668 if (lock==LOCK_YES) {
2669 spin_lock_irqsave(get_ccwdev_lock(p_ch->cdev), saveflags);
2670 }
2671 if (test_and_set_bit(0, (void *)&p_ch->IO_active) == 0) {
2672 CLAW_DBF_TEXT(4, trace, "HotRead");
2673 p_ccwbk=privptr->p_read_active_first;
2674 parm = (unsigned long) p_ch;
2675 rc = ccw_device_start (p_ch->cdev, &p_ccwbk->read, parm,
2676 0xff, 0);
2677 if (rc != 0) {
2678 ccw_check_return_code(p_ch->cdev, rc);
2679 }
2680 }
2681 else {
2682 CLAW_DBF_TEXT(2, trace, "ReadAct");
2683 }
2684
2685 if (lock==LOCK_YES) {
2686 spin_unlock_irqrestore(get_ccwdev_lock(p_ch->cdev), saveflags);
2687 }
2688 CLAW_DBF_TEXT(4, trace, "StRdExit");
2689 return;
2690} /* end of claw_strt_read */
2691
2692/*-------------------------------------------------------------------*
2693* claw_strt_out_IO *
2694* *
2695*--------------------------------------------------------------------*/
2696
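/*
 * Start I/O on the write subchannel for the first queued write CCW,
 * provided the channel is idle and the device is not stopping; errors
 * from ccw_device_start() are reported via ccw_check_return_code().
 */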
2697static void
2698claw_strt_out_IO( struct net_device *dev )
2699{
2700 int rc = 0;
2701 unsigned long parm;
2702 struct claw_privbk *privptr;
2703 struct chbk *p_ch;
2704 struct ccwbk *p_first_ccw;
2705
2706 if (!dev) {
2707 return;
2708 }
2709 privptr = (struct claw_privbk *)dev->ml_priv;
2710 p_ch = &privptr->channel[WRITE_CHANNEL];
2711
2712 CLAW_DBF_TEXT(4, trace, "strt_io");
2713 p_first_ccw=privptr->p_write_active_first;
2714
2715 if (p_ch->claw_state == CLAW_STOP)
2716 return;
2717 if (p_first_ccw == NULL) {
2718 return;
2719 }
2720 if (test_and_set_bit(0, (void *)&p_ch->IO_active) == 0) {
2721 parm = (unsigned long) p_ch;
2722 CLAW_DBF_TEXT(2, trace, "StWrtIO");
2723 rc = ccw_device_start(p_ch->cdev, &p_first_ccw->write, parm,
2724 0xff, 0);
2725 if (rc != 0) {
2726 ccw_check_return_code(p_ch->cdev, rc);
2727 }
2728 }
2729 dev->trans_start = jiffies;
2730 return;
2731} /* end of claw_strt_out_IO */
2732
2733/*-------------------------------------------------------------------*
2734* Free write buffers *
2735* *
2736*--------------------------------------------------------------------*/
2737
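/*
 * Move completed write CCWs from the active write queue back to the
 * free chain, update the transmit statistics and clear the no-buffer
 * busy bit once write buffers are available again.
 */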
2738static void
2739claw_free_wrt_buf( struct net_device *dev )
2740{
2741
2742 struct claw_privbk *privptr = (struct claw_privbk *)dev->ml_priv;
2743 struct ccwbk*p_this_ccw;
2744 struct ccwbk*p_next_ccw;
2745
2746 CLAW_DBF_TEXT(4, trace, "freewrtb");
2747 /* scan the write queue to free any completed write packets */
2748 p_this_ccw=privptr->p_write_active_first;
2749 while ( (p_this_ccw!=NULL) && (p_this_ccw->header.flag!=CLAW_PENDING))
2750 {
2751 p_next_ccw = p_this_ccw->next;
2752 if (((p_next_ccw!=NULL) &&
2753 (p_next_ccw->header.flag!=CLAW_PENDING)) ||
2754 ((p_this_ccw == privptr->p_write_active_last) &&
2755 (p_this_ccw->header.flag!=CLAW_PENDING))) {
2756 /* The next CCW is OK or this is */
2757 /* the last CCW...free it @A1A */
2758 privptr->p_write_active_first=p_this_ccw->next;
2759 p_this_ccw->header.flag=CLAW_PENDING;
2760 p_this_ccw->next=privptr->p_write_free_chain;
2761 privptr->p_write_free_chain=p_this_ccw;
2762 ++privptr->write_free_count;
2763 privptr->stats.tx_bytes+= p_this_ccw->write.count;
2764 p_this_ccw=privptr->p_write_active_first;
2765 privptr->stats.tx_packets++;
2766 }
2767 else {
2768 break;
2769 }
2770 }
2771 if (privptr->write_free_count!=0) {
2772 claw_clearbit_busy(TB_NOBUFFER,dev);
2773 }
2774 /* whole chain removed? */
2775 if (privptr->p_write_active_first==NULL) {
2776 privptr->p_write_active_last=NULL;
2777 }
2778 CLAW_DBF_TEXT_(4, trace, "FWC=%d", privptr->write_free_count);
2779 return;
2780}
2781
2782/*-------------------------------------------------------------------*
2783* claw free netdevice *
2784* *
2785*--------------------------------------------------------------------*/
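/*
 * Detach and optionally free the net_device: close it if it is still
 * running, clear the read channel's pointer to it and, when built as a
 * module and free_dev is set, release the net_device itself.
 */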
2786static void
2787claw_free_netdevice(struct net_device * dev, int free_dev)
2788{
2789 struct claw_privbk *privptr;
2790
2791 CLAW_DBF_TEXT(2, setup, "free_dev");
2792 if (!dev)
2793 return;
2794 CLAW_DBF_TEXT_(2, setup, "%s", dev->name);
2795 privptr = dev->ml_priv;
2796 if (dev->flags & IFF_RUNNING)
2797 claw_release(dev);
2798 if (privptr) {
2799 privptr->channel[READ_CHANNEL].ndev = NULL; /* say it's free */
2800 }
2801 dev->ml_priv = NULL;
2802#ifdef MODULE
2803 if (free_dev) {
2804 free_netdev(dev);
2805 }
2806#endif
2807 CLAW_DBF_TEXT(2, setup, "free_ok");
2808}
2809
2810/**
2811 * Claw init netdevice
2812 * Initialize all fields of the net device except the name and the
2813 * channel structs.
2814 */
2815static const struct net_device_ops claw_netdev_ops = {
2816 .ndo_open = claw_open,
2817 .ndo_stop = claw_release,
2818 .ndo_get_stats = claw_stats,
2819 .ndo_start_xmit = claw_tx,
2820 .ndo_change_mtu = claw_change_mtu,
2821};
2822
2823static void
2824claw_init_netdevice(struct net_device * dev)
2825{
2826 CLAW_DBF_TEXT(2, setup, "init_dev");
2827 CLAW_DBF_TEXT_(2, setup, "%s", dev->name);
2828 dev->mtu = CLAW_DEFAULT_MTU_SIZE;
2829 dev->hard_header_len = 0;
2830 dev->addr_len = 0;
2831 dev->type = ARPHRD_SLIP;
2832 dev->tx_queue_len = 1300;
2833 dev->flags = IFF_POINTOPOINT | IFF_NOARP;
2834 dev->netdev_ops = &claw_netdev_ops;
2835 CLAW_DBF_TEXT(2, setup, "initok");
2836 return;
2837}
2838
2839/**
2840 * Init a new channel in the privptr->channel[i].
2841 *
2842 * @param cdev The ccw_device to be added.
2843 *
2844 * @return 0 on success, !0 on error.
2845 */
2846static int
2847add_channel(struct ccw_device *cdev,int i,struct claw_privbk *privptr)
2848{
2849 struct chbk *p_ch;
2850 struct ccw_dev_id dev_id;
2851
2852 CLAW_DBF_TEXT_(2, setup, "%s", dev_name(&cdev->dev));
2853 privptr->channel[i].flag = i+1; /* Read is 1 Write is 2 */
2854 p_ch = &privptr->channel[i];
2855 p_ch->cdev = cdev;
2856 snprintf(p_ch->id, CLAW_ID_SIZE, "cl-%s", dev_name(&cdev->dev));
2857 ccw_device_get_id(cdev, &dev_id);
2858 p_ch->devno = dev_id.devno;
2859 if ((p_ch->irb = kzalloc(sizeof (struct irb),GFP_KERNEL)) == NULL) {
2860 return -ENOMEM;
2861 }
2862 return 0;
2863}
2864
2865
2866/**
2867 *
2868 * Setup an interface.
2869 *
2870 * @param cgdev Device to be setup.
2871 *
2872 * @returns 0 on success, !0 on failure.
2873 */
2874static int
2875claw_new_device(struct ccwgroup_device *cgdev)
2876{
2877 struct claw_privbk *privptr;
2878 struct claw_env *p_env;
2879 struct net_device *dev;
2880 int ret;
2881 struct ccw_dev_id dev_id;
2882
2883 dev_info(&cgdev->dev, "add for %s\n",
2884 dev_name(&cgdev->cdev[READ_CHANNEL]->dev));
2885 CLAW_DBF_TEXT(2, setup, "new_dev");
2886 privptr = dev_get_drvdata(&cgdev->dev);
2887 dev_set_drvdata(&cgdev->cdev[READ_CHANNEL]->dev, privptr);
2888 dev_set_drvdata(&cgdev->cdev[WRITE_CHANNEL]->dev, privptr);
2889 if (!privptr)
2890 return -ENODEV;
2891 p_env = privptr->p_env;
2892 ccw_device_get_id(cgdev->cdev[READ_CHANNEL], &dev_id);
2893 p_env->devno[READ_CHANNEL] = dev_id.devno;
2894 ccw_device_get_id(cgdev->cdev[WRITE_CHANNEL], &dev_id);
2895 p_env->devno[WRITE_CHANNEL] = dev_id.devno;
2896 ret = add_channel(cgdev->cdev[0], 0, privptr);
2897 if (ret == 0)
2898 ret = add_channel(cgdev->cdev[1], 1, privptr);
2899 if (ret != 0) {
2900 dev_warn(&cgdev->dev, "Creating a CLAW group device"
2901 " failed with error code %d\n", ret);
2902 goto out;
2903 }
2904 ret = ccw_device_set_online(cgdev->cdev[READ_CHANNEL]);
2905 if (ret != 0) {
2906 dev_warn(&cgdev->dev,
2907 "Setting the read subchannel online"
2908 " failed with error code %d\n", ret);
2909 goto out;
2910 }
2911 ret = ccw_device_set_online(cgdev->cdev[WRITE_CHANNEL]);
2912 if (ret != 0) {
2913 dev_warn(&cgdev->dev,
2914 "Setting the write subchannel online "
2915 "failed with error code %d\n", ret);
2916 goto out;
2917 }
2918 dev = alloc_netdev(0, "claw%d", claw_init_netdevice);
2919 if (!dev) {
2920 dev_warn(&cgdev->dev,
2921 "Activating the CLAW device failed\n");
2922 goto out;
2923 }
2924 dev->ml_priv = privptr;
2925 dev_set_drvdata(&cgdev->dev, privptr);
2926 dev_set_drvdata(&cgdev->cdev[READ_CHANNEL]->dev, privptr);
2927 dev_set_drvdata(&cgdev->cdev[WRITE_CHANNEL]->dev, privptr);
2928 /* sysfs magic */
2929 SET_NETDEV_DEV(dev, &cgdev->dev);
2930 if (register_netdev(dev) != 0) {
2931 claw_free_netdevice(dev, 1);
2932 CLAW_DBF_TEXT(2, trace, "regfail");
2933 goto out;
2934 }
2935 dev->flags &=~IFF_RUNNING;
2936 if (privptr->buffs_alloc == 0) {
2937 ret=init_ccw_bk(dev);
2938 if (ret !=0) {
2939 unregister_netdev(dev);
2940 claw_free_netdevice(dev,1);
2941 CLAW_DBF_TEXT(2, trace, "ccwmem");
2942 goto out;
2943 }
2944 }
2945 privptr->channel[READ_CHANNEL].ndev = dev;
2946 privptr->channel[WRITE_CHANNEL].ndev = dev;
2947 privptr->p_env->ndev = dev;
2948
2949 dev_info(&cgdev->dev, "%s:readsize=%d writesize=%d "
2950 "readbuffer=%d writebuffer=%d read=0x%04x write=0x%04x\n",
2951 dev->name, p_env->read_size,
2952 p_env->write_size, p_env->read_buffers,
2953 p_env->write_buffers, p_env->devno[READ_CHANNEL],
2954 p_env->devno[WRITE_CHANNEL]);
2955 dev_info(&cgdev->dev, "%s: host_name:%.8s, adapter_name:"
2956 "%.8s, api_type:%.8s\n",
2957 dev->name, p_env->host_name,
2958 p_env->adapter_name , p_env->api_type);
2959 return 0;
2960out:
2961 ccw_device_set_offline(cgdev->cdev[1]);
2962 ccw_device_set_offline(cgdev->cdev[0]);
2963 return -ENODEV;
2964}
2965
2966static void
2967claw_purge_skb_queue(struct sk_buff_head *q)
2968{
2969 struct sk_buff *skb;
2970
2971 CLAW_DBF_TEXT(4, trace, "purgque");
2972 while ((skb = skb_dequeue(q))) {
2973 atomic_dec(&skb->users);
2974 dev_kfree_skb_any(skb);
2975 }
2976}
2977
2978/**
2979 * Shutdown an interface.
2980 *
2981 * @param cgdev Device to be shut down.
2982 *
2983 * @returns 0 on success, !0 on failure.
2984 */
2985static int
2986claw_shutdown_device(struct ccwgroup_device *cgdev)
2987{
2988 struct claw_privbk *priv;
2989 struct net_device *ndev;
2990 int ret = 0;
2991
2992 CLAW_DBF_TEXT_(2, setup, "%s", dev_name(&cgdev->dev));
2993 priv = dev_get_drvdata(&cgdev->dev);
2994 if (!priv)
2995 return -ENODEV;
2996 ndev = priv->channel[READ_CHANNEL].ndev;
2997 if (ndev) {
2998 /* Close the device */
2999 dev_info(&cgdev->dev, "%s: shutting down\n",
3000 ndev->name);
3001 if (ndev->flags & IFF_RUNNING)
3002 ret = claw_release(ndev);
3003 ndev->flags &=~IFF_RUNNING;
3004 unregister_netdev(ndev);
3005 ndev->ml_priv = NULL; /* cgdev data, not ndev's to free */
3006 claw_free_netdevice(ndev, 1);
3007 priv->channel[READ_CHANNEL].ndev = NULL;
3008 priv->channel[WRITE_CHANNEL].ndev = NULL;
3009 priv->p_env->ndev = NULL;
3010 }
3011 ccw_device_set_offline(cgdev->cdev[1]);
3012 ccw_device_set_offline(cgdev->cdev[0]);
3013 return ret;
3014}
3015
3016static void
3017claw_remove_device(struct ccwgroup_device *cgdev)
3018{
3019 struct claw_privbk *priv;
3020
3021 CLAW_DBF_TEXT_(2, setup, "%s", dev_name(&cgdev->dev));
3022 priv = dev_get_drvdata(&cgdev->dev);
3023 dev_info(&cgdev->dev, "will be removed.\n");
3024 if (cgdev->state == CCWGROUP_ONLINE)
3025 claw_shutdown_device(cgdev);
3026 kfree(priv->p_mtc_envelope);
3027 priv->p_mtc_envelope=NULL;
3028 kfree(priv->p_env);
3029 priv->p_env=NULL;
3030 kfree(priv->channel[0].irb);
3031 priv->channel[0].irb=NULL;
3032 kfree(priv->channel[1].irb);
3033 priv->channel[1].irb=NULL;
3034 kfree(priv);
3035 dev_set_drvdata(&cgdev->dev, NULL);
3036 dev_set_drvdata(&cgdev->cdev[READ_CHANNEL]->dev, NULL);
3037 dev_set_drvdata(&cgdev->cdev[WRITE_CHANNEL]->dev, NULL);
3038 put_device(&cgdev->dev);
3039
3040 return;
3041}
3042
3043
3044/*
3045 * sysfs attributes
3046 */
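/*
 * The attributes below edit the claw_env of a group device and are
 * meant to be written before the device is set online. An illustrative
 * sketch only, assuming the standard ccwgroup sysfs layout; <bus-id>
 * and the values shown are placeholders, not defaults:
 *
 *   echo SOMEHOST > /sys/bus/ccwgroup/devices/<bus-id>/host_name
 *   echo SOMEADPT > /sys/bus/ccwgroup/devices/<bus-id>/adapter_name
 *   echo 4        > /sys/bus/ccwgroup/devices/<bus-id>/read_buffer
 *   echo 4        > /sys/bus/ccwgroup/devices/<bus-id>/write_buffer
 */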
3047static ssize_t
3048claw_hname_show(struct device *dev, struct device_attribute *attr, char *buf)
3049{
3050 struct claw_privbk *priv;
3051 struct claw_env * p_env;
3052
3053 priv = dev_get_drvdata(dev);
3054 if (!priv)
3055 return -ENODEV;
3056 p_env = priv->p_env;
3057 return sprintf(buf, "%s\n",p_env->host_name);
3058}
3059
3060static ssize_t
3061claw_hname_write(struct device *dev, struct device_attribute *attr,
3062 const char *buf, size_t count)
3063{
3064 struct claw_privbk *priv;
3065 struct claw_env * p_env;
3066
3067 priv = dev_get_drvdata(dev);
3068 if (!priv)
3069 return -ENODEV;
3070 p_env = priv->p_env;
3071 if (count > MAX_NAME_LEN+1)
3072 return -EINVAL;
3073 memset(p_env->host_name, 0x20, MAX_NAME_LEN);
3074 strncpy(p_env->host_name,buf, count);
3075 p_env->host_name[count-1] = 0x20; /* replace trailing newline with blank */
3076 p_env->host_name[MAX_NAME_LEN] = 0x00;
3077 CLAW_DBF_TEXT(2, setup, "HstnSet");
3078 CLAW_DBF_TEXT_(2, setup, "%s", p_env->host_name);
3079
3080 return count;
3081}
3082
3083static DEVICE_ATTR(host_name, 0644, claw_hname_show, claw_hname_write);
3084
3085static ssize_t
3086claw_adname_show(struct device *dev, struct device_attribute *attr, char *buf)
3087{
3088 struct claw_privbk *priv;
3089 struct claw_env * p_env;
3090
3091 priv = dev_get_drvdata(dev);
3092 if (!priv)
3093 return -ENODEV;
3094 p_env = priv->p_env;
3095 return sprintf(buf, "%s\n", p_env->adapter_name);
3096}
3097
3098static ssize_t
3099claw_adname_write(struct device *dev, struct device_attribute *attr,
3100 const char *buf, size_t count)
3101{
3102 struct claw_privbk *priv;
3103 struct claw_env * p_env;
3104
3105 priv = dev_get_drvdata(dev);
3106 if (!priv)
3107 return -ENODEV;
3108 p_env = priv->p_env;
3109 if (count > MAX_NAME_LEN+1)
3110 return -EINVAL;
3111 memset(p_env->adapter_name, 0x20, MAX_NAME_LEN);
3112 strncpy(p_env->adapter_name,buf, count);
3113 p_env->adapter_name[count-1] = 0x20; /* replace trailing newline with blank */
3114 p_env->adapter_name[MAX_NAME_LEN] = 0x00;
3115 CLAW_DBF_TEXT(2, setup, "AdnSet");
3116 CLAW_DBF_TEXT_(2, setup, "%s", p_env->adapter_name);
3117
3118 return count;
3119}
3120
3121static DEVICE_ATTR(adapter_name, 0644, claw_adname_show, claw_adname_write);
3122
3123static ssize_t
3124claw_apname_show(struct device *dev, struct device_attribute *attr, char *buf)
3125{
3126 struct claw_privbk *priv;
3127 struct claw_env * p_env;
3128
3129 priv = dev_get_drvdata(dev);
3130 if (!priv)
3131 return -ENODEV;
3132 p_env = priv->p_env;
3133 return sprintf(buf, "%s\n",
3134 p_env->api_type);
3135}
3136
3137static ssize_t
3138claw_apname_write(struct device *dev, struct device_attribute *attr,
3139 const char *buf, size_t count)
3140{
3141 struct claw_privbk *priv;
3142 struct claw_env * p_env;
3143
3144 priv = dev_get_drvdata(dev);
3145 if (!priv)
3146 return -ENODEV;
3147 p_env = priv->p_env;
3148 if (count > MAX_NAME_LEN+1)
3149 return -EINVAL;
3150 memset(p_env->api_type, 0x20, MAX_NAME_LEN);
3151 strncpy(p_env->api_type,buf, count);
3152 p_env->api_type[count-1] = 0x20; /* replace trailing newline with blank */
3153 p_env->api_type[MAX_NAME_LEN] = 0x00;
3154 if(strncmp(p_env->api_type,WS_APPL_NAME_PACKED,6) == 0) {
3155 p_env->read_size=DEF_PACK_BUFSIZE;
3156 p_env->write_size=DEF_PACK_BUFSIZE;
3157 p_env->packing=PACKING_ASK;
3158 CLAW_DBF_TEXT(2, setup, "PACKING");
3159 }
3160 else {
3161 p_env->packing=0;
3162 p_env->read_size=CLAW_FRAME_SIZE;
3163 p_env->write_size=CLAW_FRAME_SIZE;
3164 CLAW_DBF_TEXT(2, setup, "ApiSet");
3165 }
3166 CLAW_DBF_TEXT_(2, setup, "%s", p_env->api_type);
3167 return count;
3168}
3169
3170static DEVICE_ATTR(api_type, 0644, claw_apname_show, claw_apname_write);
3171
3172static ssize_t
3173claw_wbuff_show(struct device *dev, struct device_attribute *attr, char *buf)
3174{
3175 struct claw_privbk *priv;
3176 struct claw_env * p_env;
3177
3178 priv = dev_get_drvdata(dev);
3179 if (!priv)
3180 return -ENODEV;
3181 p_env = priv->p_env;
3182 return sprintf(buf, "%d\n", p_env->write_buffers);
3183}
3184
3185static ssize_t
3186claw_wbuff_write(struct device *dev, struct device_attribute *attr,
3187 const char *buf, size_t count)
3188{
3189 struct claw_privbk *priv;
3190 struct claw_env * p_env;
3191 int nnn,max;
3192
3193 priv = dev_get_drvdata(dev);
3194 if (!priv)
3195 return -ENODEV;
3196 p_env = priv->p_env;
3197 if (sscanf(buf, "%i", &nnn) != 1) /* reject non-numeric input */
		return -EINVAL;
3198 if (p_env->packing) {
3199 max = 64;
3200 }
3201 else {
3202 max = 512;
3203 }
3204 if ((nnn > max ) || (nnn < 2))
3205 return -EINVAL;
3206 p_env->write_buffers = nnn;
3207 CLAW_DBF_TEXT(2, setup, "Wbufset");
3208 CLAW_DBF_TEXT_(2, setup, "WB=%d", p_env->write_buffers);
3209 return count;
3210}
3211
3212static DEVICE_ATTR(write_buffer, 0644, claw_wbuff_show, claw_wbuff_write);
3213
3214static ssize_t
3215claw_rbuff_show(struct device *dev, struct device_attribute *attr, char *buf)
3216{
3217 struct claw_privbk *priv;
3218 struct claw_env * p_env;
3219
3220 priv = dev_get_drvdata(dev);
3221 if (!priv)
3222 return -ENODEV;
3223 p_env = priv->p_env;
3224 return sprintf(buf, "%d\n", p_env->read_buffers);
3225}
3226
3227static ssize_t
3228claw_rbuff_write(struct device *dev, struct device_attribute *attr,
3229 const char *buf, size_t count)
3230{
3231 struct claw_privbk *priv;
3232 struct claw_env *p_env;
3233 int nnn,max;
3234
3235 priv = dev_get_drvdata(dev);
3236 if (!priv)
3237 return -ENODEV;
3238 p_env = priv->p_env;
3239 if (sscanf(buf, "%i", &nnn) != 1) /* reject non-numeric input */
		return -EINVAL;
3240 if (p_env->packing) {
3241 max = 64;
3242 }
3243 else {
3244 max = 512;
3245 }
3246 if ((nnn > max ) || (nnn < 2))
3247 return -EINVAL;
3248 p_env->read_buffers = nnn;
3249 CLAW_DBF_TEXT(2, setup, "Rbufset");
3250 CLAW_DBF_TEXT_(2, setup, "RB=%d", p_env->read_buffers);
3251 return count;
3252}
3253static DEVICE_ATTR(read_buffer, 0644, claw_rbuff_show, claw_rbuff_write);
3254
3255static struct attribute *claw_attr[] = {
3256 &dev_attr_read_buffer.attr,
3257 &dev_attr_write_buffer.attr,
3258 &dev_attr_adapter_name.attr,
3259 &dev_attr_api_type.attr,
3260 &dev_attr_host_name.attr,
3261 NULL,
3262};
3263static struct attribute_group claw_attr_group = {
3264 .attrs = claw_attr,
3265};
3266static const struct attribute_group *claw_attr_groups[] = {
3267 &claw_attr_group,
3268 NULL,
3269};
3270static const struct device_type claw_devtype = {
3271 .name = "claw",
3272 .groups = claw_attr_groups,
3273};
3274
3275/*----------------------------------------------------------------*
3276 * claw_probe *
3277 * this function is called for each CLAW device. *
3278 *----------------------------------------------------------------*/
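/*
 * Allocate the per-device private block and environment, set the
 * defaults (five read and five write buffers, CLAW_FRAME_SIZE frames,
 * names not yet defined), attach the data to the ccwgroup device and
 * install claw_irq_handler on both channel devices. The sysfs
 * attributes above supply the remaining configuration before the
 * device goes online.
 */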
3279static int claw_probe(struct ccwgroup_device *cgdev)
3280{
3281 struct claw_privbk *privptr = NULL;
3282
3283 CLAW_DBF_TEXT(2, setup, "probe");
3284 if (!get_device(&cgdev->dev))
3285 return -ENODEV;
3286 privptr = kzalloc(sizeof(struct claw_privbk), GFP_KERNEL);
3287 dev_set_drvdata(&cgdev->dev, privptr);
3288 if (privptr == NULL) {
3289 probe_error(cgdev);
3290 put_device(&cgdev->dev);
3291 CLAW_DBF_TEXT_(2, setup, "probex%d", -ENOMEM);
3292 return -ENOMEM;
3293 }
3294 privptr->p_mtc_envelope = kzalloc(MAX_ENVELOPE_SIZE, GFP_KERNEL);
3295 privptr->p_env = kzalloc(sizeof(struct claw_env), GFP_KERNEL);
3296 if ((privptr->p_mtc_envelope == NULL) || (privptr->p_env == NULL)) {
3297 probe_error(cgdev);
3298 put_device(&cgdev->dev);
3299 CLAW_DBF_TEXT_(2, setup, "probex%d", -ENOMEM);
3300 return -ENOMEM;
3301 }
3302 memcpy(privptr->p_env->adapter_name, WS_NAME_NOT_DEF, 8);
3303 memcpy(privptr->p_env->host_name, WS_NAME_NOT_DEF, 8);
3304 memcpy(privptr->p_env->api_type, WS_NAME_NOT_DEF, 8);
3305 privptr->p_env->packing = 0;
3306 privptr->p_env->write_buffers = 5;
3307 privptr->p_env->read_buffers = 5;
3308 privptr->p_env->read_size = CLAW_FRAME_SIZE;
3309 privptr->p_env->write_size = CLAW_FRAME_SIZE;
3310 privptr->p_env->p_priv = privptr;
3311 cgdev->cdev[0]->handler = claw_irq_handler;
3312 cgdev->cdev[1]->handler = claw_irq_handler;
3313 cgdev->dev.type = &claw_devtype;
3314 CLAW_DBF_TEXT(2, setup, "prbext 0");
3315
3316 return 0;
3317} /* end of claw_probe */
3318
3319/*--------------------------------------------------------------------*
3320* claw_init and cleanup *
3321*---------------------------------------------------------------------*/
3322
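/*
 * Module start-up registers, in order, the s390 debug facility, a root
 * device, the ccw driver and the ccwgroup driver; claw_cleanup() undoes
 * these registrations in reverse order.
 */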
3323static void __exit claw_cleanup(void)
3324{
3325 ccwgroup_driver_unregister(&claw_group_driver);
3326 ccw_driver_unregister(&claw_ccw_driver);
3327 root_device_unregister(claw_root_dev);
3328 claw_unregister_debug_facility();
3329 pr_info("Driver unloaded\n");
3330}
3331
3332/**
3333 * Initialize module.
3334 * This is called just after the module is loaded.
3335 *
3336 * @return 0 on success, !0 on error.
3337 */
3338static int __init claw_init(void)
3339{
3340 int ret = 0;
3341
3342 pr_info("Loading %s\n", version);
3343 ret = claw_register_debug_facility();
3344 if (ret) {
3345 pr_err("Registering with the S/390 debug feature"
3346 " failed with error code %d\n", ret);
3347 goto out_err;
3348 }
3349 CLAW_DBF_TEXT(2, setup, "init_mod");
3350 claw_root_dev = root_device_register("claw");
3351 ret = PTR_RET(claw_root_dev);
3352 if (ret)
3353 goto register_err;
3354 ret = ccw_driver_register(&claw_ccw_driver);
3355 if (ret)
3356 goto ccw_err;
3357 claw_group_driver.driver.groups = claw_drv_attr_groups;
3358 ret = ccwgroup_driver_register(&claw_group_driver);
3359 if (ret)
3360 goto ccwgroup_err;
3361 return 0;
3362
3363ccwgroup_err:
3364 ccw_driver_unregister(&claw_ccw_driver);
3365ccw_err:
3366 root_device_unregister(claw_root_dev);
3367register_err:
3368 CLAW_DBF_TEXT(2, setup, "init_bad");
3369 claw_unregister_debug_facility();
3370out_err:
3371 pr_err("Initializing the claw device driver failed\n");
3372 return ret;
3373}
3374
3375module_init(claw_init);
3376module_exit(claw_cleanup);
3377
3378MODULE_AUTHOR("Andy Richter <richtera@us.ibm.com>");
3379MODULE_DESCRIPTION("Linux for System z CLAW Driver\n" \
3380 "Copyright IBM Corp. 2000, 2008\n");
3381MODULE_LICENSE("GPL");