2 * $Id: ctcmain.c,v 1.68 2004/12/27 09:25:27 heicarst Exp $
4 * CTC / ESCON network driver
6 * Copyright (C) 2001 IBM Deutschland Entwicklung GmbH, IBM Corporation
7 * Author(s): Fritz Elfert (elfert@de.ibm.com, felfert@millenux.com)
8 * Fixes by : Jochen Röhrig (roehrig@de.ibm.com)
9 * Arnaldo Carvalho de Melo <acme@conectiva.com.br>
10 * Driver Model stuff by : Cornelia Huck <cohuck@de.ibm.com>
13 * - Principles of Operation (IBM doc#: SA22-7201-06)
14 * - Common IO/-Device Commands and Self Description (IBM doc#: SA22-7204-02)
15 * - Common IO/-Device Commands and Self Description (IBM doc#: SN22-5535)
16 * - ESCON Channel-to-Channel Adapter (IBM doc#: SA22-7203-00)
17 * - ESCON I/O Interface (IBM doc#: SA22-7202-02)
19 * and the source of the original CTC driver by:
20 * Dieter Wellerdiek (wel@de.ibm.com)
21 * Martin Schwidefsky (schwidefsky@de.ibm.com)
22 * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
23 * Jochen Röhrig (roehrig@de.ibm.com)
25 * This program is free software; you can redistribute it and/or modify
26 * it under the terms of the GNU General Public License as published by
27 * the Free Software Foundation; either version 2, or (at your option)
30 * This program is distributed in the hope that it will be useful,
31 * but WITHOUT ANY WARRANTY; without even the implied warranty of
32 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
33 * GNU General Public License for more details.
35 * You should have received a copy of the GNU General Public License
36 * along with this program; if not, write to the Free Software
37 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
39 * RELEASE-TAG: CTC/ESCON network driver $Revision: 1.68 $
45 #include <linux/module.h>
46 #include <linux/init.h>
47 #include <linux/kernel.h>
48 #include <linux/slab.h>
49 #include <linux/errno.h>
50 #include <linux/types.h>
51 #include <linux/interrupt.h>
52 #include <linux/timer.h>
53 #include <linux/sched.h>
54 #include <linux/bitops.h>
56 #include <linux/signal.h>
57 #include <linux/string.h>
60 #include <linux/if_arp.h>
61 #include <linux/tcp.h>
62 #include <linux/skbuff.h>
63 #include <linux/ctype.h>
67 #include <asm/ccwdev.h>
68 #include <asm/ccwgroup.h>
69 #include <asm/uaccess.h>
71 #include <asm/idals.h>
78 MODULE_AUTHOR("(C) 2000 IBM Corp. by Fritz Elfert (felfert@millenux.com)");
79 MODULE_DESCRIPTION("Linux for S/390 CTC/Escon Driver");
80 MODULE_LICENSE("GPL");
83 * CCW commands, used in this driver.
85 #define CCW_CMD_WRITE 0x01
86 #define CCW_CMD_READ 0x02
87 #define CCW_CMD_SET_EXTENDED 0xc3
88 #define CCW_CMD_PREPARE 0xe3
90 #define CTC_PROTO_S390 0
91 #define CTC_PROTO_LINUX 1
92 #define CTC_PROTO_LINUX_TTY 2
93 #define CTC_PROTO_OS390 3
94 #define CTC_PROTO_MAX 3
96 #define CTC_BUFSIZE_LIMIT 65535
97 #define CTC_BUFSIZE_DEFAULT 32768
99 #define CTC_TIMEOUT_5SEC 5000
101 #define CTC_INITIAL_BLOCKLEN 2
106 #define CTC_ID_SIZE BUS_ID_SIZE+3
110 unsigned long maxmulti;
111 unsigned long maxcqueue;
112 unsigned long doios_single;
113 unsigned long doios_multi;
115 unsigned long tx_time;
116 struct timespec send_stamp;
120 * Definition of one channel
125 * Pointer to next channel in list.
127 struct channel *next;
128 char id[CTC_ID_SIZE];
129 struct ccw_device *cdev;
132 * Type of this channel.
133 * CTC/A or Escon for valid channels.
135 enum channel_types type;
138 * Misc. flags. See CHANNEL_FLAGS_... below
143 * The protocol of this channel
148 * I/O and irq related stuff
159 * Transmit/Receive buffer.
161 struct sk_buff *trans_skb;
164 * Universal I/O queue.
166 struct sk_buff_head io_queue;
169 * TX queue for collecting skb's during busy.
171 struct sk_buff_head collect_queue;
174 * Amount of data in collect_queue.
179 * spinlock for collect_queue and collect_len
181 spinlock_t collect_lock;
184 * Timer for detecting unresponsive
190 * Retry counter for misc. operations.
195 * The finite state machine of this channel
200 * The corresponding net_device this channel
203 struct net_device *netdev;
205 struct ctc_profile prof;
207 unsigned char *trans_skb_data;
212 #define CHANNEL_FLAGS_READ 0
213 #define CHANNEL_FLAGS_WRITE 1
214 #define CHANNEL_FLAGS_INUSE 2
215 #define CHANNEL_FLAGS_BUFSIZE_CHANGED 4
216 #define CHANNEL_FLAGS_FAILED 8
217 #define CHANNEL_FLAGS_WAITIRQ 16
218 #define CHANNEL_FLAGS_RWMASK 1
219 #define CHANNEL_DIRECTION(f) (f & CHANNEL_FLAGS_RWMASK)
221 #define LOG_FLAG_ILLEGALPKT 1
222 #define LOG_FLAG_ILLEGALSIZE 2
223 #define LOG_FLAG_OVERRUN 4
224 #define LOG_FLAG_NOMEM 8
226 #define CTC_LOGLEVEL_INFO 1
227 #define CTC_LOGLEVEL_NOTICE 2
228 #define CTC_LOGLEVEL_WARN 4
229 #define CTC_LOGLEVEL_EMERG 8
230 #define CTC_LOGLEVEL_ERR 16
231 #define CTC_LOGLEVEL_DEBUG 32
232 #define CTC_LOGLEVEL_CRIT 64
234 #define CTC_LOGLEVEL_DEFAULT \
235 (CTC_LOGLEVEL_INFO | CTC_LOGLEVEL_NOTICE | CTC_LOGLEVEL_WARN | CTC_LOGLEVEL_CRIT)
237 #define CTC_LOGLEVEL_MAX ((CTC_LOGLEVEL_CRIT<<1)-1)
239 static int loglevel = CTC_LOGLEVEL_DEFAULT;
241 #define ctc_pr_debug(fmt, arg...) \
242 do { if (loglevel & CTC_LOGLEVEL_DEBUG) printk(KERN_DEBUG fmt,##arg); } while (0)
244 #define ctc_pr_info(fmt, arg...) \
245 do { if (loglevel & CTC_LOGLEVEL_INFO) printk(KERN_INFO fmt,##arg); } while (0)
247 #define ctc_pr_notice(fmt, arg...) \
248 do { if (loglevel & CTC_LOGLEVEL_NOTICE) printk(KERN_NOTICE fmt,##arg); } while (0)
250 #define ctc_pr_warn(fmt, arg...) \
251 do { if (loglevel & CTC_LOGLEVEL_WARN) printk(KERN_WARNING fmt,##arg); } while (0)
253 #define ctc_pr_emerg(fmt, arg...) \
254 do { if (loglevel & CTC_LOGLEVEL_EMERG) printk(KERN_EMERG fmt,##arg); } while (0)
256 #define ctc_pr_err(fmt, arg...) \
257 do { if (loglevel & CTC_LOGLEVEL_ERR) printk(KERN_ERR fmt,##arg); } while (0)
259 #define ctc_pr_crit(fmt, arg...) \
260 do { if (loglevel & CTC_LOGLEVEL_CRIT) printk(KERN_CRIT fmt,##arg); } while (0)
263 * Linked list of all detected channels.
265 static struct channel *channels = NULL;
268 struct net_device_stats stats;
271 * The finite state machine of this interface.
275 * The protocol of this device
279 * Timer for restarting after I/O Errors
281 fsm_timer restart_timer;
283 struct channel *channel[2];
287 * Definition of our link level header.
294 #define LL_HEADER_LENGTH (sizeof(struct ll_header))
297 * Compatibility macros for busy handling
298 * of network devices.
/*
 * Clear bit 0 ("tbusy") in the device's private data and, unless the
 * channel runs the TTY protocol, wake the network TX queue again.
 */
300 static __inline__ void
301 ctc_clear_busy(struct net_device * dev)
303 	clear_bit(0, &(((struct ctc_priv *) dev->priv)->tbusy));
304 	if (((struct ctc_priv *)dev->priv)->protocol != CTC_PROTO_LINUX_TTY)
305 		netif_wake_queue(dev);
/*
 * Atomically test-and-set the "tbusy" bit; stops the network TX queue
 * first (except for the TTY protocol, which has no netif queue to stop).
 * Returns the previous value of the bit (non-zero = was already busy).
 */
308 static __inline__ int
309 ctc_test_and_set_busy(struct net_device * dev)
311 	if (((struct ctc_priv *)dev->priv)->protocol != CTC_PROTO_LINUX_TTY)
312 		netif_stop_queue(dev);
313 	return test_and_set_bit(0, &((struct ctc_priv *) dev->priv)->tbusy);
322 static int printed = 0;
323 char vbuf[] = "$Revision: 1.68 $";
324 char *version = vbuf;
328 if ((version = strchr(version, ':'))) {
329 char *p = strchr(version + 1, '$');
334 printk(KERN_INFO "CTC driver Version%s"
336 " (DEBUG-VERSION, " __DATE__ __TIME__ ")"
338 " initialized\n", version);
343 * Return type of a detected device.
/*
 * Map a detected device's driver_info to a channel type.
 * FICON is folded into ESCON since this driver treats them alike.
 */
345 static enum channel_types
346 get_channel_type(struct ccw_device_id *id)
348 	enum channel_types type = (enum channel_types) id->driver_info;
350 	if (type == channel_type_ficon)
351 		type = channel_type_escon;
357 * States of the interface statemachine.
361 DEV_STATE_STARTWAIT_RXTX,
362 DEV_STATE_STARTWAIT_RX,
363 DEV_STATE_STARTWAIT_TX,
364 DEV_STATE_STOPWAIT_RXTX,
365 DEV_STATE_STOPWAIT_RX,
366 DEV_STATE_STOPWAIT_TX,
369 * MUST be always the last element!!
374 static const char *dev_state_names[] = {
386 * Events of the interface statemachine.
397 * MUST be always the last element!!
402 static const char *dev_event_names[] = {
413 * Events of the channel statemachine
417 * Events, representing return code of
418 * I/O operations (ccw_device_start, ccw_device_halt et al.)
431 * Events, representing unit-check
435 CH_EVENT_UC_TXTIMEOUT,
436 CH_EVENT_UC_TXPARITY,
438 CH_EVENT_UC_RXPARITY,
443 * Events, representing subchannel-check
448 * Events, representing machine checks
454 * Event, representing normal IRQ
460 * Event, representing timer expiry.
465 * Events, representing commands from upper levels.
471 * MUST be always the last element!!
476 static const char *ch_event_names[] = {
477 "ccw_device success",
481 "ccw_device unknown",
483 "Status ATTN & BUSY",
487 "Unit check remote reset",
488 "Unit check remote system reset",
489 "Unit check TX timeout",
490 "Unit check TX parity",
491 "Unit check Hardware failure",
492 "Unit check RX parity",
494 "Unit check Unknown",
496 "SubChannel check Unknown",
498 "Machine check failure",
499 "Machine check operational",
511 * States of the channel statemachine.
515 * Channel not assigned to any device,
516 * initial state, direction invalid
521 * Channel assigned but not operating
540 * MUST be always the last element!!
545 static const char *ch_state_names[] = {
566 * Dump header and first 16 bytes of an sk_buff for debugging purposes.
568 * @param skb The sk_buff to dump.
569 * @param offset Offset relative to skb-data, where to start the dump.
/*
 * Debug helper: dump the link-level header fields and payload bytes of
 * an sk_buff via printk(KERN_DEBUG).  Does nothing unless the DEBUG
 * loglevel bit is set.
 */
572 ctc_dump_skb(struct sk_buff *skb, int offset)
574 	unsigned char *p = skb->data;
576 	struct ll_header *header;
579 	if (!(loglevel & CTC_LOGLEVEL_DEBUG))
584 	header = (struct ll_header *) p;
587 	printk(KERN_DEBUG "dump:\n");
588 	printk(KERN_DEBUG "blocklen=%d %04x\n", bl, bl);
590 	printk(KERN_DEBUG "h->length=%d %04x\n", header->length,
592 	printk(KERN_DEBUG "h->type=%04x\n", header->type);
593 	printk(KERN_DEBUG "h->unused=%04x\n", header->unused);
596 	printk(KERN_DEBUG "data: ");
597 	for (i = 0; i < bl; i++)
/* "<7>" keeps subsequent continuation lines at KERN_DEBUG level */
598 		printk("%02x%s", *p++, (i % 16) ? " " : "\n<7>");
603 ctc_dump_skb(struct sk_buff *skb, int offset)
609 * Unpack a just received skb and hand it over to
612 * @param ch The channel where this skb has been received.
613 * @param pskb The received skb.
/*
 * Unpack a received transfer buffer: a block may carry several packets,
 * each prefixed by a struct ll_header.  Each valid packet is copied into
 * a freshly allocated skb and handed up to the stack (or the CTC TTY
 * layer for the TTY protocol).  Malformed packets bump rx error
 * counters; each error class is logged only once per logflags cycle.
 */
615 static __inline__ void
616 ctc_unpack_skb(struct channel *ch, struct sk_buff *pskb)
618 	struct net_device *dev = ch->netdev;
619 	struct ctc_priv *privptr = (struct ctc_priv *) dev->priv;
/* first __u16 of the buffer is the overall block length */
620 	__u16 len = *((__u16 *) pskb->data);
622 	DBF_TEXT(trace, 4, __FUNCTION__);
623 	skb_put(pskb, 2 + LL_HEADER_LENGTH);
626 	pskb->ip_summed = CHECKSUM_UNNECESSARY;
629 		struct ll_header *header = (struct ll_header *) pskb->data;
631 		skb_pull(pskb, LL_HEADER_LENGTH);
632 		if ((ch->protocol == CTC_PROTO_S390) &&
633 		    (header->type != ETH_P_IP)) {
636 			if (!(ch->logflags & LOG_FLAG_ILLEGALPKT)) {
639 				 * Check packet type only if we stick strictly
640 				 * to S/390's protocol of OS390. This only
641 				 * supports IP. Otherwise allow any packet
645 				       "%s Illegal packet type 0x%04x received, dropping\n",
646 				       dev->name, header->type);
647 				ch->logflags |= LOG_FLAG_ILLEGALPKT;
652 			ctc_dump_skb(pskb, -6);
654 			privptr->stats.rx_dropped++;
655 			privptr->stats.rx_frame_errors++;
658 		pskb->protocol = ntohs(header->type);
/* header->length includes the LL header; anything <= header size is bogus */
659 		if (header->length <= LL_HEADER_LENGTH) {
661 			if (!(ch->logflags & LOG_FLAG_ILLEGALSIZE)) {
664 				       "%s Illegal packet size %d "
665 				       "received (MTU=%d blocklen=%d), "
666 				       "dropping\n", dev->name, header->length,
668 				ch->logflags |= LOG_FLAG_ILLEGALSIZE;
673 			ctc_dump_skb(pskb, -6);
675 			privptr->stats.rx_dropped++;
676 			privptr->stats.rx_length_errors++;
679 		header->length -= LL_HEADER_LENGTH;
680 		len -= LL_HEADER_LENGTH;
/* packet must fit in both the skb tailroom and the remaining block */
681 		if ((header->length > skb_tailroom(pskb)) ||
682 		    (header->length > len)) {
684 			if (!(ch->logflags & LOG_FLAG_OVERRUN)) {
687 				       "%s Illegal packet size %d "
688 				       "(beyond the end of received data), "
689 				       "dropping\n", dev->name, header->length);
690 				ch->logflags |= LOG_FLAG_OVERRUN;
695 			ctc_dump_skb(pskb, -6);
697 			privptr->stats.rx_dropped++;
698 			privptr->stats.rx_length_errors++;
701 		skb_put(pskb, header->length);
702 		pskb->mac.raw = pskb->data;
703 		len -= header->length;
/* copy the packet out of the transfer buffer into its own skb */
704 		skb = dev_alloc_skb(pskb->len);
707 			if (!(ch->logflags & LOG_FLAG_NOMEM)) {
710 				       "%s Out of memory in ctc_unpack_skb\n",
712 				ch->logflags |= LOG_FLAG_NOMEM;
716 			privptr->stats.rx_dropped++;
719 		memcpy(skb_put(skb, pskb->len), pskb->data, pskb->len);
720 		skb->mac.raw = skb->data;
721 		skb->dev = pskb->dev;
722 		skb->protocol = pskb->protocol;
723 		pskb->ip_summed = CHECKSUM_UNNECESSARY;
724 		if (ch->protocol == CTC_PROTO_LINUX_TTY)
725 			ctc_tty_netif_rx(skb);
729 		 * Successful rx; reset logflags
732 		dev->last_rx = jiffies;
733 		privptr->stats.rx_packets++;
734 		privptr->stats.rx_bytes += skb->len;
/* advance past this packet; loop continues while more headers fit */
736 		skb_pull(pskb, header->length);
737 		if (skb_tailroom(pskb) < LL_HEADER_LENGTH) {
739 			if (!(ch->logflags & LOG_FLAG_OVERRUN)) {
742 				       "%s Overrun in ctc_unpack_skb\n",
744 				ch->logflags |= LOG_FLAG_OVERRUN;
750 		skb_put(pskb, LL_HEADER_LENGTH);
756 * Check return code of a preceding ccw_device call, halt_IO etc...
758 * @param ch The channel, the error belongs to.
759 * @param return_code The error code to inspect.
/*
 * Translate the return code of a ccw_device_* call into a channel
 * statemachine event (CH_EVENT_IO_*), logging the failure with the
 * caller-supplied context string 'msg'.
 */
762 ccw_check_return_code(struct channel *ch, int return_code, char *msg)
764 	DBF_TEXT(trace, 5, __FUNCTION__);
765 	switch (return_code) {
766 		fsm_event(ch->fsm, CH_EVENT_IO_SUCCESS, ch);
770 		ctc_pr_warn("%s (%s): Busy !\n", ch->id, msg);
771 		fsm_event(ch->fsm, CH_EVENT_IO_EBUSY, ch);
774 		ctc_pr_emerg("%s (%s): Invalid device called for IO\n",
776 		fsm_event(ch->fsm, CH_EVENT_IO_ENODEV, ch);
779 		ctc_pr_emerg("%s (%s): Status pending... \n",
781 		fsm_event(ch->fsm, CH_EVENT_IO_EIO, ch);
784 		ctc_pr_emerg("%s (%s): Unknown error in do_IO %04x\n",
785 			     ch->id, msg, return_code);
786 		fsm_event(ch->fsm, CH_EVENT_IO_UNKNOWN, ch);
791 * Check sense of a unit check.
793 * @param ch The channel, the sense code belongs to.
794 * @param sense The sense code to inspect.
/*
 * Decode the sense byte of a unit check and raise the matching
 * CH_EVENT_UC_* event on the channel statemachine.  Sense bits are
 * checked in priority order: intervention required, equipment check,
 * bus-out check, command reject, zero sense, then unknown.
 */
797 ccw_unit_check(struct channel *ch, unsigned char sense)
799 	DBF_TEXT(trace, 5, __FUNCTION__);
800 	if (sense & SNS0_INTERVENTION_REQ) {
802 		if (ch->protocol != CTC_PROTO_LINUX_TTY)
803 			ctc_pr_debug("%s: Interface disc. or Sel. reset "
804 				     "(remote)\n", ch->id);
805 		fsm_event(ch->fsm, CH_EVENT_UC_RCRESET, ch);
807 		ctc_pr_debug("%s: System reset (remote)\n", ch->id);
808 		fsm_event(ch->fsm, CH_EVENT_UC_RSRESET, ch);
810 	} else if (sense & SNS0_EQUIPMENT_CHECK) {
811 		if (sense & SNS0_BUS_OUT_CHECK) {
812 			ctc_pr_warn("%s: Hardware malfunction (remote)\n",
814 			fsm_event(ch->fsm, CH_EVENT_UC_HWFAIL, ch);
816 			ctc_pr_warn("%s: Read-data parity error (remote)\n",
818 			fsm_event(ch->fsm, CH_EVENT_UC_RXPARITY, ch);
820 	} else if (sense & SNS0_BUS_OUT_CHECK) {
822 		ctc_pr_warn("%s: Data-streaming timeout)\n", ch->id);
823 		fsm_event(ch->fsm, CH_EVENT_UC_TXTIMEOUT, ch);
825 		ctc_pr_warn("%s: Data-transfer parity error\n", ch->id);
826 		fsm_event(ch->fsm, CH_EVENT_UC_TXPARITY, ch);
828 	} else if (sense & SNS0_CMD_REJECT) {
/* command reject is only logged; no FSM event is raised here */
829 		ctc_pr_warn("%s: Command reject\n", ch->id);
830 	} else if (sense == 0) {
831 		ctc_pr_debug("%s: Unit check ZERO\n", ch->id);
832 		fsm_event(ch->fsm, CH_EVENT_UC_ZERO, ch);
834 		ctc_pr_warn("%s: Unit Check with sense code: %02x\n",
836 		fsm_event(ch->fsm, CH_EVENT_UC_UNKNOWN, ch);
/*
 * Drain an skb queue, dropping our reference on each buffer and freeing
 * it with the IRQ-safe variant (callers may hold spinlocks / run in
 * interrupt context).
 */
841 ctc_purge_skb_queue(struct sk_buff_head *q)
845 	DBF_TEXT(trace, 5, __FUNCTION__);
847 	while ((skb = skb_dequeue(q))) {
848 		atomic_dec(&skb->users);
849 		dev_kfree_skb_irq(skb);
/*
 * (Re)allocate the channel's transfer buffer (trans_skb) if it is
 * missing or the configured buffer size changed.  Sets up the IDAL
 * address in ccw[1] for the new buffer.  'warn' controls whether
 * allocation failure is logged.  Returns 0 on success (return paths
 * partially elided in this view).
 */
853 static __inline__ int
854 ctc_checkalloc_buffer(struct channel *ch, int warn)
856 	DBF_TEXT(trace, 5, __FUNCTION__);
857 	if ((ch->trans_skb == NULL) ||
858 	    (ch->flags & CHANNEL_FLAGS_BUFSIZE_CHANGED)) {
859 		if (ch->trans_skb != NULL)
860 			dev_kfree_skb(ch->trans_skb);
/* drop the old IDAL mapping before replacing the buffer */
861 		clear_normalized_cda(&ch->ccw[1]);
862 		ch->trans_skb = __dev_alloc_skb(ch->max_bufsize,
863 						GFP_ATOMIC | GFP_DMA);
864 		if (ch->trans_skb == NULL) {
867 				    "%s: Couldn't alloc %s trans_skb\n",
869 				    (CHANNEL_DIRECTION(ch->flags) == READ) ?
873 		ch->ccw[1].count = ch->max_bufsize;
874 		if (set_normalized_cda(&ch->ccw[1], ch->trans_skb->data)) {
875 			dev_kfree_skb(ch->trans_skb);
876 			ch->trans_skb = NULL;
879 				    "%s: set_normalized_cda for %s "
880 				    "trans_skb failed, dropping packets\n",
882 				    (CHANNEL_DIRECTION(ch->flags) == READ) ?
886 		ch->ccw[1].count = 0;
/* remember the buffer start so RX/TX paths can rewind trans_skb */
887 		ch->trans_skb_data = ch->trans_skb->data;
888 		ch->flags &= ~CHANNEL_FLAGS_BUFSIZE_CHANGED;
894 * Dummy NOP action for statemachines
897 fsm_action_nop(fsm_instance * fi, int event, void *arg)
902 * Actions for channel - statemachines.
903 *****************************************************************************/
906 * Normal data has been send. Free the corresponding
907 * skb (it's in io_queue), reset dev->tbusy and
908 * revert to idle state.
910 * @param fi An instance of a channel statemachine.
911 * @param event The event, just happened.
912 * @param arg Generic pointer, casted from channel * upon call.
/*
 * Channel FSM action: a write completed.  Update TX profiling and
 * statistics, free the sent skbs, and if packets piled up in the
 * collect queue meanwhile, chain them into one multi-packet transfer
 * and start the next write; otherwise go to TXIDLE.
 */
915 ch_action_txdone(fsm_instance * fi, int event, void *arg)
917 	struct channel *ch = (struct channel *) arg;
918 	struct net_device *dev = ch->netdev;
919 	struct ctc_priv *privptr = dev->priv;
923 	unsigned long duration;
/* xtime-based timestamp; coarse, used only for the tx_time profile max */
924 	struct timespec done_stamp = xtime;
926 	DBF_TEXT(trace, 4, __FUNCTION__);
929 	    (done_stamp.tv_sec - ch->prof.send_stamp.tv_sec) * 1000000 +
930 	    (done_stamp.tv_nsec - ch->prof.send_stamp.tv_nsec) / 1000;
931 	if (duration > ch->prof.tx_time)
932 		ch->prof.tx_time = duration;
934 	if (ch->irb->scsw.count != 0)
935 		ctc_pr_debug("%s: TX not complete, remaining %d bytes\n",
936 			     dev->name, ch->irb->scsw.count);
937 	fsm_deltimer(&ch->timer);
938 	while ((skb = skb_dequeue(&ch->io_queue))) {
939 		privptr->stats.tx_packets++;
940 		privptr->stats.tx_bytes += skb->len - LL_HEADER_LENGTH;
942 			privptr->stats.tx_bytes += 2;
945 		atomic_dec(&skb->users);
946 		dev_kfree_skb_irq(skb);
948 	spin_lock(&ch->collect_lock);
949 	clear_normalized_cda(&ch->ccw[4]);
950 	if (ch->collect_len > 0) {
953 		if (ctc_checkalloc_buffer(ch, 1)) {
954 			spin_unlock(&ch->collect_lock);
/* rewind the transfer buffer before packing the collected skbs */
957 		ch->trans_skb->tail = ch->trans_skb->data = ch->trans_skb_data;
958 		ch->trans_skb->len = 0;
959 		if (ch->prof.maxmulti < (ch->collect_len + 2))
960 			ch->prof.maxmulti = ch->collect_len + 2;
961 		if (ch->prof.maxcqueue < skb_queue_len(&ch->collect_queue))
962 			ch->prof.maxcqueue = skb_queue_len(&ch->collect_queue);
/* leading __u16 = total block length (payload + 2-byte length field) */
963 		*((__u16 *) skb_put(ch->trans_skb, 2)) = ch->collect_len + 2;
965 		while ((skb = skb_dequeue(&ch->collect_queue))) {
966 			memcpy(skb_put(ch->trans_skb, skb->len), skb->data,
968 			privptr->stats.tx_packets++;
969 			privptr->stats.tx_bytes += skb->len - LL_HEADER_LENGTH;
970 			atomic_dec(&skb->users);
971 			dev_kfree_skb_irq(skb);
975 		spin_unlock(&ch->collect_lock);
976 		ch->ccw[1].count = ch->trans_skb->len;
977 		fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch);
978 		ch->prof.send_stamp = xtime;
979 		rc = ccw_device_start(ch->cdev, &ch->ccw[0],
980 				      (unsigned long) ch, 0xff, 0);
981 		ch->prof.doios_multi++;
983 			privptr->stats.tx_dropped += i;
984 			privptr->stats.tx_errors += i;
985 			fsm_deltimer(&ch->timer);
986 			ccw_check_return_code(ch, rc, "chained TX");
989 		spin_unlock(&ch->collect_lock);
990 		fsm_newstate(fi, CH_STATE_TXIDLE);
996 * Initial data is sent.
997 * Notify device statemachine that we are up and
1000 * @param fi An instance of a channel statemachine.
1001 * @param event The event, just happened.
1002 * @param arg Generic pointer, casted from channel * upon call.
/*
 * Channel FSM action: initial transmit finished.  Enter TXIDLE and
 * notify the device statemachine that the TX side is up.
 */
1005 ch_action_txidle(fsm_instance * fi, int event, void *arg)
1007 	struct channel *ch = (struct channel *) arg;
1009 	DBF_TEXT(trace, 4, __FUNCTION__);
1010 	fsm_deltimer(&ch->timer);
1011 	fsm_newstate(fi, CH_STATE_TXIDLE);
1012 	fsm_event(((struct ctc_priv *) ch->netdev->priv)->fsm, DEV_EVENT_TXUP,
1017 * Got normal data, check for sanity, queue it up, allocate new buffer
1018 * trigger bottom half, and initiate next read.
1020 * @param fi An instance of a channel statemachine.
1021 * @param event The event, just happened.
1022 * @param arg Generic pointer, casted from channel * upon call.
/*
 * Channel FSM action: a read completed.  Validate the received block
 * length against the residual CCW count, hand the buffer to
 * ctc_unpack_skb(), then restart the next read.
 */
1025 ch_action_rx(fsm_instance * fi, int event, void *arg)
1027 	struct channel *ch = (struct channel *) arg;
1028 	struct net_device *dev = ch->netdev;
1029 	struct ctc_priv *privptr = dev->priv;
/* bytes actually received = buffer size minus residual count in the SCSW */
1030 	int len = ch->max_bufsize - ch->irb->scsw.count;
1031 	struct sk_buff *skb = ch->trans_skb;
/* first __u16 in the buffer is the sender's block length */
1032 	__u16 block_len = *((__u16 *) skb->data);
1036 	DBF_TEXT(trace, 4, __FUNCTION__);
1037 	fsm_deltimer(&ch->timer);
1039 		ctc_pr_debug("%s: got packet with length %d < 8\n",
1041 		privptr->stats.rx_dropped++;
1042 		privptr->stats.rx_length_errors++;
1045 	if (len > ch->max_bufsize) {
1046 		ctc_pr_debug("%s: got packet with length %d > %d\n",
1047 			     dev->name, len, ch->max_bufsize);
1048 		privptr->stats.rx_dropped++;
1049 		privptr->stats.rx_length_errors++;
1054 	 * VM TCP seems to have a bug sending 2 trailing bytes of garbage.
1056 	switch (ch->protocol) {
1057 	case CTC_PROTO_S390:
1058 	case CTC_PROTO_OS390:
/* allow the 2 trailing garbage bytes for VM/S390 peers */
1059 		check_len = block_len + 2;
1062 		check_len = block_len;
1065 	if ((len < block_len) || (len > check_len)) {
1066 		ctc_pr_debug("%s: got block length %d != rx length %d\n",
1067 			     dev->name, block_len, len);
1069 			ctc_dump_skb(skb, 0);
1071 		*((__u16 *) skb->data) = len;
1072 		privptr->stats.rx_dropped++;
1073 		privptr->stats.rx_length_errors++;
1077 	if (block_len > 0) {
1078 		*((__u16 *) skb->data) = block_len;
1079 		ctc_unpack_skb(ch, skb);
/* rewind transfer buffer and start the next read */
1082 	skb->data = skb->tail = ch->trans_skb_data;
1084 	if (ctc_checkalloc_buffer(ch, 1))
1086 	ch->ccw[1].count = ch->max_bufsize;
1087 	rc = ccw_device_start(ch->cdev, &ch->ccw[0], (unsigned long) ch, 0xff, 0);
1089 		ccw_check_return_code(ch, rc, "normal RX");
1092 static void ch_action_rxidle(fsm_instance * fi, int event, void *arg);
1095 * Initialize connection by sending a __u16 of value 0.
1097 * @param fi An instance of a channel statemachine.
1098 * @param event The event, just happened.
1099 * @param arg Generic pointer, casted from channel * upon call.
/*
 * Channel FSM action: send the initial handshake frame (a __u16 block
 * length of CTC_INITIAL_BLOCKLEN).  OS/390 peers get special-cased
 * handling; for the S/390 compatibility protocol no RX timer is set
 * because VM TCP delays the first frame.
 */
1102 ch_action_firstio(fsm_instance * fi, int event, void *arg)
1104 	struct channel *ch = (struct channel *) arg;
1107 	DBF_TEXT(trace, 4, __FUNCTION__);
1109 	if (fsm_getstate(fi) == CH_STATE_TXIDLE)
1110 		ctc_pr_debug("%s: remote side issued READ?, init ...\n", ch->id);
1111 	fsm_deltimer(&ch->timer);
1112 	if (ctc_checkalloc_buffer(ch, 1))
1114 	if ((fsm_getstate(fi) == CH_STATE_SETUPWAIT) &&
1115 	    (ch->protocol == CTC_PROTO_OS390)) {
1116 		/* OS/390 resp. z/OS */
1117 		if (CHANNEL_DIRECTION(ch->flags) == READ) {
1118 			*((__u16 *) ch->trans_skb->data) = CTC_INITIAL_BLOCKLEN;
1119 			fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC,
1120 				     CH_EVENT_TIMER, ch);
1121 			ch_action_rxidle(fi, event, arg);
1123 			struct net_device *dev = ch->netdev;
1124 			fsm_newstate(fi, CH_STATE_TXIDLE);
1125 			fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1126 				  DEV_EVENT_TXUP, dev);
1132 	 * Don't setup a timer for receiving the initial RX frame
1133 	 * if in compatibility mode, since VM TCP delays the initial
1134 	 * frame until it has some data to send.
1136 	if ((CHANNEL_DIRECTION(ch->flags) == WRITE) ||
1137 	    (ch->protocol != CTC_PROTO_S390))
1138 		fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch);
1140 	*((__u16 *) ch->trans_skb->data) = CTC_INITIAL_BLOCKLEN;
1141 	ch->ccw[1].count = 2;	/* Transfer only length */
1143 	fsm_newstate(fi, (CHANNEL_DIRECTION(ch->flags) == READ)
1144 		     ? CH_STATE_RXINIT : CH_STATE_TXINIT);
1145 	rc = ccw_device_start(ch->cdev, &ch->ccw[0], (unsigned long) ch, 0xff, 0);
1147 		fsm_deltimer(&ch->timer);
1148 		fsm_newstate(fi, CH_STATE_SETUPWAIT);
1149 		ccw_check_return_code(ch, rc, "init IO");
1152 	 * If in compatibility mode since we don't setup a timer, we
1153 	 * also signal RX channel up immediately. This enables us
1154 	 * to send packets early which in turn usually triggers some
1155 	 * reply from VM TCP which brings up the RX channel to its
1158 	if ((CHANNEL_DIRECTION(ch->flags) == READ) &&
1159 	    (ch->protocol == CTC_PROTO_S390)) {
1160 		struct net_device *dev = ch->netdev;
1161 		fsm_event(((struct ctc_priv *) dev->priv)->fsm, DEV_EVENT_RXUP,
1167 * Got initial data, check it. If OK,
1168 * notify device statemachine that we are up and
1171 * @param fi An instance of a channel statemachine.
1172 * @param event The event, just happened.
1173 * @param arg Generic pointer, casted from channel * upon call.
/*
 * Channel FSM action: initial RX frame arrived.  If the announced block
 * length is sane (>= CTC_INITIAL_BLOCKLEN), enter RXIDLE, start the
 * first real read and signal RXUP to the device statemachine;
 * otherwise redo the handshake via ch_action_firstio().
 */
1176 ch_action_rxidle(fsm_instance * fi, int event, void *arg)
1178 	struct channel *ch = (struct channel *) arg;
1179 	struct net_device *dev = ch->netdev;
1183 	DBF_TEXT(trace, 4, __FUNCTION__);
1184 	fsm_deltimer(&ch->timer);
1185 	buflen = *((__u16 *) ch->trans_skb->data);
1187 	ctc_pr_debug("%s: Initial RX count %d\n", dev->name, buflen);
1189 	if (buflen >= CTC_INITIAL_BLOCKLEN) {
1190 		if (ctc_checkalloc_buffer(ch, 1))
1192 		ch->ccw[1].count = ch->max_bufsize;
1193 		fsm_newstate(fi, CH_STATE_RXIDLE);
1194 		rc = ccw_device_start(ch->cdev, &ch->ccw[0],
1195 				      (unsigned long) ch, 0xff, 0);
/* on I/O start failure fall back to RXINIT and report the code */
1197 			fsm_newstate(fi, CH_STATE_RXINIT);
1198 			ccw_check_return_code(ch, rc, "initial RX");
1200 			fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1201 				  DEV_EVENT_RXUP, dev);
1203 		ctc_pr_debug("%s: Initial RX count %d not %d\n",
1204 			     dev->name, buflen, CTC_INITIAL_BLOCKLEN);
1205 		ch_action_firstio(fi, event, arg);
1210 * Set channel into extended mode.
1212 * @param fi An instance of a channel statemachine.
1213 * @param event The event, just happened.
1214 * @param arg Generic pointer, casted from channel * upon call.
/*
 * Channel FSM action: put the channel into extended mode by starting
 * the SET_EXTENDED CCW (ch->ccw[6]).  When invoked from the timer the
 * ccwdev lock is not yet held, so take it around ccw_device_start().
 */
1217 ch_action_setmode(fsm_instance * fi, int event, void *arg)
1219 	struct channel *ch = (struct channel *) arg;
1221 	unsigned long saveflags;
1223 	DBF_TEXT(trace, 4, __FUNCTION__);
1224 	fsm_deltimer(&ch->timer);
1225 	fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch);
1226 	fsm_newstate(fi, CH_STATE_SETUPWAIT);
1227 	saveflags = 0;	/* avoids compiler warning with
1228 			   spin_unlock_irqrestore */
1229 	if (event == CH_EVENT_TIMER)	// only for timer not yet locked
1230 		spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
1231 	rc = ccw_device_start(ch->cdev, &ch->ccw[6], (unsigned long) ch, 0xff, 0);
1232 	if (event == CH_EVENT_TIMER)
1233 		spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
/* on failure revert to STARTWAIT and report the return code */
1235 		fsm_deltimer(&ch->timer);
1236 		fsm_newstate(fi, CH_STATE_STARTWAIT);
1237 		ccw_check_return_code(ch, rc, "set Mode");
1245 * @param fi An instance of a channel statemachine.
1246 * @param event The event, just happened.
1247 * @param arg Generic pointer, casted from channel * upon call.
/*
 * Channel FSM action: bring a channel up.  Frees any stale transfer
 * buffer, builds the CCW program (PREPARE / READ-or-WRITE / NOOP,
 * duplicated into slots 3..5), then issues a halt to get the
 * subchannel into a defined state before the first start.
 */
1250 ch_action_start(fsm_instance * fi, int event, void *arg)
1252 	struct channel *ch = (struct channel *) arg;
1253 	unsigned long saveflags;
1255 	struct net_device *dev;
1257 	DBF_TEXT(trace, 4, __FUNCTION__);
1259 		ctc_pr_warn("ch_action_start ch=NULL\n");
1262 	if (ch->netdev == NULL) {
1263 		ctc_pr_warn("ch_action_start dev=NULL, id=%s\n", ch->id);
1269 	ctc_pr_debug("%s: %s channel start\n", dev->name,
1270 		     (CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX");
1273 	if (ch->trans_skb != NULL) {
1274 		clear_normalized_cda(&ch->ccw[1]);
1275 		dev_kfree_skb(ch->trans_skb);
1276 		ch->trans_skb = NULL;
1278 	if (CHANNEL_DIRECTION(ch->flags) == READ) {
1279 		ch->ccw[1].cmd_code = CCW_CMD_READ;
1280 		ch->ccw[1].flags = CCW_FLAG_SLI;
1281 		ch->ccw[1].count = 0;
1283 		ch->ccw[1].cmd_code = CCW_CMD_WRITE;
1284 		ch->ccw[1].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1285 		ch->ccw[1].count = 0;
1287 	if (ctc_checkalloc_buffer(ch, 0)) {
1289 			    "%s: Could not allocate %s trans_skb, delaying "
1290 			    "allocation until first transfer\n",
1292 			    (CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX");
1295 	ch->ccw[0].cmd_code = CCW_CMD_PREPARE;
1296 	ch->ccw[0].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1297 	ch->ccw[0].count = 0;
1299 	ch->ccw[2].cmd_code = CCW_CMD_NOOP;	/* jointed CE + DE */
1300 	ch->ccw[2].flags = CCW_FLAG_SLI;
1301 	ch->ccw[2].count = 0;
/* second copy of the program at ccw[3..5]; used for chained transfers */
1303 	memcpy(&ch->ccw[3], &ch->ccw[0], sizeof (struct ccw1) * 3);
1305 	ch->ccw[4].flags &= ~CCW_FLAG_IDA;
1307 	fsm_newstate(fi, CH_STATE_STARTWAIT);
1308 	fsm_addtimer(&ch->timer, 1000, CH_EVENT_TIMER, ch);
1309 	spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
1310 	rc = ccw_device_halt(ch->cdev, (unsigned long) ch);
1311 	spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
1314 		fsm_deltimer(&ch->timer);
1315 		ccw_check_return_code(ch, rc, "initial HaltIO");
1318 	ctc_pr_debug("ctc: %s(): leaving\n", __func__);
1323 * Shutdown a channel.
1325 * @param fi An instance of a channel statemachine.
1326 * @param event The event, just happened.
1327 * @param arg Generic pointer, casted from channel * upon call.
/*
 * Channel FSM action: shut a channel down by halting the subchannel.
 * Enters CH_STATE_TERM; restores the previous state if the halt fails.
 * The ccwdev lock is taken only for CH_EVENT_STOP, where the caller
 * does not already hold it.
 */
1330 ch_action_haltio(fsm_instance * fi, int event, void *arg)
1332 	struct channel *ch = (struct channel *) arg;
1333 	unsigned long saveflags;
1337 	DBF_TEXT(trace, 3, __FUNCTION__);
1338 	fsm_deltimer(&ch->timer);
1339 	fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch);
1340 	saveflags = 0;	/* avoids comp warning with
1341 			   spin_unlock_irqrestore */
1342 	if (event == CH_EVENT_STOP)	// only for STOP not yet locked
1343 		spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
1344 	oldstate = fsm_getstate(fi);
1345 	fsm_newstate(fi, CH_STATE_TERM);
1346 	rc = ccw_device_halt(ch->cdev, (unsigned long) ch);
1347 	if (event == CH_EVENT_STOP)
1348 		spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
1351 		fsm_deltimer(&ch->timer);
1352 		fsm_newstate(fi, oldstate);
1354 		ccw_check_return_code(ch, rc, "HaltIO in ch_action_haltio");
1359 * A channel has successfully been halted.
1360 * Cleanup its queue and notify interface statemachine.
1362 * @param fi An instance of a channel statemachine.
1363 * @param event The event, just happened.
1364 * @param arg Generic pointer, casted from channel * upon call.
/*
 * Channel FSM action: the channel was halted successfully.  Free the
 * transfer buffer, purge the pending queues and raise RXDOWN/TXDOWN
 * on the device statemachine according to the channel direction.
 */
1367 ch_action_stopped(fsm_instance * fi, int event, void *arg)
1369 	struct channel *ch = (struct channel *) arg;
1370 	struct net_device *dev = ch->netdev;
1372 	DBF_TEXT(trace, 3, __FUNCTION__);
1373 	fsm_deltimer(&ch->timer);
1374 	fsm_newstate(fi, CH_STATE_STOPPED);
1375 	if (ch->trans_skb != NULL) {
1376 		clear_normalized_cda(&ch->ccw[1]);
1377 		dev_kfree_skb(ch->trans_skb);
1378 		ch->trans_skb = NULL;
1380 	if (CHANNEL_DIRECTION(ch->flags) == READ) {
1381 		skb_queue_purge(&ch->io_queue);
1382 		fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1383 			  DEV_EVENT_RXDOWN, dev);
/* TX side: also drain the collect queue under its lock */
1385 		ctc_purge_skb_queue(&ch->io_queue);
1386 		spin_lock(&ch->collect_lock);
1387 		ctc_purge_skb_queue(&ch->collect_queue);
1388 		ch->collect_len = 0;
1389 		spin_unlock(&ch->collect_lock);
1390 		fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1391 			  DEV_EVENT_TXDOWN, dev);
1396 * A stop command from device statemachine arrived and we are in
1397 * not operational mode. Set state to stopped.
1399 * @param fi An instance of a channel statemachine.
1400 * @param event The event, just happened.
1401 * @param arg Generic pointer, casted from channel * upon call.
/*
 * Channel FSM action: stop requested while not operational — simply
 * mark the channel stopped.
 */
1404 ch_action_stop(fsm_instance * fi, int event, void *arg)
1406 	fsm_newstate(fi, CH_STATE_STOPPED);
1410 * A machine check for no path, not operational status or gone device has
1412 * Cleanup queue and notify interface statemachine.
1414 * @param fi An instance of a channel statemachine.
1415 * @param event The event, just happened.
1416 * @param arg Generic pointer, casted from channel * upon call.
/*
 * Channel FSM action: machine check (no path / not operational / gone
 * device).  Enter NOTOP, purge the queues and signal RXDOWN/TXDOWN to
 * the device statemachine, mirroring ch_action_stopped()'s cleanup.
 */
1419 ch_action_fail(fsm_instance * fi, int event, void *arg)
1421 	struct channel *ch = (struct channel *) arg;
1422 	struct net_device *dev = ch->netdev;
1424 	DBF_TEXT(trace, 3, __FUNCTION__);
1425 	fsm_deltimer(&ch->timer);
1426 	fsm_newstate(fi, CH_STATE_NOTOP);
1427 	if (CHANNEL_DIRECTION(ch->flags) == READ) {
1428 		skb_queue_purge(&ch->io_queue);
1429 		fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1430 			  DEV_EVENT_RXDOWN, dev);
1432 		ctc_purge_skb_queue(&ch->io_queue);
1433 		spin_lock(&ch->collect_lock);
1434 		ctc_purge_skb_queue(&ch->collect_queue);
1435 		ch->collect_len = 0;
1436 		spin_unlock(&ch->collect_lock);
1437 		fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1438 			  DEV_EVENT_TXDOWN, dev);
1443 * Handle error during setup of channel.
1445 * @param fi An instance of a channel statemachine.
1446 * @param event The event, just happened.
1447 * @param arg Generic pointer, casted from channel * upon call.
/*
 * Channel FSM action: error during channel setup.  A remote-reset unit
 * check in SETUPWAIT means the peer just isn't up yet — schedule a
 * retry instead of failing.  Any other error moves the channel to
 * RXERR/TXERR and notifies the device statemachine.
 */
1450 ch_action_setuperr(fsm_instance * fi, int event, void *arg)
1452 	struct channel *ch = (struct channel *) arg;
1453 	struct net_device *dev = ch->netdev;
1455 	DBF_TEXT(setup, 3, __FUNCTION__);
1457 	 * Special case: Got UC_RCRESET on setmode.
1458 	 * This means that remote side isn't setup. In this case
1459 	 * simply retry after some 10 secs...
1461 	if ((fsm_getstate(fi) == CH_STATE_SETUPWAIT) &&
1462 	    ((event == CH_EVENT_UC_RCRESET) ||
1463 	     (event == CH_EVENT_UC_RSRESET))) {
1464 		fsm_newstate(fi, CH_STATE_STARTRETRY);
1465 		fsm_deltimer(&ch->timer);
1466 		fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch);
1467 		if (CHANNEL_DIRECTION(ch->flags) == READ) {
1468 			int rc = ccw_device_halt(ch->cdev, (unsigned long) ch);
1470 				ccw_check_return_code(
1471 					ch, rc, "HaltIO in ch_action_setuperr");
1476 	ctc_pr_debug("%s: Error %s during %s channel setup state=%s\n",
1477 		     dev->name, ch_event_names[event],
1478 		     (CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX",
1479 		     fsm_getstate_str(fi));
1480 	if (CHANNEL_DIRECTION(ch->flags) == READ) {
1481 		fsm_newstate(fi, CH_STATE_RXERR);
1482 		fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1483 			  DEV_EVENT_RXDOWN, dev);
1485 		fsm_newstate(fi, CH_STATE_TXERR);
1486 		fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1487 			  DEV_EVENT_TXDOWN, dev);
1492 * Restart a channel after an error.
1494 * @param fi An instance of a channel statemachine.
1495 * @param event The event, just happened.
1496 * @param arg Generic pointer, casted from channel * upon call.
1499 ch_action_restart(fsm_instance * fi, int event, void *arg)
1501 unsigned long saveflags;
1505 struct channel *ch = (struct channel *) arg;
1506 struct net_device *dev = ch->netdev;
1508 DBF_TEXT(trace, 3, __FUNCTION__);
1509 fsm_deltimer(&ch->timer);
1510 ctc_pr_debug("%s: %s channel restart\n", dev->name,
1511 (CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX");
1512 fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch);
1513 oldstate = fsm_getstate(fi);
1514 fsm_newstate(fi, CH_STATE_STARTWAIT);
1515 saveflags = 0; /* avoids compiler warning with
1516 spin_unlock_irqrestore */
1517 if (event == CH_EVENT_TIMER) // only for timer not yet locked
1518 spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
1519 rc = ccw_device_halt(ch->cdev, (unsigned long) ch);
1520 if (event == CH_EVENT_TIMER)
1521 spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
1524 fsm_deltimer(&ch->timer);
1525 fsm_newstate(fi, oldstate);
1527 ccw_check_return_code(ch, rc, "HaltIO in ch_action_restart");
1532 * Handle error during RX initial handshake (exchange of
1533 * 0-length block header)
1535 * @param fi An instance of a channel statemachine.
1536 * @param event The event, just happened.
1537 * @param arg Generic pointer, casted from channel * upon call.
1540 ch_action_rxiniterr(fsm_instance * fi, int event, void *arg)
1542 struct channel *ch = (struct channel *) arg;
1543 struct net_device *dev = ch->netdev;
1545 DBF_TEXT(setup, 3, __FUNCTION__);
1546 if (event == CH_EVENT_TIMER) {
1547 fsm_deltimer(&ch->timer);
1548 ctc_pr_debug("%s: Timeout during RX init handshake\n", dev->name);
1549 if (ch->retry++ < 3)
1550 ch_action_restart(fi, event, arg);
1552 fsm_newstate(fi, CH_STATE_RXERR);
1553 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1554 DEV_EVENT_RXDOWN, dev);
1557 ctc_pr_warn("%s: Error during RX init handshake\n", dev->name);
1561 * Notify device statemachine if we gave up initialization
1564 * @param fi An instance of a channel statemachine.
1565 * @param event The event, just happened.
1566 * @param arg Generic pointer, casted from channel * upon call.
1569 ch_action_rxinitfail(fsm_instance * fi, int event, void *arg)
1571 struct channel *ch = (struct channel *) arg;
1572 struct net_device *dev = ch->netdev;
1574 DBF_TEXT(setup, 3, __FUNCTION__);
1575 fsm_newstate(fi, CH_STATE_RXERR);
1576 ctc_pr_warn("%s: RX initialization failed\n", dev->name);
1577 ctc_pr_warn("%s: RX <-> RX connection detected\n", dev->name);
1578 fsm_event(((struct ctc_priv *) dev->priv)->fsm, DEV_EVENT_RXDOWN, dev);
1582 * Handle RX Unit check remote reset (remote disconnected)
1584 * @param fi An instance of a channel statemachine.
1585 * @param event The event, just happened.
1586 * @param arg Generic pointer, casted from channel * upon call.
1589 ch_action_rxdisc(fsm_instance * fi, int event, void *arg)
1591 struct channel *ch = (struct channel *) arg;
1592 struct channel *ch2;
1593 struct net_device *dev = ch->netdev;
1595 DBF_TEXT(trace, 3, __FUNCTION__);
1596 fsm_deltimer(&ch->timer);
1597 ctc_pr_debug("%s: Got remote disconnect, re-initializing ...\n",
1601 * Notify device statemachine
1603 fsm_event(((struct ctc_priv *) dev->priv)->fsm, DEV_EVENT_RXDOWN, dev);
1604 fsm_event(((struct ctc_priv *) dev->priv)->fsm, DEV_EVENT_TXDOWN, dev);
1606 fsm_newstate(fi, CH_STATE_DTERM);
1607 ch2 = ((struct ctc_priv *) dev->priv)->channel[WRITE];
1608 fsm_newstate(ch2->fsm, CH_STATE_DTERM);
1610 ccw_device_halt(ch->cdev, (unsigned long) ch);
1611 ccw_device_halt(ch2->cdev, (unsigned long) ch2);
1615 * Handle error during TX channel initialization.
1617 * @param fi An instance of a channel statemachine.
1618 * @param event The event, just happened.
1619 * @param arg Generic pointer, casted from channel * upon call.
1622 ch_action_txiniterr(fsm_instance * fi, int event, void *arg)
1624 struct channel *ch = (struct channel *) arg;
1625 struct net_device *dev = ch->netdev;
1627 DBF_TEXT(setup, 2, __FUNCTION__);
1628 if (event == CH_EVENT_TIMER) {
1629 fsm_deltimer(&ch->timer);
1630 ctc_pr_debug("%s: Timeout during TX init handshake\n", dev->name);
1631 if (ch->retry++ < 3)
1632 ch_action_restart(fi, event, arg);
1634 fsm_newstate(fi, CH_STATE_TXERR);
1635 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1636 DEV_EVENT_TXDOWN, dev);
1639 ctc_pr_warn("%s: Error during TX init handshake\n", dev->name);
1643 * Handle TX timeout by retrying operation.
1645 * @param fi An instance of a channel statemachine.
1646 * @param event The event, just happened.
1647 * @param arg Generic pointer, casted from channel * upon call.
1650 ch_action_txretry(fsm_instance * fi, int event, void *arg)
1652 struct channel *ch = (struct channel *) arg;
1653 struct net_device *dev = ch->netdev;
1654 unsigned long saveflags;
1656 DBF_TEXT(trace, 4, __FUNCTION__);
1657 fsm_deltimer(&ch->timer);
1658 if (ch->retry++ > 3) {
1659 ctc_pr_debug("%s: TX retry failed, restarting channel\n",
1661 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1662 DEV_EVENT_TXDOWN, dev);
1663 ch_action_restart(fi, event, arg);
1665 struct sk_buff *skb;
1667 ctc_pr_debug("%s: TX retry %d\n", dev->name, ch->retry);
1668 if ((skb = skb_peek(&ch->io_queue))) {
1671 clear_normalized_cda(&ch->ccw[4]);
1672 ch->ccw[4].count = skb->len;
1673 if (set_normalized_cda(&ch->ccw[4], skb->data)) {
1675 "%s: IDAL alloc failed, chan restart\n",
1677 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1678 DEV_EVENT_TXDOWN, dev);
1679 ch_action_restart(fi, event, arg);
1682 fsm_addtimer(&ch->timer, 1000, CH_EVENT_TIMER, ch);
1683 saveflags = 0; /* avoids compiler warning with
1684 spin_unlock_irqrestore */
1685 if (event == CH_EVENT_TIMER) // only for TIMER not yet locked
1686 spin_lock_irqsave(get_ccwdev_lock(ch->cdev),
1688 rc = ccw_device_start(ch->cdev, &ch->ccw[3],
1689 (unsigned long) ch, 0xff, 0);
1690 if (event == CH_EVENT_TIMER)
1691 spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev),
1694 fsm_deltimer(&ch->timer);
1695 ccw_check_return_code(ch, rc, "TX in ch_action_txretry");
1696 ctc_purge_skb_queue(&ch->io_queue);
1704 * Handle fatal errors during an I/O command.
1706 * @param fi An instance of a channel statemachine.
1707 * @param event The event, just happened.
1708 * @param arg Generic pointer, casted from channel * upon call.
1711 ch_action_iofatal(fsm_instance * fi, int event, void *arg)
1713 struct channel *ch = (struct channel *) arg;
1714 struct net_device *dev = ch->netdev;
1716 DBF_TEXT(trace, 3, __FUNCTION__);
1717 fsm_deltimer(&ch->timer);
1718 if (CHANNEL_DIRECTION(ch->flags) == READ) {
1719 ctc_pr_debug("%s: RX I/O error\n", dev->name);
1720 fsm_newstate(fi, CH_STATE_RXERR);
1721 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1722 DEV_EVENT_RXDOWN, dev);
1724 ctc_pr_debug("%s: TX I/O error\n", dev->name);
1725 fsm_newstate(fi, CH_STATE_TXERR);
1726 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1727 DEV_EVENT_TXDOWN, dev);
1732 ch_action_reinit(fsm_instance *fi, int event, void *arg)
1734 struct channel *ch = (struct channel *)arg;
1735 struct net_device *dev = ch->netdev;
1736 struct ctc_priv *privptr = dev->priv;
1738 DBF_TEXT(trace, 4, __FUNCTION__);
1739 ch_action_iofatal(fi, event, arg);
1740 fsm_addtimer(&privptr->restart_timer, 1000, DEV_EVENT_RESTART, dev);
1745 * The statemachine for a channel.
1747 static const fsm_node ch_fsm[] = {
1748 {CH_STATE_STOPPED, CH_EVENT_STOP, fsm_action_nop },
1749 {CH_STATE_STOPPED, CH_EVENT_START, ch_action_start },
1750 {CH_STATE_STOPPED, CH_EVENT_FINSTAT, fsm_action_nop },
1751 {CH_STATE_STOPPED, CH_EVENT_MC_FAIL, fsm_action_nop },
1753 {CH_STATE_NOTOP, CH_EVENT_STOP, ch_action_stop },
1754 {CH_STATE_NOTOP, CH_EVENT_START, fsm_action_nop },
1755 {CH_STATE_NOTOP, CH_EVENT_FINSTAT, fsm_action_nop },
1756 {CH_STATE_NOTOP, CH_EVENT_MC_FAIL, fsm_action_nop },
1757 {CH_STATE_NOTOP, CH_EVENT_MC_GOOD, ch_action_start },
1759 {CH_STATE_STARTWAIT, CH_EVENT_STOP, ch_action_haltio },
1760 {CH_STATE_STARTWAIT, CH_EVENT_START, fsm_action_nop },
1761 {CH_STATE_STARTWAIT, CH_EVENT_FINSTAT, ch_action_setmode },
1762 {CH_STATE_STARTWAIT, CH_EVENT_TIMER, ch_action_setuperr },
1763 {CH_STATE_STARTWAIT, CH_EVENT_IO_ENODEV, ch_action_iofatal },
1764 {CH_STATE_STARTWAIT, CH_EVENT_IO_EIO, ch_action_reinit },
1765 {CH_STATE_STARTWAIT, CH_EVENT_MC_FAIL, ch_action_fail },
1767 {CH_STATE_STARTRETRY, CH_EVENT_STOP, ch_action_haltio },
1768 {CH_STATE_STARTRETRY, CH_EVENT_TIMER, ch_action_setmode },
1769 {CH_STATE_STARTRETRY, CH_EVENT_FINSTAT, fsm_action_nop },
1770 {CH_STATE_STARTRETRY, CH_EVENT_MC_FAIL, ch_action_fail },
1772 {CH_STATE_SETUPWAIT, CH_EVENT_STOP, ch_action_haltio },
1773 {CH_STATE_SETUPWAIT, CH_EVENT_START, fsm_action_nop },
1774 {CH_STATE_SETUPWAIT, CH_EVENT_FINSTAT, ch_action_firstio },
1775 {CH_STATE_SETUPWAIT, CH_EVENT_UC_RCRESET, ch_action_setuperr },
1776 {CH_STATE_SETUPWAIT, CH_EVENT_UC_RSRESET, ch_action_setuperr },
1777 {CH_STATE_SETUPWAIT, CH_EVENT_TIMER, ch_action_setmode },
1778 {CH_STATE_SETUPWAIT, CH_EVENT_IO_ENODEV, ch_action_iofatal },
1779 {CH_STATE_SETUPWAIT, CH_EVENT_IO_EIO, ch_action_reinit },
1780 {CH_STATE_SETUPWAIT, CH_EVENT_MC_FAIL, ch_action_fail },
1782 {CH_STATE_RXINIT, CH_EVENT_STOP, ch_action_haltio },
1783 {CH_STATE_RXINIT, CH_EVENT_START, fsm_action_nop },
1784 {CH_STATE_RXINIT, CH_EVENT_FINSTAT, ch_action_rxidle },
1785 {CH_STATE_RXINIT, CH_EVENT_UC_RCRESET, ch_action_rxiniterr },
1786 {CH_STATE_RXINIT, CH_EVENT_UC_RSRESET, ch_action_rxiniterr },
1787 {CH_STATE_RXINIT, CH_EVENT_TIMER, ch_action_rxiniterr },
1788 {CH_STATE_RXINIT, CH_EVENT_ATTNBUSY, ch_action_rxinitfail },
1789 {CH_STATE_RXINIT, CH_EVENT_IO_ENODEV, ch_action_iofatal },
1790 {CH_STATE_RXINIT, CH_EVENT_IO_EIO, ch_action_reinit },
1791 {CH_STATE_RXINIT, CH_EVENT_UC_ZERO, ch_action_firstio },
1792 {CH_STATE_RXINIT, CH_EVENT_MC_FAIL, ch_action_fail },
1794 {CH_STATE_RXIDLE, CH_EVENT_STOP, ch_action_haltio },
1795 {CH_STATE_RXIDLE, CH_EVENT_START, fsm_action_nop },
1796 {CH_STATE_RXIDLE, CH_EVENT_FINSTAT, ch_action_rx },
1797 {CH_STATE_RXIDLE, CH_EVENT_UC_RCRESET, ch_action_rxdisc },
1798 // {CH_STATE_RXIDLE, CH_EVENT_UC_RSRESET, ch_action_rxretry },
1799 {CH_STATE_RXIDLE, CH_EVENT_IO_ENODEV, ch_action_iofatal },
1800 {CH_STATE_RXIDLE, CH_EVENT_IO_EIO, ch_action_reinit },
1801 {CH_STATE_RXIDLE, CH_EVENT_MC_FAIL, ch_action_fail },
1802 {CH_STATE_RXIDLE, CH_EVENT_UC_ZERO, ch_action_rx },
1804 {CH_STATE_TXINIT, CH_EVENT_STOP, ch_action_haltio },
1805 {CH_STATE_TXINIT, CH_EVENT_START, fsm_action_nop },
1806 {CH_STATE_TXINIT, CH_EVENT_FINSTAT, ch_action_txidle },
1807 {CH_STATE_TXINIT, CH_EVENT_UC_RCRESET, ch_action_txiniterr },
1808 {CH_STATE_TXINIT, CH_EVENT_UC_RSRESET, ch_action_txiniterr },
1809 {CH_STATE_TXINIT, CH_EVENT_TIMER, ch_action_txiniterr },
1810 {CH_STATE_TXINIT, CH_EVENT_IO_ENODEV, ch_action_iofatal },
1811 {CH_STATE_TXINIT, CH_EVENT_IO_EIO, ch_action_reinit },
1812 {CH_STATE_TXINIT, CH_EVENT_MC_FAIL, ch_action_fail },
1814 {CH_STATE_TXIDLE, CH_EVENT_STOP, ch_action_haltio },
1815 {CH_STATE_TXIDLE, CH_EVENT_START, fsm_action_nop },
1816 {CH_STATE_TXIDLE, CH_EVENT_FINSTAT, ch_action_firstio },
1817 {CH_STATE_TXIDLE, CH_EVENT_UC_RCRESET, fsm_action_nop },
1818 {CH_STATE_TXIDLE, CH_EVENT_UC_RSRESET, fsm_action_nop },
1819 {CH_STATE_TXIDLE, CH_EVENT_IO_ENODEV, ch_action_iofatal },
1820 {CH_STATE_TXIDLE, CH_EVENT_IO_EIO, ch_action_reinit },
1821 {CH_STATE_TXIDLE, CH_EVENT_MC_FAIL, ch_action_fail },
1823 {CH_STATE_TERM, CH_EVENT_STOP, fsm_action_nop },
1824 {CH_STATE_TERM, CH_EVENT_START, ch_action_restart },
1825 {CH_STATE_TERM, CH_EVENT_FINSTAT, ch_action_stopped },
1826 {CH_STATE_TERM, CH_EVENT_UC_RCRESET, fsm_action_nop },
1827 {CH_STATE_TERM, CH_EVENT_UC_RSRESET, fsm_action_nop },
1828 {CH_STATE_TERM, CH_EVENT_MC_FAIL, ch_action_fail },
1830 {CH_STATE_DTERM, CH_EVENT_STOP, ch_action_haltio },
1831 {CH_STATE_DTERM, CH_EVENT_START, ch_action_restart },
1832 {CH_STATE_DTERM, CH_EVENT_FINSTAT, ch_action_setmode },
1833 {CH_STATE_DTERM, CH_EVENT_UC_RCRESET, fsm_action_nop },
1834 {CH_STATE_DTERM, CH_EVENT_UC_RSRESET, fsm_action_nop },
1835 {CH_STATE_DTERM, CH_EVENT_MC_FAIL, ch_action_fail },
1837 {CH_STATE_TX, CH_EVENT_STOP, ch_action_haltio },
1838 {CH_STATE_TX, CH_EVENT_START, fsm_action_nop },
1839 {CH_STATE_TX, CH_EVENT_FINSTAT, ch_action_txdone },
1840 {CH_STATE_TX, CH_EVENT_UC_RCRESET, ch_action_txretry },
1841 {CH_STATE_TX, CH_EVENT_UC_RSRESET, ch_action_txretry },
1842 {CH_STATE_TX, CH_EVENT_TIMER, ch_action_txretry },
1843 {CH_STATE_TX, CH_EVENT_IO_ENODEV, ch_action_iofatal },
1844 {CH_STATE_TX, CH_EVENT_IO_EIO, ch_action_reinit },
1845 {CH_STATE_TX, CH_EVENT_MC_FAIL, ch_action_fail },
1847 {CH_STATE_RXERR, CH_EVENT_STOP, ch_action_haltio },
1848 {CH_STATE_TXERR, CH_EVENT_STOP, ch_action_haltio },
1849 {CH_STATE_TXERR, CH_EVENT_MC_FAIL, ch_action_fail },
1850 {CH_STATE_RXERR, CH_EVENT_MC_FAIL, ch_action_fail },
1853 static const int CH_FSM_LEN = sizeof (ch_fsm) / sizeof (fsm_node);
/**
 * Functions related to setup and device detection.
 *****************************************************************************/

/**
 * Compare two channel ids numerically.
 * Skips the fixed 5 character prefix of the id ("ch-0.") and compares
 * the remaining hexadecimal device numbers.
 *
 * @param id1 First channel id.
 * @param id2 Second channel id.
 *
 * @return Nonzero if id1's device number is less than id2's.
 */
static int
less_than(char *id1, char *id2)
{
	int dev1, dev2, i;

	/* skip the common "ch-0." style prefix of both ids */
	for (i = 0; i < 5; i++) {
		id1++;
		id2++;
	}
	dev1 = simple_strtoul(id1, &id1, 16);
	dev2 = simple_strtoul(id2, &id2, 16);

	return (dev1 < dev2);
}
1875 * Add a new channel to the list of channels.
1876 * Keeps the channel list sorted.
1878 * @param cdev The ccw_device to be added.
1879 * @param type The type class of the new channel.
1881 * @return 0 on success, !0 on error.
1884 add_channel(struct ccw_device *cdev, enum channel_types type)
1886 struct channel **c = &channels;
1889 DBF_TEXT(trace, 2, __FUNCTION__);
1891 (struct channel *) kmalloc(sizeof (struct channel),
1892 GFP_KERNEL)) == NULL) {
1893 ctc_pr_warn("ctc: Out of memory in add_channel\n");
1896 memset(ch, 0, sizeof (struct channel));
1897 if ((ch->ccw = (struct ccw1 *) kmalloc(sizeof (struct ccw1) * 8,
1898 GFP_KERNEL | GFP_DMA)) == NULL) {
1900 ctc_pr_warn("ctc: Out of memory in add_channel\n");
1905 * "static" ccws are used in the following way:
1907 * ccw[0..2] (Channel program for generic I/O):
1909 * 1: read or write (depending on direction) with fixed
1910 * buffer (idal allocated once when buffer is allocated)
1912 * ccw[3..5] (Channel program for direct write of packets)
1914 * 4: write (idal allocated on every write).
1916 * ccw[6..7] (Channel program for initial channel setup):
1917 * 3: set extended mode
1920 * ch->ccw[0..5] are initialized in ch_action_start because
1921 * the channel's direction is yet unknown here.
1923 ch->ccw[6].cmd_code = CCW_CMD_SET_EXTENDED;
1924 ch->ccw[6].flags = CCW_FLAG_SLI;
1925 ch->ccw[6].count = 0;
1928 ch->ccw[7].cmd_code = CCW_CMD_NOOP;
1929 ch->ccw[7].flags = CCW_FLAG_SLI;
1930 ch->ccw[7].count = 0;
1934 snprintf(ch->id, CTC_ID_SIZE, "ch-%s", cdev->dev.bus_id);
1936 loglevel = CTC_LOGLEVEL_DEFAULT;
1937 ch->fsm = init_fsm(ch->id, ch_state_names,
1938 ch_event_names, NR_CH_STATES, NR_CH_EVENTS,
1939 ch_fsm, CH_FSM_LEN, GFP_KERNEL);
1940 if (ch->fsm == NULL) {
1941 ctc_pr_warn("ctc: Could not create FSM in add_channel\n");
1946 fsm_newstate(ch->fsm, CH_STATE_IDLE);
1947 if ((ch->irb = (struct irb *) kmalloc(sizeof (struct irb),
1948 GFP_KERNEL)) == NULL) {
1949 ctc_pr_warn("ctc: Out of memory in add_channel\n");
1955 memset(ch->irb, 0, sizeof (struct irb));
1956 while (*c && less_than((*c)->id, ch->id))
1958 if (!strncmp((*c)->id, ch->id, CTC_ID_SIZE)) {
1960 "ctc: add_channel: device %s already in list, "
1961 "using old entry\n", (*c)->id);
1968 fsm_settimer(ch->fsm, &ch->timer);
1969 skb_queue_head_init(&ch->io_queue);
1970 skb_queue_head_init(&ch->collect_queue);
1977 * Release a specific channel in the channel list.
1979 * @param ch Pointer to channel struct to be released.
1982 channel_free(struct channel *ch)
1984 ch->flags &= ~CHANNEL_FLAGS_INUSE;
1985 fsm_newstate(ch->fsm, CH_STATE_IDLE);
1989 * Remove a specific channel in the channel list.
1991 * @param ch Pointer to channel struct to be released.
1994 channel_remove(struct channel *ch)
1996 struct channel **c = &channels;
1998 DBF_TEXT(trace, 2, __FUNCTION__);
2006 fsm_deltimer(&ch->timer);
2008 clear_normalized_cda(&ch->ccw[4]);
2009 if (ch->trans_skb != NULL) {
2010 clear_normalized_cda(&ch->ccw[1]);
2011 dev_kfree_skb(ch->trans_skb);
2021 * Get a specific channel from the channel list.
2023 * @param type Type of channel we are interested in.
2024 * @param id Id of channel we are interested in.
2025 * @param direction Direction we want to use this channel for.
2027 * @return Pointer to a channel or NULL if no matching channel available.
2029 static struct channel
2031 channel_get(enum channel_types type, char *id, int direction)
2033 struct channel *ch = channels;
2035 DBF_TEXT(trace, 3, __FUNCTION__);
2037 ctc_pr_debug("ctc: %s(): searching for ch with id %s and type %d\n",
2038 __func__, id, type);
2041 while (ch && ((strncmp(ch->id, id, CTC_ID_SIZE)) || (ch->type != type))) {
2043 ctc_pr_debug("ctc: %s(): ch=0x%p (id=%s, type=%d\n",
2044 __func__, ch, ch->id, ch->type);
2049 ctc_pr_debug("ctc: %s(): ch=0x%pq (id=%s, type=%d\n",
2050 __func__, ch, ch->id, ch->type);
2053 ctc_pr_warn("ctc: %s(): channel with id %s "
2054 "and type %d not found in channel list\n",
2055 __func__, id, type);
2057 if (ch->flags & CHANNEL_FLAGS_INUSE)
2060 ch->flags |= CHANNEL_FLAGS_INUSE;
2061 ch->flags &= ~CHANNEL_FLAGS_RWMASK;
2062 ch->flags |= (direction == WRITE)
2063 ? CHANNEL_FLAGS_WRITE : CHANNEL_FLAGS_READ;
2064 fsm_newstate(ch->fsm, CH_STATE_STOPPED);
2071 * Return the channel type by name.
2073 * @param name Name of network interface.
2075 * @return Type class of channel to be used for that interface.
2077 static enum channel_types inline
2078 extract_channel_media(char *name)
2080 enum channel_types ret = channel_type_unknown;
2083 if (strncmp(name, "ctc", 3) == 0)
2084 ret = channel_type_parallel;
2085 if (strncmp(name, "escon", 5) == 0)
2086 ret = channel_type_escon;
2092 __ctc_check_irb_error(struct ccw_device *cdev, struct irb *irb)
2097 switch (PTR_ERR(irb)) {
2099 ctc_pr_warn("i/o-error on device %s\n", cdev->dev.bus_id);
2100 // CTC_DBF_TEXT(trace, 2, "ckirberr");
2101 // CTC_DBF_TEXT_(trace, 2, " rc%d", -EIO);
2104 ctc_pr_warn("timeout on device %s\n", cdev->dev.bus_id);
2105 // CTC_DBF_TEXT(trace, 2, "ckirberr");
2106 // CTC_DBF_TEXT_(trace, 2, " rc%d", -ETIMEDOUT);
2109 ctc_pr_warn("unknown error %ld on device %s\n", PTR_ERR(irb),
2111 // CTC_DBF_TEXT(trace, 2, "ckirberr");
2112 // CTC_DBF_TEXT(trace, 2, " rc???");
2114 return PTR_ERR(irb);
2120 * @param cdev The ccw_device the interrupt is for.
2121 * @param intparm interruption parameter.
2122 * @param irb interruption response block.
2125 ctc_irq_handler(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
2128 struct net_device *dev;
2129 struct ctc_priv *priv;
2131 DBF_TEXT(trace, 5, __FUNCTION__);
2132 if (__ctc_check_irb_error(cdev, irb))
2135 /* Check for unsolicited interrupts. */
2136 if (!cdev->dev.driver_data) {
2137 ctc_pr_warn("ctc: Got unsolicited irq: %s c-%02x d-%02x\n",
2138 cdev->dev.bus_id, irb->scsw.cstat,
2143 priv = ((struct ccwgroup_device *)cdev->dev.driver_data)
2146 /* Try to extract channel from driver data. */
2147 if (priv->channel[READ]->cdev == cdev)
2148 ch = priv->channel[READ];
2149 else if (priv->channel[WRITE]->cdev == cdev)
2150 ch = priv->channel[WRITE];
2152 ctc_pr_err("ctc: Can't determine channel for interrupt, "
2153 "device %s\n", cdev->dev.bus_id);
2157 dev = (struct net_device *) (ch->netdev);
2159 ctc_pr_crit("ctc: ctc_irq_handler dev=NULL bus_id=%s, ch=0x%p\n",
2160 cdev->dev.bus_id, ch);
2165 ctc_pr_debug("%s: interrupt for device: %s received c-%02x d-%02x\n",
2166 dev->name, ch->id, irb->scsw.cstat, irb->scsw.dstat);
2169 /* Copy interruption response block. */
2170 memcpy(ch->irb, irb, sizeof(struct irb));
2172 /* Check for good subchannel return code, otherwise error message */
2173 if (ch->irb->scsw.cstat) {
2174 fsm_event(ch->fsm, CH_EVENT_SC_UNKNOWN, ch);
2175 ctc_pr_warn("%s: subchannel check for device: %s - %02x %02x\n",
2176 dev->name, ch->id, ch->irb->scsw.cstat,
2177 ch->irb->scsw.dstat);
2181 /* Check the reason-code of a unit check */
2182 if (ch->irb->scsw.dstat & DEV_STAT_UNIT_CHECK) {
2183 ccw_unit_check(ch, ch->irb->ecw[0]);
2186 if (ch->irb->scsw.dstat & DEV_STAT_BUSY) {
2187 if (ch->irb->scsw.dstat & DEV_STAT_ATTENTION)
2188 fsm_event(ch->fsm, CH_EVENT_ATTNBUSY, ch);
2190 fsm_event(ch->fsm, CH_EVENT_BUSY, ch);
2193 if (ch->irb->scsw.dstat & DEV_STAT_ATTENTION) {
2194 fsm_event(ch->fsm, CH_EVENT_ATTN, ch);
2197 if ((ch->irb->scsw.stctl & SCSW_STCTL_SEC_STATUS) ||
2198 (ch->irb->scsw.stctl == SCSW_STCTL_STATUS_PEND) ||
2199 (ch->irb->scsw.stctl ==
2200 (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))
2201 fsm_event(ch->fsm, CH_EVENT_FINSTAT, ch);
2203 fsm_event(ch->fsm, CH_EVENT_IRQ, ch);
2208 * Actions for interface - statemachine.
2209 *****************************************************************************/
2212 * Startup channels by sending CH_EVENT_START to each channel.
2214 * @param fi An instance of an interface statemachine.
2215 * @param event The event, just happened.
2216 * @param arg Generic pointer, casted from struct net_device * upon call.
2219 dev_action_start(fsm_instance * fi, int event, void *arg)
2221 struct net_device *dev = (struct net_device *) arg;
2222 struct ctc_priv *privptr = dev->priv;
2225 DBF_TEXT(setup, 3, __FUNCTION__);
2226 fsm_deltimer(&privptr->restart_timer);
2227 fsm_newstate(fi, DEV_STATE_STARTWAIT_RXTX);
2228 for (direction = READ; direction <= WRITE; direction++) {
2229 struct channel *ch = privptr->channel[direction];
2230 fsm_event(ch->fsm, CH_EVENT_START, ch);
2235 * Shutdown channels by sending CH_EVENT_STOP to each channel.
2237 * @param fi An instance of an interface statemachine.
2238 * @param event The event, just happened.
2239 * @param arg Generic pointer, casted from struct net_device * upon call.
2242 dev_action_stop(fsm_instance * fi, int event, void *arg)
2244 struct net_device *dev = (struct net_device *) arg;
2245 struct ctc_priv *privptr = dev->priv;
2248 DBF_TEXT(trace, 3, __FUNCTION__);
2249 fsm_newstate(fi, DEV_STATE_STOPWAIT_RXTX);
2250 for (direction = READ; direction <= WRITE; direction++) {
2251 struct channel *ch = privptr->channel[direction];
2252 fsm_event(ch->fsm, CH_EVENT_STOP, ch);
2256 dev_action_restart(fsm_instance *fi, int event, void *arg)
2258 struct net_device *dev = (struct net_device *)arg;
2259 struct ctc_priv *privptr = dev->priv;
2261 DBF_TEXT(trace, 3, __FUNCTION__);
2262 ctc_pr_debug("%s: Restarting\n", dev->name);
2263 dev_action_stop(fi, event, arg);
2264 fsm_event(privptr->fsm, DEV_EVENT_STOP, dev);
2265 fsm_addtimer(&privptr->restart_timer, CTC_TIMEOUT_5SEC,
2266 DEV_EVENT_START, dev);
2270 * Called from channel statemachine
2271 * when a channel is up and running.
2273 * @param fi An instance of an interface statemachine.
2274 * @param event The event, just happened.
2275 * @param arg Generic pointer, casted from struct net_device * upon call.
2278 dev_action_chup(fsm_instance * fi, int event, void *arg)
2280 struct net_device *dev = (struct net_device *) arg;
2281 struct ctc_priv *privptr = dev->priv;
2283 DBF_TEXT(trace, 3, __FUNCTION__);
2284 switch (fsm_getstate(fi)) {
2285 case DEV_STATE_STARTWAIT_RXTX:
2286 if (event == DEV_EVENT_RXUP)
2287 fsm_newstate(fi, DEV_STATE_STARTWAIT_TX);
2289 fsm_newstate(fi, DEV_STATE_STARTWAIT_RX);
2291 case DEV_STATE_STARTWAIT_RX:
2292 if (event == DEV_EVENT_RXUP) {
2293 fsm_newstate(fi, DEV_STATE_RUNNING);
2294 ctc_pr_info("%s: connected with remote side\n",
2296 if (privptr->protocol == CTC_PROTO_LINUX_TTY)
2297 ctc_tty_setcarrier(dev, 1);
2298 ctc_clear_busy(dev);
2301 case DEV_STATE_STARTWAIT_TX:
2302 if (event == DEV_EVENT_TXUP) {
2303 fsm_newstate(fi, DEV_STATE_RUNNING);
2304 ctc_pr_info("%s: connected with remote side\n",
2306 if (privptr->protocol == CTC_PROTO_LINUX_TTY)
2307 ctc_tty_setcarrier(dev, 1);
2308 ctc_clear_busy(dev);
2311 case DEV_STATE_STOPWAIT_TX:
2312 if (event == DEV_EVENT_RXUP)
2313 fsm_newstate(fi, DEV_STATE_STOPWAIT_RXTX);
2315 case DEV_STATE_STOPWAIT_RX:
2316 if (event == DEV_EVENT_TXUP)
2317 fsm_newstate(fi, DEV_STATE_STOPWAIT_RXTX);
2323 * Called from channel statemachine
2324 * when a channel has been shutdown.
2326 * @param fi An instance of an interface statemachine.
2327 * @param event The event, just happened.
2328 * @param arg Generic pointer, casted from struct net_device * upon call.
2331 dev_action_chdown(fsm_instance * fi, int event, void *arg)
2333 struct net_device *dev = (struct net_device *) arg;
2334 struct ctc_priv *privptr = dev->priv;
2336 DBF_TEXT(trace, 3, __FUNCTION__);
2337 switch (fsm_getstate(fi)) {
2338 case DEV_STATE_RUNNING:
2339 if (privptr->protocol == CTC_PROTO_LINUX_TTY)
2340 ctc_tty_setcarrier(dev, 0);
2341 if (event == DEV_EVENT_TXDOWN)
2342 fsm_newstate(fi, DEV_STATE_STARTWAIT_TX);
2344 fsm_newstate(fi, DEV_STATE_STARTWAIT_RX);
2346 case DEV_STATE_STARTWAIT_RX:
2347 if (event == DEV_EVENT_TXDOWN)
2348 fsm_newstate(fi, DEV_STATE_STARTWAIT_RXTX);
2350 case DEV_STATE_STARTWAIT_TX:
2351 if (event == DEV_EVENT_RXDOWN)
2352 fsm_newstate(fi, DEV_STATE_STARTWAIT_RXTX);
2354 case DEV_STATE_STOPWAIT_RXTX:
2355 if (event == DEV_EVENT_TXDOWN)
2356 fsm_newstate(fi, DEV_STATE_STOPWAIT_RX);
2358 fsm_newstate(fi, DEV_STATE_STOPWAIT_TX);
2360 case DEV_STATE_STOPWAIT_RX:
2361 if (event == DEV_EVENT_RXDOWN)
2362 fsm_newstate(fi, DEV_STATE_STOPPED);
2364 case DEV_STATE_STOPWAIT_TX:
2365 if (event == DEV_EVENT_TXDOWN)
2366 fsm_newstate(fi, DEV_STATE_STOPPED);
2371 static const fsm_node dev_fsm[] = {
2372 {DEV_STATE_STOPPED, DEV_EVENT_START, dev_action_start},
2374 {DEV_STATE_STOPWAIT_RXTX, DEV_EVENT_START, dev_action_start },
2375 {DEV_STATE_STOPWAIT_RXTX, DEV_EVENT_RXDOWN, dev_action_chdown },
2376 {DEV_STATE_STOPWAIT_RXTX, DEV_EVENT_TXDOWN, dev_action_chdown },
2377 {DEV_STATE_STOPWAIT_RXTX, DEV_EVENT_RESTART, dev_action_restart },
2379 {DEV_STATE_STOPWAIT_RX, DEV_EVENT_START, dev_action_start },
2380 {DEV_STATE_STOPWAIT_RX, DEV_EVENT_RXUP, dev_action_chup },
2381 {DEV_STATE_STOPWAIT_RX, DEV_EVENT_TXUP, dev_action_chup },
2382 {DEV_STATE_STOPWAIT_RX, DEV_EVENT_RXDOWN, dev_action_chdown },
2383 {DEV_STATE_STOPWAIT_RX, DEV_EVENT_RESTART, dev_action_restart },
2385 {DEV_STATE_STOPWAIT_TX, DEV_EVENT_START, dev_action_start },
2386 {DEV_STATE_STOPWAIT_TX, DEV_EVENT_RXUP, dev_action_chup },
2387 {DEV_STATE_STOPWAIT_TX, DEV_EVENT_TXUP, dev_action_chup },
2388 {DEV_STATE_STOPWAIT_TX, DEV_EVENT_TXDOWN, dev_action_chdown },
2389 {DEV_STATE_STOPWAIT_TX, DEV_EVENT_RESTART, dev_action_restart },
2391 {DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_STOP, dev_action_stop },
2392 {DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_RXUP, dev_action_chup },
2393 {DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_TXUP, dev_action_chup },
2394 {DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_RXDOWN, dev_action_chdown },
2395 {DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_TXDOWN, dev_action_chdown },
2396 {DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_RESTART, dev_action_restart },
2398 {DEV_STATE_STARTWAIT_TX, DEV_EVENT_STOP, dev_action_stop },
2399 {DEV_STATE_STARTWAIT_TX, DEV_EVENT_RXUP, dev_action_chup },
2400 {DEV_STATE_STARTWAIT_TX, DEV_EVENT_TXUP, dev_action_chup },
2401 {DEV_STATE_STARTWAIT_TX, DEV_EVENT_RXDOWN, dev_action_chdown },
2402 {DEV_STATE_STARTWAIT_TX, DEV_EVENT_RESTART, dev_action_restart },
2404 {DEV_STATE_STARTWAIT_RX, DEV_EVENT_STOP, dev_action_stop },
2405 {DEV_STATE_STARTWAIT_RX, DEV_EVENT_RXUP, dev_action_chup },
2406 {DEV_STATE_STARTWAIT_RX, DEV_EVENT_TXUP, dev_action_chup },
2407 {DEV_STATE_STARTWAIT_RX, DEV_EVENT_TXDOWN, dev_action_chdown },
2408 {DEV_STATE_STARTWAIT_RX, DEV_EVENT_RESTART, dev_action_restart },
2410 {DEV_STATE_RUNNING, DEV_EVENT_STOP, dev_action_stop },
2411 {DEV_STATE_RUNNING, DEV_EVENT_RXDOWN, dev_action_chdown },
2412 {DEV_STATE_RUNNING, DEV_EVENT_TXDOWN, dev_action_chdown },
2413 {DEV_STATE_RUNNING, DEV_EVENT_TXUP, fsm_action_nop },
2414 {DEV_STATE_RUNNING, DEV_EVENT_RXUP, fsm_action_nop },
2415 {DEV_STATE_RUNNING, DEV_EVENT_RESTART, dev_action_restart },
2418 static const int DEV_FSM_LEN = sizeof (dev_fsm) / sizeof (fsm_node);
2421 * Transmit a packet.
2422 * This is a helper function for ctc_tx().
2424 * @param ch Channel to be used for sending.
2425 * @param skb Pointer to struct sk_buff of packet to send.
2426 * The linklevel header has already been set up
2429 * @return 0 on success, -ERRNO on failure. (Never fails.)
2432 transmit_skb(struct channel *ch, struct sk_buff *skb)
2434 unsigned long saveflags;
2435 struct ll_header header;
2438 DBF_TEXT(trace, 5, __FUNCTION__);
2439 if (fsm_getstate(ch->fsm) != CH_STATE_TXIDLE) {
2440 int l = skb->len + LL_HEADER_LENGTH;
2442 spin_lock_irqsave(&ch->collect_lock, saveflags);
2443 if (ch->collect_len + l > ch->max_bufsize - 2)
2446 atomic_inc(&skb->users);
2448 header.type = skb->protocol;
2450 memcpy(skb_push(skb, LL_HEADER_LENGTH), &header,
/*
 * NOTE(review): fragment — transmit_skb() begins above this chunk and
 * several listing lines are missing in between; the comments below only
 * describe what the visible lines demonstrably do.
 */
2452 skb_queue_tail(&ch->collect_queue, skb);
2453 ch->collect_len += l;
2455 spin_unlock_irqrestore(&ch->collect_lock, saveflags);
2459 struct sk_buff *nskb;
2463 * Protect skb against being free'd by upper
2466 atomic_inc(&skb->users);
2467 ch->prof.txlen += skb->len;
/* Prepend the LL header (length + protocol) and a 2-byte block length. */
2468 header.length = skb->len + LL_HEADER_LENGTH;
2469 header.type = skb->protocol;
2471 memcpy(skb_push(skb, LL_HEADER_LENGTH), &header,
2473 block_len = skb->len + 2;
2474 *((__u16 *) skb_push(skb, 2)) = block_len;
2477 * IDAL support in CTC is broken, so we have to
2478 * care about skb's above 2G ourselves.
/* hi != 0 when the buffer end lies above the 2G line -> copy into a
 * GFP_DMA skb that is guaranteed to be addressable. */
2480 hi = ((unsigned long) skb->tail + LL_HEADER_LENGTH) >> 31;
2482 nskb = alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
/* Allocation failure path: drop our extra reference and strip the
 * headers we pushed so the caller gets the skb back unmodified. */
2484 atomic_dec(&skb->users);
2485 skb_pull(skb, LL_HEADER_LENGTH + 2);
2488 memcpy(skb_put(nskb, skb->len),
2489 skb->data, skb->len);
/* Transfer the in-flight reference from the original skb to the copy. */
2490 atomic_inc(&nskb->users);
2491 atomic_dec(&skb->users);
2492 dev_kfree_skb_irq(skb);
2497 ch->ccw[4].count = block_len;
2498 if (set_normalized_cda(&ch->ccw[4], skb->data)) {
2500 * idal allocation failed, try via copying to
2501 * trans_skb. trans_skb usually has a pre-allocated
2504 if (ctc_checkalloc_buffer(ch, 1)) {
2506 * Remove our header. It gets added
2507 * again on retransmit.
2509 atomic_dec(&skb->users);
2510 skb_pull(skb, LL_HEADER_LENGTH + 2);
/* Reset trans_skb to empty, then copy the payload into it. */
2514 ch->trans_skb->tail = ch->trans_skb->data;
2515 ch->trans_skb->len = 0;
2516 ch->ccw[1].count = skb->len;
2517 memcpy(skb_put(ch->trans_skb, skb->len), skb->data,
2519 atomic_dec(&skb->users);
2520 dev_kfree_skb_irq(skb);
2523 skb_queue_tail(&ch->io_queue, skb);
/* Kick off the channel program; the 5s timer guards a hung CTC. */
2527 fsm_newstate(ch->fsm, CH_STATE_TX);
2528 fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch);
2529 spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
2530 ch->prof.send_stamp = xtime;
2531 rc = ccw_device_start(ch->cdev, &ch->ccw[ccw_idx],
2532 (unsigned long) ch, 0xff, 0);
2533 spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
2535 ch->prof.doios_single++;
/* ccw_device_start() failed: stop the timer, report the error, undo
 * queueing and header manipulation so the skb can be retransmitted. */
2537 fsm_deltimer(&ch->timer);
2538 ccw_check_return_code(ch, rc, "single skb TX");
2540 skb_dequeue_tail(&ch->io_queue);
2542 * Remove our header. It gets added
2543 * again on retransmit.
2545 skb_pull(skb, LL_HEADER_LENGTH + 2);
2548 struct net_device *dev = ch->netdev;
2549 struct ctc_priv *privptr = dev->priv;
/* Success path: account the transmitted payload without the LL header. */
2550 privptr->stats.tx_packets++;
2551 privptr->stats.tx_bytes +=
2552 skb->len - LL_HEADER_LENGTH;
2561 * Interface API for upper network layers
2562 *****************************************************************************/
2565 * Open an interface.
2566 * Called from generic network layer when ifconfig up is run.
2568 * @param dev Pointer to interface struct.
2570 * @return 0 on success, -ERRNO on failure. (Never fails.)
2573 ctc_open(struct net_device * dev)
/* Hand the start request to the device FSM; channel setup happens there. */
2575 DBF_TEXT(trace, 5, __FUNCTION__);
2576 fsm_event(((struct ctc_priv *) dev->priv)->fsm, DEV_EVENT_START, dev);
2581 * Close an interface.
2582 * Called from generic network layer when ifconfig down is run.
2584 * @param dev Pointer to interface struct.
2586 * @return 0 on success, -ERRNO on failure. (Never fails.)
2589 ctc_close(struct net_device * dev)
/* Hand the stop request to the device FSM; channel teardown happens there. */
2591 DBF_TEXT(trace, 5, __FUNCTION__);
2592 fsm_event(((struct ctc_priv *) dev->priv)->fsm, DEV_EVENT_STOP, dev);
2597 * Start transmission of a packet.
2598 * Called from generic network device layer.
2600 * @param skb Pointer to buffer containing the packet.
2601 * @param dev Pointer to interface struct.
2603 * @return 0 if packet consumed, !0 if packet rejected.
2604 * Note: If we return !0, then the packet is free'd by
2605 * the generic network layer.
2608 ctc_tx(struct sk_buff *skb, struct net_device * dev)
2611 struct ctc_priv *privptr = (struct ctc_priv *) dev->priv;
2613 DBF_TEXT(trace, 5, __FUNCTION__);
2615 * Some sanity checks ...
/* Reject a NULL skb outright; only the drop counter is bumped. */
2618 ctc_pr_warn("%s: NULL sk_buff passed\n", dev->name);
2619 privptr->stats.tx_dropped++;
/* transmit_skb() pushes LL_HEADER_LENGTH + 2 bytes in front of the
 * payload, so insufficient headroom means the packet cannot be sent. */
2622 if (skb_headroom(skb) < (LL_HEADER_LENGTH + 2)) {
2623 ctc_pr_warn("%s: Got sk_buff with head room < %ld bytes\n",
2624 dev->name, LL_HEADER_LENGTH + 2);
2626 privptr->stats.tx_dropped++;
2631 * If channels are not running, try to restart them
2632 * and throw away packet.
2634 if (fsm_getstate(privptr->fsm) != DEV_STATE_RUNNING) {
2635 fsm_event(privptr->fsm, DEV_EVENT_START, dev);
2636 if (privptr->protocol == CTC_PROTO_LINUX_TTY)
/* Account the dropped packet as both an error and a carrier error. */
2639 privptr->stats.tx_dropped++;
2640 privptr->stats.tx_errors++;
2641 privptr->stats.tx_carrier_errors++;
/* Only one transmit may be in progress per device. */
2645 if (ctc_test_and_set_busy(dev))
2648 dev->trans_start = jiffies;
2649 if (transmit_skb(privptr->channel[WRITE], skb) != 0)
2651 ctc_clear_busy(dev);
2656 * Sets MTU of an interface.
2658 * @param dev Pointer to interface struct.
2659 * @param new_mtu The new MTU to use for this interface.
2661 * @return 0 on success, -EINVAL if MTU is out of valid range.
2662 * (valid range is 576 .. 65527). If VM is on the
2663 * remote side, maximum MTU is 32760, however this is
2664 * <em>not</em> checked here.
2667 ctc_change_mtu(struct net_device * dev, int new_mtu)
2669 struct ctc_priv *privptr = (struct ctc_priv *) dev->priv;
2671 DBF_TEXT(trace, 3, __FUNCTION__);
/* MTU must fit the RX channel buffer after subtracting the LL header
 * and the 2-byte block-length field. */
2672 if ((new_mtu < 576) || (new_mtu > 65527) ||
2673 (new_mtu > (privptr->channel[READ]->max_bufsize -
2674 LL_HEADER_LENGTH - 2)))
2677 dev->hard_header_len = LL_HEADER_LENGTH + 2;
2682 * Returns interface statistics of a device.
2684 * @param dev Pointer to interface struct.
2686 * @return Pointer to stats struct of this interface.
2688 static struct net_device_stats *
2689 ctc_stats(struct net_device * dev)
2691 return &((struct ctc_priv *) dev->priv)->stats;
/* sysfs read handler: print the current RX buffer size. */
2698 buffer_show(struct device *dev, char *buf)
2700 struct ctc_priv *priv;
2702 priv = dev->driver_data;
2705 return sprintf(buf, "%d\n",
2706 priv->channel[READ]->max_bufsize);
/* sysfs write handler: set the channel buffer size for both channels. */
2710 buffer_write(struct device *dev, const char *buf, size_t count)
2712 struct ctc_priv *priv;
2713 struct net_device *ndev;
2716 DBF_TEXT(trace, 3, __FUNCTION__);
2717 priv = dev->driver_data;
2720 ndev = priv->channel[READ]->netdev;
/* NOTE(review): sscanf() return value is not checked, so a non-numeric
 * write leaves bs1 with whatever value it held — confirm intended. */
2723 sscanf(buf, "%u", &bs1);
2725 if (bs1 > CTC_BUFSIZE_LIMIT)
/* While the interface is up, the buffer must still hold the current MTU
 * plus LL header and block-length field. */
2727 if ((ndev->flags & IFF_RUNNING) &&
2728 (bs1 < (ndev->mtu + LL_HEADER_LENGTH + 2)))
2730 if (bs1 < (576 + LL_HEADER_LENGTH + 2))
2733 priv->channel[READ]->max_bufsize =
2734 priv->channel[WRITE]->max_bufsize = bs1;
/* If the interface is down, shrink the MTU to match the new buffer. */
2735 if (!(ndev->flags & IFF_RUNNING))
2736 ndev->mtu = bs1 - LL_HEADER_LENGTH - 2;
/* Flag both channels so buffers are reallocated on next (re)start. */
2737 priv->channel[READ]->flags |= CHANNEL_FLAGS_BUFSIZE_CHANGED;
2738 priv->channel[WRITE]->flags |= CHANNEL_FLAGS_BUFSIZE_CHANGED;
/* sysfs read handler: print the driver-global log level. */
2745 loglevel_show(struct device *dev, char *buf)
2747 struct ctc_priv *priv;
2749 priv = dev->driver_data;
2752 return sprintf(buf, "%d\n", loglevel);
/* sysfs write handler: set the driver-global log level (0..CTC_LOGLEVEL_MAX). */
2756 loglevel_write(struct device *dev, const char *buf, size_t count)
2758 struct ctc_priv *priv;
2761 DBF_TEXT(trace, 5, __FUNCTION__);
2762 priv = dev->driver_data;
/* NOTE(review): sscanf() return value unchecked — ll1 may be stale on
 * malformed input; confirm intended. */
2765 sscanf(buf, "%i", &ll1);
2767 if ((ll1 > CTC_LOGLEVEL_MAX) || (ll1 < 0))
/* Format the TX-side profiling counters into a temporary buffer and emit
 * them via ctc_pr_debug(). */
2774 ctc_print_statistics(struct ctc_priv *priv)
2779 DBF_TEXT(trace, 4, __FUNCTION__);
/* NOTE(review): the (char *) cast on kmalloc() is unnecessary in C. */
2782 sbuf = (char *)kmalloc(2048, GFP_KERNEL);
2787 p += sprintf(p, " Device FSM state: %s\n",
2788 fsm_getstate_str(priv->fsm));
2789 p += sprintf(p, " RX channel FSM state: %s\n",
2790 fsm_getstate_str(priv->channel[READ]->fsm));
2791 p += sprintf(p, " TX channel FSM state: %s\n",
2792 fsm_getstate_str(priv->channel[WRITE]->fsm));
2793 p += sprintf(p, " Max. TX buffer used: %ld\n",
2794 priv->channel[WRITE]->prof.maxmulti);
2795 p += sprintf(p, " Max. chained SKBs: %ld\n",
2796 priv->channel[WRITE]->prof.maxcqueue);
2797 p += sprintf(p, " TX single write ops: %ld\n",
2798 priv->channel[WRITE]->prof.doios_single);
2799 p += sprintf(p, " TX multi write ops: %ld\n",
2800 priv->channel[WRITE]->prof.doios_multi);
2801 p += sprintf(p, " Netto bytes written: %ld\n",
2802 priv->channel[WRITE]->prof.txlen);
2803 p += sprintf(p, " Max. TX IO-time: %ld\n",
2804 priv->channel[WRITE]->prof.tx_time);
2806 ctc_pr_debug("Statistics for %s:\n%s",
2807 priv->channel[WRITE]->netdev->name, sbuf);
/* sysfs read handler: dump statistics to the log; the attribute value
 * itself is always "0". */
2813 stats_show(struct device *dev, char *buf)
2815 struct ctc_priv *priv = dev->driver_data;
2818 ctc_print_statistics(priv);
2819 return sprintf(buf, "0\n");
/* sysfs write handler: any write resets the TX profiling counters. */
2823 stats_write(struct device *dev, const char *buf, size_t count)
2825 struct ctc_priv *priv = dev->driver_data;
2828 /* Reset statistics */
2829 memset(&priv->channel[WRITE]->prof, 0,
2830 sizeof(priv->channel[WRITE]->prof));
/* Per-device sysfs attributes: buffer size, log level, statistics. */
2834 static DEVICE_ATTR(buffer, 0644, buffer_show, buffer_write);
2835 static DEVICE_ATTR(loglevel, 0644, loglevel_show, loglevel_write);
2836 static DEVICE_ATTR(stats, 0644, stats_show, stats_write);
/* Create the loglevel/stats sysfs files on the group device.
 * NOTE(review): device_create_file() return values are ignored here —
 * a failure leaves the attribute silently missing. */
2839 ctc_add_attributes(struct device *dev)
2841 // device_create_file(dev, &dev_attr_buffer);
2842 device_create_file(dev, &dev_attr_loglevel);
2843 device_create_file(dev, &dev_attr_stats);
/* Remove the sysfs files in reverse order of creation. */
2848 ctc_remove_attributes(struct device *dev)
2850 device_remove_file(dev, &dev_attr_stats);
2851 device_remove_file(dev, &dev_attr_loglevel);
2852 // device_remove_file(dev, &dev_attr_buffer);
/* Unregister a netdevice, dispatching on protocol: TTY-mode devices go
 * through the ctc_tty layer, all others through the generic netdev API. */
2857 ctc_netdev_unregister(struct net_device * dev)
2859 struct ctc_priv *privptr;
2863 privptr = (struct ctc_priv *) dev->priv;
2864 if (privptr->protocol != CTC_PROTO_LINUX_TTY)
2865 unregister_netdev(dev);
2867 ctc_tty_unregister_netdev(dev);
/* Register a netdevice; counterpart of ctc_netdev_unregister() with the
 * same protocol dispatch. */
2871 ctc_netdev_register(struct net_device * dev)
2873 struct ctc_priv *privptr = (struct ctc_priv *) dev->priv;
2874 if (privptr->protocol != CTC_PROTO_LINUX_TTY)
2875 return register_netdev(dev);
2877 return ctc_tty_register_netdev(dev);
/* Free the device FSM (and, per the free_dev flag, presumably the device
 * itself in lines not visible here — TODO confirm against full source). */
2881 ctc_free_netdevice(struct net_device * dev, int free_dev)
2883 struct ctc_priv *privptr;
2886 privptr = dev->priv;
2889 kfree_fsm(privptr->fsm);
2899 * Initialize everything of the net device except the name and the
2902 static struct net_device *
2903 ctc_init_netdevice(struct net_device * dev, int alloc_device,
2904 struct ctc_priv *privptr)
2909 DBF_TEXT(setup, 3, __FUNCTION__);
/* Allocate and zero the net_device ourselves when requested. */
2911 dev = kmalloc(sizeof (struct net_device), GFP_KERNEL);
2914 memset(dev, 0, sizeof (struct net_device));
2917 dev->priv = privptr;
/* Build the per-device state machine and park it in STOPPED. */
2918 privptr->fsm = init_fsm("ctcdev", dev_state_names,
2919 dev_event_names, NR_DEV_STATES, NR_DEV_EVENTS,
2920 dev_fsm, DEV_FSM_LEN, GFP_KERNEL);
2921 if (privptr->fsm == NULL) {
2926 fsm_newstate(privptr->fsm, DEV_STATE_STOPPED);
2927 fsm_settimer(privptr->fsm, &privptr->restart_timer);
/* Default MTU leaves room for the LL header + 2-byte block length. */
2928 dev->mtu = CTC_BUFSIZE_DEFAULT - LL_HEADER_LENGTH - 2;
2929 dev->hard_start_xmit = ctc_tx;
2930 dev->open = ctc_open;
2931 dev->stop = ctc_close;
2932 dev->get_stats = ctc_stats;
2933 dev->change_mtu = ctc_change_mtu;
2934 dev->hard_header_len = LL_HEADER_LENGTH + 2;
2936 dev->type = ARPHRD_SLIP;
2937 dev->tx_queue_len = 100;
/* Point-to-point link: no ARP needed. */
2938 dev->flags = IFF_POINTOPOINT | IFF_NOARP;
2939 SET_MODULE_OWNER(dev);
/* sysfs read handler: print the configured CTC protocol number. */
2944 ctc_proto_show(struct device *dev, char *buf)
2946 struct ctc_priv *priv;
2948 priv = dev->driver_data;
2952 return sprintf(buf, "%d\n", priv->protocol);
/* sysfs write handler: set the CTC protocol (0..CTC_PROTO_MAX). */
2956 ctc_proto_store(struct device *dev, const char *buf, size_t count)
2958 struct ctc_priv *priv;
2961 DBF_TEXT(trace, 3, __FUNCTION__);
2962 pr_debug("%s() called\n", __FUNCTION__);
2964 priv = dev->driver_data;
/* NOTE(review): value is scanned with %u but then checked for < 0 —
 * if 'value' is declared unsigned (declaration not visible here) that
 * check is dead; confirm against the full source. */
2967 sscanf(buf, "%u", &value);
2968 if ((value < 0) || (value > CTC_PROTO_MAX))
2970 priv->protocol = value;
2975 static DEVICE_ATTR(protocol, 0644, ctc_proto_show, ctc_proto_store);
/* sysfs read handler: print the channel type name derived from the
 * first ccw device's driver_info. */
2978 ctc_type_show(struct device *dev, char *buf)
2980 struct ccwgroup_device *cgdev;
2982 cgdev = to_ccwgroupdev(dev);
2986 return sprintf(buf, "%s\n", cu3088_type[cgdev->cdev[0]->id.driver_info]);
2989 static DEVICE_ATTR(type, 0444, ctc_type_show, NULL);
/* Attributes created at probe time as a group (protocol, type, buffer). */
2991 static struct attribute *ctc_attr[] = {
2992 &dev_attr_protocol.attr,
2993 &dev_attr_type.attr,
2994 &dev_attr_buffer.attr,
2998 static struct attribute_group ctc_attr_group = {
/* Create the probe-time sysfs attribute group; returns sysfs' result. */
3003 ctc_add_files(struct device *dev)
3005 pr_debug("%s() called\n", __FUNCTION__);
3007 return sysfs_create_group(&dev->kobj, &ctc_attr_group);
/* Remove the probe-time sysfs attribute group. */
3011 ctc_remove_files(struct device *dev)
3013 pr_debug("%s() called\n", __FUNCTION__);
3015 sysfs_remove_group(&dev->kobj, &ctc_attr_group);
3019 * Add ctc specific attributes.
3020 * Add ctc private data.
3022 * @param cgdev pointer to ccwgroup_device just added
3024 * @returns 0 on success, !0 on failure.
3028 ctc_probe_device(struct ccwgroup_device *cgdev)
3030 struct ctc_priv *priv;
3033 pr_debug("%s() called\n", __FUNCTION__);
3034 DBF_TEXT(trace, 3, __FUNCTION__);
/* Pin the group device for the lifetime of the private data. */
3036 if (!get_device(&cgdev->dev))
3039 priv = kmalloc(sizeof (struct ctc_priv), GFP_KERNEL);
3041 ctc_pr_err("%s: Out of memory\n", __func__);
3042 put_device(&cgdev->dev);
3046 memset(priv, 0, sizeof (struct ctc_priv));
3047 rc = ctc_add_files(&cgdev->dev);
/* NOTE(review): on the ctc_add_files() failure path only put_device()
 * is visible — whether priv is freed happens in lines not shown here;
 * verify against the full source for a potential leak. */
3050 put_device(&cgdev->dev);
/* Both channel ends share the same interrupt handler. */
3054 cgdev->cdev[0]->handler = ctc_irq_handler;
3055 cgdev->cdev[1]->handler = ctc_irq_handler;
3056 cgdev->dev.driver_data = priv;
3063 * Setup an interface.
3065 * @param cgdev Device to be setup.
3067 * @returns 0 on success, !0 on failure.
3070 ctc_new_device(struct ccwgroup_device *cgdev)
3072 char read_id[CTC_ID_SIZE];
3073 char write_id[CTC_ID_SIZE];
3075 enum channel_types type;
3076 struct ctc_priv *privptr;
3077 struct net_device *dev;
3080 pr_debug("%s() called\n", __FUNCTION__);
3081 DBF_TEXT(setup, 3, __FUNCTION__);
3083 privptr = cgdev->dev.driver_data;
3087 type = get_channel_type(&cgdev->cdev[0]->id);
/* Channel ids are derived from the ccw devices' bus ids. */
3089 snprintf(read_id, CTC_ID_SIZE, "ch-%s", cgdev->cdev[0]->dev.bus_id);
3090 snprintf(write_id, CTC_ID_SIZE, "ch-%s", cgdev->cdev[1]->dev.bus_id);
3092 if (add_channel(cgdev->cdev[0], type))
3094 if (add_channel(cgdev->cdev[1], type))
/* Bring both ccw devices online before wiring up the netdevice. */
3097 ret = ccw_device_set_online(cgdev->cdev[0]);
3100 "ccw_device_set_online (cdev[0]) failed with ret = %d\n", ret);
3103 ret = ccw_device_set_online(cgdev->cdev[1]);
3106 "ccw_device_set_online (cdev[1]) failed with ret = %d\n", ret);
3109 dev = ctc_init_netdevice(NULL, 1, privptr);
3112 ctc_pr_warn("ctc_init_netdevice failed\n");
/* TTY-mode interfaces get the "ctctty%d" name template. */
3116 if (privptr->protocol == CTC_PROTO_LINUX_TTY)
3117 strlcpy(dev->name, "ctctty%d", IFNAMSIZ);
3119 strlcpy(dev->name, "ctc%d", IFNAMSIZ);
/* Claim one channel per direction and point it back at the netdevice. */
3121 for (direction = READ; direction <= WRITE; direction++) {
3122 privptr->channel[direction] =
3123 channel_get(type, direction == READ ? read_id : write_id,
3125 if (privptr->channel[direction] == NULL) {
/* Undo the READ channel if grabbing the WRITE channel failed. */
3126 if (direction == WRITE)
3127 channel_free(privptr->channel[READ]);
3129 ctc_free_netdevice(dev, 1);
3132 privptr->channel[direction]->netdev = dev;
3133 privptr->channel[direction]->protocol = privptr->protocol;
3134 privptr->channel[direction]->max_bufsize = CTC_BUFSIZE_DEFAULT;
3137 SET_NETDEV_DEV(dev, &cgdev->dev);
3139 if (ctc_netdev_register(dev) != 0) {
3140 ctc_free_netdevice(dev, 1);
3144 ctc_add_attributes(&cgdev->dev);
/* The FSM inherits the interface name for readable debug output. */
3146 strlcpy(privptr->fsm->name, dev->name, sizeof (privptr->fsm->name));
3150 ctc_pr_info("%s: read: %s, write: %s, proto: %d\n",
3151 dev->name, privptr->channel[READ]->id,
3152 privptr->channel[WRITE]->id, privptr->protocol);
/* Error path: take both ccw devices offline again. */
3156 ccw_device_set_offline(cgdev->cdev[1]);
3157 ccw_device_set_offline(cgdev->cdev[0]);
3163 * Shutdown an interface.
3165 * @param cgdev Device to be shut down.
3167 * @returns 0 on success, !0 on failure.
3170 ctc_shutdown_device(struct ccwgroup_device *cgdev)
3172 struct ctc_priv *priv;
3173 struct net_device *ndev;
3175 DBF_TEXT(trace, 3, __FUNCTION__);
3176 pr_debug("%s() called\n", __FUNCTION__);
3178 priv = cgdev->dev.driver_data;
3183 if (priv->channel[READ]) {
3184 ndev = priv->channel[READ]->netdev;
3186 /* Close the device */
3188 ndev->flags &=~IFF_RUNNING;
3190 ctc_remove_attributes(&cgdev->dev);
3192 channel_free(priv->channel[READ]);
3194 if (priv->channel[WRITE])
3195 channel_free(priv->channel[WRITE]);
/* Unregister and free the netdevice before tearing down the FSM. */
3198 ctc_netdev_unregister(ndev);
3200 ctc_free_netdevice(ndev, 1);
3204 kfree_fsm(priv->fsm);
3206 ccw_device_set_offline(cgdev->cdev[1]);
3207 ccw_device_set_offline(cgdev->cdev[0]);
/* Finally drop the channel objects themselves and clear the pointers. */
3209 if (priv->channel[READ])
3210 channel_remove(priv->channel[READ]);
3211 if (priv->channel[WRITE])
3212 channel_remove(priv->channel[WRITE]);
3214 priv->channel[READ] = priv->channel[WRITE] = NULL;
/* ccwgroup remove callback: shut the device down if still online, then
 * drop sysfs files, driver data, and the reference taken at probe. */
3221 ctc_remove_device(struct ccwgroup_device *cgdev)
3223 struct ctc_priv *priv;
3225 pr_debug("%s() called\n", __FUNCTION__);
3226 DBF_TEXT(trace, 3, __FUNCTION__);
3228 priv = cgdev->dev.driver_data;
3231 if (cgdev->state == CCWGROUP_ONLINE)
3232 ctc_shutdown_device(cgdev);
3233 ctc_remove_files(&cgdev->dev);
3234 cgdev->dev.driver_data = NULL;
/* Balances the get_device() in ctc_probe_device(). */
3236 put_device(&cgdev->dev);
/* ccwgroup driver glue: life-cycle callbacks for CTC device pairs. */
3239 static struct ccwgroup_driver ctc_group_driver = {
3240 .owner = THIS_MODULE,
3243 .driver_id = 0xC3E3C3,
3244 .probe = ctc_probe_device,
3245 .remove = ctc_remove_device,
3246 .set_online = ctc_new_device,
3247 .set_offline = ctc_shutdown_device,
3251 * Module related routines
3252 *****************************************************************************/
3255 * Prepare to be unloaded. Free IRQ's and release all resources.
3256 * This is called just before this module is unloaded. It is
3257 * <em>not</em> called, if the usage count is !0, so we don't need to check
/* (body of ctc_exit — its signature lies in lines not shown here)
 * Unhook from the cu3088 layer and drop the debug-feature views. */
3263 unregister_cu3088_discipline(&ctc_group_driver);
3265 ctc_unregister_dbf_views();
3266 ctc_pr_info("CTC driver unloaded\n");
3270 * Initialize module.
3271 * This is called just after the module is loaded.
3273 * @return 0 on success, !0 on error.
/* (body of ctc_init — its signature lies in lines not shown here)
 * Register debug views first, then the cu3088 discipline; the views are
 * torn down again if discipline registration fails. */
3282 ret = ctc_register_dbf_views();
3284 ctc_pr_crit("ctc_init failed with ctc_register_dbf_views rc = %d\n", ret);
3288 ret = register_cu3088_discipline(&ctc_group_driver);
3291 ctc_unregister_dbf_views();
/* Module entry/exit hooks. */
3296 module_init(ctc_init);
3297 module_exit(ctc_exit);
3299 /* --- This is the END my friend --- */