2 * $Id: ctcmain.c,v 1.65 2004/10/27 09:12:48 mschwide Exp $
4 * CTC / ESCON network driver
6 * Copyright (C) 2001 IBM Deutschland Entwicklung GmbH, IBM Corporation
7 * Author(s): Fritz Elfert (elfert@de.ibm.com, felfert@millenux.com)
8 * Fixes by : Jochen Röhrig (roehrig@de.ibm.com)
9 * Arnaldo Carvalho de Melo <acme@conectiva.com.br>
10 * Driver Model stuff by : Cornelia Huck <cohuck@de.ibm.com>
13 * - Principles of Operation (IBM doc#: SA22-7201-06)
14 * - Common IO/-Device Commands and Self Description (IBM doc#: SA22-7204-02)
15 * - Common IO/-Device Commands and Self Description (IBM doc#: SN22-5535)
16 * - ESCON Channel-to-Channel Adapter (IBM doc#: SA22-7203-00)
17 * - ESCON I/O Interface (IBM doc#: SA22-7202-02)
19 * and the source of the original CTC driver by:
20 * Dieter Wellerdiek (wel@de.ibm.com)
21 * Martin Schwidefsky (schwidefsky@de.ibm.com)
22 * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
23 * Jochen Röhrig (roehrig@de.ibm.com)
25 * This program is free software; you can redistribute it and/or modify
26 * it under the terms of the GNU General Public License as published by
27 * the Free Software Foundation; either version 2, or (at your option)
30 * This program is distributed in the hope that it will be useful,
31 * but WITHOUT ANY WARRANTY; without even the implied warranty of
32 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
33 * GNU General Public License for more details.
35 * You should have received a copy of the GNU General Public License
36 * along with this program; if not, write to the Free Software
37 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
39 * RELEASE-TAG: CTC/ESCON network driver $Revision: 1.65 $
45 #include <linux/module.h>
46 #include <linux/init.h>
47 #include <linux/kernel.h>
48 #include <linux/slab.h>
49 #include <linux/errno.h>
50 #include <linux/types.h>
51 #include <linux/interrupt.h>
52 #include <linux/timer.h>
53 #include <linux/sched.h>
54 #include <linux/bitops.h>
56 #include <linux/signal.h>
57 #include <linux/string.h>
60 #include <linux/if_arp.h>
61 #include <linux/tcp.h>
62 #include <linux/skbuff.h>
63 #include <linux/ctype.h>
67 #include <asm/ccwdev.h>
68 #include <asm/ccwgroup.h>
69 #include <asm/uaccess.h>
71 #include <asm/idals.h>
78 MODULE_AUTHOR("(C) 2000 IBM Corp. by Fritz Elfert (felfert@millenux.com)");
79 MODULE_DESCRIPTION("Linux for S/390 CTC/Escon Driver");
80 MODULE_LICENSE("GPL");
83 * CCW commands, used in this driver.
85 #define CCW_CMD_WRITE 0x01
86 #define CCW_CMD_READ 0x02
87 #define CCW_CMD_SET_EXTENDED 0xc3
88 #define CCW_CMD_PREPARE 0xe3
90 #define CTC_PROTO_S390 0
91 #define CTC_PROTO_LINUX 1
92 #define CTC_PROTO_LINUX_TTY 2
93 #define CTC_PROTO_OS390 3
94 #define CTC_PROTO_MAX 3
96 #define CTC_BUFSIZE_LIMIT 65535
97 #define CTC_BUFSIZE_DEFAULT 32768
99 #define CTC_TIMEOUT_5SEC 5000
101 #define CTC_INITIAL_BLOCKLEN 2
106 #define CTC_ID_SIZE BUS_ID_SIZE+3
110 unsigned long maxmulti;
111 unsigned long maxcqueue;
112 unsigned long doios_single;
113 unsigned long doios_multi;
115 unsigned long tx_time;
116 struct timespec send_stamp;
120 * Definition of one channel
125 * Pointer to next channel in list.
127 struct channel *next;
128 char id[CTC_ID_SIZE];
129 struct ccw_device *cdev;
132 * Type of this channel.
133 * CTC/A or Escon for valid channels.
135 enum channel_types type;
138 * Misc. flags. See CHANNEL_FLAGS_... below
143 * The protocol of this channel
148 * I/O and irq related stuff
159 * Transmit/Receive buffer.
161 struct sk_buff *trans_skb;
164 * Universal I/O queue.
166 struct sk_buff_head io_queue;
169 * TX queue for collecting skb's during busy.
171 struct sk_buff_head collect_queue;
174 * Amount of data in collect_queue.
179 * spinlock for collect_queue and collect_len
181 spinlock_t collect_lock;
184 * Timer for detecting unresposive
190 * Retry counter for misc. operations.
195 * The finite state machine of this channel
200 * The corresponding net_device this channel
203 struct net_device *netdev;
205 struct ctc_profile prof;
207 unsigned char *trans_skb_data;
212 #define CHANNEL_FLAGS_READ 0
213 #define CHANNEL_FLAGS_WRITE 1
214 #define CHANNEL_FLAGS_INUSE 2
215 #define CHANNEL_FLAGS_BUFSIZE_CHANGED 4
216 #define CHANNEL_FLAGS_FAILED 8
217 #define CHANNEL_FLAGS_WAITIRQ 16
218 #define CHANNEL_FLAGS_RWMASK 1
219 #define CHANNEL_DIRECTION(f) (f & CHANNEL_FLAGS_RWMASK)
221 #define LOG_FLAG_ILLEGALPKT 1
222 #define LOG_FLAG_ILLEGALSIZE 2
223 #define LOG_FLAG_OVERRUN 4
224 #define LOG_FLAG_NOMEM 8
226 #define CTC_LOGLEVEL_INFO 1
227 #define CTC_LOGLEVEL_NOTICE 2
228 #define CTC_LOGLEVEL_WARN 4
229 #define CTC_LOGLEVEL_EMERG 8
230 #define CTC_LOGLEVEL_ERR 16
231 #define CTC_LOGLEVEL_DEBUG 32
232 #define CTC_LOGLEVEL_CRIT 64
234 #define CTC_LOGLEVEL_DEFAULT \
235 (CTC_LOGLEVEL_INFO | CTC_LOGLEVEL_NOTICE | CTC_LOGLEVEL_WARN | CTC_LOGLEVEL_CRIT)
237 #define CTC_LOGLEVEL_MAX ((CTC_LOGLEVEL_CRIT<<1)-1)
239 static int loglevel = CTC_LOGLEVEL_DEFAULT;
241 #define ctc_pr_debug(fmt, arg...) \
242 do { if (loglevel & CTC_LOGLEVEL_DEBUG) printk(KERN_DEBUG fmt,##arg); } while (0)
244 #define ctc_pr_info(fmt, arg...) \
245 do { if (loglevel & CTC_LOGLEVEL_INFO) printk(KERN_INFO fmt,##arg); } while (0)
247 #define ctc_pr_notice(fmt, arg...) \
248 do { if (loglevel & CTC_LOGLEVEL_NOTICE) printk(KERN_NOTICE fmt,##arg); } while (0)
250 #define ctc_pr_warn(fmt, arg...) \
251 do { if (loglevel & CTC_LOGLEVEL_WARN) printk(KERN_WARNING fmt,##arg); } while (0)
253 #define ctc_pr_emerg(fmt, arg...) \
254 do { if (loglevel & CTC_LOGLEVEL_EMERG) printk(KERN_EMERG fmt,##arg); } while (0)
256 #define ctc_pr_err(fmt, arg...) \
257 do { if (loglevel & CTC_LOGLEVEL_ERR) printk(KERN_ERR fmt,##arg); } while (0)
259 #define ctc_pr_crit(fmt, arg...) \
260 do { if (loglevel & CTC_LOGLEVEL_CRIT) printk(KERN_CRIT fmt,##arg); } while (0)
263 * Linked list of all detected channels.
265 static struct channel *channels = NULL;
268 struct net_device_stats stats;
271 * The finite state machine of this interface.
275 * The protocol of this device
279 * Timer for restarting after I/O Errors
281 fsm_timer restart_timer;
283 struct channel *channel[2];
287 * Definition of our link level header.
294 #define LL_HEADER_LENGTH (sizeof(struct ll_header))
297 * Compatibility macros for busy handling
298 * of network devices.
300 static __inline__ void
301 ctc_clear_busy(struct net_device * dev)
303 clear_bit(0, &(((struct ctc_priv *) dev->priv)->tbusy));
304 if (((struct ctc_priv *)dev->priv)->protocol != CTC_PROTO_LINUX_TTY)
305 netif_wake_queue(dev);
308 static __inline__ int
309 ctc_test_and_set_busy(struct net_device * dev)
311 if (((struct ctc_priv *)dev->priv)->protocol != CTC_PROTO_LINUX_TTY)
312 netif_stop_queue(dev);
313 return test_and_set_bit(0, &((struct ctc_priv *) dev->priv)->tbusy);
322 static int printed = 0;
323 char vbuf[] = "$Revision: 1.65 $";
324 char *version = vbuf;
328 if ((version = strchr(version, ':'))) {
329 char *p = strchr(version + 1, '$');
334 printk(KERN_INFO "CTC driver Version%s"
336 " (DEBUG-VERSION, " __DATE__ __TIME__ ")"
338 " initialized\n", version);
343 * Return type of a detected device.
345 static enum channel_types
346 get_channel_type(struct ccw_device_id *id)
348 enum channel_types type = (enum channel_types) id->driver_info;
350 if (type == channel_type_ficon)
351 type = channel_type_escon;
357 * States of the interface statemachine.
361 DEV_STATE_STARTWAIT_RXTX,
362 DEV_STATE_STARTWAIT_RX,
363 DEV_STATE_STARTWAIT_TX,
364 DEV_STATE_STOPWAIT_RXTX,
365 DEV_STATE_STOPWAIT_RX,
366 DEV_STATE_STOPWAIT_TX,
369 * MUST be always the last element!!
374 static const char *dev_state_names[] = {
386 * Events of the interface statemachine.
397 * MUST be always the last element!!
402 static const char *dev_event_names[] = {
413 * Events of the channel statemachine
417 * Events, representing return code of
418 * I/O operations (ccw_device_start, ccw_device_halt et al.)
431 * Events, representing unit-check
435 CH_EVENT_UC_TXTIMEOUT,
436 CH_EVENT_UC_TXPARITY,
438 CH_EVENT_UC_RXPARITY,
443 * Events, representing subchannel-check
448 * Events, representing machine checks
454 * Event, representing normal IRQ
460 * Event, representing timer expiry.
465 * Events, representing commands from upper levels.
471 * MUST be always the last element!!
476 static const char *ch_event_names[] = {
477 "ccw_device success",
481 "ccw_device unknown",
483 "Status ATTN & BUSY",
487 "Unit check remote reset",
488 "Unit check remote system reset",
489 "Unit check TX timeout",
490 "Unit check TX parity",
491 "Unit check Hardware failure",
492 "Unit check RX parity",
494 "Unit check Unknown",
496 "SubChannel check Unknown",
498 "Machine check failure",
499 "Machine check operational",
511 * States of the channel statemachine.
515 * Channel not assigned to any device,
516 * initial state, direction invalid
521 * Channel assigned but not operating
540 * MUST be always the last element!!
545 static const char *ch_state_names[] = {
566 * Dump header and first 16 bytes of an sk_buff for debugging purposes.
568 * @param skb The sk_buff to dump.
569 * @param offset Offset relative to skb-data, where to start the dump.
572 ctc_dump_skb(struct sk_buff *skb, int offset)
574 unsigned char *p = skb->data;
576 struct ll_header *header;
579 if (!(loglevel & CTC_LOGLEVEL_DEBUG))
584 header = (struct ll_header *) p;
587 printk(KERN_DEBUG "dump:\n");
588 printk(KERN_DEBUG "blocklen=%d %04x\n", bl, bl);
590 printk(KERN_DEBUG "h->length=%d %04x\n", header->length,
592 printk(KERN_DEBUG "h->type=%04x\n", header->type);
593 printk(KERN_DEBUG "h->unused=%04x\n", header->unused);
596 printk(KERN_DEBUG "data: ");
597 for (i = 0; i < bl; i++)
598 printk("%02x%s", *p++, (i % 16) ? " " : "\n<7>");
603 ctc_dump_skb(struct sk_buff *skb, int offset)
609 * Unpack a just received skb and hand it over to
612 * @param ch The channel where this skb has been received.
613 * @param pskb The received skb.
615 static __inline__ void
616 ctc_unpack_skb(struct channel *ch, struct sk_buff *pskb)
618 struct net_device *dev = ch->netdev;
619 struct ctc_priv *privptr = (struct ctc_priv *) dev->priv;
620 __u16 len = *((__u16 *) pskb->data);
622 DBF_TEXT(trace, 4, __FUNCTION__);
623 skb_put(pskb, 2 + LL_HEADER_LENGTH);
626 pskb->ip_summed = CHECKSUM_UNNECESSARY;
629 struct ll_header *header = (struct ll_header *) pskb->data;
631 skb_pull(pskb, LL_HEADER_LENGTH);
632 if ((ch->protocol == CTC_PROTO_S390) &&
633 (header->type != ETH_P_IP)) {
636 if (!(ch->logflags & LOG_FLAG_ILLEGALPKT)) {
639 * Check packet type only if we stick strictly
640 * to S/390's protocol of OS390. This only
641 * supports IP. Otherwise allow any packet
645 "%s Illegal packet type 0x%04x received, dropping\n",
646 dev->name, header->type);
647 ch->logflags |= LOG_FLAG_ILLEGALPKT;
652 ctc_dump_skb(pskb, -6);
654 privptr->stats.rx_dropped++;
655 privptr->stats.rx_frame_errors++;
658 pskb->protocol = ntohs(header->type);
659 if (header->length <= LL_HEADER_LENGTH) {
661 if (!(ch->logflags & LOG_FLAG_ILLEGALSIZE)) {
664 "%s Illegal packet size %d "
665 "received (MTU=%d blocklen=%d), "
666 "dropping\n", dev->name, header->length,
668 ch->logflags |= LOG_FLAG_ILLEGALSIZE;
673 ctc_dump_skb(pskb, -6);
675 privptr->stats.rx_dropped++;
676 privptr->stats.rx_length_errors++;
679 header->length -= LL_HEADER_LENGTH;
680 len -= LL_HEADER_LENGTH;
681 if ((header->length > skb_tailroom(pskb)) ||
682 (header->length > len)) {
684 if (!(ch->logflags & LOG_FLAG_OVERRUN)) {
687 "%s Illegal packet size %d "
688 "(beyond the end of received data), "
689 "dropping\n", dev->name, header->length);
690 ch->logflags |= LOG_FLAG_OVERRUN;
695 ctc_dump_skb(pskb, -6);
697 privptr->stats.rx_dropped++;
698 privptr->stats.rx_length_errors++;
701 skb_put(pskb, header->length);
702 pskb->mac.raw = pskb->data;
703 len -= header->length;
704 skb = dev_alloc_skb(pskb->len);
707 if (!(ch->logflags & LOG_FLAG_NOMEM)) {
710 "%s Out of memory in ctc_unpack_skb\n",
712 ch->logflags |= LOG_FLAG_NOMEM;
716 privptr->stats.rx_dropped++;
719 memcpy(skb_put(skb, pskb->len), pskb->data, pskb->len);
720 skb->mac.raw = skb->data;
721 skb->dev = pskb->dev;
722 skb->protocol = pskb->protocol;
723 pskb->ip_summed = CHECKSUM_UNNECESSARY;
724 if (ch->protocol == CTC_PROTO_LINUX_TTY)
725 ctc_tty_netif_rx(skb);
729 * Successful rx; reset logflags
732 dev->last_rx = jiffies;
733 privptr->stats.rx_packets++;
734 privptr->stats.rx_bytes += skb->len;
736 skb_pull(pskb, header->length);
737 if (skb_tailroom(pskb) < LL_HEADER_LENGTH) {
739 if (!(ch->logflags & LOG_FLAG_OVERRUN)) {
742 "%s Overrun in ctc_unpack_skb\n",
744 ch->logflags |= LOG_FLAG_OVERRUN;
750 skb_put(pskb, LL_HEADER_LENGTH);
756 * Check return code of a preceeding ccw_device call, halt_IO etc...
758 * @param ch The channel, the error belongs to.
759 * @param return_code The error code to inspect.
762 ccw_check_return_code(struct channel *ch, int return_code, char *msg)
764 DBF_TEXT(trace, 5, __FUNCTION__);
765 switch (return_code) {
767 fsm_event(ch->fsm, CH_EVENT_IO_SUCCESS, ch);
770 ctc_pr_warn("%s (%s): Busy !\n", ch->id, msg);
771 fsm_event(ch->fsm, CH_EVENT_IO_EBUSY, ch);
774 ctc_pr_emerg("%s (%s): Invalid device called for IO\n",
776 fsm_event(ch->fsm, CH_EVENT_IO_ENODEV, ch);
779 ctc_pr_emerg("%s (%s): Status pending... \n",
781 fsm_event(ch->fsm, CH_EVENT_IO_EIO, ch);
784 ctc_pr_emerg("%s (%s): Unknown error in do_IO %04x\n",
785 ch->id, msg, return_code);
786 fsm_event(ch->fsm, CH_EVENT_IO_UNKNOWN, ch);
791 * Check sense of a unit check.
793 * @param ch The channel, the sense code belongs to.
794 * @param sense The sense code to inspect.
797 ccw_unit_check(struct channel *ch, unsigned char sense)
799 DBF_TEXT(trace, 5, __FUNCTION__);
800 if (sense & SNS0_INTERVENTION_REQ) {
802 if (ch->protocol != CTC_PROTO_LINUX_TTY)
803 ctc_pr_debug("%s: Interface disc. or Sel. reset "
804 "(remote)\n", ch->id);
805 fsm_event(ch->fsm, CH_EVENT_UC_RCRESET, ch);
807 ctc_pr_debug("%s: System reset (remote)\n", ch->id);
808 fsm_event(ch->fsm, CH_EVENT_UC_RSRESET, ch);
810 } else if (sense & SNS0_EQUIPMENT_CHECK) {
811 if (sense & SNS0_BUS_OUT_CHECK) {
812 ctc_pr_warn("%s: Hardware malfunction (remote)\n",
814 fsm_event(ch->fsm, CH_EVENT_UC_HWFAIL, ch);
816 ctc_pr_warn("%s: Read-data parity error (remote)\n",
818 fsm_event(ch->fsm, CH_EVENT_UC_RXPARITY, ch);
820 } else if (sense & SNS0_BUS_OUT_CHECK) {
822 ctc_pr_warn("%s: Data-streaming timeout)\n", ch->id);
823 fsm_event(ch->fsm, CH_EVENT_UC_TXTIMEOUT, ch);
825 ctc_pr_warn("%s: Data-transfer parity error\n", ch->id);
826 fsm_event(ch->fsm, CH_EVENT_UC_TXPARITY, ch);
828 } else if (sense & SNS0_CMD_REJECT) {
829 ctc_pr_warn("%s: Command reject\n", ch->id);
830 } else if (sense == 0) {
831 ctc_pr_debug("%s: Unit check ZERO\n", ch->id);
832 fsm_event(ch->fsm, CH_EVENT_UC_ZERO, ch);
834 ctc_pr_warn("%s: Unit Check with sense code: %02x\n",
836 fsm_event(ch->fsm, CH_EVENT_UC_UNKNOWN, ch);
841 ctc_purge_skb_queue(struct sk_buff_head *q)
845 DBF_TEXT(trace, 5, __FUNCTION__);
847 while ((skb = skb_dequeue(q))) {
848 atomic_dec(&skb->users);
849 dev_kfree_skb_irq(skb);
853 static __inline__ int
854 ctc_checkalloc_buffer(struct channel *ch, int warn)
856 DBF_TEXT(trace, 5, __FUNCTION__);
857 if ((ch->trans_skb == NULL) ||
858 (ch->flags & CHANNEL_FLAGS_BUFSIZE_CHANGED)) {
859 if (ch->trans_skb != NULL)
860 dev_kfree_skb(ch->trans_skb);
861 clear_normalized_cda(&ch->ccw[1]);
862 ch->trans_skb = __dev_alloc_skb(ch->max_bufsize,
863 GFP_ATOMIC | GFP_DMA);
864 if (ch->trans_skb == NULL) {
867 "%s: Couldn't alloc %s trans_skb\n",
869 (CHANNEL_DIRECTION(ch->flags) == READ) ?
873 ch->ccw[1].count = ch->max_bufsize;
874 if (set_normalized_cda(&ch->ccw[1], ch->trans_skb->data)) {
875 dev_kfree_skb(ch->trans_skb);
876 ch->trans_skb = NULL;
879 "%s: set_normalized_cda for %s "
880 "trans_skb failed, dropping packets\n",
882 (CHANNEL_DIRECTION(ch->flags) == READ) ?
886 ch->ccw[1].count = 0;
887 ch->trans_skb_data = ch->trans_skb->data;
888 ch->flags &= ~CHANNEL_FLAGS_BUFSIZE_CHANGED;
894 * Dummy NOP action for statemachines
897 fsm_action_nop(fsm_instance * fi, int event, void *arg)
902 * Actions for channel - statemachines.
903 *****************************************************************************/
906 * Normal data has been send. Free the corresponding
907 * skb (it's in io_queue), reset dev->tbusy and
908 * revert to idle state.
910 * @param fi An instance of a channel statemachine.
911 * @param event The event, just happened.
912 * @param arg Generic pointer, casted from channel * upon call.
915 ch_action_txdone(fsm_instance * fi, int event, void *arg)
917 struct channel *ch = (struct channel *) arg;
918 struct net_device *dev = ch->netdev;
919 struct ctc_priv *privptr = dev->priv;
923 unsigned long duration;
924 struct timespec done_stamp = xtime;
926 DBF_TEXT(trace, 4, __FUNCTION__);
929 (done_stamp.tv_sec - ch->prof.send_stamp.tv_sec) * 1000000 +
930 (done_stamp.tv_nsec - ch->prof.send_stamp.tv_nsec) / 1000;
931 if (duration > ch->prof.tx_time)
932 ch->prof.tx_time = duration;
934 if (ch->irb->scsw.count != 0)
935 ctc_pr_debug("%s: TX not complete, remaining %d bytes\n",
936 dev->name, ch->irb->scsw.count);
937 fsm_deltimer(&ch->timer);
938 while ((skb = skb_dequeue(&ch->io_queue))) {
939 privptr->stats.tx_packets++;
940 privptr->stats.tx_bytes += skb->len - LL_HEADER_LENGTH;
942 privptr->stats.tx_bytes += 2;
945 atomic_dec(&skb->users);
946 dev_kfree_skb_irq(skb);
948 spin_lock(&ch->collect_lock);
949 clear_normalized_cda(&ch->ccw[4]);
950 if (ch->collect_len > 0) {
953 if (ctc_checkalloc_buffer(ch, 1)) {
954 spin_unlock(&ch->collect_lock);
957 ch->trans_skb->tail = ch->trans_skb->data = ch->trans_skb_data;
958 ch->trans_skb->len = 0;
959 if (ch->prof.maxmulti < (ch->collect_len + 2))
960 ch->prof.maxmulti = ch->collect_len + 2;
961 if (ch->prof.maxcqueue < skb_queue_len(&ch->collect_queue))
962 ch->prof.maxcqueue = skb_queue_len(&ch->collect_queue);
963 *((__u16 *) skb_put(ch->trans_skb, 2)) = ch->collect_len + 2;
965 while ((skb = skb_dequeue(&ch->collect_queue))) {
966 memcpy(skb_put(ch->trans_skb, skb->len), skb->data,
968 privptr->stats.tx_packets++;
969 privptr->stats.tx_bytes += skb->len - LL_HEADER_LENGTH;
970 atomic_dec(&skb->users);
971 dev_kfree_skb_irq(skb);
975 spin_unlock(&ch->collect_lock);
976 ch->ccw[1].count = ch->trans_skb->len;
977 fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch);
978 ch->prof.send_stamp = xtime;
979 rc = ccw_device_start(ch->cdev, &ch->ccw[0],
980 (unsigned long) ch, 0xff, 0);
981 ch->prof.doios_multi++;
983 privptr->stats.tx_dropped += i;
984 privptr->stats.tx_errors += i;
985 fsm_deltimer(&ch->timer);
986 ccw_check_return_code(ch, rc, "chained TX");
989 spin_unlock(&ch->collect_lock);
990 fsm_newstate(fi, CH_STATE_TXIDLE);
996 * Initial data is sent.
997 * Notify device statemachine that we are up and
1000 * @param fi An instance of a channel statemachine.
1001 * @param event The event, just happened.
1002 * @param arg Generic pointer, casted from channel * upon call.
1005 ch_action_txidle(fsm_instance * fi, int event, void *arg)
1007 struct channel *ch = (struct channel *) arg;
1009 DBF_TEXT(trace, 4, __FUNCTION__);
1010 fsm_deltimer(&ch->timer);
1011 fsm_newstate(fi, CH_STATE_TXIDLE);
1012 fsm_event(((struct ctc_priv *) ch->netdev->priv)->fsm, DEV_EVENT_TXUP,
1017 * Got normal data, check for sanity, queue it up, allocate new buffer
1018 * trigger bottom half, and initiate next read.
1020 * @param fi An instance of a channel statemachine.
1021 * @param event The event, just happened.
1022 * @param arg Generic pointer, casted from channel * upon call.
1025 ch_action_rx(fsm_instance * fi, int event, void *arg)
1027 struct channel *ch = (struct channel *) arg;
1028 struct net_device *dev = ch->netdev;
1029 struct ctc_priv *privptr = dev->priv;
1030 int len = ch->max_bufsize - ch->irb->scsw.count;
1031 struct sk_buff *skb = ch->trans_skb;
1032 __u16 block_len = *((__u16 *) skb->data);
1036 DBF_TEXT(trace, 4, __FUNCTION__);
1037 fsm_deltimer(&ch->timer);
1039 ctc_pr_debug("%s: got packet with length %d < 8\n",
1041 privptr->stats.rx_dropped++;
1042 privptr->stats.rx_length_errors++;
1045 if (len > ch->max_bufsize) {
1046 ctc_pr_debug("%s: got packet with length %d > %d\n",
1047 dev->name, len, ch->max_bufsize);
1048 privptr->stats.rx_dropped++;
1049 privptr->stats.rx_length_errors++;
1054 * VM TCP seems to have a bug sending 2 trailing bytes of garbage.
1056 switch (ch->protocol) {
1057 case CTC_PROTO_S390:
1058 case CTC_PROTO_OS390:
1059 check_len = block_len + 2;
1062 check_len = block_len;
1065 if ((len < block_len) || (len > check_len)) {
1066 ctc_pr_debug("%s: got block length %d != rx length %d\n",
1067 dev->name, block_len, len);
1069 ctc_dump_skb(skb, 0);
1071 *((__u16 *) skb->data) = len;
1072 privptr->stats.rx_dropped++;
1073 privptr->stats.rx_length_errors++;
1077 if (block_len > 0) {
1078 *((__u16 *) skb->data) = block_len;
1079 ctc_unpack_skb(ch, skb);
1082 skb->data = skb->tail = ch->trans_skb_data;
1084 if (ctc_checkalloc_buffer(ch, 1))
1086 ch->ccw[1].count = ch->max_bufsize;
1087 rc = ccw_device_start(ch->cdev, &ch->ccw[0], (unsigned long) ch, 0xff, 0);
1089 ccw_check_return_code(ch, rc, "normal RX");
1092 static void ch_action_rxidle(fsm_instance * fi, int event, void *arg);
1095 * Initialize connection by sending a __u16 of value 0.
1097 * @param fi An instance of a channel statemachine.
1098 * @param event The event, just happened.
1099 * @param arg Generic pointer, casted from channel * upon call.
1102 ch_action_firstio(fsm_instance * fi, int event, void *arg)
1104 struct channel *ch = (struct channel *) arg;
1107 DBF_TEXT(trace, 4, __FUNCTION__);
1109 if (fsm_getstate(fi) == CH_STATE_TXIDLE)
1110 ctc_pr_debug("%s: remote side issued READ?, init ...\n", ch->id);
1111 fsm_deltimer(&ch->timer);
1112 if (ctc_checkalloc_buffer(ch, 1))
1114 if ((fsm_getstate(fi) == CH_STATE_SETUPWAIT) &&
1115 (ch->protocol == CTC_PROTO_OS390)) {
1116 /* OS/390 resp. z/OS */
1117 if (CHANNEL_DIRECTION(ch->flags) == READ) {
1118 *((__u16 *) ch->trans_skb->data) = CTC_INITIAL_BLOCKLEN;
1119 fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC,
1120 CH_EVENT_TIMER, ch);
1121 ch_action_rxidle(fi, event, arg);
1123 struct net_device *dev = ch->netdev;
1124 fsm_newstate(fi, CH_STATE_TXIDLE);
1125 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1126 DEV_EVENT_TXUP, dev);
1132 * Don´t setup a timer for receiving the initial RX frame
1133 * if in compatibility mode, since VM TCP delays the initial
1134 * frame until it has some data to send.
1136 if ((CHANNEL_DIRECTION(ch->flags) == WRITE) ||
1137 (ch->protocol != CTC_PROTO_S390))
1138 fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch);
1140 *((__u16 *) ch->trans_skb->data) = CTC_INITIAL_BLOCKLEN;
1141 ch->ccw[1].count = 2; /* Transfer only length */
1143 fsm_newstate(fi, (CHANNEL_DIRECTION(ch->flags) == READ)
1144 ? CH_STATE_RXINIT : CH_STATE_TXINIT);
1145 rc = ccw_device_start(ch->cdev, &ch->ccw[0], (unsigned long) ch, 0xff, 0);
1147 fsm_deltimer(&ch->timer);
1148 fsm_newstate(fi, CH_STATE_SETUPWAIT);
1149 ccw_check_return_code(ch, rc, "init IO");
1152 * If in compatibility mode since we don´t setup a timer, we
1153 * also signal RX channel up immediately. This enables us
1154 * to send packets early which in turn usually triggers some
1155 * reply from VM TCP which brings up the RX channel to it´s
1158 if ((CHANNEL_DIRECTION(ch->flags) == READ) &&
1159 (ch->protocol == CTC_PROTO_S390)) {
1160 struct net_device *dev = ch->netdev;
1161 fsm_event(((struct ctc_priv *) dev->priv)->fsm, DEV_EVENT_RXUP,
1167 * Got initial data, check it. If OK,
1168 * notify device statemachine that we are up and
1171 * @param fi An instance of a channel statemachine.
1172 * @param event The event, just happened.
1173 * @param arg Generic pointer, casted from channel * upon call.
1176 ch_action_rxidle(fsm_instance * fi, int event, void *arg)
1178 struct channel *ch = (struct channel *) arg;
1179 struct net_device *dev = ch->netdev;
1183 DBF_TEXT(trace, 4, __FUNCTION__);
1184 fsm_deltimer(&ch->timer);
1185 buflen = *((__u16 *) ch->trans_skb->data);
1187 ctc_pr_debug("%s: Initial RX count %d\n", dev->name, buflen);
1189 if (buflen >= CTC_INITIAL_BLOCKLEN) {
1190 if (ctc_checkalloc_buffer(ch, 1))
1192 ch->ccw[1].count = ch->max_bufsize;
1193 fsm_newstate(fi, CH_STATE_RXIDLE);
1194 rc = ccw_device_start(ch->cdev, &ch->ccw[0],
1195 (unsigned long) ch, 0xff, 0);
1197 fsm_newstate(fi, CH_STATE_RXINIT);
1198 ccw_check_return_code(ch, rc, "initial RX");
1200 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1201 DEV_EVENT_RXUP, dev);
1203 ctc_pr_debug("%s: Initial RX count %d not %d\n",
1204 dev->name, buflen, CTC_INITIAL_BLOCKLEN);
1205 ch_action_firstio(fi, event, arg);
1210 * Set channel into extended mode.
1212 * @param fi An instance of a channel statemachine.
1213 * @param event The event, just happened.
1214 * @param arg Generic pointer, casted from channel * upon call.
1217 ch_action_setmode(fsm_instance * fi, int event, void *arg)
1219 struct channel *ch = (struct channel *) arg;
1221 unsigned long saveflags;
1223 DBF_TEXT(trace, 4, __FUNCTION__);
1224 fsm_deltimer(&ch->timer);
1225 fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch);
1226 fsm_newstate(fi, CH_STATE_SETUPWAIT);
1227 saveflags = 0; /* avoids compiler warning with
1228 spin_unlock_irqrestore */
1229 if (event == CH_EVENT_TIMER) // only for timer not yet locked
1230 spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
1231 rc = ccw_device_start(ch->cdev, &ch->ccw[6], (unsigned long) ch, 0xff, 0);
1232 if (event == CH_EVENT_TIMER)
1233 spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
1235 fsm_deltimer(&ch->timer);
1236 fsm_newstate(fi, CH_STATE_STARTWAIT);
1237 ccw_check_return_code(ch, rc, "set Mode");
1245 * @param fi An instance of a channel statemachine.
1246 * @param event The event, just happened.
1247 * @param arg Generic pointer, casted from channel * upon call.
1250 ch_action_start(fsm_instance * fi, int event, void *arg)
1252 struct channel *ch = (struct channel *) arg;
1253 unsigned long saveflags;
1255 struct net_device *dev;
1257 DBF_TEXT(trace, 4, __FUNCTION__);
1259 ctc_pr_warn("ch_action_start ch=NULL\n");
1262 if (ch->netdev == NULL) {
1263 ctc_pr_warn("ch_action_start dev=NULL, id=%s\n", ch->id);
1269 ctc_pr_debug("%s: %s channel start\n", dev->name,
1270 (CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX");
1273 if (ch->trans_skb != NULL) {
1274 clear_normalized_cda(&ch->ccw[1]);
1275 dev_kfree_skb(ch->trans_skb);
1276 ch->trans_skb = NULL;
1278 if (CHANNEL_DIRECTION(ch->flags) == READ) {
1279 ch->ccw[1].cmd_code = CCW_CMD_READ;
1280 ch->ccw[1].flags = CCW_FLAG_SLI;
1281 ch->ccw[1].count = 0;
1283 ch->ccw[1].cmd_code = CCW_CMD_WRITE;
1284 ch->ccw[1].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1285 ch->ccw[1].count = 0;
1287 if (ctc_checkalloc_buffer(ch, 0)) {
1289 "%s: Could not allocate %s trans_skb, delaying "
1290 "allocation until first transfer\n",
1292 (CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX");
1295 ch->ccw[0].cmd_code = CCW_CMD_PREPARE;
1296 ch->ccw[0].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1297 ch->ccw[0].count = 0;
1299 ch->ccw[2].cmd_code = CCW_CMD_NOOP; /* jointed CE + DE */
1300 ch->ccw[2].flags = CCW_FLAG_SLI;
1301 ch->ccw[2].count = 0;
1303 memcpy(&ch->ccw[3], &ch->ccw[0], sizeof (struct ccw1) * 3);
1305 ch->ccw[4].flags &= ~CCW_FLAG_IDA;
1307 fsm_newstate(fi, CH_STATE_STARTWAIT);
1308 fsm_addtimer(&ch->timer, 1000, CH_EVENT_TIMER, ch);
1309 spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
1310 rc = ccw_device_halt(ch->cdev, (unsigned long) ch);
1311 spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
1314 fsm_deltimer(&ch->timer);
1315 ccw_check_return_code(ch, rc, "initial HaltIO");
1318 ctc_pr_debug("ctc: %s(): leaving\n", __func__);
1323 * Shutdown a channel.
1325 * @param fi An instance of a channel statemachine.
1326 * @param event The event, just happened.
1327 * @param arg Generic pointer, casted from channel * upon call.
1330 ch_action_haltio(fsm_instance * fi, int event, void *arg)
1332 struct channel *ch = (struct channel *) arg;
1333 unsigned long saveflags;
1337 DBF_TEXT(trace, 3, __FUNCTION__);
1338 fsm_deltimer(&ch->timer);
1339 fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch);
1340 saveflags = 0; /* avoids comp warning with
1341 spin_unlock_irqrestore */
1342 if (event == CH_EVENT_STOP) // only for STOP not yet locked
1343 spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
1344 oldstate = fsm_getstate(fi);
1345 fsm_newstate(fi, CH_STATE_TERM);
1346 rc = ccw_device_halt(ch->cdev, (unsigned long) ch);
1347 if (event == CH_EVENT_STOP)
1348 spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
1351 fsm_deltimer(&ch->timer);
1352 fsm_newstate(fi, oldstate);
1354 ccw_check_return_code(ch, rc, "HaltIO in ch_action_haltio");
1359 * A channel has successfully been halted.
1360 * Cleanup it's queue and notify interface statemachine.
1362 * @param fi An instance of a channel statemachine.
1363 * @param event The event, just happened.
1364 * @param arg Generic pointer, casted from channel * upon call.
1367 ch_action_stopped(fsm_instance * fi, int event, void *arg)
1369 struct channel *ch = (struct channel *) arg;
1370 struct net_device *dev = ch->netdev;
1372 DBF_TEXT(trace, 3, __FUNCTION__);
1373 fsm_deltimer(&ch->timer);
1374 fsm_newstate(fi, CH_STATE_STOPPED);
1375 if (ch->trans_skb != NULL) {
1376 clear_normalized_cda(&ch->ccw[1]);
1377 dev_kfree_skb(ch->trans_skb);
1378 ch->trans_skb = NULL;
1380 if (CHANNEL_DIRECTION(ch->flags) == READ) {
1381 skb_queue_purge(&ch->io_queue);
1382 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1383 DEV_EVENT_RXDOWN, dev);
1385 ctc_purge_skb_queue(&ch->io_queue);
1386 spin_lock(&ch->collect_lock);
1387 ctc_purge_skb_queue(&ch->collect_queue);
1388 ch->collect_len = 0;
1389 spin_unlock(&ch->collect_lock);
1390 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1391 DEV_EVENT_TXDOWN, dev);
1396 * A stop command from device statemachine arrived and we are in
1397 * not operational mode. Set state to stopped.
1399 * @param fi An instance of a channel statemachine.
1400 * @param event The event, just happened.
1401 * @param arg Generic pointer, casted from channel * upon call.
1404 ch_action_stop(fsm_instance * fi, int event, void *arg)
1406 fsm_newstate(fi, CH_STATE_STOPPED);
1410 * A machine check for no path, not operational status or gone device has
1412 * Cleanup queue and notify interface statemachine.
1414 * @param fi An instance of a channel statemachine.
1415 * @param event The event, just happened.
1416 * @param arg Generic pointer, casted from channel * upon call.
1419 ch_action_fail(fsm_instance * fi, int event, void *arg)
1421 struct channel *ch = (struct channel *) arg;
1422 struct net_device *dev = ch->netdev;
1424 DBF_TEXT(trace, 3, __FUNCTION__);
1425 fsm_deltimer(&ch->timer);
1426 fsm_newstate(fi, CH_STATE_NOTOP);
1427 if (CHANNEL_DIRECTION(ch->flags) == READ) {
1428 skb_queue_purge(&ch->io_queue);
1429 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1430 DEV_EVENT_RXDOWN, dev);
1432 ctc_purge_skb_queue(&ch->io_queue);
1433 spin_lock(&ch->collect_lock);
1434 ctc_purge_skb_queue(&ch->collect_queue);
1435 ch->collect_len = 0;
1436 spin_unlock(&ch->collect_lock);
1437 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1438 DEV_EVENT_TXDOWN, dev);
1443 * Handle error during setup of channel.
1445 * @param fi An instance of a channel statemachine.
1446 * @param event The event, just happened.
1447 * @param arg Generic pointer, casted from channel * upon call.
1450 ch_action_setuperr(fsm_instance * fi, int event, void *arg)
1452 struct channel *ch = (struct channel *) arg;
1453 struct net_device *dev = ch->netdev;
1455 DBF_TEXT(setup, 3, __FUNCTION__);
1457 * Special case: Got UC_RCRESET on setmode.
1458 * This means that remote side isn't setup. In this case
1459 * simply retry after some 10 secs...
1461 if ((fsm_getstate(fi) == CH_STATE_SETUPWAIT) &&
1462 ((event == CH_EVENT_UC_RCRESET) ||
1463 (event == CH_EVENT_UC_RSRESET))) {
/* Retry path: re-arm the timer and go back to STARTRETRY. */
1464 fsm_newstate(fi, CH_STATE_STARTRETRY);
1465 fsm_deltimer(&ch->timer);
1466 fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch);
/* The read channel is additionally halted before the retry. */
1467 if (CHANNEL_DIRECTION(ch->flags) == READ) {
1468 int rc = ccw_device_halt(ch->cdev, (unsigned long) ch);
1470 ccw_check_return_code(
1471 ch, rc, "HaltIO in ch_action_setuperr");
/* Fatal path: log the failure and push the channel into its error state. */
1476 ctc_pr_debug("%s: Error %s during %s channel setup state=%s\n",
1477 dev->name, ch_event_names[event],
1478 (CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX",
1479 fsm_getstate_str(fi));
1480 if (CHANNEL_DIRECTION(ch->flags) == READ) {
1481 fsm_newstate(fi, CH_STATE_RXERR);
1482 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1483 DEV_EVENT_RXDOWN, dev);
1485 fsm_newstate(fi, CH_STATE_TXERR);
1486 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1487 DEV_EVENT_TXDOWN, dev);
1492 * Restart a channel after an error.
1494 * @param fi An instance of a channel statemachine.
1495 * @param event The event, just happened.
1496 * @param arg Generic pointer, casted from channel * upon call.
1499 ch_action_restart(fsm_instance * fi, int event, void *arg)
1501 unsigned long saveflags;
1505 struct channel *ch = (struct channel *) arg;
1506 struct net_device *dev = ch->netdev;
1508 DBF_TEXT(trace, 3, __FUNCTION__);
1509 fsm_deltimer(&ch->timer);
1510 ctc_pr_debug("%s: %s channel restart\n", dev->name,
1511 (CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX");
1512 fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch);
/* Remember old state so it can be restored if the halt request fails. */
1513 oldstate = fsm_getstate(fi);
1514 fsm_newstate(fi, CH_STATE_STARTWAIT);
1515 saveflags = 0; /* avoids compiler warning with
1516 spin_unlock_irqrestore */
/*
 * When invoked from the timer the ccw device lock is not yet held,
 * so take it only in that case; other callers already hold it.
 */
1517 if (event == CH_EVENT_TIMER) // only for timer not yet locked
1518 spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
1519 rc = ccw_device_halt(ch->cdev, (unsigned long) ch);
1520 if (event == CH_EVENT_TIMER)
1521 spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
/* Error path (guard line not visible in this excerpt): roll back state. */
1524 fsm_deltimer(&ch->timer);
1525 fsm_newstate(fi, oldstate);
1527 ccw_check_return_code(ch, rc, "HaltIO in ch_action_restart");
1532 * Handle error during RX initial handshake (exchange of
1533 * 0-length block header)
1535 * @param fi An instance of a channel statemachine.
1536 * @param event The event, just happened.
1537 * @param arg Generic pointer, casted from channel * upon call.
1540 ch_action_rxiniterr(fsm_instance * fi, int event, void *arg)
1542 struct channel *ch = (struct channel *) arg;
1543 struct net_device *dev = ch->netdev;
1545 DBF_TEXT(setup, 3, __FUNCTION__);
1546 if (event == CH_EVENT_TIMER) {
1547 fsm_deltimer(&ch->timer);
1548 ctc_pr_debug("%s: Timeout during RX init handshake\n", dev->name);
/* Up to 3 retries before the channel is declared dead. */
1549 if (ch->retry++ < 3)
1550 ch_action_restart(fi, event, arg);
/* Retries exhausted (else path; brace lines not visible in this excerpt). */
1552 fsm_newstate(fi, CH_STATE_RXERR);
1553 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1554 DEV_EVENT_RXDOWN, dev);
/* Non-timer events are only logged. */
1557 ctc_pr_warn("%s: Error during RX init handshake\n", dev->name);
1561 * Notify device statemachine if we gave up initialization
1564 * @param fi An instance of a channel statemachine.
1565 * @param event The event, just happened.
1566 * @param arg Generic pointer, casted from channel * upon call.
1569 ch_action_rxinitfail(fsm_instance * fi, int event, void *arg)
1571 struct channel *ch = (struct channel *) arg;
1572 struct net_device *dev = ch->netdev;
1574 DBF_TEXT(setup, 3, __FUNCTION__);
/* Triggered by ATTNBUSY in RXINIT: both sides read, no writer present. */
1575 fsm_newstate(fi, CH_STATE_RXERR);
1576 ctc_pr_warn("%s: RX initialization failed\n", dev->name);
1577 ctc_pr_warn("%s: RX <-> RX connection detected\n", dev->name);
1578 fsm_event(((struct ctc_priv *) dev->priv)->fsm, DEV_EVENT_RXDOWN, dev);
1582 * Handle RX Unit check remote reset (remote disconnected)
1584 * @param fi An instance of a channel statemachine.
1585 * @param event The event, just happened.
1586 * @param arg Generic pointer, casted from channel * upon call.
1589 ch_action_rxdisc(fsm_instance * fi, int event, void *arg)
1591 struct channel *ch = (struct channel *) arg;
1592 struct channel *ch2;
1593 struct net_device *dev = ch->netdev;
1595 DBF_TEXT(trace, 3, __FUNCTION__);
1596 fsm_deltimer(&ch->timer);
1597 ctc_pr_debug("%s: Got remote disconnect, re-initializing ...\n",
1601 * Notify device statemachine
1603 fsm_event(((struct ctc_priv *) dev->priv)->fsm, DEV_EVENT_RXDOWN, dev);
1604 fsm_event(((struct ctc_priv *) dev->priv)->fsm, DEV_EVENT_TXDOWN, dev);
/* Move both the read channel (fi) and its write peer (ch2) to DTERM
 * and halt both subchannels so the pair re-initializes together. */
1606 fsm_newstate(fi, CH_STATE_DTERM);
1607 ch2 = ((struct ctc_priv *) dev->priv)->channel[WRITE];
1608 fsm_newstate(ch2->fsm, CH_STATE_DTERM);
1610 ccw_device_halt(ch->cdev, (unsigned long) ch);
1611 ccw_device_halt(ch2->cdev, (unsigned long) ch2);
1615 * Handle error during TX channel initialization.
1617 * @param fi An instance of a channel statemachine.
1618 * @param event The event, just happened.
1619 * @param arg Generic pointer, casted from channel * upon call.
1622 ch_action_txiniterr(fsm_instance * fi, int event, void *arg)
1624 struct channel *ch = (struct channel *) arg;
1625 struct net_device *dev = ch->netdev;
1627 DBF_TEXT(setup, 2, __FUNCTION__);
1628 if (event == CH_EVENT_TIMER) {
1629 fsm_deltimer(&ch->timer);
1630 ctc_pr_debug("%s: Timeout during TX init handshake\n", dev->name);
/* Up to 3 retries before giving up, mirroring ch_action_rxiniterr(). */
1631 if (ch->retry++ < 3)
1632 ch_action_restart(fi, event, arg);
/* Retries exhausted (else path; brace lines not visible in this excerpt). */
1634 fsm_newstate(fi, CH_STATE_TXERR);
1635 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1636 DEV_EVENT_TXDOWN, dev);
/* Non-timer events are only logged. */
1639 ctc_pr_warn("%s: Error during TX init handshake\n", dev->name);
1643 * Handle TX timeout by retrying operation.
1645 * @param fi An instance of a channel statemachine.
1646 * @param event The event, just happened.
1647 * @param arg Generic pointer, casted from channel * upon call.
1650 ch_action_txretry(fsm_instance * fi, int event, void *arg)
1652 struct channel *ch = (struct channel *) arg;
1653 struct net_device *dev = ch->netdev;
1654 unsigned long saveflags;
1656 DBF_TEXT(trace, 4, __FUNCTION__);
1657 fsm_deltimer(&ch->timer);
/* More than 3 retries: give up and restart the whole channel. */
1658 if (ch->retry++ > 3) {
1659 ctc_pr_debug("%s: TX retry failed, restarting channel\n",
1661 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1662 DEV_EVENT_TXDOWN, dev);
1663 ch_action_restart(fi, event, arg);
1665 struct sk_buff *skb;
1667 ctc_pr_debug("%s: TX retry %d\n", dev->name, ch->retry);
/* Re-submit the head of the io_queue without dequeueing it. */
1668 if ((skb = skb_peek(&ch->io_queue))) {
/* Rebuild the write CCW's IDAL for the skb data. */
1671 clear_normalized_cda(&ch->ccw[4]);
1672 ch->ccw[4].count = skb->len;
1673 if (set_normalized_cda(&ch->ccw[4], skb->data)) {
1675 "%s: IDAL alloc failed, chan restart\n",
1677 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1678 DEV_EVENT_TXDOWN, dev);
1679 ch_action_restart(fi, event, arg);
1682 fsm_addtimer(&ch->timer, 1000, CH_EVENT_TIMER, ch);
1683 saveflags = 0; /* avoids compiler warning with
1684 spin_unlock_irqrestore */
/* Timer context does not hold the ccw device lock yet; other
 * callers (irq context) already do. */
1685 if (event == CH_EVENT_TIMER) // only for TIMER not yet locked
1686 spin_lock_irqsave(get_ccwdev_lock(ch->cdev),
1688 rc = ccw_device_start(ch->cdev, &ch->ccw[3],
1689 (unsigned long) ch, 0xff, 0);
1690 if (event == CH_EVENT_TIMER)
1691 spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev),
/* Start failed (guard line not visible in this excerpt): drop queue. */
1694 fsm_deltimer(&ch->timer);
1695 ccw_check_return_code(ch, rc, "TX in ch_action_txretry");
1696 ctc_purge_skb_queue(&ch->io_queue);
1704 * Handle fatal errors during an I/O command.
1706 * @param fi An instance of a channel statemachine.
1707 * @param event The event, just happened.
1708 * @param arg Generic pointer, casted from channel * upon call.
1711 ch_action_iofatal(fsm_instance * fi, int event, void *arg)
1713 struct channel *ch = (struct channel *) arg;
1714 struct net_device *dev = ch->netdev;
1716 DBF_TEXT(trace, 3, __FUNCTION__);
1717 fsm_deltimer(&ch->timer);
/* Move the channel into its direction-specific error state and
 * notify the device statemachine accordingly. */
1718 if (CHANNEL_DIRECTION(ch->flags) == READ) {
1719 ctc_pr_debug("%s: RX I/O error\n", dev->name);
1720 fsm_newstate(fi, CH_STATE_RXERR);
1721 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1722 DEV_EVENT_RXDOWN, dev);
1724 ctc_pr_debug("%s: TX I/O error\n", dev->name);
1725 fsm_newstate(fi, CH_STATE_TXERR);
1726 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1727 DEV_EVENT_TXDOWN, dev);
/*
 * Treat the event as a fatal I/O error, then schedule a full device
 * restart (DEV_EVENT_RESTART) after 1000 ms via the restart timer.
 */
1732 ch_action_reinit(fsm_instance *fi, int event, void *arg)
1734 struct channel *ch = (struct channel *)arg;
1735 struct net_device *dev = ch->netdev;
1736 struct ctc_priv *privptr = dev->priv;
1738 DBF_TEXT(trace, 4, __FUNCTION__);
1739 ch_action_iofatal(fi, event, arg);
1740 fsm_addtimer(&privptr->restart_timer, 1000, DEV_EVENT_RESTART, dev);
1745 * The statemachine for a channel.
/*
 * Each entry maps (state, event) -> action callback; pairs not listed
 * here are rejected/ignored by the fsm core.
 */
1747 static const fsm_node ch_fsm[] = {
/* STOPPED: channel fully down; only START does anything. */
1748 {CH_STATE_STOPPED, CH_EVENT_STOP, fsm_action_nop },
1749 {CH_STATE_STOPPED, CH_EVENT_START, ch_action_start },
1750 {CH_STATE_STOPPED, CH_EVENT_FINSTAT, fsm_action_nop },
1751 {CH_STATE_STOPPED, CH_EVENT_MC_FAIL, fsm_action_nop },
/* NOTOP: device not operational; a good machine check restarts it. */
1753 {CH_STATE_NOTOP, CH_EVENT_STOP, ch_action_stop },
1754 {CH_STATE_NOTOP, CH_EVENT_START, fsm_action_nop },
1755 {CH_STATE_NOTOP, CH_EVENT_FINSTAT, fsm_action_nop },
1756 {CH_STATE_NOTOP, CH_EVENT_MC_FAIL, fsm_action_nop },
1757 {CH_STATE_NOTOP, CH_EVENT_MC_GOOD, ch_action_start },
/* STARTWAIT: halt I/O issued, waiting for final status. */
1759 {CH_STATE_STARTWAIT, CH_EVENT_STOP, ch_action_haltio },
1760 {CH_STATE_STARTWAIT, CH_EVENT_START, fsm_action_nop },
1761 {CH_STATE_STARTWAIT, CH_EVENT_FINSTAT, ch_action_setmode },
1762 {CH_STATE_STARTWAIT, CH_EVENT_TIMER, ch_action_setuperr },
1763 {CH_STATE_STARTWAIT, CH_EVENT_IO_ENODEV, ch_action_iofatal },
1764 {CH_STATE_STARTWAIT, CH_EVENT_IO_EIO, ch_action_reinit },
1765 {CH_STATE_STARTWAIT, CH_EVENT_MC_FAIL, ch_action_fail },
/* STARTRETRY: waiting out the retry delay before set mode. */
1767 {CH_STATE_STARTRETRY, CH_EVENT_STOP, ch_action_haltio },
1768 {CH_STATE_STARTRETRY, CH_EVENT_TIMER, ch_action_setmode },
1769 {CH_STATE_STARTRETRY, CH_EVENT_FINSTAT, fsm_action_nop },
1770 {CH_STATE_STARTRETRY, CH_EVENT_MC_FAIL, ch_action_fail },
/* SETUPWAIT: set-extended-mode CCW in flight. */
1772 {CH_STATE_SETUPWAIT, CH_EVENT_STOP, ch_action_haltio },
1773 {CH_STATE_SETUPWAIT, CH_EVENT_START, fsm_action_nop },
1774 {CH_STATE_SETUPWAIT, CH_EVENT_FINSTAT, ch_action_firstio },
1775 {CH_STATE_SETUPWAIT, CH_EVENT_UC_RCRESET, ch_action_setuperr },
1776 {CH_STATE_SETUPWAIT, CH_EVENT_UC_RSRESET, ch_action_setuperr },
1777 {CH_STATE_SETUPWAIT, CH_EVENT_TIMER, ch_action_setmode },
1778 {CH_STATE_SETUPWAIT, CH_EVENT_IO_ENODEV, ch_action_iofatal },
1779 {CH_STATE_SETUPWAIT, CH_EVENT_IO_EIO, ch_action_reinit },
1780 {CH_STATE_SETUPWAIT, CH_EVENT_MC_FAIL, ch_action_fail },
/* RXINIT: initial 0-length-block handshake on the read channel. */
1782 {CH_STATE_RXINIT, CH_EVENT_STOP, ch_action_haltio },
1783 {CH_STATE_RXINIT, CH_EVENT_START, fsm_action_nop },
1784 {CH_STATE_RXINIT, CH_EVENT_FINSTAT, ch_action_rxidle },
1785 {CH_STATE_RXINIT, CH_EVENT_UC_RCRESET, ch_action_rxiniterr },
1786 {CH_STATE_RXINIT, CH_EVENT_UC_RSRESET, ch_action_rxiniterr },
1787 {CH_STATE_RXINIT, CH_EVENT_TIMER, ch_action_rxiniterr },
1788 {CH_STATE_RXINIT, CH_EVENT_ATTNBUSY, ch_action_rxinitfail },
1789 {CH_STATE_RXINIT, CH_EVENT_IO_ENODEV, ch_action_iofatal },
1790 {CH_STATE_RXINIT, CH_EVENT_IO_EIO, ch_action_reinit },
1791 {CH_STATE_RXINIT, CH_EVENT_UC_ZERO, ch_action_firstio },
1792 {CH_STATE_RXINIT, CH_EVENT_MC_FAIL, ch_action_fail },
/* RXIDLE: read channel up and waiting for data. */
1794 {CH_STATE_RXIDLE, CH_EVENT_STOP, ch_action_haltio },
1795 {CH_STATE_RXIDLE, CH_EVENT_START, fsm_action_nop },
1796 {CH_STATE_RXIDLE, CH_EVENT_FINSTAT, ch_action_rx },
1797 {CH_STATE_RXIDLE, CH_EVENT_UC_RCRESET, ch_action_rxdisc },
1798 // {CH_STATE_RXIDLE, CH_EVENT_UC_RSRESET, ch_action_rxretry },
1799 {CH_STATE_RXIDLE, CH_EVENT_IO_ENODEV, ch_action_iofatal },
1800 {CH_STATE_RXIDLE, CH_EVENT_IO_EIO, ch_action_reinit },
1801 {CH_STATE_RXIDLE, CH_EVENT_MC_FAIL, ch_action_fail },
1802 {CH_STATE_RXIDLE, CH_EVENT_UC_ZERO, ch_action_rx },
/* TXINIT: initial handshake on the write channel. */
1804 {CH_STATE_TXINIT, CH_EVENT_STOP, ch_action_haltio },
1805 {CH_STATE_TXINIT, CH_EVENT_START, fsm_action_nop },
1806 {CH_STATE_TXINIT, CH_EVENT_FINSTAT, ch_action_txidle },
1807 {CH_STATE_TXINIT, CH_EVENT_UC_RCRESET, ch_action_txiniterr },
1808 {CH_STATE_TXINIT, CH_EVENT_UC_RSRESET, ch_action_txiniterr },
1809 {CH_STATE_TXINIT, CH_EVENT_TIMER, ch_action_txiniterr },
1810 {CH_STATE_TXINIT, CH_EVENT_IO_ENODEV, ch_action_iofatal },
1811 {CH_STATE_TXINIT, CH_EVENT_IO_EIO, ch_action_reinit },
1812 {CH_STATE_TXINIT, CH_EVENT_MC_FAIL, ch_action_fail },
/* TXIDLE: write channel up, no transmission in progress. */
1814 {CH_STATE_TXIDLE, CH_EVENT_STOP, ch_action_haltio },
1815 {CH_STATE_TXIDLE, CH_EVENT_START, fsm_action_nop },
1816 {CH_STATE_TXIDLE, CH_EVENT_FINSTAT, ch_action_firstio },
1817 {CH_STATE_TXIDLE, CH_EVENT_UC_RCRESET, fsm_action_nop },
1818 {CH_STATE_TXIDLE, CH_EVENT_UC_RSRESET, fsm_action_nop },
1819 {CH_STATE_TXIDLE, CH_EVENT_IO_ENODEV, ch_action_iofatal },
1820 {CH_STATE_TXIDLE, CH_EVENT_IO_EIO, ch_action_reinit },
1821 {CH_STATE_TXIDLE, CH_EVENT_MC_FAIL, ch_action_fail },
/* TERM: channel terminating. */
1823 {CH_STATE_TERM, CH_EVENT_STOP, fsm_action_nop },
1824 {CH_STATE_TERM, CH_EVENT_START, ch_action_restart },
1825 {CH_STATE_TERM, CH_EVENT_FINSTAT, ch_action_stopped },
1826 {CH_STATE_TERM, CH_EVENT_UC_RCRESET, fsm_action_nop },
1827 {CH_STATE_TERM, CH_EVENT_UC_RSRESET, fsm_action_nop },
1828 {CH_STATE_TERM, CH_EVENT_MC_FAIL, ch_action_fail },
/* DTERM: terminating with intent to restart (set mode again). */
1830 {CH_STATE_DTERM, CH_EVENT_STOP, ch_action_haltio },
1831 {CH_STATE_DTERM, CH_EVENT_START, ch_action_restart },
1832 {CH_STATE_DTERM, CH_EVENT_FINSTAT, ch_action_setmode },
1833 {CH_STATE_DTERM, CH_EVENT_UC_RCRESET, fsm_action_nop },
1834 {CH_STATE_DTERM, CH_EVENT_UC_RSRESET, fsm_action_nop },
1835 {CH_STATE_DTERM, CH_EVENT_MC_FAIL, ch_action_fail },
/* TX: write in progress; unit checks and timeouts trigger retry. */
1837 {CH_STATE_TX, CH_EVENT_STOP, ch_action_haltio },
1838 {CH_STATE_TX, CH_EVENT_START, fsm_action_nop },
1839 {CH_STATE_TX, CH_EVENT_FINSTAT, ch_action_txdone },
1840 {CH_STATE_TX, CH_EVENT_UC_RCRESET, ch_action_txretry },
1841 {CH_STATE_TX, CH_EVENT_UC_RSRESET, ch_action_txretry },
1842 {CH_STATE_TX, CH_EVENT_TIMER, ch_action_txretry },
1843 {CH_STATE_TX, CH_EVENT_IO_ENODEV, ch_action_iofatal },
1844 {CH_STATE_TX, CH_EVENT_IO_EIO, ch_action_reinit },
1845 {CH_STATE_TX, CH_EVENT_MC_FAIL, ch_action_fail },
/* Error states: only STOP and machine-check failure are handled. */
1847 {CH_STATE_RXERR, CH_EVENT_STOP, ch_action_haltio },
1848 {CH_STATE_TXERR, CH_EVENT_STOP, ch_action_haltio },
1849 {CH_STATE_TXERR, CH_EVENT_MC_FAIL, ch_action_fail },
1850 {CH_STATE_RXERR, CH_EVENT_MC_FAIL, ch_action_fail },
/* Number of entries in ch_fsm[] (NOTE(review): ARRAY_SIZE() would be
 * the modern spelling). */
1853 static const int CH_FSM_LEN = sizeof (ch_fsm) / sizeof (fsm_node);
1856 * Functions related to setup and device detection.
1857 *****************************************************************************/
/*
 * Compare two channel ids for sort order.
 * NOTE(review): the loop below (its body is not fully visible in this
 * excerpt) appears to advance both ids past a fixed 5-character prefix
 * before the numeric comparison — confirm against the full source.
 * Returns nonzero if id1 sorts before id2.
 */
1860 less_than(char *id1, char *id2)
1864 for (i = 0; i < 5; i++) {
/* Compare the remaining parts as hexadecimal device numbers. */
1868 dev1 = simple_strtoul(id1, &id1, 16);
1869 dev2 = simple_strtoul(id2, &id2, 16);
1871 return (dev1 < dev2);
1875 * Add a new channel to the list of channels.
1876 * Keeps the channel list sorted.
1878 * @param cdev The ccw_device to be added.
1879 * @param type The type class of the new channel.
1881 * @return 0 on success, !0 on error.
1884 add_channel(struct ccw_device *cdev, enum channel_types type)
1886 struct channel **c = &channels;
1889 DBF_TEXT(trace, 2, __FUNCTION__);
/* Allocate and zero the channel descriptor. */
1891 (struct channel *) kmalloc(sizeof (struct channel),
1892 GFP_KERNEL)) == NULL) {
1893 ctc_pr_warn("ctc: Out of memory in add_channel\n");
1896 memset(ch, 0, sizeof (struct channel));
/* The CCW program must live in DMA-capable memory. */
1897 if ((ch->ccw = (struct ccw1 *) kmalloc(sizeof (struct ccw1) * 8,
1898 GFP_KERNEL | GFP_DMA)) == NULL) {
1900 ctc_pr_warn("ctc: Out of memory in add_channel\n");
1905 * "static" ccws are used in the following way:
1907 * ccw[0..2] (Channel program for generic I/O):
1909 * 1: read or write (depending on direction) with fixed
1910 * buffer (idal allocated once when buffer is allocated)
1912 * ccw[3..5] (Channel program for direct write of packets)
1914 * 4: write (idal allocated on every write).
1916 * ccw[6..7] (Channel program for initial channel setup):
1917 * 3: set extended mode
1920 * ch->ccw[0..5] are initialized in ch_action_start because
1921 * the channel's direction is yet unknown here.
1923 ch->ccw[6].cmd_code = CCW_CMD_SET_EXTENDED;
1924 ch->ccw[6].flags = CCW_FLAG_SLI;
1925 ch->ccw[6].count = 0;
1928 ch->ccw[7].cmd_code = CCW_CMD_NOOP;
1929 ch->ccw[7].flags = CCW_FLAG_SLI;
1930 ch->ccw[7].count = 0;
/* Channel id is derived from the ccw device's bus id. */
1934 snprintf(ch->id, CTC_ID_SIZE, "ch-%s", cdev->dev.bus_id);
1936 loglevel = CTC_LOGLEVEL_DEFAULT;
/* Instantiate the per-channel statemachine defined by ch_fsm[]. */
1937 ch->fsm = init_fsm(ch->id, ch_state_names,
1938 ch_event_names, NR_CH_STATES, NR_CH_EVENTS,
1939 ch_fsm, CH_FSM_LEN, GFP_KERNEL);
1940 if (ch->fsm == NULL) {
1941 ctc_pr_warn("ctc: Could not create FSM in add_channel\n");
1945 fsm_newstate(ch->fsm, CH_STATE_IDLE);
/* Pre-allocate the interrupt response block copy buffer. */
1946 if ((ch->irb = (struct irb *) kmalloc(sizeof (struct irb),
1947 GFP_KERNEL)) == NULL) {
1948 ctc_pr_warn("ctc: Out of memory in add_channel\n");
1953 memset(ch->irb, 0, sizeof (struct irb));
/* Walk the sorted list to the insertion point; reuse duplicates. */
1954 while (*c && less_than((*c)->id, ch->id))
1956 if (!strncmp((*c)->id, ch->id, CTC_ID_SIZE)) {
1958 "ctc: add_channel: device %s already in list, "
1959 "using old entry\n", (*c)->id);
1965 fsm_settimer(ch->fsm, &ch->timer);
1966 skb_queue_head_init(&ch->io_queue);
1967 skb_queue_head_init(&ch->collect_queue);
1974 * Release a specific channel in the channel list.
1976 * @param ch Pointer to channel struct to be released.
1979 channel_free(struct channel *ch)
/* Only marks the channel reusable; memory is freed in channel_remove(). */
1981 ch->flags &= ~CHANNEL_FLAGS_INUSE;
1982 fsm_newstate(ch->fsm, CH_STATE_IDLE);
1986 * Remove a specific channel in the channel list.
1988 * @param ch Pointer to channel struct to be released.
1991 channel_remove(struct channel *ch)
1993 struct channel **c = &channels;
1995 DBF_TEXT(trace, 2, __FUNCTION__);
/* Unlink from the global list (lines not visible in this excerpt),
 * then release timers, IDALs and the transfer skb. */
2003 fsm_deltimer(&ch->timer);
2005 clear_normalized_cda(&ch->ccw[4]);
2006 if (ch->trans_skb != NULL) {
2007 clear_normalized_cda(&ch->ccw[1]);
2008 dev_kfree_skb(ch->trans_skb);
2018 * Get a specific channel from the channel list.
2020 * @param type Type of channel we are interested in.
2021 * @param id Id of channel we are interested in.
2022 * @param direction Direction we want to use this channel for.
2024 * @return Pointer to a channel or NULL if no matching channel available.
2026 static struct channel
2028 channel_get(enum channel_types type, char *id, int direction)
2030 struct channel *ch = channels;
2032 DBF_TEXT(trace, 3, __FUNCTION__);
2034 ctc_pr_debug("ctc: %s(): searching for ch with id %s and type %d\n",
2035 __func__, id, type);
/* Linear scan for matching id AND type. */
2038 while (ch && ((strncmp(ch->id, id, CTC_ID_SIZE)) || (ch->type != type))) {
2040 ctc_pr_debug("ctc: %s(): ch=0x%p (id=%s, type=%d\n",
2041 __func__, ch, ch->id, ch->type);
2046 ctc_pr_debug("ctc: %s(): ch=0x%pq (id=%s, type=%d\n",
2047 __func__, ch, ch->id, ch->type);
2050 ctc_pr_warn("ctc: %s(): channel with id %s "
2051 "and type %d not found in channel list\n",
2052 __func__, id, type);
/* A channel already claimed by another device cannot be handed out. */
2054 if (ch->flags & CHANNEL_FLAGS_INUSE)
/* Claim the channel and record the requested direction. */
2057 ch->flags |= CHANNEL_FLAGS_INUSE;
2058 ch->flags &= ~CHANNEL_FLAGS_RWMASK;
2059 ch->flags |= (direction == WRITE)
2060 ? CHANNEL_FLAGS_WRITE : CHANNEL_FLAGS_READ;
2061 fsm_newstate(ch->fsm, CH_STATE_STOPPED);
2068 * Return the channel type by name.
2070 * @param name Name of network interface.
2072 * @return Type class of channel to be used for that interface.
2074 static enum channel_types inline
2075 extract_channel_media(char *name)
2077 enum channel_types ret = channel_type_unknown;
/* "ctc*" -> parallel, "escon*" -> ESCON; anything else stays unknown. */
2080 if (strncmp(name, "ctc", 3) == 0)
2081 ret = channel_type_parallel;
2082 if (strncmp(name, "escon", 5) == 0)
2083 ret = channel_type_escon;
/*
 * Check whether the irb pointer passed to the interrupt handler is an
 * error code (ERR_PTR) instead of a real irb; log and return the error
 * value, or 0 when the irb is valid.
 */
2089 __ctc_check_irb_error(struct ccw_device *cdev, struct irb *irb)
2094 switch (PTR_ERR(irb)) {
2096 ctc_pr_warn("i/o-error on device %s\n", cdev->dev.bus_id);
2097 // CTC_DBF_TEXT(trace, 2, "ckirberr");
2098 // CTC_DBF_TEXT_(trace, 2, " rc%d", -EIO);
2101 ctc_pr_warn("timeout on device %s\n", cdev->dev.bus_id);
2102 // CTC_DBF_TEXT(trace, 2, "ckirberr");
2103 // CTC_DBF_TEXT_(trace, 2, " rc%d", -ETIMEDOUT);
2106 ctc_pr_warn("unknown error %ld on device %s\n", PTR_ERR(irb),
2108 // CTC_DBF_TEXT(trace, 2, "ckirberr");
2109 // CTC_DBF_TEXT(trace, 2, " rc???");
2111 return PTR_ERR(irb);
/* Main IRQ handler: translate subchannel status into channel FSM events. */
2117 * @param cdev The ccw_device the interrupt is for.
2118 * @param intparm interruption parameter.
2119 * @param irb interruption response block.
2122 ctc_irq_handler(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
2125 struct net_device *dev;
2126 struct ctc_priv *priv;
2128 DBF_TEXT(trace, 5, __FUNCTION__);
/* irb may actually be an ERR_PTR; bail out early in that case. */
2129 if (__ctc_check_irb_error(cdev, irb))
2132 /* Check for unsolicited interrupts. */
2133 if (!cdev->dev.driver_data) {
2134 ctc_pr_warn("ctc: Got unsolicited irq: %s c-%02x d-%02x\n",
2135 cdev->dev.bus_id, irb->scsw.cstat,
2140 priv = ((struct ccwgroup_device *)cdev->dev.driver_data)
2143 /* Try to extract channel from driver data. */
2144 if (priv->channel[READ]->cdev == cdev)
2145 ch = priv->channel[READ];
2146 else if (priv->channel[WRITE]->cdev == cdev)
2147 ch = priv->channel[WRITE];
2149 ctc_pr_err("ctc: Can't determine channel for interrupt, "
2150 "device %s\n", cdev->dev.bus_id);
2154 dev = (struct net_device *) (ch->netdev);
2156 ctc_pr_crit("ctc: ctc_irq_handler dev=NULL bus_id=%s, ch=0x%p\n",
2157 cdev->dev.bus_id, ch);
2162 ctc_pr_debug("%s: interrupt for device: %s received c-%02x d-%02x\n",
2163 dev->name, ch->id, irb->scsw.cstat, irb->scsw.dstat);
2166 /* Copy interruption response block. */
2167 memcpy(ch->irb, irb, sizeof(struct irb));
2169 /* Check for good subchannel return code, otherwise error message */
2170 if (ch->irb->scsw.cstat) {
2171 fsm_event(ch->fsm, CH_EVENT_SC_UNKNOWN, ch);
2172 ctc_pr_warn("%s: subchannel check for device: %s - %02x %02x\n",
2173 dev->name, ch->id, ch->irb->scsw.cstat,
2174 ch->irb->scsw.dstat);
2178 /* Check the reason-code of a unit check */
2179 if (ch->irb->scsw.dstat & DEV_STAT_UNIT_CHECK) {
2180 ccw_unit_check(ch, ch->irb->ecw[0]);
2183 if (ch->irb->scsw.dstat & DEV_STAT_BUSY) {
/* Busy + attention means the remote side is reading too. */
2184 if (ch->irb->scsw.dstat & DEV_STAT_ATTENTION)
2185 fsm_event(ch->fsm, CH_EVENT_ATTNBUSY, ch);
2187 fsm_event(ch->fsm, CH_EVENT_BUSY, ch);
2190 if (ch->irb->scsw.dstat & DEV_STAT_ATTENTION) {
2191 fsm_event(ch->fsm, CH_EVENT_ATTN, ch);
/* Final status (secondary/pending/alert) completes the channel
 * program; everything else is reported as a generic IRQ event. */
2194 if ((ch->irb->scsw.stctl & SCSW_STCTL_SEC_STATUS) ||
2195 (ch->irb->scsw.stctl == SCSW_STCTL_STATUS_PEND) ||
2196 (ch->irb->scsw.stctl ==
2197 (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))
2198 fsm_event(ch->fsm, CH_EVENT_FINSTAT, ch);
2200 fsm_event(ch->fsm, CH_EVENT_IRQ, ch);
2205 * Actions for interface - statemachine.
2206 *****************************************************************************/
2209 * Startup channels by sending CH_EVENT_START to each channel.
2211 * @param fi An instance of an interface statemachine.
2212 * @param event The event, just happened.
2213 * @param arg Generic pointer, casted from struct net_device * upon call.
2216 dev_action_start(fsm_instance * fi, int event, void *arg)
2218 struct net_device *dev = (struct net_device *) arg;
2219 struct ctc_priv *privptr = dev->priv;
2222 DBF_TEXT(setup, 3, __FUNCTION__);
2223 fsm_deltimer(&privptr->restart_timer);
2224 fsm_newstate(fi, DEV_STATE_STARTWAIT_RXTX);
/* Kick both the read and the write channel statemachines. */
2225 for (direction = READ; direction <= WRITE; direction++) {
2226 struct channel *ch = privptr->channel[direction];
2227 fsm_event(ch->fsm, CH_EVENT_START, ch);
2232 * Shutdown channels by sending CH_EVENT_STOP to each channel.
2234 * @param fi An instance of an interface statemachine.
2235 * @param event The event, just happened.
2236 * @param arg Generic pointer, casted from struct net_device * upon call.
2239 dev_action_stop(fsm_instance * fi, int event, void *arg)
2241 struct net_device *dev = (struct net_device *) arg;
2242 struct ctc_priv *privptr = dev->priv;
2245 DBF_TEXT(trace, 3, __FUNCTION__);
2246 fsm_newstate(fi, DEV_STATE_STOPWAIT_RXTX);
/* Mirror of dev_action_start: stop both directions. */
2247 for (direction = READ; direction <= WRITE; direction++) {
2248 struct channel *ch = privptr->channel[direction];
2249 fsm_event(ch->fsm, CH_EVENT_STOP, ch);
/*
 * Restart the interface: stop both channels now, then schedule a
 * delayed DEV_EVENT_START via the restart timer.
 */
2253 dev_action_restart(fsm_instance *fi, int event, void *arg)
2255 struct net_device *dev = (struct net_device *)arg;
2256 struct ctc_priv *privptr = dev->priv;
2258 DBF_TEXT(trace, 3, __FUNCTION__);
2259 ctc_pr_debug("%s: Restarting\n", dev->name);
2260 dev_action_stop(fi, event, arg);
2261 fsm_event(privptr->fsm, DEV_EVENT_STOP, dev);
2262 fsm_addtimer(&privptr->restart_timer, CTC_TIMEOUT_5SEC,
2263 DEV_EVENT_START, dev);
2267 * Called from channel statemachine
2268 * when a channel is up and running.
2270 * @param fi An instance of an interface statemachine.
2271 * @param event The event, just happened.
2272 * @param arg Generic pointer, casted from struct net_device * upon call.
2275 dev_action_chup(fsm_instance * fi, int event, void *arg)
2277 struct net_device *dev = (struct net_device *) arg;
2278 struct ctc_priv *privptr = dev->priv;
2280 DBF_TEXT(trace, 3, __FUNCTION__);
2281 switch (fsm_getstate(fi)) {
/* Both channels still pending: one coming up leaves us waiting
 * for the other direction. */
2282 case DEV_STATE_STARTWAIT_RXTX:
2283 if (event == DEV_EVENT_RXUP)
2284 fsm_newstate(fi, DEV_STATE_STARTWAIT_TX)
2286 fsm_newstate(fi, DEV_STATE_STARTWAIT_RX);
/* Last missing direction came up: interface is fully connected. */
2288 case DEV_STATE_STARTWAIT_RX:
2289 if (event == DEV_EVENT_RXUP) {
2290 fsm_newstate(fi, DEV_STATE_RUNNING);
2291 ctc_pr_info("%s: connected with remote side\n",
2293 if (privptr->protocol == CTC_PROTO_LINUX_TTY)
2294 ctc_tty_setcarrier(dev, 1);
2295 ctc_clear_busy(dev);
2298 case DEV_STATE_STARTWAIT_TX:
2299 if (event == DEV_EVENT_TXUP) {
2300 fsm_newstate(fi, DEV_STATE_RUNNING);
2301 ctc_pr_info("%s: connected with remote side\n",
2303 if (privptr->protocol == CTC_PROTO_LINUX_TTY)
2304 ctc_tty_setcarrier(dev, 1);
2305 ctc_clear_busy(dev);
/* Channel came up while stopping: fold back into STOPWAIT_RXTX. */
2308 case DEV_STATE_STOPWAIT_TX:
2309 if (event == DEV_EVENT_RXUP)
2310 fsm_newstate(fi, DEV_STATE_STOPWAIT_RXTX);
2312 case DEV_STATE_STOPWAIT_RX:
2313 if (event == DEV_EVENT_TXUP)
2314 fsm_newstate(fi, DEV_STATE_STOPWAIT_RXTX);
2320 * Called from channel statemachine
2321 * when a channel has been shutdown.
2323 * @param fi An instance of an interface statemachine.
2324 * @param event The event, just happened.
2325 * @param arg Generic pointer, casted from struct net_device * upon call.
2328 dev_action_chdown(fsm_instance * fi, int event, void *arg)
2330 struct net_device *dev = (struct net_device *) arg;
2331 struct ctc_priv *privptr = dev->priv;
2333 DBF_TEXT(trace, 3, __FUNCTION__);
2334 switch (fsm_getstate(fi)) {
/* Losing a channel while running drops carrier (tty protocol) and
 * moves to the corresponding STARTWAIT state until it returns. */
2335 case DEV_STATE_RUNNING:
2336 if (privptr->protocol == CTC_PROTO_LINUX_TTY)
2337 ctc_tty_setcarrier(dev, 0);
2338 if (event == DEV_EVENT_TXDOWN)
2339 fsm_newstate(fi, DEV_STATE_STARTWAIT_TX);
2341 fsm_newstate(fi, DEV_STATE_STARTWAIT_RX);
2343 case DEV_STATE_STARTWAIT_RX:
2344 if (event == DEV_EVENT_TXDOWN)
2345 fsm_newstate(fi, DEV_STATE_STARTWAIT_RXTX);
2347 case DEV_STATE_STARTWAIT_TX:
2348 if (event == DEV_EVENT_RXDOWN)
2349 fsm_newstate(fi, DEV_STATE_STARTWAIT_RXTX);
/* During shutdown, each direction going down narrows the wait
 * until both are down, then the device is STOPPED. */
2351 case DEV_STATE_STOPWAIT_RXTX:
2352 if (event == DEV_EVENT_TXDOWN)
2353 fsm_newstate(fi, DEV_STATE_STOPWAIT_RX);
2355 fsm_newstate(fi, DEV_STATE_STOPWAIT_TX);
2357 case DEV_STATE_STOPWAIT_RX:
2358 if (event == DEV_EVENT_RXDOWN)
2359 fsm_newstate(fi, DEV_STATE_STOPPED);
2361 case DEV_STATE_STOPWAIT_TX:
2362 if (event == DEV_EVENT_TXDOWN)
2363 fsm_newstate(fi, DEV_STATE_STOPPED);
/* The statemachine for the network interface (device level). */
2368 static const fsm_node dev_fsm[] = {
2369 {DEV_STATE_STOPPED, DEV_EVENT_START, dev_action_start},
2371 {DEV_STATE_STOPWAIT_RXTX, DEV_EVENT_START, dev_action_start },
2372 {DEV_STATE_STOPWAIT_RXTX, DEV_EVENT_RXDOWN, dev_action_chdown },
2373 {DEV_STATE_STOPWAIT_RXTX, DEV_EVENT_TXDOWN, dev_action_chdown },
2374 {DEV_STATE_STOPWAIT_RXTX, DEV_EVENT_RESTART, dev_action_restart },
2376 {DEV_STATE_STOPWAIT_RX, DEV_EVENT_START, dev_action_start },
2377 {DEV_STATE_STOPWAIT_RX, DEV_EVENT_RXUP, dev_action_chup },
2378 {DEV_STATE_STOPWAIT_RX, DEV_EVENT_TXUP, dev_action_chup },
2379 {DEV_STATE_STOPWAIT_RX, DEV_EVENT_RXDOWN, dev_action_chdown },
2380 {DEV_STATE_STOPWAIT_RX, DEV_EVENT_RESTART, dev_action_restart },
2382 {DEV_STATE_STOPWAIT_TX, DEV_EVENT_START, dev_action_start },
2383 {DEV_STATE_STOPWAIT_TX, DEV_EVENT_RXUP, dev_action_chup },
2384 {DEV_STATE_STOPWAIT_TX, DEV_EVENT_TXUP, dev_action_chup },
2385 {DEV_STATE_STOPWAIT_TX, DEV_EVENT_TXDOWN, dev_action_chdown },
2386 {DEV_STATE_STOPWAIT_TX, DEV_EVENT_RESTART, dev_action_restart },
2388 {DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_STOP, dev_action_stop },
2389 {DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_RXUP, dev_action_chup },
2390 {DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_TXUP, dev_action_chup },
2391 {DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_RXDOWN, dev_action_chdown },
2392 {DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_TXDOWN, dev_action_chdown },
2393 {DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_RESTART, dev_action_restart },
2395 {DEV_STATE_STARTWAIT_TX, DEV_EVENT_STOP, dev_action_stop },
2396 {DEV_STATE_STARTWAIT_TX, DEV_EVENT_RXUP, dev_action_chup },
2397 {DEV_STATE_STARTWAIT_TX, DEV_EVENT_TXUP, dev_action_chup },
2398 {DEV_STATE_STARTWAIT_TX, DEV_EVENT_RXDOWN, dev_action_chdown },
2399 {DEV_STATE_STARTWAIT_TX, DEV_EVENT_RESTART, dev_action_restart },
2401 {DEV_STATE_STARTWAIT_RX, DEV_EVENT_STOP, dev_action_stop },
2402 {DEV_STATE_STARTWAIT_RX, DEV_EVENT_RXUP, dev_action_chup },
2403 {DEV_STATE_STARTWAIT_RX, DEV_EVENT_TXUP, dev_action_chup },
2404 {DEV_STATE_STARTWAIT_RX, DEV_EVENT_TXDOWN, dev_action_chdown },
2405 {DEV_STATE_STARTWAIT_RX, DEV_EVENT_RESTART, dev_action_restart },
2407 {DEV_STATE_RUNNING, DEV_EVENT_STOP, dev_action_stop },
2408 {DEV_STATE_RUNNING, DEV_EVENT_RXDOWN, dev_action_chdown },
2409 {DEV_STATE_RUNNING, DEV_EVENT_TXDOWN, dev_action_chdown },
2410 {DEV_STATE_RUNNING, DEV_EVENT_TXUP, fsm_action_nop },
2411 {DEV_STATE_RUNNING, DEV_EVENT_RXUP, fsm_action_nop },
2412 {DEV_STATE_RUNNING, DEV_EVENT_RESTART, dev_action_restart },
/* Number of entries in dev_fsm[] (NOTE(review): ARRAY_SIZE() would be
 * the modern spelling). */
2415 static const int DEV_FSM_LEN = sizeof (dev_fsm) / sizeof (fsm_node);
2418 * Transmit a packet.
2419 * This is a helper function for ctc_tx().
2421 * @param ch Channel to be used for sending.
2422 * @param skb Pointer to struct sk_buff of packet to send.
2423 * The linklevel header has already been set up
2426 * @return 0 on success, -ERRNO on failure. (Never fails.)
2429 transmit_skb(struct channel *ch, struct sk_buff *skb)
2431 unsigned long saveflags;
2432 struct ll_header header;
2435 DBF_TEXT(trace, 5, __FUNCTION__);
/* Channel busy: queue the skb on the collect queue for a later
 * grouped transmission instead of starting I/O now. */
2436 if (fsm_getstate(ch->fsm) != CH_STATE_TXIDLE) {
2437 int l = skb->len + LL_HEADER_LENGTH;
2439 spin_lock_irqsave(&ch->collect_lock, saveflags);
/* Refuse to exceed the transfer buffer (2 bytes reserved for
 * the block-length prefix). */
2440 if (ch->collect_len + l > ch->max_bufsize - 2)
2443 atomic_inc(&skb->users);
2445 header.type = skb->protocol;
2447 memcpy(skb_push(skb, LL_HEADER_LENGTH), &header,
2449 skb_queue_tail(&ch->collect_queue, skb);
2450 ch->collect_len += l;
2452 spin_unlock_irqrestore(&ch->collect_lock, saveflags);
/* Channel idle: build block header + LL header and start I/O now. */
2456 struct sk_buff *nskb;
2460 * Protect skb against beeing free'd by upper
2463 atomic_inc(&skb->users);
2464 ch->prof.txlen += skb->len;
2465 header.length = skb->len + LL_HEADER_LENGTH;
2466 header.type = skb->protocol;
2468 memcpy(skb_push(skb, LL_HEADER_LENGTH), &header,
2470 block_len = skb->len + 2;
2471 *((__u16 *) skb_push(skb, 2)) = block_len;
2474 * IDAL support in CTC is broken, so we have to
2475 * care about skb's above 2G ourselves.
/* hi != 0 means data crosses the 2G boundary; copy to a
 * DMA-safe skb first. */
2477 hi = ((unsigned long) skb->tail + LL_HEADER_LENGTH) >> 31;
2479 nskb = alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
/* Allocation failed: undo our header and bail out. */
2481 atomic_dec(&skb->users);
2482 skb_pull(skb, LL_HEADER_LENGTH + 2);
2485 memcpy(skb_put(nskb, skb->len),
2486 skb->data, skb->len);
2487 atomic_inc(&nskb->users);
2488 atomic_dec(&skb->users);
2489 dev_kfree_skb_irq(skb);
2494 ch->ccw[4].count = block_len;
2495 if (set_normalized_cda(&ch->ccw[4], skb->data)) {
2497 * idal allocation failed, try via copying to
2498 * trans_skb. trans_skb usually has a pre-allocated
2501 if (ctc_checkalloc_buffer(ch, 1)) {
2503 * Remove our header. It gets added
2504 * again on retransmit.
2506 atomic_dec(&skb->users);
2507 skb_pull(skb, LL_HEADER_LENGTH + 2);
2511 ch->trans_skb->tail = ch->trans_skb->data;
2512 ch->trans_skb->len = 0;
2513 ch->ccw[1].count = skb->len;
2514 memcpy(skb_put(ch->trans_skb, skb->len), skb->data,
2516 atomic_dec(&skb->users);
2517 dev_kfree_skb_irq(skb);
/* Direct-write path: keep the skb queued until txdone. */
2520 skb_queue_tail(&ch->io_queue, skb);
/* Start the channel program (ccw_idx selects direct vs. trans_skb). */
2524 fsm_newstate(ch->fsm, CH_STATE_TX);
2525 fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch);
2526 spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
2527 ch->prof.send_stamp = xtime;
2528 rc = ccw_device_start(ch->cdev, &ch->ccw[ccw_idx],
2529 (unsigned long) ch, 0xff, 0);
2530 spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
2532 ch->prof.doios_single++;
/* Start failed (guard line not visible in this excerpt):
 * undo the queueing and the pushed headers. */
2534 fsm_deltimer(&ch->timer);
2535 ccw_check_return_code(ch, rc, "single skb TX");
2537 skb_dequeue_tail(&ch->io_queue);
2539 * Remove our header. It gets added
2540 * again on retransmit.
2542 skb_pull(skb, LL_HEADER_LENGTH + 2);
/* Success path for the direct-write case: account the packet. */
2545 struct net_device *dev = ch->netdev;
2546 struct ctc_priv *privptr = dev->priv;
2547 privptr->stats.tx_packets++;
2548 privptr->stats.tx_bytes +=
2549 skb->len - LL_HEADER_LENGTH;
2558 * Interface API for upper network layers
2559 *****************************************************************************/
2562 * Open an interface.
2563 * Called from generic network layer when ifconfig up is run.
2565 * @param dev Pointer to interface struct.
2567 * @return 0 on success, -ERRNO on failure. (Never fails.)
2570 ctc_open(struct net_device * dev)
/* Trace the call, then drive the per-device FSM; channel start-up is
 * handled asynchronously by the DEV_EVENT_START transition. */
2572 DBF_TEXT(trace, 5, __FUNCTION__);
2573 fsm_event(((struct ctc_priv *) dev->priv)->fsm, DEV_EVENT_START, dev);
2578 * Close an interface.
2579 * Called from generic network layer when ifconfig down is run.
2581 * @param dev Pointer to interface struct.
2583 * @return 0 on success, -ERRNO on failure. (Never fails.)
2586 ctc_close(struct net_device * dev)
/* Mirror of ctc_open(): post DEV_EVENT_STOP and let the device FSM
 * shut both channels down asynchronously. */
2588 DBF_TEXT(trace, 5, __FUNCTION__);
2589 fsm_event(((struct ctc_priv *) dev->priv)->fsm, DEV_EVENT_STOP, dev);
2594 * Start transmission of a packet.
2595 * Called from generic network device layer.
2597 * @param skb Pointer to buffer containing the packet.
2598 * @param dev Pointer to interface struct.
2600 * @return 0 if packet consumed, !0 if packet rejected.
2601 * Note: If we return !0, then the packet is free'd by
2602 * the generic network layer.
2605 ctc_tx(struct sk_buff *skb, struct net_device * dev)
2608 struct ctc_priv *privptr = (struct ctc_priv *) dev->priv;
2610 DBF_TEXT(trace, 5, __FUNCTION__);
2612 * Some sanity checks ...
/* NULL-skb guard (the 'if' itself is in an elided line of this
 * extract): drop and count as tx_dropped. */
2615 ctc_pr_warn("%s: NULL sk_buff passed\n", dev->name);
2616 privptr->stats.tx_dropped++;
/* transmit_skb() pushes an LL header plus a 2-byte block length in
 * front of the payload; reject skbs without room for it. */
2619 if (skb_headroom(skb) < (LL_HEADER_LENGTH + 2)) {
2620 ctc_pr_warn("%s: Got sk_buff with head room < %ld bytes\n",
2621 dev->name, LL_HEADER_LENGTH + 2);
2623 privptr->stats.tx_dropped++;
2628 * If channels are not running, try to restart them
2629 * and throw away packet.
2631 if (fsm_getstate(privptr->fsm) != DEV_STATE_RUNNING) {
2632 fsm_event(privptr->fsm, DEV_EVENT_START, dev);
/* TTY protocol handling differs here; the branch body is elided in
 * this extract — presumably it skips the error accounting below. */
2633 if (privptr->protocol == CTC_PROTO_LINUX_TTY)
2636 privptr->stats.tx_dropped++;
2637 privptr->stats.tx_errors++;
2638 privptr->stats.tx_carrier_errors++;
/* Serialize transmitters: only one TX may be in flight per device. */
2642 if (ctc_test_and_set_busy(dev))
2645 dev->trans_start = jiffies;
2646 if (transmit_skb(privptr->channel[WRITE], skb) != 0)
2648 ctc_clear_busy(dev);
2653 * Sets MTU of an interface.
2655 * @param dev Pointer to interface struct.
2656 * @param new_mtu The new MTU to use for this interface.
2658 * @return 0 on success, -EINVAL if MTU is out of valid range.
2659 * (valid range is 576 .. 65527). If VM is on the
2660 * remote side, maximum MTU is 32760, however this is
2661 * <em>not</em> checked here.
2664 ctc_change_mtu(struct net_device * dev, int new_mtu)
2666 struct ctc_priv *privptr = (struct ctc_priv *) dev->priv;
2668 DBF_TEXT(trace, 3, __FUNCTION__);
/* Besides the generic 576..65527 window, the MTU must also fit into
 * the READ channel buffer minus header (LL header + 2-byte length). */
2669 if ((new_mtu < 576) || (new_mtu > 65527) ||
2670 (new_mtu > (privptr->channel[READ]->max_bufsize -
2671 LL_HEADER_LENGTH - 2)))
2674 dev->hard_header_len = LL_HEADER_LENGTH + 2;
2679 * Returns interface statistics of a device.
2681 * @param dev Pointer to interface struct.
2683 * @return Pointer to stats struct of this interface.
2685 static struct net_device_stats *
2686 ctc_stats(struct net_device * dev)
/* Stats live inside the per-device private data; no locking here. */
2688 return &((struct ctc_priv *) dev->priv)->stats;
/* sysfs 'buffer' read: report the current READ-channel buffer size.
 * (A NULL check on priv presumably sits in an elided line.) */
2695 buffer_show(struct device *dev, char *buf)
2697 struct ctc_priv *priv;
2699 priv = dev->driver_data;
2702 return sprintf(buf, "%d\n",
2703 priv->channel[READ]->max_bufsize);
/* sysfs 'buffer' write: set a new channel buffer size.
 * Validates the value against CTC_BUFSIZE_LIMIT and (when the netdev
 * is running) the current MTU, then applies it to both channels and
 * flags them so the buffers are re-allocated on next (re)start.
 * NOTE(review): the sscanf() result is not checked here — a
 * non-numeric write would use an uninitialized bs1; confirm whether
 * an elided line handles this. */
2707 buffer_write(struct device *dev, const char *buf, size_t count)
2709 struct ctc_priv *priv;
2710 struct net_device *ndev;
2713 DBF_TEXT(trace, 5, __FUNCTION__);
2714 priv = dev->driver_data;
2717 ndev = priv->channel[READ]->netdev;
2720 sscanf(buf, "%u", &bs1);
2722 if (bs1 > CTC_BUFSIZE_LIMIT)
/* Running interface: buffer must still hold MTU + LL header + len. */
2724 if ((ndev->flags & IFF_RUNNING) &&
2725 (bs1 < (ndev->mtu + LL_HEADER_LENGTH + 2)))
2727 if (bs1 < (576 + LL_HEADER_LENGTH + 2))
2730 priv->channel[READ]->max_bufsize =
2731 priv->channel[WRITE]->max_bufsize = bs1;
/* If not running, shrink the MTU to match the new buffer size. */
2732 if (!(ndev->flags & IFF_RUNNING))
2733 ndev->mtu = bs1 - LL_HEADER_LENGTH - 2;
2734 priv->channel[READ]->flags |= CHANNEL_FLAGS_BUFSIZE_CHANGED;
2735 priv->channel[WRITE]->flags |= CHANNEL_FLAGS_BUFSIZE_CHANGED;
/* sysfs 'loglevel' read: report the driver-global log level.
 * priv is fetched (NULL check presumably in an elided line) but the
 * value printed is the file-scope 'loglevel', not per-device. */
2742 loglevel_show(struct device *dev, char *buf)
2744 struct ctc_priv *priv;
2746 priv = dev->driver_data;
2749 return sprintf(buf, "%d\n", loglevel);
/* sysfs 'loglevel' write: parse and range-check a new log level
 * (0..CTC_LOGLEVEL_MAX). The assignment to the global happens in an
 * elided line of this extract. sscanf() result is unchecked. */
2753 loglevel_write(struct device *dev, const char *buf, size_t count)
2755 struct ctc_priv *priv;
2758 DBF_TEXT(trace, 5, __FUNCTION__);
2759 priv = dev->driver_data;
2762 sscanf(buf, "%i", &ll1);
2764 if ((ll1 > CTC_LOGLEVEL_MAX) || (ll1 < 0))
/* Format the TX-side profiling counters and FSM states of a device
 * into a temporary 2 KiB buffer and emit them via ctc_pr_debug().
 * The kmalloc-failure check and the kfree(sbuf) are in elided lines
 * of this extract — presumably present in the original. */
2771 ctc_print_statistics(struct ctc_priv *priv)
2776 DBF_TEXT(trace, 4, __FUNCTION__);
2779 sbuf = (char *)kmalloc(2048, GFP_KERNEL);
2784 p += sprintf(p, " Device FSM state: %s\n",
2785 fsm_getstate_str(priv->fsm));
2786 p += sprintf(p, " RX channel FSM state: %s\n",
2787 fsm_getstate_str(priv->channel[READ]->fsm));
2788 p += sprintf(p, " TX channel FSM state: %s\n",
2789 fsm_getstate_str(priv->channel[WRITE]->fsm));
2790 p += sprintf(p, " Max. TX buffer used: %ld\n",
2791 priv->channel[WRITE]->prof.maxmulti);
2792 p += sprintf(p, " Max. chained SKBs: %ld\n",
2793 priv->channel[WRITE]->prof.maxcqueue);
2794 p += sprintf(p, " TX single write ops: %ld\n",
2795 priv->channel[WRITE]->prof.doios_single);
2796 p += sprintf(p, " TX multi write ops: %ld\n",
2797 priv->channel[WRITE]->prof.doios_multi);
2798 p += sprintf(p, " Netto bytes written: %ld\n",
2799 priv->channel[WRITE]->prof.txlen);
2800 p += sprintf(p, " Max. TX IO-time: %ld\n",
2801 priv->channel[WRITE]->prof.tx_time);
2803 ctc_pr_debug("Statistics for %s:\n%s",
2804 priv->channel[WRITE]->netdev->name, sbuf);
/* sysfs 'stats' read: dump current statistics to the kernel log and
 * return a constant "0" to the reader. */
2810 stats_show(struct device *dev, char *buf)
2812 struct ctc_priv *priv = dev->driver_data;
2815 ctc_print_statistics(priv);
2816 return sprintf(buf, "0\n");
/* sysfs 'stats' write: any write resets the WRITE-channel profiling
 * counters; the written value itself is ignored. */
2820 stats_write(struct device *dev, const char *buf, size_t count)
2822 struct ctc_priv *priv = dev->driver_data;
2825 /* Reset statistics */
2826 memset(&priv->channel[WRITE]->prof, 0,
2827 sizeof(priv->channel[WRITE]->prof));
/* Per-device sysfs attributes (0644: world-readable, root-writable). */
2831 static DEVICE_ATTR(buffer, 0644, buffer_show, buffer_write);
2832 static DEVICE_ATTR(loglevel, 0644, loglevel_show, loglevel_write);
2833 static DEVICE_ATTR(stats, 0644, stats_show, stats_write);
/* Create the buffer/loglevel/stats sysfs files on the group device.
 * NOTE(review): device_create_file() return values are ignored; a
 * failure would silently leave a file missing. */
2836 ctc_add_attributes(struct device *dev)
2838 device_create_file(dev, &dev_attr_buffer);
2839 device_create_file(dev, &dev_attr_loglevel);
2840 device_create_file(dev, &dev_attr_stats);
/* Remove the sysfs files in reverse order of creation. */
2845 ctc_remove_attributes(struct device *dev)
2847 device_remove_file(dev, &dev_attr_stats);
2848 device_remove_file(dev, &dev_attr_loglevel);
2849 device_remove_file(dev, &dev_attr_buffer);
/* Unregister the net device, dispatching on protocol: the TTY
 * protocol uses the ctc_tty layer instead of the generic netdev
 * unregistration. */
2854 ctc_netdev_unregister(struct net_device * dev)
2856 struct ctc_priv *privptr;
2860 privptr = (struct ctc_priv *) dev->priv;
2861 if (privptr->protocol != CTC_PROTO_LINUX_TTY)
2862 unregister_netdev(dev);
2864 ctc_tty_unregister_netdev(dev);
/* Register the net device; counterpart of ctc_netdev_unregister().
 * TTY-protocol devices go through the ctc_tty layer. */
2868 ctc_netdev_register(struct net_device * dev)
2870 struct ctc_priv *privptr = (struct ctc_priv *) dev->priv;
2871 if (privptr->protocol != CTC_PROTO_LINUX_TTY)
2872 return register_netdev(dev);
2874 return ctc_tty_register_netdev(dev);
/* Release a net device's private resources (the device FSM); the
 * free of dev itself when free_dev is set happens in elided lines of
 * this extract. */
2878 ctc_free_netdevice(struct net_device * dev, int free_dev)
2880 struct ctc_priv *privptr;
2883 privptr = dev->priv;
2886 kfree_fsm(privptr->fsm);
2896 * Initialize everything of the net device except the name and the
2899 static struct net_device *
2900 ctc_init_netdevice(struct net_device * dev, int alloc_device,
2901 struct ctc_priv *privptr)
2906 DBF_TEXT(setup, 3, __FUNCTION__);
/* alloc_device path: allocate and zero a fresh net_device (the
 * surrounding 'if' and NULL checks are in elided lines). */
2908 dev = kmalloc(sizeof (struct net_device), GFP_KERNEL);
2911 memset(dev, 0, sizeof (struct net_device));
2914 dev->priv = privptr;
/* Build the per-device state machine, starting in STOPPED. */
2915 privptr->fsm = init_fsm("ctcdev", dev_state_names,
2916 dev_event_names, NR_DEV_STATES, NR_DEV_EVENTS,
2917 dev_fsm, DEV_FSM_LEN, GFP_KERNEL);
2918 if (privptr->fsm == NULL) {
2923 fsm_newstate(privptr->fsm, DEV_STATE_STOPPED);
2924 fsm_settimer(privptr->fsm, &privptr->restart_timer);
/* Default MTU leaves room for the LL header + 2-byte block length. */
2925 dev->mtu = CTC_BUFSIZE_DEFAULT - LL_HEADER_LENGTH - 2;
/* Wire up the net_device operations (pre-net_device_ops style). */
2926 dev->hard_start_xmit = ctc_tx;
2927 dev->open = ctc_open;
2928 dev->stop = ctc_close;
2929 dev->get_stats = ctc_stats;
2930 dev->change_mtu = ctc_change_mtu;
2931 dev->hard_header_len = LL_HEADER_LENGTH + 2;
2933 dev->type = ARPHRD_SLIP;
2934 dev->tx_queue_len = 100;
/* CTC is a point-to-point link with no ARP. */
2935 dev->flags = IFF_POINTOPOINT | IFF_NOARP;
2936 SET_MODULE_OWNER(dev);
/* sysfs 'protocol' read: report the configured CTC protocol number. */
2941 ctc_proto_show(struct device *dev, char *buf)
2943 struct ctc_priv *priv;
2945 priv = dev->driver_data;
2949 return sprintf(buf, "%d\n", priv->protocol);
/* sysfs 'protocol' write: parse and range-check a new protocol id,
 * then store it. NOTE(review): 'value' is scanned with %u; if it is
 * declared unsigned (declaration elided here), the (value < 0) test
 * can never be true — confirm against the full source. sscanf()
 * result is unchecked. */
2953 ctc_proto_store(struct device *dev, const char *buf, size_t count)
2955 struct ctc_priv *priv;
2958 DBF_TEXT(trace, 3, __FUNCTION__);
2959 pr_debug("%s() called\n", __FUNCTION__);
2961 priv = dev->driver_data;
2964 sscanf(buf, "%u", &value);
2965 if ((value < 0) || (value > CTC_PROTO_MAX))
2967 priv->protocol = value;
/* sysfs 'protocol' attribute (0644: root-writable). */
2972 static DEVICE_ATTR(protocol, 0644, ctc_proto_show, ctc_proto_store);
/* sysfs 'type' read: report the cu3088 channel type string derived
 * from the first ccw device's driver_info. */
2975 ctc_type_show(struct device *dev, char *buf)
2977 struct ccwgroup_device *cgdev;
2979 cgdev = to_ccwgroupdev(dev);
2983 return sprintf(buf, "%s\n", cu3088_type[cgdev->cdev[0]->id.driver_info]);
/* Read-only sysfs 'type' attribute. */
2986 static DEVICE_ATTR(type, 0444, ctc_type_show, NULL);
/* Attributes grouped for bulk creation/removal via sysfs group
 * helpers (terminating NULL entry is in an elided line). */
2988 static struct attribute *ctc_attr[] = {
2989 &dev_attr_protocol.attr,
2990 &dev_attr_type.attr,
/* Group wrapper around ctc_attr[] (initializer continues in an
 * elided line). */
2994 static struct attribute_group ctc_attr_group = {
/* Create the protocol/type attribute group on the group device. */
2999 ctc_add_files(struct device *dev)
3001 pr_debug("%s() called\n", __FUNCTION__);
3003 return sysfs_create_group(&dev->kobj, &ctc_attr_group);
/* Remove the attribute group; counterpart of ctc_add_files(). */
3007 ctc_remove_files(struct device *dev)
3009 pr_debug("%s() called\n", __FUNCTION__);
3011 sysfs_remove_group(&dev->kobj, &ctc_attr_group);
3015 * Add ctc specific attributes.
3016 * Add ctc private data.
3018 * @param cgdev pointer to ccwgroup_device just added
3020 * @returns 0 on success, !0 on failure.
3024 ctc_probe_device(struct ccwgroup_device *cgdev)
3026 struct ctc_priv *priv;
3029 pr_debug("%s() called\n", __FUNCTION__);
3030 DBF_TEXT(trace, 3, __FUNCTION__);
/* Pin the device for the lifetime of the private data; every error
 * path below drops the reference again. */
3032 if (!get_device(&cgdev->dev))
3035 priv = kmalloc(sizeof (struct ctc_priv), GFP_KERNEL);
/* NOTE(review): this function mixes __func__ (here) with
 * __FUNCTION__ (above) — cosmetic inconsistency only. */
3037 ctc_pr_err("%s: Out of memory\n", __func__);
3038 put_device(&cgdev->dev);
3042 memset(priv, 0, sizeof (struct ctc_priv));
3043 rc = ctc_add_files(&cgdev->dev);
3046 put_device(&cgdev->dev);
/* Both subchannels share the same interrupt handler; per-channel
 * demultiplexing happens inside ctc_irq_handler. */
3050 cgdev->cdev[0]->handler = ctc_irq_handler;
3051 cgdev->cdev[1]->handler = ctc_irq_handler;
3052 cgdev->dev.driver_data = priv;
3059 * Setup an interface.
3061 * @param cgdev Device to be setup.
3063 * @returns 0 on success, !0 on failure.
3066 ctc_new_device(struct ccwgroup_device *cgdev)
3068 char read_id[CTC_ID_SIZE];
3069 char write_id[CTC_ID_SIZE];
3071 enum channel_types type;
3072 struct ctc_priv *privptr;
3073 struct net_device *dev;
3076 pr_debug("%s() called\n", __FUNCTION__);
3077 DBF_TEXT(setup, 3, __FUNCTION__);
3079 privptr = cgdev->dev.driver_data;
/* Channel type (e.g. ESCON vs. parallel) is derived from the first
 * subchannel's device id. */
3083 type = get_channel_type(&cgdev->cdev[0]->id);
/* Channel ids follow the "ch-<bus_id>" naming convention; cdev[0]
 * is the read side, cdev[1] the write side. */
3085 snprintf(read_id, CTC_ID_SIZE, "ch-%s", cgdev->cdev[0]->dev.bus_id);
3086 snprintf(write_id, CTC_ID_SIZE, "ch-%s", cgdev->cdev[1]->dev.bus_id);
3088 if (add_channel(cgdev->cdev[0], type))
3090 if (add_channel(cgdev->cdev[1], type))
3093 ret = ccw_device_set_online(cgdev->cdev[0]);
3096 "ccw_device_set_online (cdev[0]) failed with ret = %d\n", ret);
3099 ret = ccw_device_set_online(cgdev->cdev[1]);
3102 "ccw_device_set_online (cdev[1]) failed with ret = %d\n", ret);
3105 dev = ctc_init_netdevice(NULL, 1, privptr);
3108 ctc_pr_warn("ctc_init_netdevice failed\n");
/* Interface name template depends on the configured protocol. */
3112 if (privptr->protocol == CTC_PROTO_LINUX_TTY)
3113 strlcpy(dev->name, "ctctty%d", IFNAMSIZ);
3115 strlcpy(dev->name, "ctc%d", IFNAMSIZ);
/* Claim one channel per direction and attach it to the netdev. */
3117 for (direction = READ; direction <= WRITE; direction++) {
3118 privptr->channel[direction] =
3119 channel_get(type, direction == READ ? read_id : write_id,
3121 if (privptr->channel[direction] == NULL) {
/* Second-iteration failure: release the already-claimed READ side. */
3122 if (direction == WRITE)
3123 channel_free(privptr->channel[READ]);
3125 ctc_free_netdevice(dev, 1);
3128 privptr->channel[direction]->netdev = dev;
3129 privptr->channel[direction]->protocol = privptr->protocol;
3130 privptr->channel[direction]->max_bufsize = CTC_BUFSIZE_DEFAULT;
3133 SET_NETDEV_DEV(dev, &cgdev->dev);
3135 if (ctc_netdev_register(dev) != 0) {
3136 ctc_free_netdevice(dev, 1);
3140 ctc_add_attributes(&cgdev->dev);
/* Rename the FSM after the (now final) interface name for logging. */
3142 strlcpy(privptr->fsm->name, dev->name, sizeof (privptr->fsm->name));
3146 ctc_pr_info("%s: read: %s, write: %s, proto: %d\n",
3147 dev->name, privptr->channel[READ]->id,
3148 privptr->channel[WRITE]->id, privptr->protocol);
/* Error unwind: take both subchannels offline again (labels are in
 * elided lines of this extract). */
3152 ccw_device_set_offline(cgdev->cdev[1]);
3153 ccw_device_set_offline(cgdev->cdev[0]);
3159 * Shutdown an interface.
3161 * @param cgdev Device to be shut down.
3163 * @returns 0 on success, !0 on failure.
3166 ctc_shutdown_device(struct ccwgroup_device *cgdev)
3168 struct ctc_priv *priv;
3169 struct net_device *ndev;
3171 DBF_TEXT(trace, 3, __FUNCTION__);
3172 pr_debug("%s() called\n", __FUNCTION__);
3174 priv = cgdev->dev.driver_data;
/* Normal path: a READ channel exists, so a netdev was set up. */
3179 if (priv->channel[READ]) {
3180 ndev = priv->channel[READ]->netdev;
3182 /* Close the device */
3184 ndev->flags &=~IFF_RUNNING;
3186 ctc_remove_attributes(&cgdev->dev);
3188 channel_free(priv->channel[READ]);
3190 if (priv->channel[WRITE])
3191 channel_free(priv->channel[WRITE]);
3194 ctc_netdev_unregister(ndev);
3196 ctc_free_netdevice(ndev, 1);
/* No netdev case: only the FSM needs to be torn down. */
3200 kfree_fsm(priv->fsm);
3202 ccw_device_set_offline(cgdev->cdev[1]);
3203 ccw_device_set_offline(cgdev->cdev[0]);
3205 if (priv->channel[READ])
3206 channel_remove(priv->channel[READ]);
3207 if (priv->channel[WRITE])
3208 channel_remove(priv->channel[WRITE]);
3210 priv->channel[READ] = priv->channel[WRITE] = NULL;
/* Undo ctc_probe_device(): shut the interface down if it is still
 * online, remove the sysfs group, detach the private data and drop
 * the device reference taken at probe time. (The kfree of priv is
 * presumably in an elided line.) */
3217 ctc_remove_device(struct ccwgroup_device *cgdev)
3219 struct ctc_priv *priv;
3221 pr_debug("%s() called\n", __FUNCTION__);
3222 DBF_TEXT(trace, 3, __FUNCTION__);
3224 priv = cgdev->dev.driver_data;
3227 if (cgdev->state == CCWGROUP_ONLINE)
3228 ctc_shutdown_device(cgdev);
3229 ctc_remove_files(&cgdev->dev);
3230 cgdev->dev.driver_data = NULL;
3232 put_device(&cgdev->dev);
/* ccwgroup driver glue: binds the probe/remove/online/offline
 * callbacks above to the CTC group driver id. */
3235 static struct ccwgroup_driver ctc_group_driver = {
3236 .owner = THIS_MODULE,
3239 .driver_id = 0xC3E3C3,
3240 .probe = ctc_probe_device,
3241 .remove = ctc_remove_device,
3242 .set_online = ctc_new_device,
3243 .set_offline = ctc_shutdown_device,
3247 * Module related routines
3248 *****************************************************************************/
3251 * Prepare to be unloaded. Free IRQ's and release all resources.
3252 * This is called just before this module is unloaded. It is
3253 * <em>not</em> called, if the usage count is !0, so we don't need to check
/* Module exit body (the function signature is in an elided line):
 * unregister the group driver, drop the debug views, log unload. */
3259 unregister_cu3088_discipline(&ctc_group_driver);
3261 ctc_unregister_dbf_views();
3262 ctc_pr_info("CTC driver unloaded\n");
3266 * Initialize module.
3267 * This is called just after the module is loaded.
3269 * @return 0 on success, !0 on error.
/* Module init body (signature elided): register debug views first;
 * if discipline registration then fails, unwind the views. */
3278 ret = ctc_register_dbf_views();
3280 ctc_pr_crit("ctc_init failed with ctc_register_dbf_views rc = %d\n", ret);
3284 ret = register_cu3088_discipline(&ctc_group_driver);
3287 ctc_unregister_dbf_views();
/* Module entry/exit registration. */
3292 module_init(ctc_init);
3293 module_exit(ctc_exit);
3295 /* --- This is the END my friend --- */