2 * $Id: dmascc.c,v 1.27 2000/06/01 14:46:23 oe1kib Exp $
4 * Driver for high-speed SCC boards (those with DMA support)
5 * Copyright (C) 1997-2000 Klaus Kudielka
7 * S5SCC/DMA support by Janko Koleznik S52HI
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
26 #include <linux/module.h>
27 #include <linux/delay.h>
28 #include <linux/errno.h>
29 #include <linux/if_arp.h>
31 #include <linux/init.h>
32 #include <linux/interrupt.h>
33 #include <linux/ioport.h>
34 #include <linux/kernel.h>
36 #include <linux/netdevice.h>
37 #include <linux/rtnetlink.h>
38 #include <linux/sockios.h>
39 #include <linux/workqueue.h>
40 #include <linux/version.h>
41 #include <asm/atomic.h>
42 #include <asm/bitops.h>
46 #include <asm/uaccess.h>
51 /* Linux 2.2 and 2.3 compatibility */
53 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,3,14)
54 #define net_device device
56 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,3,43)
57 #define netif_start_queue(dev) { dev->tbusy = 0; }
58 #define netif_stop_queue(dev) { dev->tbusy = 1; }
59 #define netif_wake_queue(dev) { dev->tbusy = 0; mark_bh(NET_BH); }
61 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,3,47)
62 #define netif_running(dev) (dev->flags & IFF_UP)
66 /* Number of buffers per channel */
68 #define NUM_TX_BUF 2 /* NUM_TX_BUF >= 1 (min. 2 recommended) */
69 #define NUM_RX_BUF 6 /* NUM_RX_BUF >= 1 (min. 2 recommended) */
70 #define BUF_SIZE 1576 /* BUF_SIZE >= mtu + hard_header_len */
75 #define HW_PI { "Ottawa PI", 0x300, 0x20, 0x10, 8, \
76 0, 8, 1843200, 3686400 }
77 #define HW_PI2 { "Ottawa PI2", 0x300, 0x20, 0x10, 8, \
78 0, 8, 3686400, 7372800 }
79 #define HW_TWIN { "Gracilis PackeTwin", 0x200, 0x10, 0x10, 32, \
80 0, 4, 6144000, 6144000 }
81 #define HW_S5 { "S5SCC/DMA", 0x200, 0x10, 0x10, 32, \
82 0, 8, 4915200, 9830400 }
84 #define HARDWARE { HW_PI, HW_PI2, HW_TWIN, HW_S5 }
86 #define TMR_0_HZ 25600 /* Frequency of timer 0 */
94 #define MAX_NUM_DEVS 32
97 /* SCC chips supported */
103 #define CHIPNAMES { "Z8530", "Z85C30", "Z85230" }
108 /* 8530 registers relative to card base */
109 #define SCCB_CMD 0x00
110 #define SCCB_DATA 0x01
111 #define SCCA_CMD 0x02
112 #define SCCA_DATA 0x03
114 /* 8253/8254 registers relative to card base */
115 #define TMR_CNT0 0x00
116 #define TMR_CNT1 0x01
117 #define TMR_CNT2 0x02
118 #define TMR_CTRL 0x03
120 /* Additional PI/PI2 registers relative to card base */
121 #define PI_DREQ_MASK 0x04
123 /* Additional PackeTwin registers relative to card base */
124 #define TWIN_INT_REG 0x08
125 #define TWIN_CLR_TMR1 0x09
126 #define TWIN_CLR_TMR2 0x0a
127 #define TWIN_SPARE_1 0x0b
128 #define TWIN_DMA_CFG 0x08
129 #define TWIN_SERIAL_CFG 0x09
130 #define TWIN_DMA_CLR_FF 0x0a
131 #define TWIN_SPARE_2 0x0b
134 /* PackeTwin I/O register values */
137 #define TWIN_SCC_MSK 0x01
138 #define TWIN_TMR1_MSK 0x02
139 #define TWIN_TMR2_MSK 0x04
140 #define TWIN_INT_MSK 0x07
143 #define TWIN_DTRA_ON 0x01
144 #define TWIN_DTRB_ON 0x02
145 #define TWIN_EXTCLKA 0x04
146 #define TWIN_EXTCLKB 0x08
147 #define TWIN_LOOPA_ON 0x10
148 #define TWIN_LOOPB_ON 0x20
152 #define TWIN_DMA_HDX_T1 0x08
153 #define TWIN_DMA_HDX_R1 0x0a
154 #define TWIN_DMA_HDX_T3 0x14
155 #define TWIN_DMA_HDX_R3 0x16
156 #define TWIN_DMA_FDX_T3R1 0x1b
157 #define TWIN_DMA_FDX_T1R3 0x1d
176 #define SIOCGSCCPARAM SIOCDEVPRIVATE
177 #define SIOCSSCCPARAM (SIOCDEVPRIVATE+1)
183 int pclk_hz; /* frequency of BRG input (don't change) */
184 int brg_tc; /* BRG terminal count; BRG disabled if < 0 */
185 int nrzi; /* 0 (nrz), 1 (nrzi) */
186 int clocks; /* see dmascc_cfg documentation */
187 int txdelay; /* [1/TMR_0_HZ] */
188 int txtimeout; /* [1/HZ] */
189 int txtail; /* [1/TMR_0_HZ] */
190 int waittime; /* [1/TMR_0_HZ] */
191 int slottime; /* [1/TMR_0_HZ] */
192 int persist; /* 1 ... 256 */
193 int dma; /* -1 (disable), 0, 1, 3 */
194 int txpause; /* [1/TMR_0_HZ] */
195 int rtsoff; /* [1/TMR_0_HZ] */
196 int dcdon; /* [1/TMR_0_HZ] */
197 int dcdoff; /* [1/TMR_0_HZ] */
200 struct scc_hardware {
213 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,0)
218 struct net_device *dev;
219 struct scc_info *info;
220 struct net_device_stats stats;
222 int card_base, scc_cmd, scc_data;
223 int tmr_cnt, tmr_ctrl, tmr_mode;
224 struct scc_param param;
225 char rx_buf[NUM_RX_BUF][BUF_SIZE];
226 int rx_len[NUM_RX_BUF];
228 struct work_struct rx_work;
229 int rx_head, rx_tail, rx_count;
231 char tx_buf[NUM_TX_BUF][BUF_SIZE];
232 int tx_len[NUM_TX_BUF];
234 int tx_head, tx_tail, tx_count;
236 unsigned long tx_start;
238 spinlock_t *register_lock; /* Per scc_info */
239 spinlock_t ring_lock;
245 struct net_device *dev[2];
246 struct scc_priv priv[2];
247 struct scc_info *next;
248 spinlock_t register_lock; /* Per device register lock */
252 /* Function declarations */
253 static int setup_adapter(int card_base, int type, int n) __init;
255 static void write_scc(struct scc_priv *priv, int reg, int val);
256 static void write_scc_data(struct scc_priv *priv, int val, int fast);
257 static int read_scc(struct scc_priv *priv, int reg);
258 static int read_scc_data(struct scc_priv *priv);
260 static int scc_open(struct net_device *dev);
261 static int scc_close(struct net_device *dev);
262 static int scc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
263 static int scc_send_packet(struct sk_buff *skb, struct net_device *dev);
264 static struct net_device_stats *scc_get_stats(struct net_device *dev);
265 static int scc_set_mac_address(struct net_device *dev, void *sa);
267 static irqreturn_t scc_isr(int irq, void *dev_id, struct pt_regs * regs);
268 static inline void z8530_isr(struct scc_info *info);
269 static void rx_isr(struct scc_priv *priv);
270 static void special_condition(struct scc_priv *priv, int rc);
271 static void rx_bh(void *arg);
272 static void tx_isr(struct scc_priv *priv);
273 static void es_isr(struct scc_priv *priv);
274 static void tm_isr(struct scc_priv *priv);
276 static inline void tx_on(struct scc_priv *priv);
277 static inline void rx_on(struct scc_priv *priv);
278 static inline void rx_off(struct scc_priv *priv);
279 static void start_timer(struct scc_priv *priv, int t, int r15);
280 static inline unsigned char random(void);
283 /* Initialization variables */
285 static int io[MAX_NUM_DEVS] __initdata = { 0, };
286 /* Beware! hw[] is also used in cleanup_module(). */
287 static struct scc_hardware hw[NUM_TYPES] __initdata_or_module = HARDWARE;
288 static char ax25_broadcast[7] __initdata =
289 { 'Q'<<1, 'S'<<1, 'T'<<1, ' '<<1, ' '<<1, ' '<<1, '0'<<1 };
290 static char ax25_test[7] __initdata =
291 { 'L'<<1, 'I'<<1, 'N'<<1, 'U'<<1, 'X'<<1, ' '<<1, '1'<<1 };
294 /* Global variables */
296 static struct scc_info *first;
297 static unsigned long rand;
300 MODULE_AUTHOR("Klaus Kudielka");
301 MODULE_DESCRIPTION("Driver for high-speed SCC boards");
302 MODULE_PARM(io, "1-" __MODULE_STRING(MAX_NUM_DEVS) "i");
303 MODULE_LICENSE("GPL");
/*
 * Module cleanup: tear down every registered adapter.
 * For each scc_info: unregister both net_devices, quiesce the hardware
 * (clear the PackeTwin serial config register, hardware-reset the SCC via
 * WR9/FHWRES), release the card's I/O region and free the net_devices.
 * NOTE(review): this view of the file is missing lines (e.g. the loop that
 * walks the `first` list of scc_info entries and the kfree of `info`) --
 * comments describe only the visible code.
 */
305 static void __exit dmascc_exit(void) {
307 struct scc_info *info;
312 /* Unregister devices */
313 for (i = 0; i < 2; i++)
314 unregister_netdev(info->dev[i]);
/* Quiesce hardware: PackeTwin needs its serial config register cleared
   before the full SCC reset. */
317 if (info->priv[0].type == TYPE_TWIN)
318 outb(0, info->dev[0]->base_addr + TWIN_SERIAL_CFG);
319 write_scc(&info->priv[0], R9, FHWRES);
320 release_region(info->dev[0]->base_addr,
321 hw[info->priv[0].type].io_size);
323 for (i = 0; i < 2; i++)
324 free_netdev(info->dev[i]);
/*
 * Boot-time option parser ("dmascc=io0,io1,..."): copies up to MAX_NUM_DEVS
 * integer arguments (ints[0] holds the count) into the io[] probe list.
 * NOTE(review): the loop body is missing from this view -- presumably
 * io[i] = ints[i + 1]; confirm against the full source.
 */
333 void __init dmascc_setup(char *str, int *ints) {
336 for (i = 0; i < MAX_NUM_DEVS && i < ints[0]; i++)
/*
 * Module init: autodetect supported SCC cards.
 * For every hardware type it builds a candidate I/O-address list (from the
 * io[] module parameter, or the type's full default range when none was
 * given), reserves each region, then programs the on-board 8253/8254:
 * timer 0 as a TMR_0_HZ rate generator and timer 1 to count TMR_0_HZ/HZ*10
 * ticks (i.e. ~10 jiffies).  A real card is recognized when timer 1's
 * measured run time lands in the 9..11 jiffies window, after which
 * setup_adapter() registers it.  Returns 0 if any adapter was set up,
 * an error if none were found.
 * NOTE(review): several lines are missing from this view (variable
 * declarations, some loop bodies, the return statements).
 */
341 static int __init dmascc_init(void) {
343 int base[MAX_NUM_DEVS], tcmd[MAX_NUM_DEVS], t0[MAX_NUM_DEVS],
346 unsigned long time, start[MAX_NUM_DEVS], delay[MAX_NUM_DEVS],
347 counting[MAX_NUM_DEVS];
349 /* Initialize random number generator */
351 /* Cards found = 0 */
353 /* Warning message */
354 if (!io[0]) printk(KERN_INFO "dmascc: autoprobing (dangerous)\n");
356 /* Run autodetection for each card type */
357 for (h = 0; h < NUM_TYPES; h++) {
360 /* User-specified I/O address regions */
361 for (i = 0; i < hw[h].num_devs; i++) base[i] = 0;
362 for (i = 0; i < MAX_NUM_DEVS && io[i]; i++) {
/* Map the user address onto this type's address grid; accept it only if
   it is exactly io_region + j*io_delta for a valid slot j. */
363 j = (io[i] - hw[h].io_region) / hw[h].io_delta;
365 j < hw[h].num_devs &&
366 hw[h].io_region + j * hw[h].io_delta == io[i]) {
371 /* Default I/O address regions */
372 for (i = 0; i < hw[h].num_devs; i++) {
373 base[i] = hw[h].io_region + i * hw[h].io_delta;
377 /* Check valid I/O address regions */
378 for (i = 0; i < hw[h].num_devs; i++)
380 if (!request_region(base[i], hw[h].io_size, "dmascc"))
/* Precompute timer register addresses for this candidate card. */
383 tcmd[i] = base[i] + hw[h].tmr_offset + TMR_CTRL;
384 t0[i] = base[i] + hw[h].tmr_offset + TMR_CNT0;
385 t1[i] = base[i] + hw[h].tmr_offset + TMR_CNT1;
390 for (i = 0; i < hw[h].num_devs; i++)
392 /* Timer 0: LSB+MSB, Mode 3, TMR_0_HZ */
394 outb((hw[h].tmr_hz/TMR_0_HZ) & 0xFF, t0[i]);
395 outb((hw[h].tmr_hz/TMR_0_HZ) >> 8, t0[i]);
396 /* Timer 1: LSB+MSB, Mode 0, HZ/10 */
398 outb((TMR_0_HZ/HZ*10) & 0xFF, t1[i]);
399 outb((TMR_0_HZ/HZ*10) >> 8, t1[i]);
403 /* Timer 2: LSB+MSB, Mode 0 */
407 /* Wait until counter registers are loaded */
408 udelay(2000000/TMR_0_HZ);
/* Poll for up to ~13 jiffies, watching each candidate's timer 1 count
   down; record how long it ran. */
411 while (jiffies - time < 13) {
412 for (i = 0; i < hw[h].num_devs; i++)
413 if (base[i] && counting[i]) {
414 /* Read back Timer 1: latch; read LSB; read MSB */
416 t_val = inb(t1[i]) + (inb(t1[i]) << 8);
417 /* Also check whether counter did wrap */
418 if (t_val == 0 || t_val > TMR_0_HZ/HZ*10) counting[i] = 0;
419 delay[i] = jiffies - start[i];
423 /* Evaluate measurements */
424 for (i = 0; i < hw[h].num_devs; i++)
426 if ((delay[i] >= 9 && delay[i] <= 11)&&
427 /* Ok, we have found an adapter */
428 (setup_adapter(base[i], h, n) == 0))
/* Not a card of this type (or setup failed): give the region back. */
431 release_region(base[i], hw[h].io_size);
436 /* If any adapter was successfully initialized, return ok */
439 /* If no adapter found, return error */
440 printk(KERN_INFO "dmascc: no adapters found\n");
444 module_init(dmascc_init);
445 module_exit(dmascc_exit);
/*
 * alloc_netdev() setup callback: initialize the AX.25 link-level fields
 * of a freshly allocated net_device (type, header length, queue length,
 * broadcast and default hardware address).
 */
447 static void dev_setup(struct net_device *dev)
449 dev->type = ARPHRD_AX25;
450 dev->hard_header_len = 73;
453 dev->tx_queue_len = 64;
454 memcpy(dev->broadcast, ax25_broadcast, 7);
455 memcpy(dev->dev_addr, ax25_test, 7);
/*
 * Initialize one detected card at `card_base` of hardware `type`, as the
 * n-th adapter: allocate the scc_info and two net_devices, identify the
 * SCC chip variant (Z8530 / Z85C30 / Z85230) via the WR7'/TX-FIFO probes,
 * autodetect the IRQ with probe_irq_on/off() and a one-shot timer tick,
 * fill in per-channel scc_priv state, and register both net_devices.
 * Returns 0 on success.  The error paths (bottom of the function)
 * unregister/free in reverse order of acquisition.
 * NOTE(review): many lines are missing from this view (returns, some
 * chip/IRQ bookkeeping, parts of the error unwind).
 */
458 static int __init setup_adapter(int card_base, int type, int n)
461 struct scc_info *info;
462 struct net_device *dev;
463 struct scc_priv *priv;
466 int tmr_base = card_base + hw[type].tmr_offset;
467 int scc_base = card_base + hw[type].scc_offset;
468 char *chipnames[] = CHIPNAMES;
470 /* Allocate memory */
471 info = kmalloc(sizeof(struct scc_info), GFP_KERNEL | GFP_DMA);
473 printk(KERN_ERR "dmascc: "
474 "could not allocate memory for %s at %#3x\n",
475 hw[type].name, card_base);
479 /* Initialize what is necessary for write_scc and write_scc_data */
480 memset(info, 0, sizeof(struct scc_info));
482 info->dev[0] = alloc_netdev(0, "", dev_setup);
484 printk(KERN_ERR "dmascc: "
485 "could not allocate memory for %s at %#3x\n",
486 hw[type].name, card_base);
490 info->dev[1] = alloc_netdev(0, "", dev_setup);
492 printk(KERN_ERR "dmascc: "
493 "could not allocate memory for %s at %#3x\n",
494 hw[type].name, card_base);
497 spin_lock_init(&info->register_lock);
/* Temporarily use channel A's priv for the chip/IRQ probing below. */
499 priv = &info->priv[0];
501 priv->card_base = card_base;
502 priv->scc_cmd = scc_base + SCCA_CMD;
503 priv->scc_data = scc_base + SCCA_DATA;
504 priv->register_lock = &info->register_lock;
/* Hardware-reset the SCC before probing. */
507 write_scc(priv, R9, FHWRES | MIE | NV);
509 /* Determine type of chip by enabling SDLC/HDLC enhancements */
510 write_scc(priv, R15, SHDLCE);
511 if (!read_scc(priv, R15)) {
512 /* WR7' not present. This is an ordinary Z8530 SCC. */
515 /* Put one character in TX FIFO */
516 write_scc_data(priv, 0, 0);
517 if (read_scc(priv, R0) & Tx_BUF_EMP) {
518 /* TX FIFO not full. This is a Z85230 ESCC with a 4-byte FIFO. */
521 /* TX FIFO full. This is a Z85C30 SCC with a 1-byte FIFO. */
525 write_scc(priv, R15, 0);
527 /* Start IRQ auto-detection */
528 irqs = probe_irq_on();
530 /* Enable interrupts */
531 if (type == TYPE_TWIN) {
/* PackeTwin: clear DMA config, ack both timer latches, then enable
   board interrupts via the serial config register. */
532 outb(0, card_base + TWIN_DMA_CFG);
533 inb(card_base + TWIN_CLR_TMR1);
534 inb(card_base + TWIN_CLR_TMR2);
535 info->twin_serial_cfg = TWIN_EI;
536 outb(info->twin_serial_cfg, card_base + TWIN_SERIAL_CFG);
/* Non-TWIN: provoke a CTS external/status interrupt instead. */
538 write_scc(priv, R15, CTSIE);
539 write_scc(priv, R0, RES_EXT_INT);
540 write_scc(priv, R1, EXT_INT_ENAB);
/* Fire timer 1 with a count of 1 so exactly one interrupt occurs. */
544 outb(1, tmr_base + TMR_CNT1);
545 outb(0, tmr_base + TMR_CNT1);
547 /* Wait and detect IRQ */
548 time = jiffies; while (jiffies - time < 2 + HZ / TMR_0_HZ);
549 irq = probe_irq_off(irqs);
551 /* Clear pending interrupt, disable interrupts */
552 if (type == TYPE_TWIN) {
553 inb(card_base + TWIN_CLR_TMR1);
555 write_scc(priv, R1, 0);
556 write_scc(priv, R15, 0);
557 write_scc(priv, R0, RES_EXT_INT);
/* irq <= 0 here means detection failed (or multiple IRQs seen). */
561 printk(KERN_ERR "dmascc: could not find irq of %s at %#3x (irq=%d)\n",
562 hw[type].name, card_base, irq);
566 /* Set up data structures */
567 for (i = 0; i < 2; i++) {
569 priv = &info->priv[i];
575 spin_lock_init(&priv->ring_lock);
576 priv->register_lock = &info->register_lock;
577 priv->card_base = card_base;
/* Channel 0 = SCC channel A / timer 1; channel 1 = channel B / timer 2. */
578 priv->scc_cmd = scc_base + (i ? SCCB_CMD : SCCA_CMD);
579 priv->scc_data = scc_base + (i ? SCCB_DATA : SCCA_DATA);
580 priv->tmr_cnt = tmr_base + (i ? TMR_CNT2 : TMR_CNT1);
581 priv->tmr_ctrl = tmr_base + TMR_CTRL;
582 priv->tmr_mode = i ? 0xb0 : 0x70;
583 priv->param.pclk_hz = hw[type].pclk_hz;
584 priv->param.brg_tc = -1;
585 priv->param.clocks = TCTRxCP | RCRTxCP;
586 priv->param.persist = 256;
587 priv->param.dma = -1;
588 INIT_WORK(&priv->rx_work, rx_bh, priv);
590 sprintf(dev->name, "dmascc%i", 2*n+i);
591 SET_MODULE_OWNER(dev);
592 dev->base_addr = card_base;
594 dev->open = scc_open;
595 dev->stop = scc_close;
596 dev->do_ioctl = scc_ioctl;
597 dev->hard_start_xmit = scc_send_packet;
598 dev->get_stats = scc_get_stats;
599 dev->hard_header = ax25_encapsulate;
600 dev->rebuild_header = ax25_rebuild_header;
601 dev->set_mac_address = scc_set_mac_address;
603 if (register_netdev(info->dev[0])) {
604 printk(KERN_ERR "dmascc: could not register %s\n",
608 if (register_netdev(info->dev[1])) {
609 printk(KERN_ERR "dmascc: could not register %s\n",
617 printk(KERN_INFO "dmascc: found %s (%s) at %#3x, irq %d\n", hw[type].name,
618 chipnames[chip], card_base, irq);
/* Error unwind (reached via gotos in the full source): undo in reverse
   order -- unregister dev[0], quiesce hardware, free both net_devices. */
622 unregister_netdev(info->dev[0]);
624 if (info->priv[0].type == TYPE_TWIN)
625 outb(0, info->dev[0]->base_addr + TWIN_SERIAL_CFG);
626 write_scc(&info->priv[0], R9, FHWRES);
627 free_netdev(info->dev[1]);
629 free_netdev(info->dev[0]);
637 /* Driver functions */
/*
 * Write `val` to SCC register `reg` (reg 0 = direct command write).
 * Access method depends on the card type: plain outb, outb_p (extra I/O
 * recovery delay), or -- on PI/PI2-style cards -- the same under the
 * per-chip register lock with the DREQ mask gated off around the access
 * so a concurrent DMA request cannot corrupt the register pointer.
 * NOTE(review): the case labels selecting each branch are missing from
 * this view.
 */
639 static void write_scc(struct scc_priv *priv, int reg, int val) {
641 switch (priv->type) {
643 if (reg) outb(reg, priv->scc_cmd);
644 outb(val, priv->scc_cmd);
647 if (reg) outb_p(reg, priv->scc_cmd);
648 outb_p(val, priv->scc_cmd);
/* PI/PI2: disable DREQ, do the two-step register access, re-enable. */
651 spin_lock_irqsave(priv->register_lock, flags);
652 outb_p(0, priv->card_base + PI_DREQ_MASK);
653 if (reg) outb_p(reg, priv->scc_cmd);
654 outb_p(val, priv->scc_cmd);
655 outb(1, priv->card_base + PI_DREQ_MASK);
656 spin_unlock_irqrestore(priv->register_lock, flags);
/*
 * Write one byte to the SCC data register.  `fast` (PI-type cards only)
 * skips the DREQ-mask/lock dance when the caller already holds the
 * register lock with DREQ masked off.
 */
662 static void write_scc_data(struct scc_priv *priv, int val, int fast) {
664 switch (priv->type) {
666 outb(val, priv->scc_data);
669 outb_p(val, priv->scc_data);
672 if (fast) outb_p(val, priv->scc_data);
/* Slow path: gate DREQ off around the data write. */
674 spin_lock_irqsave(priv->register_lock, flags);
675 outb_p(0, priv->card_base + PI_DREQ_MASK);
676 outb_p(val, priv->scc_data);
677 outb(1, priv->card_base + PI_DREQ_MASK);
678 spin_unlock_irqrestore(priv->register_lock, flags);
/*
 * Read SCC register `reg` (reg 0 = direct status read) and return its
 * value.  Mirrors write_scc(): plain inb, inb_p, or the PI-style locked
 * access with DREQ masked off for the duration.
 */
685 static int read_scc(struct scc_priv *priv, int reg) {
688 switch (priv->type) {
690 if (reg) outb(reg, priv->scc_cmd);
691 return inb(priv->scc_cmd);
693 if (reg) outb_p(reg, priv->scc_cmd);
694 return inb_p(priv->scc_cmd);
696 spin_lock_irqsave(priv->register_lock, flags);
697 outb_p(0, priv->card_base + PI_DREQ_MASK);
698 if (reg) outb_p(reg, priv->scc_cmd);
699 rc = inb_p(priv->scc_cmd);
700 outb(1, priv->card_base + PI_DREQ_MASK);
701 spin_unlock_irqrestore(priv->register_lock, flags);
/*
 * Read one byte from the SCC data register, using the card-type-specific
 * access method (see write_scc for the PI DREQ-mask rationale).
 */
707 static int read_scc_data(struct scc_priv *priv) {
710 switch (priv->type) {
712 return inb(priv->scc_data);
714 return inb_p(priv->scc_data);
716 spin_lock_irqsave(priv->register_lock, flags);
717 outb_p(0, priv->card_base + PI_DREQ_MASK);
718 rc = inb_p(priv->scc_data);
719 outb(1, priv->card_base + PI_DREQ_MASK);
720 spin_unlock_irqrestore(priv->register_lock, flags);
/*
 * net_device open: acquire IRQ (shared between the two channels via
 * info->irq_used) and the optional DMA channel, reset ring-buffer state,
 * then program the SCC channel for SDLC operation: mode/interrupt setup,
 * chip-specific WR7'/AUTOEOM handling, CRC/NRZ(I) encoding, baud-rate
 * generator, clock routing and (PackeTwin) DTR + interrupt enable.
 * Finally samples RR0, enables the DCD interrupt and starts the queue.
 * NOTE(review): scattered lines (returns, some case labels) are missing
 * from this view.
 */
726 static int scc_open(struct net_device *dev) {
727 struct scc_priv *priv = dev->priv;
728 struct scc_info *info = priv->info;
729 int card_base = priv->card_base;
731 /* Request IRQ if not already used by other channel */
732 if (!info->irq_used) {
733 if (request_irq(dev->irq, scc_isr, 0, "dmascc", info)) {
739 /* Request DMA if required */
740 if (priv->param.dma >= 0) {
741 if (request_dma(priv->param.dma, "dmascc")) {
/* DMA unavailable: drop our IRQ reference before failing. */
742 if (--info->irq_used == 0) free_irq(dev->irq, info);
745 unsigned long flags = claim_dma_lock();
746 clear_dma_ff(priv->param.dma);
747 release_dma_lock(flags);
751 /* Initialize local variables */
754 priv->rx_head = priv->rx_tail = priv->rx_count = 0;
756 priv->tx_head = priv->tx_tail = priv->tx_count = 0;
/* Reset this channel only (CHRA/CHRB), keep master interrupt enable. */
760 write_scc(priv, R9, (priv->channel ? CHRB : CHRA) | MIE | NV);
761 /* X1 clock, SDLC mode */
762 write_scc(priv, R4, SDLC | X1CLK);
764 write_scc(priv, R1, EXT_INT_ENAB | WT_FN_RDYFN);
765 /* 8 bit RX char, RX disable */
766 write_scc(priv, R3, Rx8);
767 /* 8 bit TX char, TX disable */
768 write_scc(priv, R5, Tx8);
769 /* SDLC address field */
770 write_scc(priv, R6, 0);
772 write_scc(priv, R7, FLAG);
773 switch (priv->chip) {
776 write_scc(priv, R15, SHDLCE);
778 write_scc(priv, R7, AUTOEOM);
779 write_scc(priv, R15, 0);
783 write_scc(priv, R15, SHDLCE);
784 /* The following bits are set (see 2.5.2.1):
785 - Automatic EOM reset
786 - Interrupt request if RX FIFO is half full
787 This bit should be ignored in DMA mode (according to the
788 documentation), but actually isn't. The receiver doesn't work if
789 it is set. Thus, we have to clear it in DMA mode.
790 - Interrupt/DMA request if TX FIFO is completely empty
791 a) If set, the ESCC behaves as if it had no TX FIFO (Z85C30
793 b) If cleared, DMA requests may follow each other very quickly,
794 filling up the TX FIFO.
795 Advantage: TX works even in case of high bus latency.
796 Disadvantage: Edge-triggered DMA request circuitry may miss
797 a request. No more data is delivered, resulting
798 in a TX FIFO underrun.
799 Both PI2 and S5SCC/DMA seem to work fine with TXFIFOE cleared.
800 The PackeTwin doesn't. I don't know about the PI, but let's
801 assume it behaves like the PI2.
803 if (priv->param.dma >= 0) {
804 if (priv->type == TYPE_TWIN) write_scc(priv, R7, AUTOEOM | TXFIFOE);
805 else write_scc(priv, R7, AUTOEOM);
807 write_scc(priv, R7, AUTOEOM | RXFIFOH);
809 write_scc(priv, R15, 0);
812 /* Preset CRC, NRZ(I) encoding */
813 write_scc(priv, R10, CRCPS | (priv->param.nrzi ? NRZI : NRZ));
815 /* Configure baud rate generator */
816 if (priv->param.brg_tc >= 0) {
817 /* Program BR generator */
818 write_scc(priv, R12, priv->param.brg_tc & 0xFF);
819 write_scc(priv, R13, (priv->param.brg_tc>>8) & 0xFF);
820 /* BRG source = SYS CLK; enable BRG; DTR REQ function (required by
821 PackeTwin, not connected on the PI2); set DPLL source to BRG */
822 write_scc(priv, R14, SSBR | DTRREQ | BRSRC | BRENABL);
824 write_scc(priv, R14, SEARCH | DTRREQ | BRSRC | BRENABL);
826 /* Disable BR generator */
827 write_scc(priv, R14, DTRREQ | BRSRC);
830 /* Configure clocks */
831 if (priv->type == TYPE_TWIN) {
832 /* Disable external TX clock receiver */
833 outb((info->twin_serial_cfg &=
834 ~(priv->channel ? TWIN_EXTCLKB : TWIN_EXTCLKA)),
835 card_base + TWIN_SERIAL_CFG);
837 write_scc(priv, R11, priv->param.clocks);
838 if ((priv->type == TYPE_TWIN) && !(priv->param.clocks & TRxCOI)) {
839 /* Enable external TX clock receiver */
840 outb((info->twin_serial_cfg |=
841 (priv->channel ? TWIN_EXTCLKB : TWIN_EXTCLKA)),
842 card_base + TWIN_SERIAL_CFG);
845 /* Configure PackeTwin */
846 if (priv->type == TYPE_TWIN) {
847 /* Assert DTR, enable interrupts */
848 outb((info->twin_serial_cfg |= TWIN_EI |
849 (priv->channel ? TWIN_DTRB_ON : TWIN_DTRA_ON)),
850 card_base + TWIN_SERIAL_CFG);
853 /* Read current status */
854 priv->rr0 = read_scc(priv, R0);
855 /* Enable DCD interrupt */
856 write_scc(priv, R15, DCDIE);
858 netif_start_queue(dev);
/*
 * net_device stop: stop the TX queue, drop DTR (PackeTwin), reset the
 * SCC channel, then release the DMA channel and -- when the sibling
 * channel no longer needs it -- the shared IRQ.
 */
864 static int scc_close(struct net_device *dev) {
865 struct scc_priv *priv = dev->priv;
866 struct scc_info *info = priv->info;
867 int card_base = priv->card_base;
869 netif_stop_queue(dev);
871 if (priv->type == TYPE_TWIN) {
/* Deassert this channel's DTR bit in the shared serial config. */
873 outb((info->twin_serial_cfg &=
874 (priv->channel ? ~TWIN_DTRB_ON : ~TWIN_DTRA_ON)),
875 card_base + TWIN_SERIAL_CFG);
878 /* Reset channel, free DMA and IRQ */
879 write_scc(priv, R9, (priv->channel ? CHRB : CHRA) | MIE | NV);
880 if (priv->param.dma >= 0) {
881 if (priv->type == TYPE_TWIN) outb(0, card_base + TWIN_DMA_CFG);
882 free_dma(priv->param.dma);
884 if (--info->irq_used == 0) free_irq(dev->irq, info);
/*
 * Private ioctls: SIOCGSCCPARAM copies the current scc_param to user
 * space; SIOCSSCCPARAM (CAP_NET_ADMIN only, interface must be down)
 * copies a new parameter set in.  NOTE(review): the switch/case lines
 * and return statements are missing from this view.
 */
890 static int scc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) {
891 struct scc_priv *priv = dev->priv;
895 if (copy_to_user(ifr->ifr_data, &priv->param, sizeof(struct scc_param)))
899 if (!capable(CAP_NET_ADMIN)) return -EPERM;
/* Refuse to change parameters while the interface is running. */
900 if (netif_running(dev)) return -EAGAIN;
901 if (copy_from_user(&priv->param, ifr->ifr_data, sizeof(struct scc_param)))
/*
 * hard_start_xmit: queue one frame for transmission.  Copies the skb
 * payload (minus the leading KISS/control byte, hence skb->data+1 and
 * skb->len-1) into the next free TX ring buffer, advances the ring head
 * under ring_lock, and kicks off the TX state machine (assert RTS, start
 * the txdelay timer) when the transmitter was idle.  The queue is
 * restarted only while at least one TX buffer remains free.
 * NOTE(review): the skb free and return are among the missing lines.
 */
910 static int scc_send_packet(struct sk_buff *skb, struct net_device *dev) {
911 struct scc_priv *priv = dev->priv;
915 /* Temporarily stop the scheduler feeding us packets */
916 netif_stop_queue(dev);
918 /* Transfer data to DMA buffer */
920 memcpy(priv->tx_buf[i], skb->data+1, skb->len-1);
921 priv->tx_len[i] = skb->len-1;
923 /* Clear interrupts while we touch our circular buffers */
925 spin_lock_irqsave(&priv->ring_lock, flags);
926 /* Move the ring buffer's head */
927 priv->tx_head = (i + 1) % NUM_TX_BUF;
930 /* If we just filled up the last buffer, leave queue stopped.
931 The higher layers must wait until we have a DMA buffer
932 to accept the data. */
933 if (priv->tx_count < NUM_TX_BUF) netif_wake_queue(dev);
935 /* Set new TX state */
936 if (priv->state == IDLE) {
937 /* Assert RTS, start timer */
938 priv->state = TX_HEAD;
939 priv->tx_start = jiffies;
940 write_scc(priv, R5, TxCRC_ENAB | RTS | TxENAB | Tx8);
941 write_scc(priv, R15, 0);
942 start_timer(priv, priv->param.txdelay, 0);
945 /* Turn interrupts back on and free buffer */
946 spin_unlock_irqrestore(&priv->ring_lock, flags);
/*
 * get_stats callback: return a pointer to the per-channel statistics
 * (the `return &priv->stats;` line is missing from this view).
 */
953 static struct net_device_stats *scc_get_stats(struct net_device *dev) {
954 struct scc_priv *priv = dev->priv;
/*
 * set_mac_address callback: copy the AX.25 callsign from the sockaddr
 * into dev->dev_addr (return statement missing from this view).
 */
960 static int scc_set_mac_address(struct net_device *dev, void *sa) {
961 memcpy(dev->dev_addr, ((struct sockaddr *)sa)->sa_data, dev->addr_len);
/*
 * Top-level interrupt handler, shared by both channels of one card.
 * Takes the per-chip register lock, then dispatches: on the PackeTwin it
 * polls the board's (active-low, hence ~inb) interrupt register and
 * routes SCC vs. timer-1 vs. timer-2 interrupts; on all other cards it
 * defers to z8530_isr().  (IRQ_HANDLED return missing from this view.)
 */
966 static irqreturn_t scc_isr(int irq, void *dev_id, struct pt_regs * regs) {
967 struct scc_info *info = dev_id;
969 spin_lock(info->priv[0].register_lock);
970 /* At this point interrupts are enabled, and the interrupt under service
971 is already acknowledged, but masked off.
973 Interrupt processing: We loop until we know that the IRQ line is
974 low. If another positive edge occurs afterwards during the ISR,
975 another interrupt will be triggered by the interrupt controller
976 as soon as the IRQ level is enabled again (see asm/irq.h).
978 Bottom-half handlers will be processed after scc_isr(). This is
979 important, since we only have small ringbuffers and want new data
980 to be fetched/delivered immediately. */
982 if (info->priv[0].type == TYPE_TWIN) {
983 int is, card_base = info->priv[0].card_base;
984 while ((is = ~inb(card_base + TWIN_INT_REG)) &
986 if (is & TWIN_SCC_MSK) {
988 } else if (is & TWIN_TMR1_MSK) {
/* Reading the CLR_TMR registers acknowledges the timer interrupt. */
989 inb(card_base + TWIN_CLR_TMR1);
990 tm_isr(&info->priv[0]);
992 inb(card_base + TWIN_CLR_TMR2);
993 tm_isr(&info->priv[1]);
996 } else z8530_isr(info);
997 spin_unlock(info->priv[0].register_lock);
/*
 * Service all pending Z8530 interrupts: read RR3 (interrupt pending
 * register, channel A side) in a bounded loop and dispatch each source
 * -- RX / TX / external-status for channel A or B -- then issue
 * "reset highest IUS" so the next pending source becomes visible.
 * The loop bound `i` guards against a stuck interrupt line.
 */
1002 static inline void z8530_isr(struct scc_info *info) {
1005 while ((is = read_scc(&info->priv[0], R3)) && i--) {
1007 rx_isr(&info->priv[0]);
1008 } else if (is & CHATxIP) {
1009 tx_isr(&info->priv[0]);
1010 } else if (is & CHAEXT) {
1011 es_isr(&info->priv[0]);
1012 } else if (is & CHBRxIP) {
1013 rx_isr(&info->priv[1]);
1014 } else if (is & CHBTxIP) {
1015 tx_isr(&info->priv[1]);
1017 es_isr(&info->priv[1]);
/* Allow lower-priority pending interrupts to assert. */
1019 write_scc(&info->priv[0], R0, RES_H_IUS);
1023 printk(KERN_ERR "dmascc: stuck in ISR with RR3=0x%02x.\n", is);
1025 /* Ok, no interrupts pending from this 8530. The INT line should
1030 static void rx_isr(struct scc_priv *priv) {
/*
 * RX interrupt: in DMA mode only special conditions reach us, so check
 * them and issue an error reset.  In PIO mode, drain every available
 * character into the current RX ring buffer (dropping bytes once
 * BUF_SIZE is reached) and evaluate RR1 per character.
 */
1030 static void rx_isr(struct scc_priv *priv) {
1031 if (priv->param.dma >= 0) {
1032 /* Check special condition and perform error reset. See 2.4.7.5. */
1033 special_condition(priv, read_scc(priv, R1));
1034 write_scc(priv, R0, ERR_RES);
1036 /* Check special condition for each character. Error reset not necessary.
1037 Same algorithm for SCC and ESCC. See 2.4.7.1 and 2.4.7.4. */
1039 while (read_scc(priv, R0) & Rx_CH_AV) {
1040 rc = read_scc(priv, R1);
1041 if (priv->rx_ptr < BUF_SIZE)
1042 priv->rx_buf[priv->rx_head][priv->rx_ptr++] =
1043 read_scc_data(priv);
/* Buffer full: still consume the byte to keep the FIFO moving. */
1046 read_scc_data(priv);
1048 special_condition(priv, rc);
/*
 * Evaluate an RR1 special-condition value for the frame in progress.
 * Handles receiver overrun, and on END_FR computes the frame byte count
 * `cb` (DMA residue or rx_ptr, minus the 2 CRC bytes), accounts errors
 * (overrun, CRC, FIFO overflow of our own ring), queues good frames for
 * rx_bh via schedule_work, and re-arms the DMA controller for the next
 * frame.  NOTE(review): several condition/else lines are missing from
 * this view.
 */
1054 static void special_condition(struct scc_priv *priv, int rc) {
1056 unsigned long flags;
1058 /* See Figure 2-15. Only overrun and EOF need to be checked. */
1061 /* Receiver overrun */
1063 if (priv->param.dma < 0) write_scc(priv, R0, ERR_RES);
1064 } else if (rc & END_FR) {
1065 /* End of frame. Get byte count */
1066 if (priv->param.dma >= 0) {
1067 flags = claim_dma_lock();
/* Bytes received = buffer size - DMA residue, minus 2 CRC bytes. */
1068 cb = BUF_SIZE - get_dma_residue(priv->param.dma) - 2;
1069 release_dma_lock(flags);
1071 cb = priv->rx_ptr - 2;
1073 if (priv->rx_over) {
1074 /* We had an overrun */
1075 priv->stats.rx_errors++;
1076 if (priv->rx_over == 2) priv->stats.rx_length_errors++;
1077 else priv->stats.rx_fifo_errors++;
1079 } else if (rc & CRC_ERR) {
1080 /* Count invalid CRC only if packet length >= minimum */
1082 priv->stats.rx_errors++;
1083 priv->stats.rx_crc_errors++;
1087 if (priv->rx_count < NUM_RX_BUF - 1) {
1088 /* Put good frame in FIFO */
1089 priv->rx_len[priv->rx_head] = cb;
1090 priv->rx_head = (priv->rx_head + 1) % NUM_RX_BUF;
1092 schedule_work(&priv->rx_work);
/* Our own ring is full: count the frame as dropped. */
1094 priv->stats.rx_errors++;
1095 priv->stats.rx_over_errors++;
1099 /* Get ready for new frame */
1100 if (priv->param.dma >= 0) {
1101 flags = claim_dma_lock();
1102 set_dma_addr(priv->param.dma, (int) priv->rx_buf[priv->rx_head]);
1103 set_dma_count(priv->param.dma, BUF_SIZE);
1104 release_dma_lock(flags);
/*
 * Deferred RX work (bottom half): drain the RX ring.  For each queued
 * frame allocate an skb with a leading KISS byte (hence cb+1 and
 * data[1]), hand it to the network stack, update statistics, and advance
 * rx_tail under ring_lock.  The lock is dropped around the per-frame
 * work so the ISR can keep filling the ring.
 * NOTE(review): the alloc-failure branch and netif_rx call are among the
 * lines missing from this view.
 */
1112 static void rx_bh(void *arg) {
1113 struct scc_priv *priv = arg;
1114 int i = priv->rx_tail;
1116 unsigned long flags;
1117 struct sk_buff *skb;
1118 unsigned char *data;
1120 spin_lock_irqsave(&priv->ring_lock, flags);
1121 while (priv->rx_count) {
1122 spin_unlock_irqrestore(&priv->ring_lock, flags);
1123 cb = priv->rx_len[i];
1124 /* Allocate buffer */
1125 skb = dev_alloc_skb(cb+1);
1128 priv->stats.rx_dropped++;
1131 data = skb_put(skb, cb+1);
/* data[0] is left for the KISS control byte; payload goes at data[1]. */
1133 memcpy(&data[1], priv->rx_buf[i], cb);
1134 skb->dev = priv->dev;
1135 skb->protocol = ntohs(ETH_P_AX25);
1136 skb->mac.raw = skb->data;
1138 priv->dev->last_rx = jiffies;
1139 priv->stats.rx_packets++;
1140 priv->stats.rx_bytes += cb;
1142 spin_lock_irqsave(&priv->ring_lock, flags);
1144 priv->rx_tail = i = (i + 1) % NUM_RX_BUF;
1147 spin_unlock_irqrestore(&priv->ring_lock, flags);
/*
 * TX interrupt (PIO mode): feed the SCC TX FIFO from the current TX ring
 * buffer.  When the buffer is exhausted, reset the TX-pending latch to
 * suspend further TX interrupts; on a Z8530 (no AUTOEOM) also reset the
 * EOM latch after the first byte of a frame.
 */
1151 static void tx_isr(struct scc_priv *priv) {
1152 int i = priv->tx_tail, p = priv->tx_ptr;
1154 /* Suspend TX interrupts if we don't want to send anything.
1156 if (p == priv->tx_len[i]) {
1157 write_scc(priv, R0, RES_Tx_P);
1161 /* Write characters */
1162 while ((read_scc(priv, R0) & Tx_BUF_EMP) && p < priv->tx_len[i]) {
1163 write_scc_data(priv, priv->tx_buf[i][p++], 0);
1166 /* Reset EOM latch of Z8530 */
1167 if (!priv->tx_ptr && p && priv->chip == Z8530)
1168 write_scc(priv, R0, RES_EOM_L);
/*
 * External/status interrupt: read RR0, compute which bits changed
 * (drr0 = old ^ new) and handle three sources:
 *  - TX underrun/EOM: either an error (still in TX_DATA with bytes left,
 *    counted as tx_fifo_errors) or normal end of frame (advance tx_tail,
 *    wake the queue, move to TX_PAUSE or TX_TAIL via the 8253 timer);
 *  - DCD transition: enter the DCD_ON/DCD_OFF debounce states;
 *  - CTS transition (non-PackeTwin cards, where CTS is the timer line).
 * NOTE(review): the `if (drr0 & ...)` selector lines and several state
 * labels are missing from this view.
 */
1174 static void es_isr(struct scc_priv *priv) {
1175 int i, rr0, drr0, res;
1176 unsigned long flags;
1178 /* Read status, reset interrupt bit (open latches) */
1179 rr0 = read_scc(priv, R0);
1180 write_scc(priv, R0, RES_EXT_INT);
1181 drr0 = priv->rr0 ^ rr0;
1184 /* Transmit underrun (2.4.9.6). We can't check the TxEOM flag, since
1185 it might have already been cleared again by AUTOEOM. */
1186 if (priv->state == TX_DATA) {
1187 /* Get remaining bytes */
1189 if (priv->param.dma >= 0) {
1190 disable_dma(priv->param.dma);
1191 flags = claim_dma_lock();
1192 res = get_dma_residue(priv->param.dma);
1193 release_dma_lock(flags);
1195 res = priv->tx_len[i] - priv->tx_ptr;
1198 /* Disable DREQ / TX interrupt */
1199 if (priv->param.dma >= 0 && priv->type == TYPE_TWIN)
1200 outb(0, priv->card_base + TWIN_DMA_CFG);
1202 write_scc(priv, R1, EXT_INT_ENAB | WT_FN_RDYFN);
1204 /* Update packet statistics */
1205 priv->stats.tx_errors++;
1206 priv->stats.tx_fifo_errors++;
1207 /* Other underrun interrupts may already be waiting */
1208 write_scc(priv, R0, RES_EXT_INT);
1209 write_scc(priv, R0, RES_EXT_INT);
1211 /* Update packet statistics */
1212 priv->stats.tx_packets++;
1213 priv->stats.tx_bytes += priv->tx_len[i];
1214 /* Remove frame from FIFO */
1215 priv->tx_tail = (i + 1) % NUM_TX_BUF;
1217 /* Inform upper layers */
1218 netif_wake_queue(priv->dev);
/* More frames queued and TX timeout not exceeded: pause between frames;
   otherwise send the closing tail and drop RTS. */
1221 write_scc(priv, R15, 0);
1222 if (priv->tx_count &&
1223 (jiffies - priv->tx_start) < priv->param.txtimeout) {
1224 priv->state = TX_PAUSE;
1225 start_timer(priv, priv->param.txpause, 0);
1227 priv->state = TX_TAIL;
1228 start_timer(priv, priv->param.txtail, 0);
1232 /* DCD transition */
1235 switch (priv->state) {
1238 priv->state = DCD_ON;
1239 write_scc(priv, R15, 0);
1240 start_timer(priv, priv->param.dcdon, 0);
1243 switch (priv->state) {
1246 priv->state = DCD_OFF;
1247 write_scc(priv, R15, 0);
1248 start_timer(priv, priv->param.dcdoff, 0);
1253 /* CTS transition */
1254 if ((drr0 & CTS) && (~rr0 & CTS) && priv->type != TYPE_TWIN)
/*
 * Timer interrupt: advance the channel's CSMA/TX state machine.
 * Visible transitions: TX_HEAD->TX_DATA, end of tail -> RTS_OFF,
 * DCD debounce states re-sampling RR0 (a busy channel during WAIT
 * counts as a collision and re-enters RX_ON), TX_PAUSE restarting the
 * next frame's txdelay when frames are queued, and the p-persistence
 * slot-time backoff using random()/persist*slottime.
 * NOTE(review): the case labels themselves are missing from this view.
 */
1260 static void tm_isr(struct scc_priv *priv) {
1261 switch (priv->state) {
1265 priv->state = TX_DATA;
/* Tail sent: disable TX, drop RTS after the rtsoff interval. */
1268 write_scc(priv, R5, TxCRC_ENAB | Tx8);
1269 priv->state = RTS_OFF;
1270 if (priv->type != TYPE_TWIN) write_scc(priv, R15, 0);
1271 start_timer(priv, priv->param.rtsoff, 0);
1274 write_scc(priv, R15, DCDIE);
1275 priv->rr0 = read_scc(priv, R0);
1276 if (priv->rr0 & DCD) {
1277 priv->stats.collisions++;
1279 priv->state = RX_ON;
1282 start_timer(priv, priv->param.waittime, DCDIE);
1286 if (priv->tx_count) {
1287 priv->state = TX_HEAD;
1288 priv->tx_start = jiffies;
1289 write_scc(priv, R5, TxCRC_ENAB | RTS | TxENAB | Tx8);
1290 write_scc(priv, R15, 0);
1291 start_timer(priv, priv->param.txdelay, 0);
1294 if (priv->type != TYPE_TWIN) write_scc(priv, R15, DCDIE);
1299 write_scc(priv, R15, DCDIE);
1300 priv->rr0 = read_scc(priv, R0);
1301 if (priv->rr0 & DCD) {
1303 priv->state = RX_ON;
/* p-persistence backoff: wait a random number of slot times. */
1307 random()/priv->param.persist*priv->param.slottime,
/*
 * Start transmitting the frame at tx_tail.  In DMA mode: prime the TX
 * FIFO by hand with the first n bytes (3 on a Z85230 with its 4-byte
 * FIFO, 1 otherwise), program the DMA controller with the rest, enable
 * the TX-underrun interrupt and the card's DREQ routing.  In PIO mode:
 * just enable TX interrupts.  A Z8530 additionally needs its EOM latch
 * reset since it lacks AUTOEOM.
 */
1315 static inline void tx_on(struct scc_priv *priv) {
1317 unsigned long flags;
1319 if (priv->param.dma >= 0) {
1320 n = (priv->chip == Z85230) ? 3 : 1;
1321 /* Program DMA controller */
1322 flags = claim_dma_lock();
1323 set_dma_mode(priv->param.dma, DMA_MODE_WRITE);
/* DMA covers the frame minus the n bytes written by hand below. */
1324 set_dma_addr(priv->param.dma, (int) priv->tx_buf[priv->tx_tail]+n);
1325 set_dma_count(priv->param.dma, priv->tx_len[priv->tx_tail]-n);
1326 release_dma_lock(flags);
1327 /* Enable TX underrun interrupt */
1328 write_scc(priv, R15, TxUIE);
1329 /* Configure DREQ */
1330 if (priv->type == TYPE_TWIN)
1331 outb((priv->param.dma == 1) ? TWIN_DMA_HDX_T1 : TWIN_DMA_HDX_T3,
1332 priv->card_base + TWIN_DMA_CFG);
1334 write_scc(priv, R1, EXT_INT_ENAB | WT_FN_RDYFN | WT_RDY_ENAB);
1335 /* Write first byte(s) */
1336 spin_lock_irqsave(priv->register_lock, flags);
1337 for (i = 0; i < n; i++)
1338 write_scc_data(priv, priv->tx_buf[priv->tx_tail][i], 1);
1339 enable_dma(priv->param.dma);
1340 spin_unlock_irqrestore(priv->register_lock, flags);
/* PIO mode: TX-interrupt driven transmission (see tx_isr). */
1342 write_scc(priv, R15, TxUIE);
1343 write_scc(priv, R1, EXT_INT_ENAB | WT_FN_RDYFN | TxINT_ENAB);
1346 /* Reset EOM latch if we do not have the AUTOEOM feature */
1347 if (priv->chip == Z8530) write_scc(priv, R0, RES_EOM_L);
/*
 * Enable the receiver: drain any stale bytes from the RX FIFO, then in
 * DMA mode program the DMA controller for the current RX ring buffer and
 * route the card's DREQ, or in PIO mode enable per-character RX
 * interrupts.  Finally clear errors and enable the receiver with CRC.
 */
1351 static inline void rx_on(struct scc_priv *priv) {
1352 unsigned long flags;
/* Flush leftover characters so the new frame starts clean. */
1355 while (read_scc(priv, R0) & Rx_CH_AV) read_scc_data(priv);
1357 if (priv->param.dma >= 0) {
1358 /* Program DMA controller */
1359 flags = claim_dma_lock();
1360 set_dma_mode(priv->param.dma, DMA_MODE_READ);
1361 set_dma_addr(priv->param.dma, (int) priv->rx_buf[priv->rx_head]);
1362 set_dma_count(priv->param.dma, BUF_SIZE);
1363 release_dma_lock(flags);
1364 enable_dma(priv->param.dma);
1365 /* Configure PackeTwin DMA */
1366 if (priv->type == TYPE_TWIN) {
1367 outb((priv->param.dma == 1) ? TWIN_DMA_HDX_R1 : TWIN_DMA_HDX_R3,
1368 priv->card_base + TWIN_DMA_CFG);
1370 /* Sp. cond. intr. only, ext int enable, RX DMA enable */
1371 write_scc(priv, R1, EXT_INT_ENAB | INT_ERR_Rx |
1372 WT_RDY_RT | WT_FN_RDYFN | WT_RDY_ENAB);
1374 /* Reset current frame */
1376 /* Intr. on all Rx characters and Sp. cond., ext int enable */
1377 write_scc(priv, R1, EXT_INT_ENAB | INT_ALL_Rx | WT_RDY_RT |
1380 write_scc(priv, R0, ERR_RES);
1381 write_scc(priv, R3, RxENABLE | Rx8 | RxCRC_ENAB);
/*
 * Disable the receiver and its interrupt/DMA sources: RX off in WR3,
 * DREQ routing cleared on the PackeTwin, RX interrupts masked in WR1,
 * and the DMA channel disabled.
 */
1385 static inline void rx_off(struct scc_priv *priv) {
1386 /* Disable receiver */
1387 write_scc(priv, R3, Rx8);
1388 /* Disable DREQ / RX interrupt */
1389 if (priv->param.dma >= 0 && priv->type == TYPE_TWIN)
1390 outb(0, priv->card_base + TWIN_DMA_CFG);
1392 write_scc(priv, R1, EXT_INT_ENAB | WT_FN_RDYFN);
1394 if (priv->param.dma >= 0) disable_dma(priv->param.dma);
/*
 * Arm this channel's 8253/8254 one-shot timer with count `t` (in
 * 1/TMR_0_HZ units): write the mode byte, then LSB/MSB of the count.
 * On non-PackeTwin cards the timer output is wired to CTS, so the
 * requested R15 bits plus CTSIE are enabled to receive the expiry as a
 * CTS external/status interrupt.
 * NOTE(review): the save_flags/cli counterpart of the visible
 * restore_flags(), and any t==0 shortcut, are missing from this view.
 */
1398 static void start_timer(struct scc_priv *priv, int t, int r15) {
1399 unsigned long flags;
1401 outb(priv->tmr_mode, priv->tmr_ctrl);
1407 outb(t & 0xFF, priv->tmr_cnt);
1408 outb((t >> 8) & 0xFF, priv->tmr_cnt);
1409 if (priv->type != TYPE_TWIN) {
1410 write_scc(priv, R15, r15 | CTSIE);
1413 restore_flags(flags);
1418 static inline unsigned char random(void) {
1419 /* See "Numerical Recipes in C", second edition, p. 284 */
1420 rand = rand * 1664525L + 1013904223L;
1421 return (unsigned char) (rand >> 24);