2 * $Id: dmascc.c,v 1.27 2000/06/01 14:46:23 oe1kib Exp $
4 * Driver for high-speed SCC boards (those with DMA support)
5 * Copyright (C) 1997-2000 Klaus Kudielka
7 * S5SCC/DMA support by Janko Koleznik S52HI
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
26 #include <linux/module.h>
27 #include <linux/delay.h>
28 #include <linux/errno.h>
29 #include <linux/if_arp.h>
31 #include <linux/init.h>
32 #include <linux/interrupt.h>
33 #include <linux/ioport.h>
34 #include <linux/kernel.h>
36 #include <linux/netdevice.h>
37 #include <linux/rtnetlink.h>
38 #include <linux/sockios.h>
39 #include <linux/workqueue.h>
40 #include <linux/version.h>
41 #include <asm/atomic.h>
42 #include <asm/bitops.h>
46 #include <asm/uaccess.h>
51 /* Number of buffers per channel */
53 #define NUM_TX_BUF 2 /* NUM_TX_BUF >= 1 (min. 2 recommended) */
54 #define NUM_RX_BUF 6 /* NUM_RX_BUF >= 1 (min. 2 recommended) */
55 #define BUF_SIZE 1576 /* BUF_SIZE >= mtu + hard_header_len */
60 #define HW_PI { "Ottawa PI", 0x300, 0x20, 0x10, 8, \
61 0, 8, 1843200, 3686400 }
62 #define HW_PI2 { "Ottawa PI2", 0x300, 0x20, 0x10, 8, \
63 0, 8, 3686400, 7372800 }
64 #define HW_TWIN { "Gracilis PackeTwin", 0x200, 0x10, 0x10, 32, \
65 0, 4, 6144000, 6144000 }
66 #define HW_S5 { "S5SCC/DMA", 0x200, 0x10, 0x10, 32, \
67 0, 8, 4915200, 9830400 }
69 #define HARDWARE { HW_PI, HW_PI2, HW_TWIN, HW_S5 }
71 #define TMR_0_HZ 25600 /* Frequency of timer 0 */
79 #define MAX_NUM_DEVS 32
82 /* SCC chips supported */
88 #define CHIPNAMES { "Z8530", "Z85C30", "Z85230" }
93 /* 8530 registers relative to card base */
95 #define SCCB_DATA 0x01
97 #define SCCA_DATA 0x03
99 /* 8253/8254 registers relative to card base */
100 #define TMR_CNT0 0x00
101 #define TMR_CNT1 0x01
102 #define TMR_CNT2 0x02
103 #define TMR_CTRL 0x03
105 /* Additional PI/PI2 registers relative to card base */
106 #define PI_DREQ_MASK 0x04
108 /* Additional PackeTwin registers relative to card base */
109 #define TWIN_INT_REG 0x08
110 #define TWIN_CLR_TMR1 0x09
111 #define TWIN_CLR_TMR2 0x0a
112 #define TWIN_SPARE_1 0x0b
113 #define TWIN_DMA_CFG 0x08
114 #define TWIN_SERIAL_CFG 0x09
115 #define TWIN_DMA_CLR_FF 0x0a
116 #define TWIN_SPARE_2 0x0b
119 /* PackeTwin I/O register values */
122 #define TWIN_SCC_MSK 0x01
123 #define TWIN_TMR1_MSK 0x02
124 #define TWIN_TMR2_MSK 0x04
125 #define TWIN_INT_MSK 0x07
128 #define TWIN_DTRA_ON 0x01
129 #define TWIN_DTRB_ON 0x02
130 #define TWIN_EXTCLKA 0x04
131 #define TWIN_EXTCLKB 0x08
132 #define TWIN_LOOPA_ON 0x10
133 #define TWIN_LOOPB_ON 0x20
137 #define TWIN_DMA_HDX_T1 0x08
138 #define TWIN_DMA_HDX_R1 0x0a
139 #define TWIN_DMA_HDX_T3 0x14
140 #define TWIN_DMA_HDX_R3 0x16
141 #define TWIN_DMA_FDX_T3R1 0x1b
142 #define TWIN_DMA_FDX_T1R3 0x1d
161 #define SIOCGSCCPARAM SIOCDEVPRIVATE
162 #define SIOCSSCCPARAM (SIOCDEVPRIVATE+1)
168 int pclk_hz; /* frequency of BRG input (don't change) */
169 int brg_tc; /* BRG terminal count; BRG disabled if < 0 */
170 int nrzi; /* 0 (nrz), 1 (nrzi) */
171 int clocks; /* see dmascc_cfg documentation */
172 int txdelay; /* [1/TMR_0_HZ] */
173 int txtimeout; /* [1/HZ] */
174 int txtail; /* [1/TMR_0_HZ] */
175 int waittime; /* [1/TMR_0_HZ] */
176 int slottime; /* [1/TMR_0_HZ] */
177 int persist; /* 1 ... 256 */
178 int dma; /* -1 (disable), 0, 1, 3 */
179 int txpause; /* [1/TMR_0_HZ] */
180 int rtsoff; /* [1/TMR_0_HZ] */
181 int dcdon; /* [1/TMR_0_HZ] */
182 int dcdoff; /* [1/TMR_0_HZ] */
185 struct scc_hardware {
200 struct net_device *dev;
201 struct scc_info *info;
202 struct net_device_stats stats;
204 int card_base, scc_cmd, scc_data;
205 int tmr_cnt, tmr_ctrl, tmr_mode;
206 struct scc_param param;
207 char rx_buf[NUM_RX_BUF][BUF_SIZE];
208 int rx_len[NUM_RX_BUF];
210 struct work_struct rx_work;
211 int rx_head, rx_tail, rx_count;
213 char tx_buf[NUM_TX_BUF][BUF_SIZE];
214 int tx_len[NUM_TX_BUF];
216 int tx_head, tx_tail, tx_count;
218 unsigned long tx_start;
220 spinlock_t *register_lock; /* Per scc_info */
221 spinlock_t ring_lock;
227 struct net_device *dev[2];
228 struct scc_priv priv[2];
229 struct scc_info *next;
230 spinlock_t register_lock; /* Per device register lock */
234 /* Function declarations */
235 static int setup_adapter(int card_base, int type, int n) __init;
237 static void write_scc(struct scc_priv *priv, int reg, int val);
238 static void write_scc_data(struct scc_priv *priv, int val, int fast);
239 static int read_scc(struct scc_priv *priv, int reg);
240 static int read_scc_data(struct scc_priv *priv);
242 static int scc_open(struct net_device *dev);
243 static int scc_close(struct net_device *dev);
244 static int scc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
245 static int scc_send_packet(struct sk_buff *skb, struct net_device *dev);
246 static struct net_device_stats *scc_get_stats(struct net_device *dev);
247 static int scc_set_mac_address(struct net_device *dev, void *sa);
249 static irqreturn_t scc_isr(int irq, void *dev_id, struct pt_regs * regs);
250 static inline void z8530_isr(struct scc_info *info);
251 static void rx_isr(struct scc_priv *priv);
252 static void special_condition(struct scc_priv *priv, int rc);
253 static void rx_bh(void *arg);
254 static void tx_isr(struct scc_priv *priv);
255 static void es_isr(struct scc_priv *priv);
256 static void tm_isr(struct scc_priv *priv);
258 static inline void tx_on(struct scc_priv *priv);
259 static inline void rx_on(struct scc_priv *priv);
260 static inline void rx_off(struct scc_priv *priv);
261 static void start_timer(struct scc_priv *priv, int t, int r15);
262 static inline unsigned char random(void);
265 /* Initialization variables */
267 static int io[MAX_NUM_DEVS] __initdata = { 0, };
268 /* Beware! hw[] is also used in cleanup_module(). */
269 static struct scc_hardware hw[NUM_TYPES] __initdata_or_module = HARDWARE;
270 static char ax25_broadcast[7] __initdata =
271 { 'Q'<<1, 'S'<<1, 'T'<<1, ' '<<1, ' '<<1, ' '<<1, '0'<<1 };
272 static char ax25_test[7] __initdata =
273 { 'L'<<1, 'I'<<1, 'N'<<1, 'U'<<1, 'X'<<1, ' '<<1, '1'<<1 };
276 /* Global variables */
278 static struct scc_info *first;
279 static unsigned long rand;
282 MODULE_AUTHOR("Klaus Kudielka");
283 MODULE_DESCRIPTION("Driver for high-speed SCC boards");
284 MODULE_PARM(io, "1-" __MODULE_STRING(MAX_NUM_DEVS) "i");
285 MODULE_LICENSE("GPL");
/*
 * Module exit handler: for each adapter, unregister both net devices,
 * quiesce the hardware (PackeTwin interrupt disable + SCC hardware
 * reset via WR9/FHWRES), release the I/O region, and free the devices.
 * NOTE(review): this dump is missing lines — the walk over the `first`
 * scc_info list, the `int i;` declaration, the kfree(info), and the
 * closing brace. Verify against the complete driver source.
 */
287 static void __exit dmascc_exit(void) {
289 struct scc_info *info;
294 /* Unregister devices */
295 for (i = 0; i < 2; i++)
296 unregister_netdev(info->dev[i]);
/* PackeTwin: clear serial config register to mask board interrupts */
299 if (info->priv[0].type == TYPE_TWIN)
300 outb(0, info->dev[0]->base_addr + TWIN_SERIAL_CFG);
/* Force hardware reset of the 8530 (FHWRES) before releasing ports */
301 write_scc(&info->priv[0], R9, FHWRES);
302 release_region(info->dev[0]->base_addr,
303 hw[info->priv[0].type].io_size);
305 for (i = 0; i < 2; i++)
306 free_netdev(info->dev[i]);
/*
 * Boot command-line hook ("dmascc=..."): copies up to MAX_NUM_DEVS
 * user-supplied I/O base addresses from ints[] into io[].
 * NOTE(review): the loop body, the `int i;` declaration, and the
 * closing brace are missing from this dump — confirm against the
 * complete source.
 */
315 void __init dmascc_setup(char *str, int *ints) {
318 for (i = 0; i < MAX_NUM_DEVS && i < ints[0]; i++)
/*
 * Module init: autodetects supported SCC/DMA boards. For every card
 * type it builds the list of candidate I/O regions (user-specified via
 * io[] or the type's defaults), reserves each region, then programs
 * the on-board 8253/8254: timer 0 as a TMR_0_HZ tick source and
 * timer 1 to count down HZ/10 worth of ticks. It then measures, in
 * jiffies, how long timer 1 takes to expire; a measured delay of
 * ~10 jiffies (9..11) means a real card is answering, and
 * setup_adapter() is called; otherwise the region is released.
 * NOTE(review): many lines are missing from this dump (local
 * declarations such as h/i/j/n/t_val, timer-2 programming, the
 * counting[]/start[] initialisation, and all closing braces) —
 * the commentary below describes only the visible code.
 */
323 static int __init dmascc_init(void) {
325 int base[MAX_NUM_DEVS], tcmd[MAX_NUM_DEVS], t0[MAX_NUM_DEVS],
328 unsigned long time, start[MAX_NUM_DEVS], delay[MAX_NUM_DEVS],
329 counting[MAX_NUM_DEVS];
331 /* Initialize random number generator */
333 /* Cards found = 0 */
335 /* Warning message */
336 if (!io[0]) printk(KERN_INFO "dmascc: autoprobing (dangerous)\n");
338 /* Run autodetection for each card type */
339 for (h = 0; h < NUM_TYPES; h++) {
342 /* User-specified I/O address regions */
343 for (i = 0; i < hw[h].num_devs; i++) base[i] = 0;
344 for (i = 0; i < MAX_NUM_DEVS && io[i]; i++) {
/* Map the user address to a slot index for this card type */
345 j = (io[i] - hw[h].io_region) / hw[h].io_delta;
347 j < hw[h].num_devs &&
348 hw[h].io_region + j * hw[h].io_delta == io[i]) {
353 /* Default I/O address regions */
354 for (i = 0; i < hw[h].num_devs; i++) {
355 base[i] = hw[h].io_region + i * hw[h].io_delta;
359 /* Check valid I/O address regions */
360 for (i = 0; i < hw[h].num_devs; i++)
362 if (!request_region(base[i], hw[h].io_size, "dmascc"))
/* Precompute timer control / counter port addresses per candidate */
365 tcmd[i] = base[i] + hw[h].tmr_offset + TMR_CTRL;
366 t0[i] = base[i] + hw[h].tmr_offset + TMR_CNT0;
367 t1[i] = base[i] + hw[h].tmr_offset + TMR_CNT1;
372 for (i = 0; i < hw[h].num_devs; i++)
374 /* Timer 0: LSB+MSB, Mode 3, TMR_0_HZ */
376 outb((hw[h].tmr_hz/TMR_0_HZ) & 0xFF, t0[i]);
377 outb((hw[h].tmr_hz/TMR_0_HZ) >> 8, t0[i]);
378 /* Timer 1: LSB+MSB, Mode 0, HZ/10 */
380 outb((TMR_0_HZ/HZ*10) & 0xFF, t1[i]);
381 outb((TMR_0_HZ/HZ*10) >> 8, t1[i]);
385 /* Timer 2: LSB+MSB, Mode 0 */
389 /* Wait until counter registers are loaded */
390 udelay(2000000/TMR_0_HZ);
/* Poll for up to 13 jiffies, noting when each timer 1 expires/wraps */
393 while (jiffies - time < 13) {
394 for (i = 0; i < hw[h].num_devs; i++)
395 if (base[i] && counting[i]) {
396 /* Read back Timer 1: latch; read LSB; read MSB */
398 t_val = inb(t1[i]) + (inb(t1[i]) << 8);
399 /* Also check whether counter did wrap */
400 if (t_val == 0 || t_val > TMR_0_HZ/HZ*10) counting[i] = 0;
401 delay[i] = jiffies - start[i];
405 /* Evaluate measurements */
406 for (i = 0; i < hw[h].num_devs; i++)
408 if ((delay[i] >= 9 && delay[i] <= 11)&&
409 /* Ok, we have found an adapter */
410 (setup_adapter(base[i], h, n) == 0))
413 release_region(base[i], hw[h].io_size);
418 /* If any adapter was successfully initialized, return ok */
421 /* If no adapter found, return error */
422 printk(KERN_INFO "dmascc: no adapters found\n");
426 module_init(dmascc_init);
427 module_exit(dmascc_exit);
/*
 * alloc_netdev() setup callback: initialises the AX.25-specific fields
 * of a freshly allocated net_device (ARP type, header length, TX queue
 * length, broadcast and default hardware addresses — 7-byte shifted
 * AX.25 callsigns).
 * NOTE(review): the opening/closing braces and some assignments are
 * missing from this dump.
 */
429 static void dev_setup(struct net_device *dev)
431 dev->type = ARPHRD_AX25;
432 dev->hard_header_len = 73;
435 dev->tx_queue_len = 64;
436 memcpy(dev->broadcast, ax25_broadcast, 7);
437 memcpy(dev->dev_addr, ax25_test, 7);
/*
 * Initialises one detected adapter at card_base of the given hardware
 * type (index into hw[]), registering device pair dmascc<2n> and
 * dmascc<2n+1>. Steps visible below: allocate scc_info (GFP_DMA, since
 * it is the DMA buffer container) and two net_devices; hardware-reset
 * the SCC; probe the chip generation (Z8530 / Z85C30 / Z85230) via the
 * WR7' register and TX-FIFO depth; auto-detect the IRQ by arming a
 * timer interrupt between probe_irq_on()/probe_irq_off(); then fill in
 * per-channel scc_priv state and register both net devices.
 * NOTE(review): error-handling paths, goto labels, several assignments
 * and all closing braces are missing from this dump — the visible
 * cleanup tail (lines 604-611) belongs to those missing error paths.
 */
440 static int __init setup_adapter(int card_base, int type, int n)
443 struct scc_info *info;
444 struct net_device *dev;
445 struct scc_priv *priv;
448 int tmr_base = card_base + hw[type].tmr_offset;
449 int scc_base = card_base + hw[type].scc_offset;
450 char *chipnames[] = CHIPNAMES;
452 /* Allocate memory */
453 info = kmalloc(sizeof(struct scc_info), GFP_KERNEL | GFP_DMA);
455 printk(KERN_ERR "dmascc: "
456 "could not allocate memory for %s at %#3x\n",
457 hw[type].name, card_base);
461 /* Initialize what is necessary for write_scc and write_scc_data */
462 memset(info, 0, sizeof(struct scc_info));
464 info->dev[0] = alloc_netdev(0, "", dev_setup);
466 printk(KERN_ERR "dmascc: "
467 "could not allocate memory for %s at %#3x\n",
468 hw[type].name, card_base);
472 info->dev[1] = alloc_netdev(0, "", dev_setup);
474 printk(KERN_ERR "dmascc: "
475 "could not allocate memory for %s at %#3x\n",
476 hw[type].name, card_base);
479 spin_lock_init(&info->register_lock);
/* Minimal channel-A setup so write_scc()/read_scc() work for probing */
481 priv = &info->priv[0];
483 priv->card_base = card_base;
484 priv->scc_cmd = scc_base + SCCA_CMD;
485 priv->scc_data = scc_base + SCCA_DATA;
486 priv->register_lock = &info->register_lock;
/* Force hardware reset, master interrupt enable, no vector */
489 write_scc(priv, R9, FHWRES | MIE | NV);
491 /* Determine type of chip by enabling SDLC/HDLC enhancements */
492 write_scc(priv, R15, SHDLCE);
493 if (!read_scc(priv, R15)) {
494 /* WR7' not present. This is an ordinary Z8530 SCC. */
497 /* Put one character in TX FIFO */
498 write_scc_data(priv, 0, 0);
499 if (read_scc(priv, R0) & Tx_BUF_EMP) {
500 /* TX FIFO not full. This is a Z85230 ESCC with a 4-byte FIFO. */
503 /* TX FIFO full. This is a Z85C30 SCC with a 1-byte FIFO. */
507 write_scc(priv, R15, 0);
509 /* Start IRQ auto-detection */
510 irqs = probe_irq_on();
512 /* Enable interrupts */
513 if (type == TYPE_TWIN) {
514 outb(0, card_base + TWIN_DMA_CFG);
515 inb(card_base + TWIN_CLR_TMR1);
516 inb(card_base + TWIN_CLR_TMR2);
517 info->twin_serial_cfg = TWIN_EI;
518 outb(info->twin_serial_cfg, card_base + TWIN_SERIAL_CFG);
520 write_scc(priv, R15, CTSIE);
521 write_scc(priv, R0, RES_EXT_INT);
522 write_scc(priv, R1, EXT_INT_ENAB);
/* Trigger one timer-1 interrupt: load count of 1 (LSB then MSB) */
526 outb(1, tmr_base + TMR_CNT1);
527 outb(0, tmr_base + TMR_CNT1);
529 /* Wait and detect IRQ */
530 time = jiffies; while (jiffies - time < 2 + HZ / TMR_0_HZ);
531 irq = probe_irq_off(irqs);
533 /* Clear pending interrupt, disable interrupts */
534 if (type == TYPE_TWIN) {
535 inb(card_base + TWIN_CLR_TMR1);
537 write_scc(priv, R1, 0);
538 write_scc(priv, R15, 0);
539 write_scc(priv, R0, RES_EXT_INT);
543 printk(KERN_ERR "dmascc: could not find irq of %s at %#3x (irq=%d)\n",
544 hw[type].name, card_base, irq);
548 /* Set up data structures */
549 for (i = 0; i < 2; i++) {
551 priv = &info->priv[i];
557 spin_lock_init(&priv->ring_lock);
558 priv->register_lock = &info->register_lock;
559 priv->card_base = card_base;
/* Channel A (i==0) vs channel B (i==1) register/timer mapping */
560 priv->scc_cmd = scc_base + (i ? SCCB_CMD : SCCA_CMD);
561 priv->scc_data = scc_base + (i ? SCCB_DATA : SCCA_DATA);
562 priv->tmr_cnt = tmr_base + (i ? TMR_CNT2 : TMR_CNT1);
563 priv->tmr_ctrl = tmr_base + TMR_CTRL;
564 priv->tmr_mode = i ? 0xb0 : 0x70;
565 priv->param.pclk_hz = hw[type].pclk_hz;
566 priv->param.brg_tc = -1;
567 priv->param.clocks = TCTRxCP | RCRTxCP;
568 priv->param.persist = 256;
569 priv->param.dma = -1;
570 INIT_WORK(&priv->rx_work, rx_bh, priv);
572 sprintf(dev->name, "dmascc%i", 2*n+i);
573 SET_MODULE_OWNER(dev);
574 dev->base_addr = card_base;
/* Pre-net_device_ops era: method pointers assigned directly */
576 dev->open = scc_open;
577 dev->stop = scc_close;
578 dev->do_ioctl = scc_ioctl;
579 dev->hard_start_xmit = scc_send_packet;
580 dev->get_stats = scc_get_stats;
581 dev->hard_header = ax25_encapsulate;
582 dev->rebuild_header = ax25_rebuild_header;
583 dev->set_mac_address = scc_set_mac_address;
585 if (register_netdev(info->dev[0])) {
586 printk(KERN_ERR "dmascc: could not register %s\n",
590 if (register_netdev(info->dev[1])) {
591 printk(KERN_ERR "dmascc: could not register %s\n",
599 printk(KERN_INFO "dmascc: found %s (%s) at %#3x, irq %d\n", hw[type].name,
600 chipnames[chip], card_base, irq);
/* Error unwind (labels missing from this dump) */
604 unregister_netdev(info->dev[0]);
606 if (info->priv[0].type == TYPE_TWIN)
607 outb(0, info->dev[0]->base_addr + TWIN_SERIAL_CFG);
608 write_scc(&info->priv[0], R9, FHWRES);
609 free_netdev(info->dev[1]);
611 free_netdev(info->dev[0]);
619 /* Driver functions */
/*
 * Writes `val` to SCC register `reg` of this channel (reg 0 needs no
 * register-select write). Access style depends on the board type: fast
 * outb, slower outb_p, or — for the PI/PI2 (visible default path) —
 * with DMA requests masked via PI_DREQ_MASK under the shared per-chip
 * register lock, because a DREQ during register access would corrupt
 * the SCC register pointer.
 * NOTE(review): the `unsigned long flags;` declaration, the case
 * labels of the switch, and the closing braces are missing from this
 * dump.
 */
621 static void write_scc(struct scc_priv *priv, int reg, int val) {
623 switch (priv->type) {
625 if (reg) outb(reg, priv->scc_cmd);
626 outb(val, priv->scc_cmd);
629 if (reg) outb_p(reg, priv->scc_cmd);
630 outb_p(val, priv->scc_cmd);
633 spin_lock_irqsave(priv->register_lock, flags);
634 outb_p(0, priv->card_base + PI_DREQ_MASK);
635 if (reg) outb_p(reg, priv->scc_cmd);
636 outb_p(val, priv->scc_cmd);
637 outb(1, priv->card_base + PI_DREQ_MASK);
638 spin_unlock_irqrestore(priv->register_lock, flags);
/*
 * Writes one data byte to the channel's SCC data port. `fast` skips
 * the DREQ-masking/locking on the PI/PI2 path when the caller already
 * holds the register lock. Board-type dispatch mirrors write_scc().
 * NOTE(review): `unsigned long flags;`, the switch case labels, and
 * closing braces are missing from this dump.
 */
644 static void write_scc_data(struct scc_priv *priv, int val, int fast) {
646 switch (priv->type) {
648 outb(val, priv->scc_data);
651 outb_p(val, priv->scc_data);
654 if (fast) outb_p(val, priv->scc_data);
656 spin_lock_irqsave(priv->register_lock, flags);
657 outb_p(0, priv->card_base + PI_DREQ_MASK);
658 outb_p(val, priv->scc_data);
659 outb(1, priv->card_base + PI_DREQ_MASK);
660 spin_unlock_irqrestore(priv->register_lock, flags);
/*
 * Reads SCC register `reg` of this channel; counterpart of
 * write_scc() with the same board-type access dispatch (plain inb,
 * inb_p, or DREQ-masked + locked on the PI/PI2 path).
 * NOTE(review): local declarations (`rc`, `flags`), the case labels,
 * the final `return rc;` and closing braces are missing from this
 * dump.
 */
667 static int read_scc(struct scc_priv *priv, int reg) {
670 switch (priv->type) {
672 if (reg) outb(reg, priv->scc_cmd);
673 return inb(priv->scc_cmd);
675 if (reg) outb_p(reg, priv->scc_cmd);
676 return inb_p(priv->scc_cmd);
678 spin_lock_irqsave(priv->register_lock, flags);
679 outb_p(0, priv->card_base + PI_DREQ_MASK);
680 if (reg) outb_p(reg, priv->scc_cmd);
681 rc = inb_p(priv->scc_cmd);
682 outb(1, priv->card_base + PI_DREQ_MASK);
683 spin_unlock_irqrestore(priv->register_lock, flags);
/*
 * Reads one data byte from the channel's SCC data port, using the same
 * per-board access style as read_scc().
 * NOTE(review): local declarations, case labels, the final
 * `return rc;` and closing braces are missing from this dump.
 */
689 static int read_scc_data(struct scc_priv *priv) {
692 switch (priv->type) {
694 return inb(priv->scc_data);
696 return inb_p(priv->scc_data);
698 spin_lock_irqsave(priv->register_lock, flags);
699 outb_p(0, priv->card_base + PI_DREQ_MASK);
700 rc = inb_p(priv->scc_data);
701 outb(1, priv->card_base + PI_DREQ_MASK);
702 spin_unlock_irqrestore(priv->register_lock, flags);
/*
 * net_device open (ifconfig up): acquires the shared IRQ (first
 * channel only) and the channel's DMA channel if configured, resets
 * ring-buffer indices, then programs the Z8530/Z85C30/Z85230 channel
 * for SDLC operation — character sizes, CRC/NRZ(I) encoding, the baud
 * rate generator from param.brg_tc, clock routing, and (PackeTwin
 * only) DTR/external-clock bits in the board's serial config register.
 * Finishes by latching RR0 status, enabling the DCD interrupt, and
 * starting the TX queue.
 * NOTE(review): error-return statements, some case labels, and closing
 * braces are missing from this dump.
 */
708 static int scc_open(struct net_device *dev) {
709 struct scc_priv *priv = dev->priv;
710 struct scc_info *info = priv->info;
711 int card_base = priv->card_base;
713 /* Request IRQ if not already used by other channel */
714 if (!info->irq_used) {
715 if (request_irq(dev->irq, scc_isr, 0, "dmascc", info)) {
721 /* Request DMA if required */
722 if (priv->param.dma >= 0) {
723 if (request_dma(priv->param.dma, "dmascc")) {
/* Roll back IRQ refcount if the DMA channel is unavailable */
724 if (--info->irq_used == 0) free_irq(dev->irq, info);
727 unsigned long flags = claim_dma_lock();
728 clear_dma_ff(priv->param.dma);
729 release_dma_lock(flags);
733 /* Initialize local variables */
736 priv->rx_head = priv->rx_tail = priv->rx_count = 0;
738 priv->tx_head = priv->tx_tail = priv->tx_count = 0;
/* Reset only this channel (CHRA/CHRB); keep MIE + no-vector mode */
742 write_scc(priv, R9, (priv->channel ? CHRB : CHRA) | MIE | NV);
743 /* X1 clock, SDLC mode */
744 write_scc(priv, R4, SDLC | X1CLK);
746 write_scc(priv, R1, EXT_INT_ENAB | WT_FN_RDYFN);
747 /* 8 bit RX char, RX disable */
748 write_scc(priv, R3, Rx8);
749 /* 8 bit TX char, TX disable */
750 write_scc(priv, R5, Tx8);
751 /* SDLC address field */
752 write_scc(priv, R6, 0);
754 write_scc(priv, R7, FLAG);
/* Chip-specific WR7' programming (case labels missing in this dump) */
755 switch (priv->chip) {
758 write_scc(priv, R15, SHDLCE);
760 write_scc(priv, R7, AUTOEOM);
761 write_scc(priv, R15, 0);
765 write_scc(priv, R15, SHDLCE);
766 /* The following bits are set (see 2.5.2.1):
767 - Automatic EOM reset
768 - Interrupt request if RX FIFO is half full
769 This bit should be ignored in DMA mode (according to the
770 documentation), but actually isn't. The receiver doesn't work if
771 it is set. Thus, we have to clear it in DMA mode.
772 - Interrupt/DMA request if TX FIFO is completely empty
773 a) If set, the ESCC behaves as if it had no TX FIFO (Z85C30
775 b) If cleared, DMA requests may follow each other very quickly,
776 filling up the TX FIFO.
777 Advantage: TX works even in case of high bus latency.
778 Disadvantage: Edge-triggered DMA request circuitry may miss
779 a request. No more data is delivered, resulting
780 in a TX FIFO underrun.
781 Both PI2 and S5SCC/DMA seem to work fine with TXFIFOE cleared.
782 The PackeTwin doesn't. I don't know about the PI, but let's
783 assume it behaves like the PI2.
785 if (priv->param.dma >= 0) {
786 if (priv->type == TYPE_TWIN) write_scc(priv, R7, AUTOEOM | TXFIFOE);
787 else write_scc(priv, R7, AUTOEOM);
789 write_scc(priv, R7, AUTOEOM | RXFIFOH);
791 write_scc(priv, R15, 0);
794 /* Preset CRC, NRZ(I) encoding */
795 write_scc(priv, R10, CRCPS | (priv->param.nrzi ? NRZI : NRZ));
797 /* Configure baud rate generator */
798 if (priv->param.brg_tc >= 0) {
799 /* Program BR generator */
800 write_scc(priv, R12, priv->param.brg_tc & 0xFF);
801 write_scc(priv, R13, (priv->param.brg_tc>>8) & 0xFF);
802 /* BRG source = SYS CLK; enable BRG; DTR REQ function (required by
803 PackeTwin, not connected on the PI2); set DPLL source to BRG */
804 write_scc(priv, R14, SSBR | DTRREQ | BRSRC | BRENABL);
806 write_scc(priv, R14, SEARCH | DTRREQ | BRSRC | BRENABL);
808 /* Disable BR generator */
809 write_scc(priv, R14, DTRREQ | BRSRC);
812 /* Configure clocks */
813 if (priv->type == TYPE_TWIN) {
814 /* Disable external TX clock receiver */
815 outb((info->twin_serial_cfg &=
816 ~(priv->channel ? TWIN_EXTCLKB : TWIN_EXTCLKA)),
817 card_base + TWIN_SERIAL_CFG);
819 write_scc(priv, R11, priv->param.clocks);
820 if ((priv->type == TYPE_TWIN) && !(priv->param.clocks & TRxCOI)) {
821 /* Enable external TX clock receiver */
822 outb((info->twin_serial_cfg |=
823 (priv->channel ? TWIN_EXTCLKB : TWIN_EXTCLKA)),
824 card_base + TWIN_SERIAL_CFG);
827 /* Configure PackeTwin */
828 if (priv->type == TYPE_TWIN) {
829 /* Assert DTR, enable interrupts */
830 outb((info->twin_serial_cfg |= TWIN_EI |
831 (priv->channel ? TWIN_DTRB_ON : TWIN_DTRA_ON)),
832 card_base + TWIN_SERIAL_CFG);
835 /* Read current status */
836 priv->rr0 = read_scc(priv, R0);
837 /* Enable DCD interrupt */
838 write_scc(priv, R15, DCDIE);
840 netif_start_queue(dev);
/*
 * net_device stop (ifconfig down): stops the TX queue, drops DTR on
 * the PackeTwin, resets this SCC channel, and releases the DMA channel
 * and (when the other channel no longer needs it) the shared IRQ.
 * NOTE(review): the `return 0;` and closing braces are missing from
 * this dump.
 */
846 static int scc_close(struct net_device *dev) {
847 struct scc_priv *priv = dev->priv;
848 struct scc_info *info = priv->info;
849 int card_base = priv->card_base;
851 netif_stop_queue(dev);
853 if (priv->type == TYPE_TWIN) {
/* Drop this channel's DTR bit in the cached serial config */
855 outb((info->twin_serial_cfg &=
856 (priv->channel ? ~TWIN_DTRB_ON : ~TWIN_DTRA_ON)),
857 card_base + TWIN_SERIAL_CFG);
860 /* Reset channel, free DMA and IRQ */
861 write_scc(priv, R9, (priv->channel ? CHRB : CHRA) | MIE | NV);
862 if (priv->param.dma >= 0) {
863 if (priv->type == TYPE_TWIN) outb(0, card_base + TWIN_DMA_CFG);
864 free_dma(priv->param.dma);
866 if (--info->irq_used == 0) free_irq(dev->irq, info);
/*
 * Private ioctls: SIOCGSCCPARAM copies the channel parameters to user
 * space; SIOCSSCCPARAM (CAP_NET_ADMIN only, interface must be down)
 * copies new parameters in.
 * NOTE(review): the switch on `cmd`, the return statements after each
 * copy, the default case, and closing braces are missing from this
 * dump — the capability/running checks visible below belong to the
 * set-parameters path.
 */
872 static int scc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) {
873 struct scc_priv *priv = dev->priv;
877 if (copy_to_user(ifr->ifr_data, &priv->param, sizeof(struct scc_param)))
881 if (!capable(CAP_NET_ADMIN)) return -EPERM;
882 if (netif_running(dev)) return -EAGAIN;
883 if (copy_from_user(&priv->param, ifr->ifr_data, sizeof(struct scc_param)))
/*
 * hard_start_xmit: copies the AX.25 frame (skipping the first KISS
 * byte, hence data+1 / len-1) into the next free TX ring buffer,
 * advances the ring head under ring_lock, and — if the transmitter is
 * idle — asserts RTS and starts the txdelay timer to begin keyup.
 * Leaves the queue stopped when the ring is now full so upper layers
 * wait for a free buffer.
 * NOTE(review): local declarations (`i`, `flags`), the tx_count
 * increment, kfree_skb, the return statement, and closing braces are
 * missing from this dump.
 */
892 static int scc_send_packet(struct sk_buff *skb, struct net_device *dev) {
893 struct scc_priv *priv = dev->priv;
897 /* Temporarily stop the scheduler feeding us packets */
898 netif_stop_queue(dev);
900 /* Transfer data to DMA buffer */
902 memcpy(priv->tx_buf[i], skb->data+1, skb->len-1);
903 priv->tx_len[i] = skb->len-1;
905 /* Clear interrupts while we touch our circular buffers */
907 spin_lock_irqsave(&priv->ring_lock, flags);
908 /* Move the ring buffer's head */
909 priv->tx_head = (i + 1) % NUM_TX_BUF;
912 /* If we just filled up the last buffer, leave queue stopped.
913 The higher layers must wait until we have a DMA buffer
914 to accept the data. */
915 if (priv->tx_count < NUM_TX_BUF) netif_wake_queue(dev);
917 /* Set new TX state */
918 if (priv->state == IDLE) {
919 /* Assert RTS, start timer */
920 priv->state = TX_HEAD;
921 priv->tx_start = jiffies;
922 write_scc(priv, R5, TxCRC_ENAB | RTS | TxENAB | Tx8);
923 write_scc(priv, R15, 0);
924 start_timer(priv, priv->param.txdelay, 0);
927 /* Turn interrupts back on and free buffer */
928 spin_unlock_irqrestore(&priv->ring_lock, flags);
/*
 * get_stats hook: returns the per-channel statistics structure.
 * NOTE(review): the `return &priv->stats;` line and closing brace are
 * missing from this dump.
 */
935 static struct net_device_stats *scc_get_stats(struct net_device *dev) {
936 struct scc_priv *priv = dev->priv;
/*
 * set_mac_address hook: copies the new AX.25 callsign from the
 * sockaddr into dev_addr.
 * NOTE(review): the `return 0;` and closing brace are missing from
 * this dump.
 */
942 static int scc_set_mac_address(struct net_device *dev, void *sa) {
943 memcpy(dev->dev_addr, ((struct sockaddr *)sa)->sa_data, dev->addr_len);
/*
 * Shared interrupt handler for the whole card (both channels). Under
 * the per-chip register lock: on a PackeTwin it polls the board's
 * interrupt register and dispatches SCC / timer-1 / timer-2 sources
 * (clearing timers by reading TWIN_CLR_TMR*); on other boards it
 * defers entirely to z8530_isr().
 * NOTE(review): the IRQ_HANDLED return, part of the TWIN dispatch
 * (the z8530_isr call inside the TWIN_SCC_MSK branch, the TMR2 `else
 * if`), and closing braces are missing from this dump.
 */
948 static irqreturn_t scc_isr(int irq, void *dev_id, struct pt_regs * regs) {
949 struct scc_info *info = dev_id;
951 spin_lock(info->priv[0].register_lock);
952 /* At this point interrupts are enabled, and the interrupt under service
953 is already acknowledged, but masked off.
955 Interrupt processing: We loop until we know that the IRQ line is
956 low. If another positive edge occurs afterwards during the ISR,
957 another interrupt will be triggered by the interrupt controller
958 as soon as the IRQ level is enabled again (see asm/irq.h).
960 Bottom-half handlers will be processed after scc_isr(). This is
961 important, since we only have small ringbuffers and want new data
962 to be fetched/delivered immediately. */
964 if (info->priv[0].type == TYPE_TWIN) {
965 int is, card_base = info->priv[0].card_base;
/* Active-low interrupt register: invert, then test source masks */
966 while ((is = ~inb(card_base + TWIN_INT_REG)) &
968 if (is & TWIN_SCC_MSK) {
970 } else if (is & TWIN_TMR1_MSK) {
971 inb(card_base + TWIN_CLR_TMR1);
972 tm_isr(&info->priv[0]);
974 inb(card_base + TWIN_CLR_TMR2);
975 tm_isr(&info->priv[1]);
978 } else z8530_isr(info);
979 spin_unlock(info->priv[0].register_lock);
/*
 * Polls the Z8530's RR3 interrupt-pending register and dispatches
 * channel-A/B RX, TX and external/status interrupts to the matching
 * per-channel handler, acknowledging with RES_H_IUS after each pass.
 * Loops with a bounded counter `i` as a watchdog against a stuck
 * interrupt source.
 * NOTE(review): the counter initialisation, the CHARxIP branch line,
 * the final `else` for CHBEXT, and closing braces are missing from
 * this dump.
 */
984 static inline void z8530_isr(struct scc_info *info) {
987 while ((is = read_scc(&info->priv[0], R3)) && i--) {
989 rx_isr(&info->priv[0]);
990 } else if (is & CHATxIP) {
991 tx_isr(&info->priv[0]);
992 } else if (is & CHAEXT) {
993 es_isr(&info->priv[0]);
994 } else if (is & CHBRxIP) {
995 rx_isr(&info->priv[1]);
996 } else if (is & CHBTxIP) {
997 tx_isr(&info->priv[1]);
999 es_isr(&info->priv[1]);
/* Reset highest interrupt-under-service before re-reading RR3 */
1001 write_scc(&info->priv[0], R0, RES_H_IUS);
/* Watchdog tripped: RR3 still shows pending sources after i passes */
1005 printk(KERN_ERR "dmascc: stuck in ISR with RR3=0x%02x.\n", is);
1007 /* Ok, no interrupts pending from this 8530. The INT line should
/*
 * RX interrupt: in DMA mode only special conditions arrive here, so
 * check RR1 and issue an error reset. In PIO mode, drain the RX FIFO
 * byte-by-byte into the current ring buffer (discarding bytes beyond
 * BUF_SIZE) and let special_condition() handle RR1 per character.
 * NOTE(review): the `int rc;` declaration, the else/overflow-marking
 * lines, and closing braces are missing from this dump.
 */
1012 static void rx_isr(struct scc_priv *priv) {
1013 if (priv->param.dma >= 0) {
1014 /* Check special condition and perform error reset. See 2.4.7.5. */
1015 special_condition(priv, read_scc(priv, R1));
1016 write_scc(priv, R0, ERR_RES);
1018 /* Check special condition for each character. Error reset not necessary.
1019 Same algorithm for SCC and ESCC. See 2.4.7.1 and 2.4.7.4. */
1021 while (read_scc(priv, R0) & Rx_CH_AV) {
1022 rc = read_scc(priv, R1);
1023 if (priv->rx_ptr < BUF_SIZE)
1024 priv->rx_buf[priv->rx_head][priv->rx_ptr++] =
1025 read_scc_data(priv);
/* Buffer full: drain and discard the byte */
1028 read_scc_data(priv);
1030 special_condition(priv, rc);
/*
 * Handles RR1 special conditions for a received frame: receiver
 * overrun, and end-of-frame. On EOF it computes the payload length
 * (minus the 2 CRC bytes) from the DMA residue or the PIO rx_ptr,
 * classifies the frame (overrun / CRC error / good), pushes good
 * frames into the RX ring and schedules rx_bh, then re-arms the DMA
 * controller for the next frame.
 * NOTE(review): local declarations (`cb`), the overrun-flag setting,
 * the minimum-length check around the CRC branch, the rx_count
 * increment, the rx_ptr/rx_over reset, and closing braces are missing
 * from this dump.
 */
1036 static void special_condition(struct scc_priv *priv, int rc) {
1038 unsigned long flags;
1040 /* See Figure 2-15. Only overrun and EOF need to be checked. */
1043 /* Receiver overrun */
1045 if (priv->param.dma < 0) write_scc(priv, R0, ERR_RES);
1046 } else if (rc & END_FR) {
1047 /* End of frame. Get byte count */
1048 if (priv->param.dma >= 0) {
1049 flags = claim_dma_lock();
1050 cb = BUF_SIZE - get_dma_residue(priv->param.dma) - 2;
1051 release_dma_lock(flags);
1053 cb = priv->rx_ptr - 2;
1055 if (priv->rx_over) {
1056 /* We had an overrun */
1057 priv->stats.rx_errors++;
/* rx_over == 2 means the frame exceeded BUF_SIZE (length error) */
1058 if (priv->rx_over == 2) priv->stats.rx_length_errors++;
1059 else priv->stats.rx_fifo_errors++;
1061 } else if (rc & CRC_ERR) {
1062 /* Count invalid CRC only if packet length >= minimum */
1064 priv->stats.rx_errors++;
1065 priv->stats.rx_crc_errors++;
1069 if (priv->rx_count < NUM_RX_BUF - 1) {
1070 /* Put good frame in FIFO */
1071 priv->rx_len[priv->rx_head] = cb;
1072 priv->rx_head = (priv->rx_head + 1) % NUM_RX_BUF;
1074 schedule_work(&priv->rx_work);
/* RX ring full: count the drop as an "over" error */
1076 priv->stats.rx_errors++;
1077 priv->stats.rx_over_errors++;
1081 /* Get ready for new frame */
1082 if (priv->param.dma >= 0) {
1083 flags = claim_dma_lock();
1084 set_dma_addr(priv->param.dma, (int) priv->rx_buf[priv->rx_head]);
1085 set_dma_count(priv->param.dma, BUF_SIZE);
1086 release_dma_lock(flags);
/*
 * Workqueue bottom half: drains the RX ring. For each queued frame it
 * allocates an skb one byte larger than the payload (byte 0 is the
 * KISS data indicator), copies the buffer in, stamps protocol/device
 * fields, hands the skb to the stack, and advances rx_tail under
 * ring_lock (the lock is dropped around the allocation/copy/netif_rx).
 * NOTE(review): the allocation-failure branch body, data[0]
 * assignment, the netif_rx call, the rx_count decrement, and closing
 * braces are missing from this dump.
 */
1094 static void rx_bh(void *arg) {
1095 struct scc_priv *priv = arg;
1096 int i = priv->rx_tail;
1098 unsigned long flags;
1099 struct sk_buff *skb;
1100 unsigned char *data;
1102 spin_lock_irqsave(&priv->ring_lock, flags);
1103 while (priv->rx_count) {
1104 spin_unlock_irqrestore(&priv->ring_lock, flags);
1105 cb = priv->rx_len[i];
1106 /* Allocate buffer */
1107 skb = dev_alloc_skb(cb+1);
/* Allocation failed: frame is dropped */
1110 priv->stats.rx_dropped++;
1113 data = skb_put(skb, cb+1);
1115 memcpy(&data[1], priv->rx_buf[i], cb);
1116 skb->dev = priv->dev;
1117 skb->protocol = ntohs(ETH_P_AX25);
1118 skb->mac.raw = skb->data;
1120 priv->dev->last_rx = jiffies;
1121 priv->stats.rx_packets++;
1122 priv->stats.rx_bytes += cb;
1124 spin_lock_irqsave(&priv->ring_lock, flags);
1126 priv->rx_tail = i = (i + 1) % NUM_RX_BUF;
1129 spin_unlock_irqrestore(&priv->ring_lock, flags);
/*
 * TX interrupt (PIO mode): feeds the SCC TX FIFO from the current
 * ring buffer. If the buffer is exhausted it resets the pending-TX
 * interrupt; otherwise it writes characters while the FIFO has room,
 * and on the Z8530 resets the EOM latch before the first data byte
 * (no AUTOEOM on that chip).
 * NOTE(review): the tx_ptr write-back and closing braces are missing
 * from this dump.
 */
1133 static void tx_isr(struct scc_priv *priv) {
1134 int i = priv->tx_tail, p = priv->tx_ptr;
1136 /* Suspend TX interrupts if we don't want to send anything.
1138 if (p == priv->tx_len[i]) {
1139 write_scc(priv, R0, RES_Tx_P);
1143 /* Write characters */
1144 while ((read_scc(priv, R0) & Tx_BUF_EMP) && p < priv->tx_len[i]) {
1145 write_scc_data(priv, priv->tx_buf[i][p++], 0);
1148 /* Reset EOM latch of Z8530 */
1149 if (!priv->tx_ptr && p && priv->chip == Z8530)
1150 write_scc(priv, R0, RES_EOM_L);
/*
 * External/status interrupt: latches RR0, resets the interrupt, and
 * diffs against the previously stored rr0 to find which status bits
 * changed. Handles: TX underrun (end of frame in DMA/PIO TX — updates
 * statistics, advances the TX ring, and schedules either TX_PAUSE or
 * TX_TAIL via the timer), DCD transitions (keyed by current state,
 * starting dcdon/dcdoff timers), and CTS transitions on non-PackeTwin
 * boards (used there as the timer interrupt, per start_timer()).
 * NOTE(review): the underrun-bit test, several state-case labels,
 * `res == 0` success/failure split, the priv->rr0 update, and closing
 * braces are missing from this dump.
 */
1156 static void es_isr(struct scc_priv *priv) {
1157 int i, rr0, drr0, res;
1158 unsigned long flags;
1160 /* Read status, reset interrupt bit (open latches) */
1161 rr0 = read_scc(priv, R0);
1162 write_scc(priv, R0, RES_EXT_INT);
/* drr0: bits that changed since the last stored status */
1163 drr0 = priv->rr0 ^ rr0;
1166 /* Transmit underrun (2.4.9.6). We can't check the TxEOM flag, since
1167 it might have already been cleared again by AUTOEOM. */
1168 if (priv->state == TX_DATA) {
1169 /* Get remaining bytes */
1171 if (priv->param.dma >= 0) {
1172 disable_dma(priv->param.dma);
1173 flags = claim_dma_lock();
1174 res = get_dma_residue(priv->param.dma);
1175 release_dma_lock(flags);
1177 res = priv->tx_len[i] - priv->tx_ptr;
1180 /* Disable DREQ / TX interrupt */
1181 if (priv->param.dma >= 0 && priv->type == TYPE_TWIN)
1182 outb(0, priv->card_base + TWIN_DMA_CFG);
1184 write_scc(priv, R1, EXT_INT_ENAB | WT_FN_RDYFN);
1186 /* Update packet statistics */
1187 priv->stats.tx_errors++;
1188 priv->stats.tx_fifo_errors++;
1189 /* Other underrun interrupts may already be waiting */
1190 write_scc(priv, R0, RES_EXT_INT);
1191 write_scc(priv, R0, RES_EXT_INT);
1193 /* Update packet statistics */
1194 priv->stats.tx_packets++;
1195 priv->stats.tx_bytes += priv->tx_len[i];
1196 /* Remove frame from FIFO */
1197 priv->tx_tail = (i + 1) % NUM_TX_BUF;
1199 /* Inform upper layers */
1200 netif_wake_queue(priv->dev);
/* More frames queued and still within txtimeout: pause, else tail */
1203 write_scc(priv, R15, 0);
1204 if (priv->tx_count &&
1205 (jiffies - priv->tx_start) < priv->param.txtimeout) {
1206 priv->state = TX_PAUSE;
1207 start_timer(priv, priv->param.txpause, 0);
1209 priv->state = TX_TAIL;
1210 start_timer(priv, priv->param.txtail, 0);
1214 /* DCD transition */
1217 switch (priv->state) {
1220 priv->state = DCD_ON;
1221 write_scc(priv, R15, 0);
1222 start_timer(priv, priv->param.dcdon, 0);
1225 switch (priv->state) {
1228 priv->state = DCD_OFF;
1229 write_scc(priv, R15, 0);
1230 start_timer(priv, priv->param.dcdoff, 0);
1235 /* CTS transition */
1236 if ((drr0 & CTS) && (~rr0 & CTS) && priv->type != TYPE_TWIN)
/*
 * Timer interrupt: drives the half-duplex TX state machine. Visible
 * transitions: TX_HEAD/TX_PAUSE -> TX_DATA (start sending); TX_TAIL
 * -> RTS_OFF (drop TxENAB, time the RTS release); RTS_OFF/DCD checks
 * -> RX_ON or WAIT (p-persistence: collision counted when DCD is
 * asserted, otherwise wait `waittime`); WAIT -> TX_HEAD when frames
 * are queued; DCD_ON/DCD_OFF resolution re-reading RR0; and the
 * p-persistence slot retry using random()/persist*slottime.
 * NOTE(review): the case labels of the outer switch, tx_on()/rx_on()
 * calls, and closing braces are missing from this dump — state names
 * below are inferred from the assignments that ARE visible.
 */
1242 static void tm_isr(struct scc_priv *priv) {
1243 switch (priv->state) {
1247 priv->state = TX_DATA;
/* End of tail: disable TX, keep CRC mode, then time RTS release */
1250 write_scc(priv, R5, TxCRC_ENAB | Tx8);
1251 priv->state = RTS_OFF;
1252 if (priv->type != TYPE_TWIN) write_scc(priv, R15, 0);
1253 start_timer(priv, priv->param.rtsoff, 0);
1256 write_scc(priv, R15, DCDIE);
1257 priv->rr0 = read_scc(priv, R0);
1258 if (priv->rr0 & DCD) {
1259 priv->stats.collisions++;
1261 priv->state = RX_ON;
1264 start_timer(priv, priv->param.waittime, DCDIE);
1268 if (priv->tx_count) {
1269 priv->state = TX_HEAD;
1270 priv->tx_start = jiffies;
1271 write_scc(priv, R5, TxCRC_ENAB | RTS | TxENAB | Tx8);
1272 write_scc(priv, R15, 0);
1273 start_timer(priv, priv->param.txdelay, 0);
1276 if (priv->type != TYPE_TWIN) write_scc(priv, R15, DCDIE);
1281 write_scc(priv, R15, DCDIE);
1282 priv->rr0 = read_scc(priv, R0);
1283 if (priv->rr0 & DCD) {
1285 priv->state = RX_ON;
/* p-persistence backoff: random slot scaled by persist/slottime */
1289 random()/priv->param.persist*priv->param.slottime,
/*
 * Starts transmission of the frame at tx_tail. DMA mode: primes the
 * DMA controller with the buffer minus the first n bytes (n = 3 on
 * the Z85230's 4-byte FIFO, else 1), enables the TX-underrun
 * interrupt, configures the PackeTwin DREQ routing, then writes the
 * first n byte(s) by hand (fast path, under the register lock) and
 * enables DMA. PIO mode: enables TX-underrun and TX interrupts
 * instead. Z8530 additionally needs a manual EOM-latch reset (no
 * AUTOEOM).
 * NOTE(review): the `int i, n;` declaration, the WR1 write on the DMA
 * path, the PIO `else` line, and closing braces are missing from this
 * dump.
 */
1297 static inline void tx_on(struct scc_priv *priv) {
1299 unsigned long flags;
1301 if (priv->param.dma >= 0) {
1302 n = (priv->chip == Z85230) ? 3 : 1;
1303 /* Program DMA controller */
1304 flags = claim_dma_lock();
1305 set_dma_mode(priv->param.dma, DMA_MODE_WRITE);
1306 set_dma_addr(priv->param.dma, (int) priv->tx_buf[priv->tx_tail]+n);
1307 set_dma_count(priv->param.dma, priv->tx_len[priv->tx_tail]-n);
1308 release_dma_lock(flags);
1309 /* Enable TX underrun interrupt */
1310 write_scc(priv, R15, TxUIE);
1311 /* Configure DREQ */
1312 if (priv->type == TYPE_TWIN)
1313 outb((priv->param.dma == 1) ? TWIN_DMA_HDX_T1 : TWIN_DMA_HDX_T3,
1314 priv->card_base + TWIN_DMA_CFG);
1316 write_scc(priv, R1, EXT_INT_ENAB | WT_FN_RDYFN | WT_RDY_ENAB);
1317 /* Write first byte(s) */
1318 spin_lock_irqsave(priv->register_lock, flags);
1319 for (i = 0; i < n; i++)
1320 write_scc_data(priv, priv->tx_buf[priv->tx_tail][i], 1);
1321 enable_dma(priv->param.dma);
1322 spin_unlock_irqrestore(priv->register_lock, flags);
1324 write_scc(priv, R15, TxUIE);
1325 write_scc(priv, R1, EXT_INT_ENAB | WT_FN_RDYFN | TxINT_ENAB);
1328 /* Reset EOM latch if we do not have the AUTOEOM feature */
1329 if (priv->chip == Z8530) write_scc(priv, R0, RES_EOM_L);
/*
 * Enables the receiver: drains any stale bytes from the RX FIFO, then
 * in DMA mode programs the DMA controller to fill the current ring
 * buffer (with PackeTwin DREQ routing) and enables special-condition
 * interrupts only; in PIO mode resets rx_ptr/rx_over and enables
 * per-character RX interrupts. Finally issues an error reset and
 * turns on the receiver with CRC checking (WR3).
 * NOTE(review): the rx_ptr/rx_over reset lines, the WR1 continuation
 * on the PIO path, and closing braces are missing from this dump.
 */
1333 static inline void rx_on(struct scc_priv *priv) {
1334 unsigned long flags;
/* Clear RX FIFO of leftovers from a previous reception */
1337 while (read_scc(priv, R0) & Rx_CH_AV) read_scc_data(priv);
1339 if (priv->param.dma >= 0) {
1340 /* Program DMA controller */
1341 flags = claim_dma_lock();
1342 set_dma_mode(priv->param.dma, DMA_MODE_READ);
1343 set_dma_addr(priv->param.dma, (int) priv->rx_buf[priv->rx_head]);
1344 set_dma_count(priv->param.dma, BUF_SIZE);
1345 release_dma_lock(flags);
1346 enable_dma(priv->param.dma);
1347 /* Configure PackeTwin DMA */
1348 if (priv->type == TYPE_TWIN) {
1349 outb((priv->param.dma == 1) ? TWIN_DMA_HDX_R1 : TWIN_DMA_HDX_R3,
1350 priv->card_base + TWIN_DMA_CFG);
1352 /* Sp. cond. intr. only, ext int enable, RX DMA enable */
1353 write_scc(priv, R1, EXT_INT_ENAB | INT_ERR_Rx |
1354 WT_RDY_RT | WT_FN_RDYFN | WT_RDY_ENAB);
1356 /* Reset current frame */
1358 /* Intr. on all Rx characters and Sp. cond., ext int enable */
1359 write_scc(priv, R1, EXT_INT_ENAB | INT_ALL_Rx | WT_RDY_RT |
1362 write_scc(priv, R0, ERR_RES);
1363 write_scc(priv, R3, RxENABLE | Rx8 | RxCRC_ENAB);
/*
 * Disables the receiver (WR3 back to Rx8 only), clears the PackeTwin
 * DREQ routing in DMA mode, restores WR1 to external-interrupt-only
 * operation, and stops the DMA channel.
 * NOTE(review): the closing brace is missing from this dump.
 */
1367 static inline void rx_off(struct scc_priv *priv) {
1368 /* Disable receiver */
1369 write_scc(priv, R3, Rx8);
1370 /* Disable DREQ / RX interrupt */
1371 if (priv->param.dma >= 0 && priv->type == TYPE_TWIN)
1372 outb(0, priv->card_base + TWIN_DMA_CFG);
1374 write_scc(priv, R1, EXT_INT_ENAB | WT_FN_RDYFN);
1376 if (priv->param.dma >= 0) disable_dma(priv->param.dma);
/*
 * Arms this channel's 8253/8254 counter to fire after t ticks of
 * TMR_0_HZ: programs the counter mode, loads the count LSB/MSB, and
 * on non-PackeTwin boards routes the timer through the SCC CTS pin by
 * enabling CTSIE in WR15 (r15 carries additional WR15 bits to keep
 * set). PackeTwin boards get a real timer interrupt instead.
 * NOTE(review): the save_flags/cli lines pairing the visible
 * restore_flags, a `t < 1`/immediate-fire branch, and closing braces
 * are missing from this dump.
 */
1380 static void start_timer(struct scc_priv *priv, int t, int r15) {
1381 unsigned long flags;
1383 outb(priv->tmr_mode, priv->tmr_ctrl);
1389 outb(t & 0xFF, priv->tmr_cnt);
1390 outb((t >> 8) & 0xFF, priv->tmr_cnt);
1391 if (priv->type != TYPE_TWIN) {
1392 write_scc(priv, R15, r15 | CTSIE);
1395 restore_flags(flags);
/*
 * Cheap pseudo-random byte for p-persistence backoff: a linear
 * congruential generator over the module-global `rand`, returning the
 * high byte of the new state.
 * NOTE(review): the closing brace falls outside this dump.
 */
1400 static inline unsigned char random(void) {
1401 /* See "Numerical Recipes in C", second edition, p. 284 */
1402 rand = rand * 1664525L + 1013904223L;
1403 return (unsigned char) (rand >> 24);