2 A Davicom DM9102/DM9102A/DM9102A+DM9801/DM9102A+DM9802 NIC fast
3 ethernet driver for Linux.
4 Copyright (C) 1997 Sten Wang
6 This program is free software; you can redistribute it and/or
7 modify it under the terms of the GNU General Public License
8 as published by the Free Software Foundation; either version 2
9 of the License, or (at your option) any later version.
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 DAVICOM Web-Site: www.davicom.com.tw
18 Author: Sten Wang, 886-3-5798797-8517, E-mail: sten_wang@davicom.com.tw
19 Maintainer: Tobias Ringstrom <tori@unhappy.mine.nu>
21 (C)Copyright 1997-1998 DAVICOM Semiconductor,Inc. All Rights Reserved.
23 Marcelo Tosatti <marcelo@conectiva.com.br> :
24 Made it compile in 2.3 (device to net_device)
26 Alan Cox <alan@redhat.com> :
27 Cleaned up for kernel merge.
28 Removed the back compatibility support
29 Reformatted, fixing spelling etc as I went
30 Removed IRQ 0-15 assumption
32 Jeff Garzik <jgarzik@pobox.com> :
33 Updated to use new PCI driver API.
34 Resource usage cleanups.
35 Report driver version to user.
37 Tobias Ringstrom <tori@unhappy.mine.nu> :
38 Cleaned up and added SMP safety. Thanks go to Jeff Garzik,
39 Andrew Morton and Frank Davis for the SMP safety fixes.
41 Vojtech Pavlik <vojtech@suse.cz> :
42 Cleaned up pointer arithmetics.
43 Fixed a lot of 64bit issues.
44 Cleaned up printk()s a bit.
45 Fixed some obvious big endian problems.
47 Tobias Ringstrom <tori@unhappy.mine.nu> :
48 Use time_after for jiffies calculation. Added ethtool
49 support. Updated PCI resource allocation. Do not
50 forget to unmap PCI mapped skbs.
52 Alan Cox <alan@redhat.com>
53 Added new PCI identifiers provided by Clear Zhang at ALi
54 for their 1563 ethernet device.
58 Implement pci_driver::suspend() and pci_driver::resume()
59 power management methods.
61 Check on 64 bit boxes.
62 Check and fix on big endian boxes.
64 Test and make sure PCI latency is now correct for all cases.
67 #define DRV_NAME "dmfe"
68 #define DRV_VERSION "1.36.4"
69 #define DRV_RELDATE "2002-01-17"
71 #include <linux/module.h>
72 #include <linux/kernel.h>
73 #include <linux/string.h>
74 #include <linux/timer.h>
75 #include <linux/ptrace.h>
76 #include <linux/errno.h>
77 #include <linux/ioport.h>
78 #include <linux/slab.h>
79 #include <linux/interrupt.h>
80 #include <linux/pci.h>
81 #include <linux/init.h>
82 #include <linux/netdevice.h>
83 #include <linux/etherdevice.h>
84 #include <linux/ethtool.h>
85 #include <linux/skbuff.h>
86 #include <linux/delay.h>
87 #include <linux/spinlock.h>
88 #include <linux/crc32.h>
90 #include <asm/processor.h>
91 #include <asm/bitops.h>
94 #include <asm/uaccess.h>
97 /* Board/System/Debug information/definition ---------------- */
98 #define PCI_DM9132_ID 0x91321282 /* Davicom DM9132 ID */
99 #define PCI_DM9102_ID 0x91021282 /* Davicom DM9102 ID */
100 #define PCI_DM9100_ID 0x91001282 /* Davicom DM9100 ID */
101 #define PCI_DM9009_ID 0x90091282 /* Davicom DM9009 ID */
103 #define DM9102_IO_SIZE 0x80
104 #define DM9102A_IO_SIZE 0x100
105 #define TX_MAX_SEND_CNT 0x1 /* Maximum tx packet per time */
106 #define TX_DESC_CNT 0x10 /* Allocated Tx descriptors */
107 #define RX_DESC_CNT 0x20 /* Allocated Rx descriptors */
108 #define TX_FREE_DESC_CNT (TX_DESC_CNT - 2) /* Max TX packet count */
109 #define TX_WAKE_DESC_CNT (TX_DESC_CNT - 3) /* TX wakeup count */
110 #define DESC_ALL_CNT (TX_DESC_CNT + RX_DESC_CNT)
111 #define TX_BUF_ALLOC 0x600
112 #define RX_ALLOC_SIZE 0x620
113 #define DM910X_RESET 1
114 #define CR0_DEFAULT 0x00E00000 /* TX & RX burst mode */
115 #define CR6_DEFAULT 0x00080000 /* HD */
116 #define CR7_DEFAULT 0x180c1
117 #define CR15_DEFAULT 0x06 /* TxJabber RxWatchdog */
118 #define TDES0_ERR_MASK 0x4302 /* TXJT, LC, EC, FUE */
119 #define MAX_PACKET_SIZE 1514
120 #define DMFE_MAX_MULTICAST 14
121 #define RX_COPY_SIZE 100
122 #define MAX_CHECK_PACKET 0x8000
123 #define DM9801_NOISE_FLOOR 8
124 #define DM9802_NOISE_FLOOR 5
127 #define DMFE_100MHF 1
129 #define DMFE_100MFD 5
131 #define DMFE_1M_HPNA 0x10
133 #define DMFE_TXTH_72 0x400000 /* TX TH 72 byte */
134 #define DMFE_TXTH_96 0x404000 /* TX TH 96 byte */
135 #define DMFE_TXTH_128 0x0000 /* TX TH 128 byte */
136 #define DMFE_TXTH_256 0x4000 /* TX TH 256 byte */
137 #define DMFE_TXTH_512 0x8000 /* TX TH 512 byte */
138 #define DMFE_TXTH_1K 0xC000 /* TX TH 1K byte */
140 #define DMFE_TIMER_WUT (jiffies + HZ * 1)/* timer wakeup time : 1 second */
141 #define DMFE_TX_TIMEOUT ((3*HZ)/2) /* tx packet time-out time 1.5 s" */
142 #define DMFE_TX_KICK (HZ/2) /* tx packet Kick-out time 0.5 s" */
144 #define DMFE_DBUG(dbug_now, msg, value) if (dmfe_debug || (dbug_now)) printk(KERN_ERR DRV_NAME ": %s %lx\n", (msg), (long) (value))
146 #define SHOW_MEDIA_TYPE(mode) printk(KERN_ERR DRV_NAME ": Change Speed to %sMhz %s duplex\n",mode & 1 ?"100":"10", mode & 4 ? "full":"half");
149 /* CR9 definition: SROM/MII */
150 #define CR9_SROM_READ 0x4800
152 #define CR9_SRCLK 0x2
153 #define CR9_CRDOUT 0x8
154 #define SROM_DATA_0 0x0
155 #define SROM_DATA_1 0x4
156 #define PHY_DATA_1 0x20000
157 #define PHY_DATA_0 0x00000
158 #define MDCLKH 0x10000
160 #define PHY_POWER_DOWN 0x800
162 #define SROM_V41_CODE 0x14
164 #define SROM_CLK_WRITE(data, ioaddr) outl(data|CR9_SROM_READ|CR9_SRCS,ioaddr);udelay(5);outl(data|CR9_SROM_READ|CR9_SRCS|CR9_SRCLK,ioaddr);udelay(5);outl(data|CR9_SROM_READ|CR9_SRCS,ioaddr);udelay(5);
166 #define __CHK_IO_SIZE(pci_id, dev_rev) ( ((pci_id)==PCI_DM9132_ID) || ((dev_rev) >= 0x02000030) ) ? DM9102A_IO_SIZE: DM9102_IO_SIZE
167 #define CHK_IO_SIZE(pci_dev, dev_rev) __CHK_IO_SIZE(((pci_dev)->device << 16) | (pci_dev)->vendor, dev_rev)
170 #define DEVICE net_device
172 /* Structure/enum declaration ------------------------------- */
174 u32 tdes0, tdes1, tdes2, tdes3; /* Data for the card */
175 char *tx_buf_ptr; /* Data for us */
176 struct tx_desc *next_tx_desc;
177 } __attribute__(( aligned(32) ));
180 u32 rdes0, rdes1, rdes2, rdes3; /* Data for the card */
181 struct sk_buff *rx_skb_ptr; /* Data for us */
182 struct rx_desc *next_rx_desc;
183 } __attribute__(( aligned(32) ));
185 struct dmfe_board_info {
186 u32 chip_id; /* Chip vendor/Device ID */
187 u32 chip_revision; /* Chip revision */
188 struct DEVICE *next_dev; /* next device */
189 struct pci_dev *pdev; /* PCI device */
192 long ioaddr; /* I/O base address */
199 /* pointer for memory physical address */
200 dma_addr_t buf_pool_dma_ptr; /* Tx buffer pool memory */
201 dma_addr_t buf_pool_dma_start; /* Tx buffer pool align dword */
202 dma_addr_t desc_pool_dma_ptr; /* descriptor pool memory */
203 dma_addr_t first_tx_desc_dma;
204 dma_addr_t first_rx_desc_dma;
206 /* descriptor pointer */
207 unsigned char *buf_pool_ptr; /* Tx buffer pool memory */
208 unsigned char *buf_pool_start; /* Tx buffer pool align dword */
209 unsigned char *desc_pool_ptr; /* descriptor pool memory */
210 struct tx_desc *first_tx_desc;
211 struct tx_desc *tx_insert_ptr;
212 struct tx_desc *tx_remove_ptr;
213 struct rx_desc *first_rx_desc;
214 struct rx_desc *rx_insert_ptr;
215 struct rx_desc *rx_ready_ptr; /* packet come pointer */
216 unsigned long tx_packet_cnt; /* transmitted packet count */
217 unsigned long tx_queue_cnt; /* wait to send packet count */
218 unsigned long rx_avail_cnt; /* available rx descriptor count */
219 unsigned long interval_rx_cnt; /* rx packet count a callback time */
221 u16 HPNA_command; /* For HPNA register 16 */
222 u16 HPNA_timer; /* For HPNA remote device check */
224 u16 NIC_capability; /* NIC media capability */
225 u16 PHY_reg4; /* Saved Phyxcer register 4 value */
227 u8 HPNA_present; /* 0:none, 1:DM9801, 2:DM9802 */
228 u8 chip_type; /* Keep DM9102A chip type */
229 u8 media_mode; /* user specify media mode */
230 u8 op_mode; /* real work media mode */
232 u8 link_failed; /* Ever link failed */
233 u8 wait_reset; /* Hardware failed, need to reset */
234 u8 dm910x_chk_mode; /* Operating mode check */
235 u8 first_in_callback; /* Flag to record state */
236 struct timer_list timer;
238 /* System defined statistic counter */
239 struct net_device_stats stats;
241 /* Driver defined statistic counter */
242 unsigned long tx_fifo_underrun;
243 unsigned long tx_loss_carrier;
244 unsigned long tx_no_carrier;
245 unsigned long tx_late_collision;
246 unsigned long tx_excessive_collision;
247 unsigned long tx_jabber_timeout;
248 unsigned long reset_count;
249 unsigned long reset_cr8;
250 unsigned long reset_fatal;
251 unsigned long reset_TXtimeout;
254 unsigned char srom[128];
258 DCR0 = 0x00, DCR1 = 0x08, DCR2 = 0x10, DCR3 = 0x18, DCR4 = 0x20,
259 DCR5 = 0x28, DCR6 = 0x30, DCR7 = 0x38, DCR8 = 0x40, DCR9 = 0x48,
260 DCR10 = 0x50, DCR11 = 0x58, DCR12 = 0x60, DCR13 = 0x68, DCR14 = 0x70,
265 CR6_RXSC = 0x2, CR6_PBF = 0x8, CR6_PM = 0x40, CR6_PAM = 0x80,
266 CR6_FDM = 0x200, CR6_TXSC = 0x2000, CR6_STI = 0x100000,
267 CR6_SFT = 0x200000, CR6_RXA = 0x40000000, CR6_NO_PURGE = 0x20000000
270 /* Global variable declaration ----------------------------- */
271 static int __devinitdata printed_version;
272 static char version[] __devinitdata =
273 KERN_INFO DRV_NAME ": Davicom DM9xxx net driver, version "
274 DRV_VERSION " (" DRV_RELDATE ")\n";
276 static int dmfe_debug;
277 static unsigned char dmfe_media_mode = DMFE_AUTO;
278 static u32 dmfe_cr6_user_set;
280 /* For module input parameter */
283 static unsigned char mode = 8;
284 static u8 chkmode = 1;
285 static u8 HPNA_mode; /* Default: Low Power/High Speed */
286 static u8 HPNA_rx_cmd; /* Default: Disable Rx remote command */
287 static u8 HPNA_tx_cmd; /* Default: Don't issue remote command */
288 static u8 HPNA_NoiseFloor; /* Default: HPNA NoiseFloor */
289 static u8 SF_mode; /* Special Function: 1:VLAN, 2:RX Flow Control
290 4: TX pause packet */
293 /* function declaration ------------------------------------- */
294 static int dmfe_open(struct DEVICE *);
295 static int dmfe_start_xmit(struct sk_buff *, struct DEVICE *);
296 static int dmfe_stop(struct DEVICE *);
297 static struct net_device_stats * dmfe_get_stats(struct DEVICE *);
298 static void dmfe_set_filter_mode(struct DEVICE *);
299 static struct ethtool_ops netdev_ethtool_ops;
300 static u16 read_srom_word(long ,int);
301 static irqreturn_t dmfe_interrupt(int , void *, struct pt_regs *);
302 static void dmfe_descriptor_init(struct dmfe_board_info *, unsigned long);
303 static void allocate_rx_buffer(struct dmfe_board_info *);
304 static void update_cr6(u32, unsigned long);
305 static void send_filter_frame(struct DEVICE * ,int);
306 static void dm9132_id_table(struct DEVICE * ,int);
307 static u16 phy_read(unsigned long, u8, u8, u32);
308 static void phy_write(unsigned long, u8, u8, u16, u32);
309 static void phy_write_1bit(unsigned long, u32);
310 static u16 phy_read_1bit(unsigned long);
311 static u8 dmfe_sense_speed(struct dmfe_board_info *);
312 static void dmfe_process_mode(struct dmfe_board_info *);
313 static void dmfe_timer(unsigned long);
314 static void dmfe_rx_packet(struct DEVICE *, struct dmfe_board_info *);
315 static void dmfe_free_tx_pkt(struct DEVICE *, struct dmfe_board_info *);
316 static void dmfe_reuse_skb(struct dmfe_board_info *, struct sk_buff *);
317 static void dmfe_dynamic_reset(struct DEVICE *);
318 static void dmfe_free_rxbuffer(struct dmfe_board_info *);
319 static void dmfe_init_dm910x(struct DEVICE *);
320 static inline u32 cal_CRC(unsigned char *, unsigned int, u8);
321 static void dmfe_parse_srom(struct dmfe_board_info *);
322 static void dmfe_program_DM9801(struct dmfe_board_info *, int);
323 static void dmfe_program_DM9802(struct dmfe_board_info *);
324 static void dmfe_HPNA_remote_cmd_chk(struct dmfe_board_info * );
325 static void dmfe_set_phyxcer(struct dmfe_board_info *);
327 /* DM910X network board routine ---------------------------- */
330 * Search DM910X board ,allocate space and register it
/*
 * dmfe_init_one - PCI probe routine.
 * Finds a DM910X board, allocates the net_device and consistent DMA
 * pools, reads the MAC address from the SROM, and registers the
 * interface.  NOTE(review): this extract is incomplete - several
 * declaration, brace and error-path lines are missing from view.
 */
333 static int __devinit dmfe_init_one (struct pci_dev *pdev,
334 const struct pci_device_id *ent)
336 struct dmfe_board_info *db; /* board information structure */
337 struct net_device *dev;
338 u32 dev_rev, pci_pmr;
341 DMFE_DBUG(0, "dmfe_init_one()", 0);
343 if (!printed_version++)
346 /* Init network device */
347 dev = alloc_etherdev(sizeof(*db));
350 SET_MODULE_OWNER(dev);
351 SET_NETDEV_DEV(dev, &pdev->dev);
/* Chip supports only 32-bit DMA addressing. */
353 if (pci_set_dma_mask(pdev, 0xffffffff)) {
354 printk(KERN_WARNING DRV_NAME ": 32-bit PCI DMA not available.\n");
359 /* Enable Master/IO access, Disable memory access */
360 err = pci_enable_device(pdev);
364 if (!pci_resource_start(pdev, 0)) {
365 printk(KERN_ERR DRV_NAME ": I/O base is zero\n");
367 goto err_out_disable;
370 /* Read Chip revision */
371 pci_read_config_dword(pdev, PCI_REVISION_ID, &dev_rev);
/* DM9102A/DM9132 decode a larger I/O window (CHK_IO_SIZE); reject
 * a BAR that is too small for this chip/revision. */
373 if (pci_resource_len(pdev, 0) < (CHK_IO_SIZE(pdev, dev_rev)) ) {
374 printk(KERN_ERR DRV_NAME ": Allocated I/O size too small\n");
376 goto err_out_disable;
379 #if 0	/* pci_{enable_device,set_master} sets minimum latency for us now */
381 /* Set Latency Timer 80h */
382 /* FIXME: setting values > 32 breaks some SiS 559x stuff.
383 Need a PCI quirk.. */
385 pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x80);
388 if (pci_request_regions(pdev, DRV_NAME)) {
389 printk(KERN_ERR DRV_NAME ": Failed to request PCI regions\n");
391 goto err_out_disable;
394 /* Init system & device */
395 db = netdev_priv(dev);
397 /* Allocate Tx/Rx descriptor memory */
/* +0x20 slack leaves room to align descriptors to a 32-byte boundary;
 * the extra 4 bytes on the buffer pool allow dword alignment. */
398 db->desc_pool_ptr = pci_alloc_consistent(pdev, sizeof(struct tx_desc) * DESC_ALL_CNT + 0x20, &db->desc_pool_dma_ptr);
399 db->buf_pool_ptr = pci_alloc_consistent(pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4, &db->buf_pool_dma_ptr);
401 db->first_tx_desc = (struct tx_desc *) db->desc_pool_ptr;
402 db->first_tx_desc_dma = db->desc_pool_dma_ptr;
403 db->buf_pool_start = db->buf_pool_ptr;
404 db->buf_pool_dma_start = db->buf_pool_dma_ptr;
406 db->chip_id = ent->driver_data;
407 db->ioaddr = pci_resource_start(pdev, 0);
408 db->chip_revision = dev_rev;
412 dev->base_addr = db->ioaddr;
413 dev->irq = pdev->irq;
414 pci_set_drvdata(pdev, dev);
/* Hook up the old-style net_device operations (pre-netdev_ops API). */
415 dev->open = &dmfe_open;
416 dev->hard_start_xmit = &dmfe_start_xmit;
417 dev->stop = &dmfe_stop;
418 dev->get_stats = &dmfe_get_stats;
419 dev->set_multicast_list = &dmfe_set_filter_mode;
420 dev->ethtool_ops = &netdev_ethtool_ops;
421 spin_lock_init(&db->lock);
/* Config reg 0x50 == 0x10000 together with revision 0x02000031
 * identifies the DM9102A E3 stepping (chip_type used later for a
 * PHY workaround in the timer routine). */
423 pci_read_config_dword(pdev, 0x50, &pci_pmr);
425 if ( (pci_pmr == 0x10000) && (dev_rev == 0x02000031) )
426 db->chip_type = 1; /* DM9102A E3 */
430 /* read 64 word srom data */
431 for (i = 0; i < 64; i++)
432 ((u16 *) db->srom)[i] = cpu_to_le16(read_srom_word(db->ioaddr, i));
434 /* Set Node address */
/* The station MAC address lives at byte offset 20 of the SROM image. */
435 for (i = 0; i < 6; i++)
436 dev->dev_addr[i] = db->srom[20 + i];
438 err = register_netdev (dev);
442 printk(KERN_INFO "%s: Davicom DM%04lx at pci%s,",
444 ent->driver_data >> 16,
446 for (i = 0; i < 6; i++)
447 printk("%c%02x", i ? ':' : ' ', dev->dev_addr[i]);
448 printk(", irq %d.\n", dev->irq);
450 pci_set_master(pdev);
/* Error unwind labels follow (labels themselves missing from this
 * extract): release regions, disable device, clear drvdata. */
455 pci_release_regions(pdev);
457 pci_disable_device(pdev);
459 pci_set_drvdata(pdev, NULL);
/*
 * dmfe_remove_one - PCI remove routine.
 * Frees the consistent DMA pools allocated at probe time, unregisters
 * and frees the net_device.  NOTE(review): the surrounding NULL-check
 * and brace lines are missing from this extract.
 */
466 static void __devexit dmfe_remove_one (struct pci_dev *pdev)
468 struct net_device *dev = pci_get_drvdata(pdev);
469 struct dmfe_board_info *db = netdev_priv(dev);
471 DMFE_DBUG(0, "dmfe_remove_one()", 0);
/* Sizes must match the pci_alloc_consistent() calls in probe. */
474 pci_free_consistent(db->pdev, sizeof(struct tx_desc) *
475 DESC_ALL_CNT + 0x20, db->desc_pool_ptr,
476 db->desc_pool_dma_ptr);
477 pci_free_consistent(db->pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4,
478 db->buf_pool_ptr, db->buf_pool_dma_ptr);
479 unregister_netdev(dev);
480 pci_release_regions(pdev);
481 free_netdev(dev); /* free board information */
482 pci_set_drvdata(pdev, NULL);
485 DMFE_DBUG(0, "dmfe_remove_one() exit", 0);
490 * Open the interface.
491 * The interface is opened whenever "ifconfig" activates it.
/*
 * dmfe_open - bring the interface up.
 * Claims the (shared) IRQ, resets per-board software state, programs
 * the chip via dmfe_init_dm910x() and starts the link-monitor timer.
 * Returns 0 on success or the request_irq() error.  NOTE(review):
 * local declarations and the IRQ error-return lines are missing from
 * this extract.
 */
494 static int dmfe_open(struct DEVICE *dev)
497 struct dmfe_board_info *db = netdev_priv(dev);
499 DMFE_DBUG(0, "dmfe_open", 0);
/* Shared IRQ line; the net_device pointer is the handler cookie. */
501 ret = request_irq(dev->irq, &dmfe_interrupt, SA_SHIRQ, dev->name, dev);
505 /* system variable init */
506 db->cr6_data = CR6_DEFAULT | dmfe_cr6_user_set;
507 db->tx_packet_cnt = 0;
508 db->tx_queue_cnt = 0;
509 db->rx_avail_cnt = 0;
513 db->first_in_callback = 0;
514 db->NIC_capability = 0xf; /* All capability*/
515 db->PHY_reg4 = 0x1e0;
517 /* CR6 operation mode decision */
/* DM9132 and DM9102A (rev >= 0x02000030) run in normal mode with a
 * 256-byte Tx threshold; older chips use store-and-forward plus the
 * driver's RX-CRC "check mode" to work around hardware errata. */
518 if ( !chkmode || (db->chip_id == PCI_DM9132_ID) ||
519 (db->chip_revision >= 0x02000030) ) {
520 db->cr6_data |= DMFE_TXTH_256;
521 db->cr0_data = CR0_DEFAULT;
522 db->dm910x_chk_mode=4; /* Enter the normal mode */
524 db->cr6_data |= CR6_SFT; /* Store & Forward mode */
526 db->dm910x_chk_mode = 1; /* Enter the check mode */
529 /* Initialize DM910X board */
530 dmfe_init_dm910x(dev);
532 /* Active System Interface */
533 netif_wake_queue(dev);
535 /* set and active a timer process */
536 init_timer(&db->timer);
/* First callback deferred an extra 2 s to let the link settle. */
537 db->timer.expires = DMFE_TIMER_WUT + HZ * 2;
538 db->timer.data = (unsigned long)dev;
539 db->timer.function = &dmfe_timer;
540 add_timer(&db->timer);
546 /* Initialize DM910X board
548 * Initialize TX/Rx descriptor chain structure
549 * Send the set-up frame
550 * Enable Tx/Rx machine
/*
 * dmfe_init_dm910x - program the chip from scratch.
 * Resets the MAC, resets the PHY via the GPR port, selects media mode,
 * rebuilds the descriptor rings, downloads the address filter and
 * enables the Tx/Rx machines.  Called from open and dynamic reset with
 * the board lock context of the caller.  NOTE(review): some lines
 * (closing braces, phy_addr setup) are missing from this extract.
 */
553 static void dmfe_init_dm910x(struct DEVICE *dev)
555 struct dmfe_board_info *db = netdev_priv(dev);
556 unsigned long ioaddr = db->ioaddr;
558 DMFE_DBUG(0, "dmfe_init_dm910x()", 0);
560 /* Reset DM910x MAC controller */
561 outl(DM910X_RESET, ioaddr + DCR0); /* RESET MAC */
563 outl(db->cr0_data, ioaddr + DCR0);
566 /* Phy addr : DM910(A)2/DM9132/9801, phy address = 1 */
569 /* Parse SROM and media mode */
571 db->media_mode = dmfe_media_mode;
573 /* RESET Phyxcer Chip by GPR port bit 7 */
574 outl(0x180, ioaddr + DCR12); /* Let bit 7 output port */
/* DM9009 needs a much longer reset pulse than the other chips. */
575 if (db->chip_id == PCI_DM9009_ID) {
576 outl(0x80, ioaddr + DCR12); /* Issue RESET signal */
577 mdelay(300); /* Delay 300 ms */
579 outl(0x0, ioaddr + DCR12); /* Clear RESET signal */
581 /* Process Phyxcer Media Mode */
582 if ( !(db->media_mode & 0x10) ) /* Force 1M mode */
583 dmfe_set_phyxcer(db);
585 /* Media Mode Process */
586 if ( !(db->media_mode & DMFE_AUTO) )
587 db->op_mode = db->media_mode; /* Force Mode */
589 /* Initialize Transmit/Receive descriptor and CR3/4 */
590 dmfe_descriptor_init(db, ioaddr);
592 /* Init CR6 to program DM910x operation */
593 update_cr6(db->cr6_data, ioaddr);
595 /* Send setup frame */
/* DM9132 uses a hash/ID register table; others get a setup frame. */
596 if (db->chip_id == PCI_DM9132_ID)
597 dm9132_id_table(dev, dev->mc_count); /* DM9132 */
599 send_filter_frame(dev, dev->mc_count); /* DM9102/DM9102A */
601 /* Init CR7, interrupt active bit */
602 db->cr7_data = CR7_DEFAULT;
603 outl(db->cr7_data, ioaddr + DCR7);
605 /* Init CR15, Tx jabber and Rx watchdog timer */
606 outl(db->cr15_data, ioaddr + DCR15);
608 /* Enable DM910X Tx/Rx function */
609 db->cr6_data |= CR6_RXSC | CR6_TXSC | 0x40000;
610 update_cr6(db->cr6_data, ioaddr);
615 * Hardware start transmission.
616 * Send a packet to media from the upper layer.
/*
 * dmfe_start_xmit - queue one packet for transmission.
 * Copies the skb into the next pre-allocated Tx bounce buffer, hands
 * the descriptor to the chip (or queues it if the chip is busy) and
 * kicks Tx polling.  Runs under db->lock with the NIC interrupt
 * masked for the critical section.  NOTE(review): local declarations,
 * return statements and some brace/else lines are missing from this
 * extract.
 */
619 static int dmfe_start_xmit(struct sk_buff *skb, struct DEVICE *dev)
621 struct dmfe_board_info *db = netdev_priv(dev);
622 struct tx_desc *txptr;
625 DMFE_DBUG(0, "dmfe_start_xmit", 0);
627 /* Resource flag check */
628 netif_stop_queue(dev);
630 /* Too large packet check */
631 if (skb->len > MAX_PACKET_SIZE) {
632 printk(KERN_ERR DRV_NAME ": big packet = %d\n", (u16)skb->len);
637 spin_lock_irqsave(&db->lock, flags);
639 /* No Tx resource check, it never happens normally */
640 if (db->tx_queue_cnt >= TX_FREE_DESC_CNT) {
641 spin_unlock_irqrestore(&db->lock, flags);
642 printk(KERN_ERR DRV_NAME ": No Tx resource %ld\n", db->tx_queue_cnt);
646 /* Disable NIC interrupt */
647 outl(0, dev->base_addr + DCR7);
649 /* transmit this packet */
/* Packets are always copied into the driver's own DMA-safe bounce
 * buffer, so the skb itself is never DMA-mapped for Tx. */
650 txptr = db->tx_insert_ptr;
651 memcpy(txptr->tx_buf_ptr, skb->data, skb->len);
/* 0xe1000000: interrupt-on-completion, first+last segment, plus
 * the frame length in the low bits. */
652 txptr->tdes1 = cpu_to_le32(0xe1000000 | skb->len);
654 /* Point to next transmit free descriptor */
655 db->tx_insert_ptr = txptr->next_tx_desc;
657 /* Transmit Packet Process */
/* Give the descriptor to the chip immediately only when nothing is
 * queued and fewer than TX_MAX_SEND_CNT packets are in flight;
 * otherwise leave it for dmfe_free_tx_pkt() to launch. */
658 if ( (!db->tx_queue_cnt) && (db->tx_packet_cnt < TX_MAX_SEND_CNT) ) {
659 txptr->tdes0 = cpu_to_le32(0x80000000); /* Set owner bit */
660 db->tx_packet_cnt++; /* Ready to send */
661 outl(0x1, dev->base_addr + DCR1); /* Issue Tx polling */
662 dev->trans_start = jiffies; /* saved time stamp */
664 db->tx_queue_cnt++; /* queue TX packet */
665 outl(0x1, dev->base_addr + DCR1); /* Issue Tx polling */
668 /* Tx resource check */
669 if ( db->tx_queue_cnt < TX_FREE_DESC_CNT )
670 netif_wake_queue(dev);
672 /* Restore CR7 to enable interrupt */
673 spin_unlock_irqrestore(&db->lock, flags);
674 outl(db->cr7_data, dev->base_addr + DCR7);
684 * Stop the interface.
685 * The interface is stopped when it is brought down.
/*
 * dmfe_stop - bring the interface down.
 * Stops the queue and timer, resets the MAC, powers down the PHY,
 * releases the IRQ and frees all Rx buffers.  NOTE(review): the
 * return statement and some brace lines are missing from this extract.
 */
688 static int dmfe_stop(struct DEVICE *dev)
690 struct dmfe_board_info *db = netdev_priv(dev);
691 unsigned long ioaddr = dev->base_addr;
693 DMFE_DBUG(0, "dmfe_stop", 0);
696 netif_stop_queue(dev);
699 del_timer_sync(&db->timer);
701 /* Reset & stop DM910X board */
702 outl(DM910X_RESET, ioaddr + DCR0);
/* BMCR bit 15 (0x8000): software-reset/power-down the PHY. */
704 phy_write(db->ioaddr, db->phy_addr, 0, 0x8000, db->chip_id);
707 free_irq(dev->irq, dev);
709 /* free allocated rx buffer */
710 dmfe_free_rxbuffer(db);
713 /* show statistic counter */
714 printk(DRV_NAME ": FU:%lx EC:%lx LC:%lx NC:%lx LOC:%lx TXJT:%lx RESET:%lx RCR8:%lx FAL:%lx TT:%lx\n",
715 db->tx_fifo_underrun, db->tx_excessive_collision,
716 db->tx_late_collision, db->tx_no_carrier, db->tx_loss_carrier,
717 db->tx_jabber_timeout, db->reset_count, db->reset_cr8,
718 db->reset_fatal, db->reset_TXtimeout);
726 * DM9102 interrupt handler
727 * receive the packet to upper layer, free the transmitted packet
/*
 * dmfe_interrupt - shared interrupt handler.
 * Acknowledges CR5 status, then services Rx, Rx-refill and Tx-complete
 * work under db->lock with CR7 interrupts masked (edge-problem
 * workaround).  A bus error sets wait_reset for the timer to handle.
 * NOTE(review): IRQ_HANDLED/IRQ_NONE returns and some brace lines are
 * missing from this extract.
 */
730 static irqreturn_t dmfe_interrupt(int irq, void *dev_id, struct pt_regs *regs)
732 struct DEVICE *dev = dev_id;
733 struct dmfe_board_info *db = netdev_priv(dev);
734 unsigned long ioaddr = dev->base_addr;
737 DMFE_DBUG(0, "dmfe_interrupt()", 0);
740 DMFE_DBUG(1, "dmfe_interrupt() without DEVICE arg", 0);
744 spin_lock_irqsave(&db->lock, flags);
746 /* Got DM910X status */
/* Read then write-back CR5 to acknowledge the interrupt sources. */
747 db->cr5_data = inl(ioaddr + DCR5);
748 outl(db->cr5_data, ioaddr + DCR5);
/* 0xc1: not our interrupt (shared line) - bail out. */
749 if ( !(db->cr5_data & 0xc1) ) {
750 spin_unlock_irqrestore(&db->lock, flags);
754 /* Disable all interrupt in CR7 to solve the interrupt edge problem */
755 outl(0, ioaddr + DCR7);
757 /* Check system status */
758 if (db->cr5_data & 0x2000) {
759 /* system bus error happen */
760 DMFE_DBUG(1, "System bus error happen. CR5=", db->cr5_data);
762 db->wait_reset = 1; /* Need to RESET */
763 spin_unlock_irqrestore(&db->lock, flags);
767 /* Received the coming packet */
768 if ( (db->cr5_data & 0x40) && db->rx_avail_cnt )
769 dmfe_rx_packet(dev, db);
771 /* reallocate rx descriptor buffer */
772 if (db->rx_avail_cnt<RX_DESC_CNT)
773 allocate_rx_buffer(db);
775 /* Free the transmitted descriptor */
776 if ( db->cr5_data & 0x01)
777 dmfe_free_tx_pkt(dev, db);
/* Check-mode phase 2 -> 4: re-enable full Rx once probing is done. */
780 if (db->dm910x_chk_mode & 0x2) {
781 db->dm910x_chk_mode = 0x4;
782 db->cr6_data |= 0x100;
783 update_cr6(db->cr6_data, db->ioaddr);
786 /* Restore CR7 to enable interrupt mask */
787 outl(db->cr7_data, ioaddr + DCR7);
789 spin_unlock_irqrestore(&db->lock, flags);
795 * Free TX resource after TX complete
/*
 * dmfe_free_tx_pkt - reap completed Tx descriptors.
 * Walks from tx_remove_ptr while packets are outstanding, accounting
 * errors from TDES0, then launches the next queued packet (if any)
 * and wakes the queue when descriptors free up.  Caller holds
 * db->lock.  NOTE(review): the break statement, several error-bit
 * branches and brace lines are missing from this extract.
 */
798 static void dmfe_free_tx_pkt(struct DEVICE *dev, struct dmfe_board_info * db)
800 struct tx_desc *txptr;
801 unsigned long ioaddr = dev->base_addr;
804 txptr = db->tx_remove_ptr;
805 while(db->tx_packet_cnt) {
806 tdes0 = le32_to_cpu(txptr->tdes0);
807 /* printk(DRV_NAME ": tdes0=%x\n", tdes0); */
/* Owner bit still set: chip has not finished this descriptor. */
808 if (tdes0 & 0x80000000)
811 /* A packet sent completed */
813 db->stats.tx_packets++;
815 /* Transmit statistic counter */
816 if ( tdes0 != 0x7fffffff ) {
817 /* printk(DRV_NAME ": tdes0=%x\n", tdes0); */
818 db->stats.collisions += (tdes0 >> 3) & 0xf;
819 db->stats.tx_bytes += le32_to_cpu(txptr->tdes1) & 0x7ff;
820 if (tdes0 & TDES0_ERR_MASK) {
821 db->stats.tx_errors++;
823 if (tdes0 & 0x0002) { /* UnderRun */
824 db->tx_fifo_underrun++;
/* On FIFO underrun, switch permanently to store-and-forward. */
825 if ( !(db->cr6_data & CR6_SFT) ) {
826 db->cr6_data = db->cr6_data | CR6_SFT;
827 update_cr6(db->cr6_data, db->ioaddr);
831 db->tx_excessive_collision++;
833 db->tx_late_collision++;
837 db->tx_loss_carrier++;
839 db->tx_jabber_timeout++;
843 txptr = txptr->next_tx_desc;
846 /* Update TX remove pointer to next */
847 db->tx_remove_ptr = txptr;
849 /* Send the Tx packet in queue */
850 if ( (db->tx_packet_cnt < TX_MAX_SEND_CNT) && db->tx_queue_cnt ) {
851 txptr->tdes0 = cpu_to_le32(0x80000000); /* Set owner bit */
852 db->tx_packet_cnt++; /* Ready to send */
854 outl(0x1, ioaddr + DCR1); /* Issue Tx polling */
855 dev->trans_start = jiffies; /* saved time stamp */
858 /* Resource available check */
859 if ( db->tx_queue_cnt < TX_WAKE_DESC_CNT )
860 netif_wake_queue(dev); /* Active upper layer, send again */
865 * Receive the incoming packet and pass it to the upper layer
/*
 * dmfe_rx_packet - drain received frames from the Rx ring.
 * For each completed descriptor: unmap the buffer, validate RDES0,
 * optionally verify the frame CRC in software (check mode), copy
 * short frames into a fresh skb, and pass good frames up the stack.
 * Bad or partial frames recycle their skb via dmfe_reuse_skb().
 * Caller holds db->lock.  NOTE(review): break/else lines, local
 * declarations and skb_put/netif_rx lines are missing from this
 * extract.
 */
868 static void dmfe_rx_packet(struct DEVICE *dev, struct dmfe_board_info * db)
870 struct rx_desc *rxptr;
875 rxptr = db->rx_ready_ptr;
877 while(db->rx_avail_cnt) {
878 rdes0 = le32_to_cpu(rxptr->rdes0);
879 if (rdes0 & 0x80000000) /* packet owner check */
883 db->interval_rx_cnt++;
885 pci_unmap_single(db->pdev, le32_to_cpu(rxptr->rdes2), RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE);
/* RDES0 bits 8+9 must both be set (first AND last descriptor of
 * the frame); otherwise the frame spans descriptors - drop it. */
886 if ( (rdes0 & 0x300) != 0x300) {
887 /* A packet without First/Last flag */
889 DMFE_DBUG(0, "Reuse SK buffer, rdes0", rdes0);
890 dmfe_reuse_skb(db, rxptr->rx_skb_ptr);
892 /* A packet with First/Last flag */
/* Frame length from RDES0 bits 16-29, minus the 4-byte CRC. */
893 rxlen = ( (rdes0 >> 16) & 0x3fff) - 4;
895 /* error summary bit check */
896 if (rdes0 & 0x8000) {
897 /* This is a error packet */
898 //printk(DRV_NAME ": rdes0: %lx\n", rdes0);
899 db->stats.rx_errors++;
901 db->stats.rx_fifo_errors++;
903 db->stats.rx_crc_errors++;
905 db->stats.rx_length_errors++;
/* Accept the frame if no error, or in promiscuous mode when it is
 * at least a plausible length (>6 bytes). */
908 if ( !(rdes0 & 0x8000) ||
909 ((db->cr6_data & CR6_PM) && (rxlen>6)) ) {
910 skb = rxptr->rx_skb_ptr;
912 /* Received Packet CRC check need or not */
913 if ( (db->dm910x_chk_mode & 1) &&
914 (cal_CRC(skb->tail, rxlen, 1) !=
915 (*(u32 *) (skb->tail+rxlen) ))) { /* FIXME (?) */
916 /* Found a error received packet */
917 dmfe_reuse_skb(db, rxptr->rx_skb_ptr);
918 db->dm910x_chk_mode = 3;
920 /* Good packet, send to upper layer */
921 /* Short packet uses a new SKB */
922 if ( (rxlen < RX_COPY_SIZE) &&
923 ( (skb = dev_alloc_skb(rxlen + 2) )
925 /* size less than COPY_SIZE, allocate a rxlen SKB */
927 skb_reserve(skb, 2); /* 16byte align */
928 memcpy(skb_put(skb, rxlen), rxptr->rx_skb_ptr->tail, rxlen);
929 dmfe_reuse_skb(db, rxptr->rx_skb_ptr);
934 skb->protocol = eth_type_trans(skb, dev);
936 dev->last_rx = jiffies;
937 db->stats.rx_packets++;
938 db->stats.rx_bytes += rxlen;
941 /* Reuse SKB buffer when the packet is error */
942 DMFE_DBUG(0, "Reuse SK buffer, rdes0", rdes0);
943 dmfe_reuse_skb(db, rxptr->rx_skb_ptr);
947 rxptr = rxptr->next_rx_desc;
950 db->rx_ready_ptr = rxptr;
955 * Get statistics from driver.
/* Return the statistics block kept in board-private data.
 * NOTE(review): the return statement is not visible in this extract. */
958 static struct net_device_stats * dmfe_get_stats(struct DEVICE *dev)
960 struct dmfe_board_info *db = netdev_priv(dev);
962 DMFE_DBUG(0, "dmfe_get_stats", 0);
968 * Set DM910X multicast address
/*
 * dmfe_set_filter_mode - program the Rx address filter.
 * Promiscuous mode sets CR6 PM+PBF; all-multi (or too many multicast
 * addresses) sets pass-all-multicast; otherwise the per-chip filter
 * table / setup frame is downloaded.  NOTE(review): return statements
 * and some brace lines are missing from this extract.
 */
971 static void dmfe_set_filter_mode(struct DEVICE * dev)
973 struct dmfe_board_info *db = netdev_priv(dev);
976 DMFE_DBUG(0, "dmfe_set_filter_mode()", 0);
977 spin_lock_irqsave(&db->lock, flags);
979 if (dev->flags & IFF_PROMISC) {
980 DMFE_DBUG(0, "Enable PROM Mode", 0);
981 db->cr6_data |= CR6_PM | CR6_PBF;
982 update_cr6(db->cr6_data, db->ioaddr);
983 spin_unlock_irqrestore(&db->lock, flags);
/* Hardware filter only holds DMFE_MAX_MULTICAST (14) addresses. */
987 if (dev->flags & IFF_ALLMULTI || dev->mc_count > DMFE_MAX_MULTICAST) {
988 DMFE_DBUG(0, "Pass all multicast address", dev->mc_count);
989 db->cr6_data &= ~(CR6_PM | CR6_PBF);
990 db->cr6_data |= CR6_PAM;
991 spin_unlock_irqrestore(&db->lock, flags);
995 DMFE_DBUG(0, "Set multicast address", dev->mc_count);
996 if (db->chip_id == PCI_DM9132_ID)
997 dm9132_id_table(dev, dev->mc_count); /* DM9132 */
999 send_filter_frame(dev, dev->mc_count); /* DM9102/DM9102A */
1000 spin_unlock_irqrestore(&db->lock, flags);
/*
 * netdev_get_drvinfo - ethtool GDRVINFO: report driver name, version
 * and bus location.  NOTE(review): the two bus_info lines appear to be
 * alternative branches of a CONFIG_PCI conditional whose preprocessor
 * lines are missing from this extract - confirm against full source.
 */
1003 static void netdev_get_drvinfo(struct net_device *dev,
1004 struct ethtool_drvinfo *info)
1006 struct dmfe_board_info *np = netdev_priv(dev);
1008 strcpy(info->driver, DRV_NAME);
1009 strcpy(info->version, DRV_VERSION);
1011 strcpy(info->bus_info, pci_name(np->pdev));
1013 sprintf(info->bus_info, "EISA 0x%lx %d",
1014 dev->base_addr, dev->irq);
/* ethtool hooks: only get_drvinfo is implemented by this driver. */
1017 static struct ethtool_ops netdev_ethtool_ops = {
1018 .get_drvinfo = netdev_get_drvinfo,
1022 * A periodic timer routine
1023 * Dynamic media sense, allocate Rx buffer...
/*
 * dmfe_timer - 1-second link/housekeeping callback.
 * Performs the first-callback PHY restart quirk (DM9102A E3), monitors
 * Rx activity vs CR8, kicks stalled Tx polling, triggers dynamic reset
 * on Tx timeout or wait_reset, tracks link state changes and re-runs
 * autonegotiation/speed sensing, then re-arms itself.  NOTE(review):
 * many lines (local declarations, else branches, returns, braces) are
 * missing from this extract.
 */
1026 static void dmfe_timer(unsigned long data)
1029 unsigned char tmp_cr12;
1030 struct DEVICE *dev = (struct DEVICE *) data;
1031 struct dmfe_board_info *db = netdev_priv(dev);
1032 unsigned long flags;
1034 DMFE_DBUG(0, "dmfe_timer()", 0);
1035 spin_lock_irqsave(&db->lock, flags);
1037 /* Media mode process when Link OK before enter this route */
/* First pass only: DM9102A E3 needs an autoneg restart (BMCR bit 12,
 * 0x1000) with the MII port briefly deselected in CR6. */
1038 if (db->first_in_callback == 0) {
1039 db->first_in_callback = 1;
1040 if (db->chip_type && (db->chip_id==PCI_DM9102_ID)) {
1041 db->cr6_data &= ~0x40000;
1042 update_cr6(db->cr6_data, db->ioaddr);
1043 phy_write(db->ioaddr, db->phy_addr, 0, 0x1000, db->chip_id);
1044 db->cr6_data |= 0x40000;
1045 update_cr6(db->cr6_data, db->ioaddr);
1046 db->timer.expires = DMFE_TIMER_WUT + HZ * 2;
1047 add_timer(&db->timer);
1048 spin_unlock_irqrestore(&db->lock, flags);
1054 /* Operating Mode Check */
/* After enough traffic, stop the software CRC check mode. */
1055 if ( (db->dm910x_chk_mode & 0x1) &&
1056 (db->stats.rx_packets > MAX_CHECK_PACKET) )
1057 db->dm910x_chk_mode = 0x4;
1059 /* Dynamic reset DM910X : system error or transmit time-out */
/* CR8 missed-frame counter nonzero with zero Rx in the interval
 * suggests a wedged receiver (handling lines missing from view). */
1060 tmp_cr8 = inl(db->ioaddr + DCR8);
1061 if ( (db->interval_rx_cnt==0) && (tmp_cr8) ) {
1065 db->interval_rx_cnt = 0;
1067 /* TX polling kick monitor */
1068 if ( db->tx_packet_cnt &&
1069 time_after(jiffies, dev->trans_start + DMFE_TX_KICK) ) {
1070 outl(0x1, dev->base_addr + DCR1); /* Tx polling again */
1073 if ( time_after(jiffies, dev->trans_start + DMFE_TX_TIMEOUT) ) {
1074 db->reset_TXtimeout++;
1076 printk(KERN_WARNING "%s: Tx timeout - resetting\n",
1081 if (db->wait_reset) {
1082 DMFE_DBUG(0, "Dynamic Reset device", db->tx_packet_cnt);
1084 dmfe_dynamic_reset(dev);
1085 db->first_in_callback = 0;
1086 db->timer.expires = DMFE_TIMER_WUT;
1087 add_timer(&db->timer);
1088 spin_unlock_irqrestore(&db->lock, flags);
1092 /* Link status check, Dynamic media type change */
1093 if (db->chip_id == PCI_DM9132_ID)
1094 tmp_cr12 = inb(db->ioaddr + DCR9 + 3); /* DM9132 */
1096 tmp_cr12 = inb(db->ioaddr + DCR12); /* DM9102/DM9102A */
/* Buggy early revisions report link via the PHY instead of CR12
 * (PHY-read lines missing from this extract). */
1098 if ( ((db->chip_id == PCI_DM9102_ID) &&
1099 (db->chip_revision == 0x02000030)) ||
1100 ((db->chip_id == PCI_DM9132_ID) &&
1101 (db->chip_revision == 0x02000010)) ) {
1104 tmp_cr12 = 0x0; /* Link failed */
1106 tmp_cr12 = 0x3; /* Link OK */
1109 if ( !(tmp_cr12 & 0x3) && !db->link_failed ) {
1111 DMFE_DBUG(0, "Link Failed", tmp_cr12);
1112 db->link_failed = 1;
1114 /* For Force 10/100M Half/Full mode: Enable Auto-Nego mode */
1115 /* AUTO or force 1M Homerun/Longrun don't need */
1116 if ( !(db->media_mode & 0x38) )
1117 phy_write(db->ioaddr, db->phy_addr, 0, 0x1000, db->chip_id);
1119 /* AUTO mode, if INT phyxcer link failed, select EXT device */
1120 if (db->media_mode & DMFE_AUTO) {
1121 /* 10/100M link failed, used 1M Home-Net */
1122 db->cr6_data|=0x00040000; /* bit18=1, MII */
1123 db->cr6_data&=~0x00000200; /* bit9=0, HD mode */
1124 update_cr6(db->cr6_data, db->ioaddr);
1127 if ((tmp_cr12 & 0x3) && db->link_failed) {
1128 DMFE_DBUG(0, "Link link OK", tmp_cr12);
1129 db->link_failed = 0;
1131 /* Auto Sense Speed */
1132 if ( (db->media_mode & DMFE_AUTO) &&
1133 dmfe_sense_speed(db) )
1134 db->link_failed = 1;
1135 dmfe_process_mode(db);
1136 /* SHOW_MEDIA_TYPE(db->op_mode); */
1139 /* HPNA remote command check */
1140 if (db->HPNA_command & 0xf00) {
1142 if (!db->HPNA_timer)
1143 dmfe_HPNA_remote_cmd_chk(db);
1146 /* Timer active again */
1147 db->timer.expires = DMFE_TIMER_WUT;
1148 add_timer(&db->timer);
1149 spin_unlock_irqrestore(&db->lock, flags);
1154 * Dynamic reset the DM910X board
1156 * Free Tx/Rx allocated memory
1157 * Reset DM910X board
1158 * Re-initilize DM910X board
1161 static void dmfe_dynamic_reset(struct DEVICE *dev)
1163 struct dmfe_board_info *db = netdev_priv(dev);
1165 DMFE_DBUG(0, "dmfe_dynamic_reset()", 0);
1167 /* Sopt MAC controller */
1168 db->cr6_data &= ~(CR6_RXSC | CR6_TXSC); /* Disable Tx/Rx */
1169 update_cr6(db->cr6_data, dev->base_addr);
1170 outl(0, dev->base_addr + DCR7); /* Disable Interrupt */
1171 outl(inl(dev->base_addr + DCR5), dev->base_addr + DCR5);
1173 /* Disable upper layer interface */
1174 netif_stop_queue(dev);
1176 /* Free Rx Allocate buffer */
1177 dmfe_free_rxbuffer(db);
1179 /* system variable init */
1180 db->tx_packet_cnt = 0;
1181 db->tx_queue_cnt = 0;
1182 db->rx_avail_cnt = 0;
1183 db->link_failed = 1;
1186 /* Re-initilize DM910X board */
1187 dmfe_init_dm910x(dev);
1189 /* Restart upper layer interface */
1190 netif_wake_queue(dev);
1195 * free all allocated rx buffer
1198 static void dmfe_free_rxbuffer(struct dmfe_board_info * db)
1200 DMFE_DBUG(0, "dmfe_free_rxbuffer()", 0);
1202 /* free allocated rx buffer */
1203 while (db->rx_avail_cnt) {
1204 dev_kfree_skb(db->rx_ready_ptr->rx_skb_ptr);
1205 db->rx_ready_ptr = db->rx_ready_ptr->next_rx_desc;
1212 * Reuse the SK buffer
1215 static void dmfe_reuse_skb(struct dmfe_board_info *db, struct sk_buff * skb)
1217 struct rx_desc *rxptr = db->rx_insert_ptr;
1219 if (!(rxptr->rdes0 & cpu_to_le32(0x80000000))) {
1220 rxptr->rx_skb_ptr = skb;
1221 rxptr->rdes2 = cpu_to_le32( pci_map_single(db->pdev, skb->tail, RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE) );
1223 rxptr->rdes0 = cpu_to_le32(0x80000000);
1225 db->rx_insert_ptr = rxptr->next_rx_desc;
1227 DMFE_DBUG(0, "SK Buffer reuse method error", db->rx_avail_cnt);
1232 * Initialize transmit/Receive descriptor
1233 * Using Chain structure, and allocate Tx/Rx buffer
1236 static void dmfe_descriptor_init(struct dmfe_board_info *db, unsigned long ioaddr)
1238 struct tx_desc *tmp_tx;
1239 struct rx_desc *tmp_rx;
1240 unsigned char *tmp_buf;
1241 dma_addr_t tmp_tx_dma, tmp_rx_dma;
1242 dma_addr_t tmp_buf_dma;
1245 DMFE_DBUG(0, "dmfe_descriptor_init()", 0);
1247 /* tx descriptor start pointer */
1248 db->tx_insert_ptr = db->first_tx_desc;
1249 db->tx_remove_ptr = db->first_tx_desc;
1250 outl(db->first_tx_desc_dma, ioaddr + DCR4); /* TX DESC address */
1252 /* rx descriptor start pointer */
1253 db->first_rx_desc = (void *)db->first_tx_desc + sizeof(struct tx_desc) * TX_DESC_CNT;
1254 db->first_rx_desc_dma = db->first_tx_desc_dma + sizeof(struct tx_desc) * TX_DESC_CNT;
1255 db->rx_insert_ptr = db->first_rx_desc;
1256 db->rx_ready_ptr = db->first_rx_desc;
1257 outl(db->first_rx_desc_dma, ioaddr + DCR3); /* RX DESC address */
1259 /* Init Transmit chain */
1260 tmp_buf = db->buf_pool_start;
1261 tmp_buf_dma = db->buf_pool_dma_start;
1262 tmp_tx_dma = db->first_tx_desc_dma;
1263 for (tmp_tx = db->first_tx_desc, i = 0; i < TX_DESC_CNT; i++, tmp_tx++) {
1264 tmp_tx->tx_buf_ptr = tmp_buf;
1265 tmp_tx->tdes0 = cpu_to_le32(0);
1266 tmp_tx->tdes1 = cpu_to_le32(0x81000000); /* IC, chain */
1267 tmp_tx->tdes2 = cpu_to_le32(tmp_buf_dma);
1268 tmp_tx_dma += sizeof(struct tx_desc);
1269 tmp_tx->tdes3 = cpu_to_le32(tmp_tx_dma);
1270 tmp_tx->next_tx_desc = tmp_tx + 1;
1271 tmp_buf = tmp_buf + TX_BUF_ALLOC;
1272 tmp_buf_dma = tmp_buf_dma + TX_BUF_ALLOC;
1274 (--tmp_tx)->tdes3 = cpu_to_le32(db->first_tx_desc_dma);
1275 tmp_tx->next_tx_desc = db->first_tx_desc;
1277 /* Init Receive descriptor chain */
1278 tmp_rx_dma=db->first_rx_desc_dma;
1279 for (tmp_rx = db->first_rx_desc, i = 0; i < RX_DESC_CNT; i++, tmp_rx++) {
1280 tmp_rx->rdes0 = cpu_to_le32(0);
1281 tmp_rx->rdes1 = cpu_to_le32(0x01000600);
1282 tmp_rx_dma += sizeof(struct rx_desc);
1283 tmp_rx->rdes3 = cpu_to_le32(tmp_rx_dma);
1284 tmp_rx->next_rx_desc = tmp_rx + 1;
1286 (--tmp_rx)->rdes3 = cpu_to_le32(db->first_rx_desc_dma);
1287 tmp_rx->next_rx_desc = db->first_rx_desc;
1289 /* pre-allocate Rx buffer */
1290 allocate_rx_buffer(db);
1296 * Firstly stop DM910X , then written value and start
1299 static void update_cr6(u32 cr6_data, unsigned long ioaddr)
1303 cr6_tmp = cr6_data & ~0x2002; /* stop Tx/Rx */
1304 outl(cr6_tmp, ioaddr + DCR6);
1306 outl(cr6_data, ioaddr + DCR6);
1312 * Send a setup frame for DM9132
1313 * This setup frame initilize DM910X address filter mode
1316 static void dm9132_id_table(struct DEVICE *dev, int mc_cnt)
1318 struct dev_mc_list *mcptr;
1320 unsigned long ioaddr = dev->base_addr+0xc0; /* ID Table */
1322 u16 i, hash_table[4];
1324 DMFE_DBUG(0, "dm9132_id_table()", 0);
1327 addrptr = (u16 *) dev->dev_addr;
1328 outw(addrptr[0], ioaddr);
1330 outw(addrptr[1], ioaddr);
1332 outw(addrptr[2], ioaddr);
1335 /* Clear Hash Table */
1336 for (i = 0; i < 4; i++)
1337 hash_table[i] = 0x0;
1339 /* broadcast address */
1340 hash_table[3] = 0x8000;
1342 /* the multicast address in Hash Table : 64 bits */
1343 for (mcptr = dev->mc_list, i = 0; i < mc_cnt; i++, mcptr = mcptr->next) {
1344 hash_val = cal_CRC( (char *) mcptr->dmi_addr, 6, 0) & 0x3f;
1345 hash_table[hash_val / 16] |= (u16) 1 << (hash_val % 16);
1348 /* Write the hash table to MAC MD table */
1349 for (i = 0; i < 4; i++, ioaddr += 4)
1350 outw(hash_table[i], ioaddr);
1355 * Send a setup frame for DM9102/DM9102A
1356 * This setup frame initilize DM910X address filter mode
1359 static void send_filter_frame(struct DEVICE *dev, int mc_cnt)
1361 struct dmfe_board_info *db = netdev_priv(dev);
1362 struct dev_mc_list *mcptr;
1363 struct tx_desc *txptr;
1368 DMFE_DBUG(0, "send_filter_frame()", 0);
1370 txptr = db->tx_insert_ptr;
1371 suptr = (u32 *) txptr->tx_buf_ptr;
1374 addrptr = (u16 *) dev->dev_addr;
1375 *suptr++ = addrptr[0];
1376 *suptr++ = addrptr[1];
1377 *suptr++ = addrptr[2];
1379 /* broadcast address */
1384 /* fit the multicast address */
1385 for (mcptr = dev->mc_list, i = 0; i < mc_cnt; i++, mcptr = mcptr->next) {
1386 addrptr = (u16 *) mcptr->dmi_addr;
1387 *suptr++ = addrptr[0];
1388 *suptr++ = addrptr[1];
1389 *suptr++ = addrptr[2];
1398 /* prepare the setup frame */
1399 db->tx_insert_ptr = txptr->next_tx_desc;
1400 txptr->tdes1 = cpu_to_le32(0x890000c0);
1402 /* Resource Check and Send the setup packet */
1403 if (!db->tx_packet_cnt) {
1404 /* Resource Empty */
1405 db->tx_packet_cnt++;
1406 txptr->tdes0 = cpu_to_le32(0x80000000);
1407 update_cr6(db->cr6_data | 0x2000, dev->base_addr);
1408 outl(0x1, dev->base_addr + DCR1); /* Issue Tx polling */
1409 update_cr6(db->cr6_data, dev->base_addr);
1410 dev->trans_start = jiffies;
1412 db->tx_queue_cnt++; /* Put in TX queue */
1417 * Allocate rx buffer,
1418 * As possible as allocate maxiumn Rx buffer
1421 static void allocate_rx_buffer(struct dmfe_board_info *db)
1423 struct rx_desc *rxptr;
1424 struct sk_buff *skb;
1426 rxptr = db->rx_insert_ptr;
1428 while(db->rx_avail_cnt < RX_DESC_CNT) {
1429 if ( ( skb = dev_alloc_skb(RX_ALLOC_SIZE) ) == NULL )
1431 rxptr->rx_skb_ptr = skb; /* FIXME (?) */
1432 rxptr->rdes2 = cpu_to_le32( pci_map_single(db->pdev, skb->tail, RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE) );
1434 rxptr->rdes0 = cpu_to_le32(0x80000000);
1435 rxptr = rxptr->next_rx_desc;
1439 db->rx_insert_ptr = rxptr;
1444 * Read one word data from the serial ROM
1447 static u16 read_srom_word(long ioaddr, int offset)
1451 long cr9_ioaddr = ioaddr + DCR9;
1453 outl(CR9_SROM_READ, cr9_ioaddr);
1454 outl(CR9_SROM_READ | CR9_SRCS, cr9_ioaddr);
1456 /* Send the Read Command 110b */
1457 SROM_CLK_WRITE(SROM_DATA_1, cr9_ioaddr);
1458 SROM_CLK_WRITE(SROM_DATA_1, cr9_ioaddr);
1459 SROM_CLK_WRITE(SROM_DATA_0, cr9_ioaddr);
1461 /* Send the offset */
1462 for (i = 5; i >= 0; i--) {
1463 srom_data = (offset & (1 << i)) ? SROM_DATA_1 : SROM_DATA_0;
1464 SROM_CLK_WRITE(srom_data, cr9_ioaddr);
1467 outl(CR9_SROM_READ | CR9_SRCS, cr9_ioaddr);
1469 for (i = 16; i > 0; i--) {
1470 outl(CR9_SROM_READ | CR9_SRCS | CR9_SRCLK, cr9_ioaddr);
1472 srom_data = (srom_data << 1) | ((inl(cr9_ioaddr) & CR9_CRDOUT) ? 1 : 0);
1473 outl(CR9_SROM_READ | CR9_SRCS, cr9_ioaddr);
1477 outl(CR9_SROM_READ, cr9_ioaddr);
1483 * Auto sense the media mode
1486 static u8 dmfe_sense_speed(struct dmfe_board_info * db)
1491 /* CR6 bit18=0, select 10/100M */
1492 update_cr6( (db->cr6_data & ~0x40000), db->ioaddr);
1494 phy_mode = phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id);
1495 phy_mode = phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id);
1497 if ( (phy_mode & 0x24) == 0x24 ) {
1498 if (db->chip_id == PCI_DM9132_ID) /* DM9132 */
1499 phy_mode = phy_read(db->ioaddr, db->phy_addr, 7, db->chip_id) & 0xf000;
1500 else /* DM9102/DM9102A */
1501 phy_mode = phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id) & 0xf000;
1502 /* printk(DRV_NAME ": Phy_mode %x ",phy_mode); */
1504 case 0x1000: db->op_mode = DMFE_10MHF; break;
1505 case 0x2000: db->op_mode = DMFE_10MFD; break;
1506 case 0x4000: db->op_mode = DMFE_100MHF; break;
1507 case 0x8000: db->op_mode = DMFE_100MFD; break;
1508 default: db->op_mode = DMFE_10MHF;
1513 db->op_mode = DMFE_10MHF;
1514 DMFE_DBUG(0, "Link Failed :", phy_mode);
1523 * Set 10/100 phyxcer capability
1524 * AUTO mode : phyxcer register4 is NIC capability
1525 * Force mode: phyxcer register4 is the force media
1528 static void dmfe_set_phyxcer(struct dmfe_board_info *db)
1532 /* Select 10/100M phyxcer */
1533 db->cr6_data &= ~0x40000;
1534 update_cr6(db->cr6_data, db->ioaddr);
1536 /* DM9009 Chip: Phyxcer reg18 bit12=0 */
1537 if (db->chip_id == PCI_DM9009_ID) {
1538 phy_reg = phy_read(db->ioaddr, db->phy_addr, 18, db->chip_id) & ~0x1000;
1539 phy_write(db->ioaddr, db->phy_addr, 18, phy_reg, db->chip_id);
1542 /* Phyxcer capability setting */
1543 phy_reg = phy_read(db->ioaddr, db->phy_addr, 4, db->chip_id) & ~0x01e0;
1545 if (db->media_mode & DMFE_AUTO) {
1547 phy_reg |= db->PHY_reg4;
1550 switch(db->media_mode) {
1551 case DMFE_10MHF: phy_reg |= 0x20; break;
1552 case DMFE_10MFD: phy_reg |= 0x40; break;
1553 case DMFE_100MHF: phy_reg |= 0x80; break;
1554 case DMFE_100MFD: phy_reg |= 0x100; break;
1556 if (db->chip_id == PCI_DM9009_ID) phy_reg &= 0x61;
1559 /* Write new capability to Phyxcer Reg4 */
1560 if ( !(phy_reg & 0x01e0)) {
1561 phy_reg|=db->PHY_reg4;
1562 db->media_mode|=DMFE_AUTO;
1564 phy_write(db->ioaddr, db->phy_addr, 4, phy_reg, db->chip_id);
1566 /* Restart Auto-Negotiation */
1567 if ( db->chip_type && (db->chip_id == PCI_DM9102_ID) )
1568 phy_write(db->ioaddr, db->phy_addr, 0, 0x1800, db->chip_id);
1569 if ( !db->chip_type )
1570 phy_write(db->ioaddr, db->phy_addr, 0, 0x1200, db->chip_id);
1576 * AUTO mode : PHY controller in Auto-negotiation Mode
1577 * Force mode: PHY controller in force mode with HUB
1578 * N-way force capability with SWITCH
1581 static void dmfe_process_mode(struct dmfe_board_info *db)
1585 /* Full Duplex Mode Check */
1586 if (db->op_mode & 0x4)
1587 db->cr6_data |= CR6_FDM; /* Set Full Duplex Bit */
1589 db->cr6_data &= ~CR6_FDM; /* Clear Full Duplex Bit */
1591 /* Transciver Selection */
1592 if (db->op_mode & 0x10) /* 1M HomePNA */
1593 db->cr6_data |= 0x40000;/* External MII select */
1595 db->cr6_data &= ~0x40000;/* Internal 10/100 transciver */
1597 update_cr6(db->cr6_data, db->ioaddr);
1599 /* 10/100M phyxcer force mode need */
1600 if ( !(db->media_mode & 0x18)) {
1602 phy_reg = phy_read(db->ioaddr, db->phy_addr, 6, db->chip_id);
1603 if ( !(phy_reg & 0x1) ) {
1604 /* parter without N-Way capability */
1606 switch(db->op_mode) {
1607 case DMFE_10MHF: phy_reg = 0x0; break;
1608 case DMFE_10MFD: phy_reg = 0x100; break;
1609 case DMFE_100MHF: phy_reg = 0x2000; break;
1610 case DMFE_100MFD: phy_reg = 0x2100; break;
1612 phy_write(db->ioaddr, db->phy_addr, 0, phy_reg, db->chip_id);
1613 if ( db->chip_type && (db->chip_id == PCI_DM9102_ID) )
1615 phy_write(db->ioaddr, db->phy_addr, 0, phy_reg, db->chip_id);
1622 * Write a word to Phy register
1625 static void phy_write(unsigned long iobase, u8 phy_addr, u8 offset, u16 phy_data, u32 chip_id)
1628 unsigned long ioaddr;
1630 if (chip_id == PCI_DM9132_ID) {
1631 ioaddr = iobase + 0x80 + offset * 4;
1632 outw(phy_data, ioaddr);
1634 /* DM9102/DM9102A Chip */
1635 ioaddr = iobase + DCR9;
1637 /* Send 33 synchronization clock to Phy controller */
1638 for (i = 0; i < 35; i++)
1639 phy_write_1bit(ioaddr, PHY_DATA_1);
1641 /* Send start command(01) to Phy */
1642 phy_write_1bit(ioaddr, PHY_DATA_0);
1643 phy_write_1bit(ioaddr, PHY_DATA_1);
1645 /* Send write command(01) to Phy */
1646 phy_write_1bit(ioaddr, PHY_DATA_0);
1647 phy_write_1bit(ioaddr, PHY_DATA_1);
1649 /* Send Phy address */
1650 for (i = 0x10; i > 0; i = i >> 1)
1651 phy_write_1bit(ioaddr, phy_addr & i ? PHY_DATA_1 : PHY_DATA_0);
1653 /* Send register address */
1654 for (i = 0x10; i > 0; i = i >> 1)
1655 phy_write_1bit(ioaddr, offset & i ? PHY_DATA_1 : PHY_DATA_0);
1657 /* written trasnition */
1658 phy_write_1bit(ioaddr, PHY_DATA_1);
1659 phy_write_1bit(ioaddr, PHY_DATA_0);
1661 /* Write a word data to PHY controller */
1662 for ( i = 0x8000; i > 0; i >>= 1)
1663 phy_write_1bit(ioaddr, phy_data & i ? PHY_DATA_1 : PHY_DATA_0);
1669 * Read a word data from phy register
1672 static u16 phy_read(unsigned long iobase, u8 phy_addr, u8 offset, u32 chip_id)
1676 unsigned long ioaddr;
1678 if (chip_id == PCI_DM9132_ID) {
1680 ioaddr = iobase + 0x80 + offset * 4;
1681 phy_data = inw(ioaddr);
1683 /* DM9102/DM9102A Chip */
1684 ioaddr = iobase + DCR9;
1686 /* Send 33 synchronization clock to Phy controller */
1687 for (i = 0; i < 35; i++)
1688 phy_write_1bit(ioaddr, PHY_DATA_1);
1690 /* Send start command(01) to Phy */
1691 phy_write_1bit(ioaddr, PHY_DATA_0);
1692 phy_write_1bit(ioaddr, PHY_DATA_1);
1694 /* Send read command(10) to Phy */
1695 phy_write_1bit(ioaddr, PHY_DATA_1);
1696 phy_write_1bit(ioaddr, PHY_DATA_0);
1698 /* Send Phy address */
1699 for (i = 0x10; i > 0; i = i >> 1)
1700 phy_write_1bit(ioaddr, phy_addr & i ? PHY_DATA_1 : PHY_DATA_0);
1702 /* Send register address */
1703 for (i = 0x10; i > 0; i = i >> 1)
1704 phy_write_1bit(ioaddr, offset & i ? PHY_DATA_1 : PHY_DATA_0);
1706 /* Skip transition state */
1707 phy_read_1bit(ioaddr);
1709 /* read 16bit data */
1710 for (phy_data = 0, i = 0; i < 16; i++) {
1712 phy_data |= phy_read_1bit(ioaddr);
1721 * Write one bit data to Phy Controller
1724 static void phy_write_1bit(unsigned long ioaddr, u32 phy_data)
1726 outl(phy_data, ioaddr); /* MII Clock Low */
1728 outl(phy_data | MDCLKH, ioaddr); /* MII Clock High */
1730 outl(phy_data, ioaddr); /* MII Clock Low */
1736 * Read one bit phy data from PHY controller
1739 static u16 phy_read_1bit(unsigned long ioaddr)
1743 outl(0x50000, ioaddr);
1745 phy_data = ( inl(ioaddr) >> 19 ) & 0x1;
1746 outl(0x40000, ioaddr);
1754 * Calculate the CRC valude of the Rx packet
1755 * flag = 1 : return the reverse CRC (for the received packet CRC)
1756 * 0 : return the normal CRC (for Hash Table index)
1759 static inline u32 cal_CRC(unsigned char * Data, unsigned int Len, u8 flag)
1761 u32 crc = crc32(~0, Data, Len);
1762 if (flag) crc = ~crc;
1768 * Parser SROM and media mode
1771 static void dmfe_parse_srom(struct dmfe_board_info * db)
1773 char * srom = db->srom;
1774 int dmfe_mode, tmp_reg;
1776 DMFE_DBUG(0, "dmfe_parse_srom() ", 0);
1779 db->cr15_data = CR15_DEFAULT;
1781 /* Check SROM Version */
1782 if ( ( (int) srom[18] & 0xff) == SROM_V41_CODE) {
1784 /* Get NIC support media mode */
1785 db->NIC_capability = le16_to_cpup(srom + 34);
1787 for (tmp_reg = 1; tmp_reg < 0x10; tmp_reg <<= 1) {
1788 switch( db->NIC_capability & tmp_reg ) {
1789 case 0x1: db->PHY_reg4 |= 0x0020; break;
1790 case 0x2: db->PHY_reg4 |= 0x0040; break;
1791 case 0x4: db->PHY_reg4 |= 0x0080; break;
1792 case 0x8: db->PHY_reg4 |= 0x0100; break;
1796 /* Media Mode Force or not check */
1797 dmfe_mode = le32_to_cpup(srom + 34) & le32_to_cpup(srom + 36);
1799 case 0x4: dmfe_media_mode = DMFE_100MHF; break; /* 100MHF */
1800 case 0x2: dmfe_media_mode = DMFE_10MFD; break; /* 10MFD */
1801 case 0x8: dmfe_media_mode = DMFE_100MFD; break; /* 100MFD */
1803 case 0x200: dmfe_media_mode = DMFE_1M_HPNA; break;/* HomePNA */
1806 /* Special Function setting */
1808 if ( (SF_mode & 0x1) || (srom[43] & 0x80) )
1809 db->cr15_data |= 0x40;
1812 if ( (SF_mode & 0x2) || (srom[40] & 0x1) )
1813 db->cr15_data |= 0x400;
1815 /* TX pause packet */
1816 if ( (SF_mode & 0x4) || (srom[40] & 0xe) )
1817 db->cr15_data |= 0x9800;
1820 /* Parse HPNA parameter */
1821 db->HPNA_command = 1;
1823 /* Accept remote command or not */
1824 if (HPNA_rx_cmd == 0)
1825 db->HPNA_command |= 0x8000;
1827 /* Issue remote command & operation mode */
1828 if (HPNA_tx_cmd == 1)
1829 switch(HPNA_mode) { /* Issue Remote Command */
1830 case 0: db->HPNA_command |= 0x0904; break;
1831 case 1: db->HPNA_command |= 0x0a00; break;
1832 case 2: db->HPNA_command |= 0x0506; break;
1833 case 3: db->HPNA_command |= 0x0602; break;
1836 switch(HPNA_mode) { /* Don't Issue */
1837 case 0: db->HPNA_command |= 0x0004; break;
1838 case 1: db->HPNA_command |= 0x0000; break;
1839 case 2: db->HPNA_command |= 0x0006; break;
1840 case 3: db->HPNA_command |= 0x0002; break;
1843 /* Check DM9801 or DM9802 present or not */
1844 db->HPNA_present = 0;
1845 update_cr6(db->cr6_data|0x40000, db->ioaddr);
1846 tmp_reg = phy_read(db->ioaddr, db->phy_addr, 3, db->chip_id);
1847 if ( ( tmp_reg & 0xfff0 ) == 0xb900 ) {
1848 /* DM9801 or DM9802 present */
1850 if ( phy_read(db->ioaddr, db->phy_addr, 31, db->chip_id) == 0x4404) {
1851 /* DM9801 HomeRun */
1852 db->HPNA_present = 1;
1853 dmfe_program_DM9801(db, tmp_reg);
1855 /* DM9802 LongRun */
1856 db->HPNA_present = 2;
1857 dmfe_program_DM9802(db);
1865 * Init HomeRun DM9801
1868 static void dmfe_program_DM9801(struct dmfe_board_info * db, int HPNA_rev)
1872 if ( !HPNA_NoiseFloor ) HPNA_NoiseFloor = DM9801_NOISE_FLOOR;
1874 case 0xb900: /* DM9801 E3 */
1875 db->HPNA_command |= 0x1000;
1876 reg25 = phy_read(db->ioaddr, db->phy_addr, 24, db->chip_id);
1877 reg25 = ( (reg25 + HPNA_NoiseFloor) & 0xff) | 0xf000;
1878 reg17 = phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id);
1880 case 0xb901: /* DM9801 E4 */
1881 reg25 = phy_read(db->ioaddr, db->phy_addr, 25, db->chip_id);
1882 reg25 = (reg25 & 0xff00) + HPNA_NoiseFloor;
1883 reg17 = phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id);
1884 reg17 = (reg17 & 0xfff0) + HPNA_NoiseFloor + 3;
1886 case 0xb902: /* DM9801 E5 */
1887 case 0xb903: /* DM9801 E6 */
1889 db->HPNA_command |= 0x1000;
1890 reg25 = phy_read(db->ioaddr, db->phy_addr, 25, db->chip_id);
1891 reg25 = (reg25 & 0xff00) + HPNA_NoiseFloor - 5;
1892 reg17 = phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id);
1893 reg17 = (reg17 & 0xfff0) + HPNA_NoiseFloor;
1896 phy_write(db->ioaddr, db->phy_addr, 16, db->HPNA_command, db->chip_id);
1897 phy_write(db->ioaddr, db->phy_addr, 17, reg17, db->chip_id);
1898 phy_write(db->ioaddr, db->phy_addr, 25, reg25, db->chip_id);
1903 * Init HomeRun DM9802
1906 static void dmfe_program_DM9802(struct dmfe_board_info * db)
1910 if ( !HPNA_NoiseFloor ) HPNA_NoiseFloor = DM9802_NOISE_FLOOR;
1911 phy_write(db->ioaddr, db->phy_addr, 16, db->HPNA_command, db->chip_id);
1912 phy_reg = phy_read(db->ioaddr, db->phy_addr, 25, db->chip_id);
1913 phy_reg = ( phy_reg & 0xff00) + HPNA_NoiseFloor;
1914 phy_write(db->ioaddr, db->phy_addr, 25, phy_reg, db->chip_id);
1919 * Check remote HPNA power and speed status. If not correct,
1920 * issue command again.
1923 static void dmfe_HPNA_remote_cmd_chk(struct dmfe_board_info * db)
1927 /* Got remote device status */
1928 phy_reg = phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id) & 0x60;
1930 case 0x00: phy_reg = 0x0a00;break; /* LP/LS */
1931 case 0x20: phy_reg = 0x0900;break; /* LP/HS */
1932 case 0x40: phy_reg = 0x0600;break; /* HP/LS */
1933 case 0x60: phy_reg = 0x0500;break; /* HP/HS */
1936 /* Check remote device status match our setting ot not */
1937 if ( phy_reg != (db->HPNA_command & 0x0f00) ) {
1938 phy_write(db->ioaddr, db->phy_addr, 16, db->HPNA_command, db->chip_id);
1941 db->HPNA_timer=600; /* Match, every 10 minutes, check */
1946 static struct pci_device_id dmfe_pci_tbl[] = {
1947 { 0x1282, 0x9132, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9132_ID },
1948 { 0x1282, 0x9102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9102_ID },
1949 { 0x1282, 0x9100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9100_ID },
1950 { 0x1282, 0x9009, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9009_ID },
1951 { 0x10B9, 0x5261, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9102_ID },
1954 MODULE_DEVICE_TABLE(pci, dmfe_pci_tbl);
1957 static struct pci_driver dmfe_driver = {
1959 .id_table = dmfe_pci_tbl,
1960 .probe = dmfe_init_one,
1961 .remove = __devexit_p(dmfe_remove_one),
MODULE_AUTHOR("Sten Wang, sten_wang@davicom.com.tw");
MODULE_DESCRIPTION("Davicom DM910X fast ethernet driver");
MODULE_LICENSE("GPL");

/* Module parameters: all plain integers, settable at load time. */
MODULE_PARM(debug, "i");	/* enables DMFE_DBUG tracing (dmfe_debug) */
MODULE_PARM(mode, "i");		/* forced media mode; validated in dmfe_init_module */
MODULE_PARM(cr6set, "i");	/* user CR6 override (dmfe_cr6_user_set) */
MODULE_PARM(chkmode, "i");	/* NOTE(review): usage not visible in this chunk — confirm */
MODULE_PARM(HPNA_mode, "i");	/* HomePNA command mode, 0-4 (clamped at init) */
MODULE_PARM(HPNA_rx_cmd, "i");	/* 0/1: accept HPNA remote command */
MODULE_PARM(HPNA_tx_cmd, "i");	/* 0/1: issue HPNA remote command */
MODULE_PARM(HPNA_NoiseFloor, "i"); /* HPNA noise floor, 0-15 (clamped at init) */
MODULE_PARM(SF_mode, "i");	/* special function bits, see MODULE_PARM_DESC below */
MODULE_PARM_DESC(debug, "Davicom DM9xxx enable debugging (0-1)");
MODULE_PARM_DESC(mode, "Davicom DM9xxx: Bit 0: 10/100Mbps, bit 2: duplex, bit 8: HomePNA");
MODULE_PARM_DESC(SF_mode, "Davicom DM9xxx special function (bit 0: VLAN, bit 1 Flow Control, bit 2: TX pause packet)");
1982 * when user used insmod to add module, system invoked init_module()
1983 * to initilize and register.
1986 static int __init dmfe_init_module(void)
1991 printed_version = 1;
1993 DMFE_DBUG(0, "init_module() ", debug);
1996 dmfe_debug = debug; /* set debug flag */
1998 dmfe_cr6_user_set = cr6set;
2006 dmfe_media_mode = mode;
2008 default:dmfe_media_mode = DMFE_AUTO;
2013 HPNA_mode = 0; /* Default: LP/HS */
2014 if (HPNA_rx_cmd > 1)
2015 HPNA_rx_cmd = 0; /* Default: Ignored remote cmd */
2016 if (HPNA_tx_cmd > 1)
2017 HPNA_tx_cmd = 0; /* Default: Don't issue remote cmd */
2018 if (HPNA_NoiseFloor > 15)
2019 HPNA_NoiseFloor = 0;
2021 rc = pci_module_init(&dmfe_driver);
2031 * when user used rmmod to delete module, system invoked clean_module()
2032 * to un-register all registered services.
2035 static void __exit dmfe_cleanup_module(void)
2037 DMFE_DBUG(0, "dmfe_clean_module() ", debug);
2038 pci_unregister_driver(&dmfe_driver);
/* Module entry/exit hooks: register/unregister the PCI driver. */
module_init(dmfe_init_module);
module_exit(dmfe_cleanup_module);