 * This code is derived from the VIA reference driver (copyright message
 * below) provided to Red Hat by VIA Networking Technologies, Inc. for
 * addition to the Linux kernel.
 *
 * The code has been merged into one source file, cleaned up to follow
 * Linux coding style, ported to the Linux 2.6 kernel tree and cleaned
 * for 64bit hardware platforms.
 *
 * TODO
 *	rx_copybreak/alignment
 *
 * The changes are (c) Copyright 2004, Red Hat Inc. <alan@redhat.com>
 * Additional fixes and clean up: Francois Romieu
 *
 * This source has not been verified for use in safety critical systems.
 *
 * Please direct queries about the revamped driver to the linux-kernel
 * list not VIA.
 *
 * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
 * All rights reserved.
 *
 * This software may be redistributed and/or modified under
 * the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * for more details.
 *
 * Author: Chuang Liang-Shing, AJ Jiang
 *
 * MODULE_LICENSE("GPL");
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/config.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/version.h>
#include <linux/string.h>
#include <linux/wait.h>
#include <asm/uaccess.h>
#include <linux/proc_fs.h>
#include <linux/inetdevice.h>
#include <linux/reboot.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_arp.h>
#include <linux/tcp.h>
#include <linux/udp.h>

#include "via-velocity.h"
static int velocity_nics = 0;
static int msglevel = MSG_LEVEL_INFO;

static int velocity_mii_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
static struct ethtool_ops velocity_ethtool_ops;

MODULE_AUTHOR("VIA Networking Technologies, Inc.");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("VIA Networking Velocity Family Gigabit Ethernet Adapter Driver");
#define VELOCITY_PARAM(N,D) \
        static const int N[MAX_UNITS] = OPTION_DEFAULT;\
        MODULE_PARM(N, "1-" __MODULE_STRING(MAX_UNITS) "i");\
        MODULE_PARM_DESC(N, D);
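
/*
 * For illustration only (not part of the original source): each use of
 * the macro above expands to a per-adapter option array plus its module
 * parameter glue. For example, VELOCITY_PARAM(RxDescriptors, "Number of
 * receive descriptors") becomes roughly:
 *
 *      static const int RxDescriptors[MAX_UNITS] = OPTION_DEFAULT;
 *      MODULE_PARM(RxDescriptors, "1-" __MODULE_STRING(MAX_UNITS) "i");
 *      MODULE_PARM_DESC(RxDescriptors, "Number of receive descriptors");
 *
 * so a value can be supplied for each board at load time, e.g.
 * "modprobe via-velocity RxDescriptors=64,128" for two adapters.
 */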
#define RX_DESC_MIN     64
#define RX_DESC_MAX     255
#define RX_DESC_DEF     64
VELOCITY_PARAM(RxDescriptors, "Number of receive descriptors");

#define TX_DESC_MIN     16
#define TX_DESC_MAX     256
#define TX_DESC_DEF     64
VELOCITY_PARAM(TxDescriptors, "Number of transmit descriptors");
#define VLAN_ID_MIN     0
#define VLAN_ID_MAX     4095
#define VLAN_ID_DEF     0
/* VID_setting[] is used for setting the VID of the NIC.
*/
VELOCITY_PARAM(VID_setting, "802.1Q VLAN ID");
#define RX_THRESH_MIN   0
#define RX_THRESH_MAX   3
#define RX_THRESH_DEF   0
/* rx_thresh[] is used for controlling the receive fifo threshold.
   0: indicates the rxfifo threshold is 128 bytes.
   1: indicates the rxfifo threshold is 512 bytes.
   2: indicates the rxfifo threshold is 1024 bytes.
   3: indicates the rxfifo threshold is store & forward.
*/
VELOCITY_PARAM(rx_thresh, "Receive fifo threshold");
#define DMA_LENGTH_MIN  0
#define DMA_LENGTH_MAX  7
#define DMA_LENGTH_DEF  0

/* DMA_length[] is used for controlling the DMA length
   0: 8 DWORDs
   1: 16 DWORDs
   2: 32 DWORDs
   3: 64 DWORDs
   4: 128 DWORDs
   5: 256 DWORDs
   6: SF(flush till empty)
   7: SF(flush till empty)
*/
VELOCITY_PARAM(DMA_length, "DMA length");
#define TAGGING_DEF     0
/* enable_tagging[] is used for enabling 802.1Q VID tagging.
   0: disable VID setting (default).
   1: enable VID setting.
*/
VELOCITY_PARAM(enable_tagging, "Enable 802.1Q tagging");
#define IP_ALIG_DEF     0
/* IP_byte_align[] is used for IP header DWORD byte alignment
   0: indicates the IP header won't be DWORD byte aligned. (Default)
   1: indicates the IP header will be DWORD byte aligned.
   In some environments the IP header must be DWORD byte aligned,
   or the packet will be dropped when it is received (e.g. IPVS).
*/
VELOCITY_PARAM(IP_byte_align, "Enable IP header dword aligned");
#define TX_CSUM_DEF     1
/* txcsum_offload[] is used for setting the checksum offload ability of the NIC.
   (We only support RX checksum offload now)
   0: disable checksum offload.
   1: enable checksum offload. (Default)
*/
VELOCITY_PARAM(txcsum_offload, "Enable transmit packet checksum offload");
#define FLOW_CNTL_DEF   1
#define FLOW_CNTL_MIN   1
#define FLOW_CNTL_MAX   5

/* flow_control[] is used for setting the flow control ability of the NIC.
   1: hardware default - AUTO (default). Use the hardware default value in ANAR.
   2: enable TX flow control.
   3: enable RX flow control.
   4: enable RX/TX flow control.
   5: disable.
*/
VELOCITY_PARAM(flow_control, "Enable flow control ability");
#define MED_LNK_DEF 0
#define MED_LNK_MIN 0
#define MED_LNK_MAX 4
/* speed_duplex[] is used for setting the speed and duplex mode of the NIC.
   0: indicates autonegotiation for both speed and duplex mode
   1: indicates 100Mbps half duplex mode
   2: indicates 100Mbps full duplex mode
   3: indicates 10Mbps half duplex mode
   4: indicates 10Mbps full duplex mode

   Note:
   if the EEPROM has been set to a forced mode, this option is ignored
   by the driver.
*/
VELOCITY_PARAM(speed_duplex, "Setting the speed and duplex mode");
#define VAL_PKT_LEN_DEF   0
/* ValPktLen[] is used for controlling the handling of frames with an
   invalid layer 2 length.
   0: Receive frames with invalid layer 2 length. (Default)
   1: Drop frames with invalid layer 2 length.
*/
VELOCITY_PARAM(ValPktLen, "Receive or drop invalid 802.3 frames");
#define WOL_OPT_DEF     0
#define WOL_OPT_MIN     0
#define WOL_OPT_MAX     7
/* wol_opts[] is used for controlling wake on lan behavior.
   0: Wake up if a magic packet is received. (Default)
   1: Wake up if the link status changes.
   2: Wake up if an ARP packet is received.
   4: Wake up if any unicast packet is received.
   These values can be summed to enable more than one option,
   e.g. 3 wakes on both link change and ARP packets.
*/
VELOCITY_PARAM(wol_opts, "Wake On Lan options");
#define INT_WORKS_DEF   20
#define INT_WORKS_MIN   10
#define INT_WORKS_MAX   64

VELOCITY_PARAM(int_works, "Number of packets handled per interrupt service");
static int velocity_found1(struct pci_dev *pdev, const struct pci_device_id *ent);
static void velocity_init_info(struct pci_dev *pdev, struct velocity_info *vptr, struct velocity_info_tbl *info);
static int velocity_get_pci_info(struct velocity_info *, struct pci_dev *pdev);
static void velocity_print_info(struct velocity_info *vptr);
static int velocity_open(struct net_device *dev);
static int velocity_change_mtu(struct net_device *dev, int mtu);
static int velocity_xmit(struct sk_buff *skb, struct net_device *dev);
static int velocity_intr(int irq, void *dev_instance, struct pt_regs *regs);
static void velocity_set_multi(struct net_device *dev);
static struct net_device_stats *velocity_get_stats(struct net_device *dev);
static int velocity_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static int velocity_close(struct net_device *dev);
static int velocity_rx_srv(struct velocity_info *vptr, int status);
static int velocity_receive_frame(struct velocity_info *, int idx);
static int velocity_alloc_rx_buf(struct velocity_info *, int idx);
static void velocity_init_registers(struct velocity_info *vptr, enum velocity_init_type type);
static void velocity_free_rd_ring(struct velocity_info *vptr);
static void velocity_free_tx_buf(struct velocity_info *vptr, struct velocity_td_info *);
static int velocity_soft_reset(struct velocity_info *vptr);
static void mii_init(struct velocity_info *vptr, u32 mii_status);
static u32 velocity_get_opt_media_mode(struct velocity_info *vptr);
static void velocity_print_link_status(struct velocity_info *vptr);
static void safe_disable_mii_autopoll(struct mac_regs * regs);
static void velocity_shutdown(struct velocity_info *vptr);
static void enable_flow_control_ability(struct velocity_info *vptr);
static void enable_mii_autopoll(struct mac_regs * regs);
static int velocity_mii_read(struct mac_regs *, u8 byIdx, u16 * pdata);
static int velocity_mii_write(struct mac_regs *, u8 byMiiAddr, u16 data);
static int velocity_set_wol(struct velocity_info *vptr);
static void velocity_save_context(struct velocity_info *vptr, struct velocity_context *context);
static void velocity_restore_context(struct velocity_info *vptr, struct velocity_context *context);
static u32 mii_check_media_mode(struct mac_regs * regs);
static u32 check_connection_type(struct mac_regs * regs);
static void velocity_init_cam_filter(struct velocity_info *vptr);
static int velocity_set_media_mode(struct velocity_info *vptr, u32 mii_status);
#ifdef CONFIG_PM
static int velocity_suspend(struct pci_dev *pdev, u32 state);
static int velocity_resume(struct pci_dev *pdev);

static int velocity_netdev_event(struct notifier_block *nb, unsigned long notification, void *ptr);

static struct notifier_block velocity_inetaddr_notifier = {
        notifier_call: velocity_netdev_event,
};
#endif /* CONFIG_PM */
/*
 *	Internal board variants. At the moment we have only one.
 */

static struct velocity_info_tbl chip_info_table[] = {
        {CHIP_TYPE_VT6110, "VIA Networking Velocity Family Gigabit Ethernet Adapter", 256, 1, 0x00FFFFFFUL},
};

/*
 *	Describe the PCI device identifiers that we support in this
 *	device driver. Used for hotplug autoloading.
 */

static struct pci_device_id velocity_id_table[] __devinitdata = {
        {0x1106, 0x3119, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &chip_info_table[0]},
        {0,}
};

MODULE_DEVICE_TABLE(pci, velocity_id_table);
/**
 *	get_chip_name	-	identifier to name
 *	@id: chip identifier
 *
 *	Given a chip identifier return a suitable description. Returns
 *	a pointer to a static string valid while the driver is loaded.
 */

static char __devinit *get_chip_name(enum chip_type chip_id)
{
        int i;
        for (i = 0; chip_info_table[i].name != NULL; i++)
                if (chip_info_table[i].chip_id == chip_id)
                        break;
        return chip_info_table[i].name;
}
/**
 *	velocity_remove1	-	device unplug
 *	@pdev: PCI device being removed
 *
 *	Device unload callback. Called on an unplug or on module
 *	unload for each active device that is present. Disconnects
 *	the device from the network layer and frees all the resources.
 */

static void __devexit velocity_remove1(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct velocity_info *vptr = dev->priv;

        unregister_netdev(dev);
        iounmap(vptr->mac_regs);
        pci_release_regions(pdev);
        pci_disable_device(pdev);
        pci_set_drvdata(pdev, NULL);
        free_netdev(dev);
}
/**
 *	velocity_set_int_opt	-	parser for integer options
 *	@opt: pointer to option value
 *	@val: value the user requested (or -1 for default)
 *	@min: lowest value allowed
 *	@max: highest value allowed
 *	@def: default value
 *	@name: property name
 *	@devname: device name
 *
 *	Set an integer property in the module options. This function does
 *	all the verification and checking as well as reporting so that
 *	we don't duplicate code for each option.
 */

static void __devinit velocity_set_int_opt(int *opt, int val, int min, int max, int def, char *name, char *devname)
{
        if (val == -1)
                *opt = def;
        else if (val < min || val > max) {
                VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: the value of parameter %s is invalid, the valid range is (%d-%d)\n",
                       devname, name, min, max);
                *opt = def;
        } else {
                VELOCITY_PRT(MSG_LEVEL_INFO, KERN_INFO "%s: set value of parameter %s to %d\n",
                       devname, name, val);
                *opt = val;
        }
}
/**
 *	velocity_set_bool_opt	-	parser for boolean options
 *	@opt: pointer to option value
 *	@val: value the user requested (or -1 for default)
 *	@def: default value (yes/no)
 *	@flag: numeric value to set for true.
 *	@name: property name
 *	@devname: device name
 *
 *	Set a boolean property in the module options. This function does
 *	all the verification and checking as well as reporting so that
 *	we don't duplicate code for each option.
 */

static void __devinit velocity_set_bool_opt(u32 * opt, int val, int def, u32 flag, char *name, char *devname)
{
        (*opt) &= (~flag);
        if (val == -1)
                *opt |= (def ? flag : 0);
        else if (val < 0 || val > 1) {
                printk(KERN_NOTICE "%s: the value of parameter %s is invalid, the valid range is (0-1)\n",
                        devname, name);
                *opt |= (def ? flag : 0);
        } else {
                printk(KERN_INFO "%s: set parameter %s to %s\n",
                        devname, name, val ? "TRUE" : "FALSE");
                *opt |= (val ? flag : 0);
        }
}
/**
 *	velocity_get_options	-	set options on device
 *	@opts: option structure for the device
 *	@index: index of option to use in module options array
 *	@devname: device name
 *
 *	Turn the module and command options into a single structure
 *	for the current device.
 */

static void __devinit velocity_get_options(struct velocity_opt *opts, int index, char *devname)
{

        velocity_set_int_opt(&opts->rx_thresh, rx_thresh[index], RX_THRESH_MIN, RX_THRESH_MAX, RX_THRESH_DEF, "rx_thresh", devname);
        velocity_set_int_opt(&opts->DMA_length, DMA_length[index], DMA_LENGTH_MIN, DMA_LENGTH_MAX, DMA_LENGTH_DEF, "DMA_length", devname);
        velocity_set_int_opt(&opts->numrx, RxDescriptors[index], RX_DESC_MIN, RX_DESC_MAX, RX_DESC_DEF, "RxDescriptors", devname);
        velocity_set_int_opt(&opts->numtx, TxDescriptors[index], TX_DESC_MIN, TX_DESC_MAX, TX_DESC_DEF, "TxDescriptors", devname);
        velocity_set_int_opt(&opts->vid, VID_setting[index], VLAN_ID_MIN, VLAN_ID_MAX, VLAN_ID_DEF, "VID_setting", devname);
        velocity_set_bool_opt(&opts->flags, enable_tagging[index], TAGGING_DEF, VELOCITY_FLAGS_TAGGING, "enable_tagging", devname);
        velocity_set_bool_opt(&opts->flags, txcsum_offload[index], TX_CSUM_DEF, VELOCITY_FLAGS_TX_CSUM, "txcsum_offload", devname);
        velocity_set_int_opt(&opts->flow_cntl, flow_control[index], FLOW_CNTL_MIN, FLOW_CNTL_MAX, FLOW_CNTL_DEF, "flow_control", devname);
        velocity_set_bool_opt(&opts->flags, IP_byte_align[index], IP_ALIG_DEF, VELOCITY_FLAGS_IP_ALIGN, "IP_byte_align", devname);
        velocity_set_bool_opt(&opts->flags, ValPktLen[index], VAL_PKT_LEN_DEF, VELOCITY_FLAGS_VAL_PKT_LEN, "ValPktLen", devname);
        velocity_set_int_opt((int *) &opts->spd_dpx, speed_duplex[index], MED_LNK_MIN, MED_LNK_MAX, MED_LNK_DEF, "Media link mode", devname);
        velocity_set_int_opt((int *) &opts->wol_opts, wol_opts[index], WOL_OPT_MIN, WOL_OPT_MAX, WOL_OPT_DEF, "Wake On Lan options", devname);
        velocity_set_int_opt((int *) &opts->int_works, int_works[index], INT_WORKS_MIN, INT_WORKS_MAX, INT_WORKS_DEF, "Interrupt service works", devname);
        /* RX descriptors are handed back to the NIC four at a time, so
           force the count down to a multiple of 4 */
        opts->numrx = (opts->numrx & ~3);
}
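
/*
 * Illustrative note (not from the original source): the mask above rounds
 * the descriptor count down to the nearest multiple of four, matching the
 * four-at-a-time descriptor return in velocity_rx_srv(). For example:
 *
 *      int numrx = 70;
 *      numrx &= ~3;            // 70 (0b1000110) -> 68 (0b1000100)
 */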
/**
 *	velocity_init_cam_filter	-	initialise CAM
 *	@vptr: velocity to program
 *
 *	Initialize the content addressable memory used for filters. Load
 *	appropriately according to the presence of VLAN.
 */

static void velocity_init_cam_filter(struct velocity_info *vptr)
{
        struct mac_regs * regs = vptr->mac_regs;

        /* Turn on MCFG_PQEN, turn off MCFG_RTGOPT */
        WORD_REG_BITS_SET(MCFG_PQEN, MCFG_RTGOPT, &regs->MCFG);
        WORD_REG_BITS_ON(MCFG_VIDFR, &regs->MCFG);

        /* Disable all CAMs */
        memset(vptr->vCAMmask, 0, sizeof(u8) * 8);
        memset(vptr->mCAMmask, 0, sizeof(u8) * 8);
        mac_set_cam_mask(regs, vptr->vCAMmask, VELOCITY_VLAN_ID_CAM);
        mac_set_cam_mask(regs, vptr->mCAMmask, VELOCITY_MULTICAST_CAM);

        /* Enable the first VCAM */
        if (vptr->flags & VELOCITY_FLAGS_TAGGING) {
                /* If the tagging option is enabled and the VLAN ID is
                   not zero, then turn on MCFG_RTGOPT also */
                if (vptr->options.vid != 0)
                        WORD_REG_BITS_ON(MCFG_RTGOPT, &regs->MCFG);

                mac_set_cam(regs, 0, (u8 *) &(vptr->options.vid), VELOCITY_VLAN_ID_CAM);
                vptr->vCAMmask[0] |= 1;
                mac_set_cam_mask(regs, vptr->vCAMmask, VELOCITY_VLAN_ID_CAM);
        } else {
                u16 temp = 0;
                mac_set_cam(regs, 0, (u8 *) &temp, VELOCITY_VLAN_ID_CAM);
                temp = 1;
                mac_set_cam_mask(regs, (u8 *) &temp, VELOCITY_VLAN_ID_CAM);
        }
}
/**
 *	velocity_rx_reset	-	handle a receive reset
 *	@vptr: velocity we are resetting
 *
 *	Reset the ownership and status for the receive ring side.
 *	Hand all the receive queue to the NIC.
 */

static void velocity_rx_reset(struct velocity_info *vptr)
{
        struct mac_regs * regs = vptr->mac_regs;
        int i;

        vptr->rd_used = vptr->rd_curr = 0;

        /*
         *	Init state, all RD entries belong to the NIC
         */
        for (i = 0; i < vptr->options.numrx; ++i)
                vptr->rd_ring[i].rdesc0.owner = cpu_to_le32(OWNED_BY_NIC);

        writew(vptr->options.numrx, &regs->RBRDU);
        writel(vptr->rd_pool_dma, &regs->RDBaseLo);
        writew(0, &regs->RDIdx);
        writew(vptr->options.numrx - 1, &regs->RDCSize);
}
/**
 *	velocity_init_registers	-	initialise MAC registers
 *	@vptr: velocity to init
 *	@type: type of initialisation (hot or cold)
 *
 *	Initialise the MAC on a reset or on first set up on the
 *	hardware.
 */

static void velocity_init_registers(struct velocity_info *vptr,
                                    enum velocity_init_type type)
{
        struct mac_regs * regs = vptr->mac_regs;
        int i, mii_status;

        switch (type) {
        case VELOCITY_INIT_RESET:
        case VELOCITY_INIT_WOL:

                netif_stop_queue(vptr->dev);

                /*
                 *	Reset RX to prevent the RX pointer ending up off
                 *	its 4X boundary
                 */
                velocity_rx_reset(vptr);
                mac_rx_queue_run(regs);
                mac_rx_queue_wake(regs);

                mii_status = velocity_get_opt_media_mode(vptr);
                if (velocity_set_media_mode(vptr, mii_status) != VELOCITY_LINK_CHANGE) {
                        velocity_print_link_status(vptr);
                        if (!(vptr->mii_status & VELOCITY_LINK_FAIL))
                                netif_wake_queue(vptr->dev);
                }

                enable_flow_control_ability(vptr);

                writel(CR0_STOP, &regs->CR0Clr);
                writel((CR0_DPOLL | CR0_TXON | CR0_RXON | CR0_STRT),
                       &regs->CR0Set);
                break;

        case VELOCITY_INIT_COLD:
        default:
                /*
                 *	Do reset
                 */
                velocity_soft_reset(vptr);

                mac_eeprom_reload(regs);
                for (i = 0; i < 6; i++) {
                        writeb(vptr->dev->dev_addr[i], &(regs->PAR[i]));
                }
                /*
                 *	clear Pre_ACPI bit.
                 */
                BYTE_REG_BITS_OFF(CFGA_PACPI, &(regs->CFGA));
                mac_set_rx_thresh(regs, vptr->options.rx_thresh);
                mac_set_dma_length(regs, vptr->options.DMA_length);

                writeb(WOLCFG_SAM | WOLCFG_SAB, &regs->WOLCFGSet);
                /*
                 *	Back off algorithm uses the original IEEE standard
                 */
                BYTE_REG_BITS_SET(CFGB_OFSET, (CFGB_CRANDOM | CFGB_CAP | CFGB_MBA | CFGB_BAKOPT), &regs->CFGB);

                /*
                 *	Set packet filter: receive directed and broadcast addresses
                 */
                velocity_set_multi(vptr->dev);

                /*
                 *	Enable MII auto-polling
                 */
                enable_mii_autopoll(regs);

                vptr->int_mask = INT_MASK_DEF;

                writel(cpu_to_le32(vptr->rd_pool_dma), &regs->RDBaseLo);
                writew(vptr->options.numrx - 1, &regs->RDCSize);
                mac_rx_queue_run(regs);
                mac_rx_queue_wake(regs);

                writew(vptr->options.numtx - 1, &regs->TDCSize);

                for (i = 0; i < vptr->num_txq; i++) {
                        writel(cpu_to_le32(vptr->td_pool_dma[i]), &(regs->TDBaseLo[i]));
                        mac_tx_queue_run(regs, i);
                }

                velocity_init_cam_filter(vptr);

                init_flow_control_register(vptr);

                writel(CR0_STOP, &regs->CR0Clr);
                writel((CR0_DPOLL | CR0_TXON | CR0_RXON | CR0_STRT), &regs->CR0Set);

                mii_status = velocity_get_opt_media_mode(vptr);
                netif_stop_queue(vptr->dev);

                mii_init(vptr, mii_status);

                if (velocity_set_media_mode(vptr, mii_status) != VELOCITY_LINK_CHANGE) {
                        velocity_print_link_status(vptr);
                        if (!(vptr->mii_status & VELOCITY_LINK_FAIL))
                                netif_wake_queue(vptr->dev);
                }

                enable_flow_control_ability(vptr);
                mac_hw_mibs_init(regs);
                mac_write_int_mask(vptr->int_mask, regs);
                break;
        }
}
/**
 *	velocity_soft_reset	-	soft reset
 *	@vptr: velocity to reset
 *
 *	Kick off a soft reset of the velocity adapter and then poll
 *	until the reset sequence has completed before returning.
 */

static int velocity_soft_reset(struct velocity_info *vptr)
{
        struct mac_regs * regs = vptr->mac_regs;
        int i = 0;

        writel(CR0_SFRST, &regs->CR0Set);

        for (i = 0; i < W_MAX_TIMEOUT; i++) {
                udelay(5);
                if (!DWORD_REG_BITS_IS_ON(CR0_SFRST, &regs->CR0Set))
                        break;
        }

        if (i == W_MAX_TIMEOUT) {
                writel(CR0_FORSRST, &regs->CR0Set);
                /* FIXME: PCI POSTING */
                mdelay(2);
        }
        return 0;
}
/**
 *	velocity_found1		-	set up discovered velocity card
 *	@pdev: PCI device to configure
 *	@ent: PCI device table entry that matched
 *
 *	Configure a discovered adapter from scratch. Return a negative
 *	errno error code on failure paths.
 */

static int __devinit velocity_found1(struct pci_dev *pdev, const struct pci_device_id *ent)
{
        static int first = 1;
        struct net_device *dev;
        int i, ret = -ENOMEM;
        struct velocity_info_tbl *info = (struct velocity_info_tbl *) ent->driver_data;
        struct velocity_info *vptr;
        struct mac_regs * regs;

        if (velocity_nics++ >= MAX_UNITS) {
                printk(KERN_NOTICE VELOCITY_NAME ": already found %d NICs.\n",
                       velocity_nics);
                return -ENODEV;
        }

        dev = alloc_etherdev(sizeof(struct velocity_info));
        if (dev == NULL) {
                printk(KERN_ERR VELOCITY_NAME ": allocate net device failed.\n");
                goto out;
        }

        /* Chain it all together */

        SET_MODULE_OWNER(dev);
        SET_NETDEV_DEV(dev, &pdev->dev);
        vptr = dev->priv;

        if (first) {
                printk(KERN_INFO "%s Ver. %s\n",
                        VELOCITY_FULL_DRV_NAM, VELOCITY_VERSION);
                printk(KERN_INFO "Copyright (c) 2002, 2003 VIA Networking Technologies, Inc.\n");
                printk(KERN_INFO "Copyright (c) 2004 Red Hat Inc.\n");
                first = 0;
        }

        velocity_init_info(pdev, vptr, info);

        vptr->dev = dev;
        dev->irq = pdev->irq;

        ret = pci_enable_device(pdev);
        if (ret < 0)
                goto err_free_dev;

        ret = velocity_get_pci_info(vptr, pdev);
        if (ret < 0) {
                printk(KERN_ERR VELOCITY_NAME ": Failed to find PCI device.\n");
                goto err_disable;
        }

        ret = pci_request_regions(pdev, VELOCITY_NAME);
        if (ret < 0) {
                printk(KERN_ERR VELOCITY_NAME ": Failed to request PCI regions.\n");
                goto err_disable;
        }

        regs = ioremap(vptr->memaddr, vptr->io_size);
        if (regs == NULL) {
                ret = -EIO;
                goto err_release_res;
        }

        vptr->mac_regs = regs;

        dev->base_addr = vptr->ioaddr;

        for (i = 0; i < 6; i++)
                dev->dev_addr[i] = readb(&regs->PAR[i]);

        velocity_get_options(&vptr->options, velocity_nics - 1, dev->name);

        /*
         *	Mask out options that cannot be set on this chip
         */

        vptr->options.flags &= info->flags;

        /*
         *	Enable the chip's specified capabilities
         */

        vptr->flags = vptr->options.flags | (info->flags & 0xFF000000UL);

        vptr->wol_opts = vptr->options.wol_opts;
        vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED;

        vptr->phy_id = MII_GET_PHY_ID(vptr->mac_regs);

        dev->irq = pdev->irq;
        dev->open = velocity_open;
        dev->hard_start_xmit = velocity_xmit;
        dev->stop = velocity_close;
        dev->get_stats = velocity_get_stats;
        dev->set_multicast_list = velocity_set_multi;
        dev->do_ioctl = velocity_ioctl;
        dev->ethtool_ops = &velocity_ethtool_ops;
        dev->change_mtu = velocity_change_mtu;
#ifdef VELOCITY_ZERO_COPY_SUPPORT
        dev->features |= NETIF_F_SG;
#endif

        if (vptr->flags & VELOCITY_FLAGS_TX_CSUM) {
                dev->features |= NETIF_F_HW_CSUM;
        }

        ret = register_netdev(dev);
        if (ret < 0)
                goto err_iounmap;

        velocity_print_info(vptr);
        pci_set_drvdata(pdev, dev);

        /* and leave the chip powered down */

        pci_set_power_state(pdev, 3);
out:
        return ret;

err_iounmap:
        iounmap(regs);
err_release_res:
        pci_release_regions(pdev);
err_disable:
        pci_disable_device(pdev);
err_free_dev:
        free_netdev(dev);
        goto out;
}
/**
 *	velocity_print_info	-	per driver data
 *	@vptr: velocity
 *
 *	Print per driver data as the kernel driver finds Velocity
 *	hardware.
 */

static void __devinit velocity_print_info(struct velocity_info *vptr)
{
        struct net_device *dev = vptr->dev;

        printk(KERN_INFO "%s: %s\n", dev->name, get_chip_name(vptr->chip_id));
        printk(KERN_INFO "%s: Ethernet Address: %2.2X:%2.2X:%2.2X:%2.2X:%2.2X:%2.2X\n",
                dev->name,
                dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
                dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
}
/**
 *	velocity_init_info	-	init private data
 *	@pdev: PCI device
 *	@vptr: Velocity info
 *	@info: Board type
 *
 *	Set up the initial velocity_info struct for the device that has been
 *	discovered.
 */

static void __devinit velocity_init_info(struct pci_dev *pdev, struct velocity_info *vptr, struct velocity_info_tbl *info)
{
        memset(vptr, 0, sizeof(struct velocity_info));

        vptr->pdev = pdev;
        vptr->chip_id = info->chip_id;
        vptr->io_size = info->io_size;
        vptr->num_txq = info->txqueue;
        vptr->multicast_limit = MCAM_SIZE;

        spin_lock_init(&vptr->lock);
        spin_lock_init(&vptr->xmit_lock);
}
/**
 *	velocity_get_pci_info	-	retrieve PCI info for device
 *	@vptr: velocity device
 *	@pdev: PCI device it matches
 *
 *	Retrieve the PCI configuration space data that interests us from
 *	the kernel PCI layer.
 */

static int __devinit velocity_get_pci_info(struct velocity_info *vptr, struct pci_dev *pdev)
{
        if (pci_read_config_byte(pdev, PCI_REVISION_ID, &vptr->rev_id) < 0)
                return -EIO;

        pci_set_master(pdev);

        vptr->ioaddr = pci_resource_start(pdev, 0);
        vptr->memaddr = pci_resource_start(pdev, 1);

        if (!(pci_resource_flags(pdev, 0) & IORESOURCE_IO)) {
                printk(KERN_ERR "%s: region #0 is not an I/O resource, aborting.\n",
                       pci_name(pdev));
                return -EINVAL;
        }

        if ((pci_resource_flags(pdev, 1) & IORESOURCE_IO)) {
                printk(KERN_ERR "%s: region #1 is an I/O resource, aborting.\n",
                       pci_name(pdev));
                return -EINVAL;
        }

        if (pci_resource_len(pdev, 1) < 256) {
                printk(KERN_ERR "%s: region #1 is too small.\n",
                       pci_name(pdev));
                return -EINVAL;
        }

        return 0;
}
/**
 *	velocity_init_rings	-	set up DMA rings
 *	@vptr: Velocity to set up
 *
 *	Allocate PCI mapped DMA rings for the receive and transmit layer
 *	to use.
 */

static int velocity_init_rings(struct velocity_info *vptr)
{
        int i;
        unsigned int psize;
        unsigned int tsize;
        dma_addr_t pool_dma;
        u8 *pool;

        /*
         *	Allocate all RD/TD rings as a single pool
         */

        psize = vptr->options.numrx * sizeof(struct rx_desc) +
                vptr->options.numtx * sizeof(struct tx_desc) * vptr->num_txq;

        /*
         *	pci_alloc_consistent() fulfills the requirement for 64 byte
         *	alignment
         */
        pool = pci_alloc_consistent(vptr->pdev, psize, &pool_dma);
        if (pool == NULL) {
                printk(KERN_ERR "%s : DMA memory allocation failed.\n",
                       vptr->dev->name);
                return -ENOMEM;
        }

        memset(pool, 0, psize);

        vptr->rd_ring = (struct rx_desc *) pool;

        vptr->rd_pool_dma = pool_dma;

        tsize = vptr->options.numtx * PKT_BUF_SZ * vptr->num_txq;
        vptr->tx_bufs = pci_alloc_consistent(vptr->pdev, tsize,
                                             &vptr->tx_bufs_dma);

        if (vptr->tx_bufs == NULL) {
                printk(KERN_ERR "%s: DMA memory allocation failed.\n",
                       vptr->dev->name);
                pci_free_consistent(vptr->pdev, psize, pool, pool_dma);
                return -ENOMEM;
        }

        memset(vptr->tx_bufs, 0, vptr->options.numtx * PKT_BUF_SZ * vptr->num_txq);

        i = vptr->options.numrx * sizeof(struct rx_desc);
        pool += i;
        pool_dma += i;
        for (i = 0; i < vptr->num_txq; i++) {
                int offset = vptr->options.numtx * sizeof(struct tx_desc);

                vptr->td_pool_dma[i] = pool_dma;
                vptr->td_rings[i] = (struct tx_desc *) pool;
                pool += offset;
                pool_dma += offset;
        }
        return 0;
}
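
/*
 * Illustrative sketch (not from the original source) of the single pool
 * laid out by velocity_init_rings() above, assuming one TX queue:
 *
 *	pool_dma                                        pool_dma + psize
 *	|<----- numrx * sizeof(rx_desc) ----->|<- numtx * sizeof(tx_desc) ->|
 *	 rd_ring / rd_pool_dma                 td_rings[0] / td_pool_dma[0]
 *
 * With num_txq > 1 a further tx_desc block follows for each extra queue.
 * The transmit bounce buffers (tx_bufs) live in a second, separately
 * allocated pool of numtx * PKT_BUF_SZ bytes per queue.
 */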
/**
 *	velocity_free_rings	-	free PCI ring pointers
 *	@vptr: Velocity to free from
 *
 *	Clean up the PCI ring buffers allocated to this velocity.
 */

static void velocity_free_rings(struct velocity_info *vptr)
{
        int size;

        size = vptr->options.numrx * sizeof(struct rx_desc) +
               vptr->options.numtx * sizeof(struct tx_desc) * vptr->num_txq;

        pci_free_consistent(vptr->pdev, size, vptr->rd_ring, vptr->rd_pool_dma);

        size = vptr->options.numtx * PKT_BUF_SZ * vptr->num_txq;

        pci_free_consistent(vptr->pdev, size, vptr->tx_bufs, vptr->tx_bufs_dma);
}
/**
 *	velocity_init_rd_ring	-	set up receive ring
 *	@vptr: velocity to configure
 *
 *	Allocate and set up the receive buffers for each ring slot and
 *	assign them to the network adapter.
 */

static int velocity_init_rd_ring(struct velocity_info *vptr)
{
        int i, ret = -ENOMEM;
        struct rx_desc *rd;
        struct velocity_rd_info *rd_info;
        unsigned int rsize = sizeof(struct velocity_rd_info) *
                        vptr->options.numrx;

        vptr->rd_info = kmalloc(rsize, GFP_KERNEL);
        if (vptr->rd_info == NULL)
                return -ENOMEM;
        memset(vptr->rd_info, 0, rsize);

        /* Init the RD ring entries */
        for (i = 0; i < vptr->options.numrx; i++) {
                rd = &(vptr->rd_ring[i]);
                rd_info = &(vptr->rd_info[i]);

                ret = velocity_alloc_rx_buf(vptr, i);
                if (ret < 0) {
                        VELOCITY_PRT(MSG_LEVEL_ERR, KERN_ERR
                                "%s: failed to allocate RX buffer.\n",
                                vptr->dev->name);
                        velocity_free_rd_ring(vptr);
                        return ret;
                }
                rd->rdesc0.owner = OWNED_BY_NIC;
        }
        vptr->rd_used = vptr->rd_curr = 0;
        return 0;
}
/**
 *	velocity_free_rd_ring	-	free receive ring
 *	@vptr: velocity to clean up
 *
 *	Free the receive buffers for each ring slot and any
 *	attached socket buffers that need to go away.
 */

static void velocity_free_rd_ring(struct velocity_info *vptr)
{
        int i;

        if (vptr->rd_info == NULL)
                return;

        for (i = 0; i < vptr->options.numrx; i++) {
                struct velocity_rd_info *rd_info = &(vptr->rd_info[i]);

                if (!rd_info->skb_dma)
                        continue;
                pci_unmap_single(vptr->pdev, rd_info->skb_dma, vptr->rx_buf_sz,
                                 PCI_DMA_FROMDEVICE);
                rd_info->skb_dma = (dma_addr_t) NULL;

                dev_kfree_skb(rd_info->skb);
                rd_info->skb = NULL;
        }

        kfree(vptr->rd_info);
        vptr->rd_info = NULL;
}
/**
 *	velocity_init_td_ring	-	set up transmit ring
 *	@vptr: velocity to configure
 *
 *	Set up the transmit ring and chain the ring pointers together.
 *	Returns zero on success or a negative posix errno code for
 *	failure.
 */

static int velocity_init_td_ring(struct velocity_info *vptr)
{
        int i, j;
        dma_addr_t curr;
        struct tx_desc *td;
        struct velocity_td_info *td_info;
        unsigned int tsize = sizeof(struct velocity_td_info) *
                        vptr->options.numtx;

        /* Init the TD ring entries */
        for (j = 0; j < vptr->num_txq; j++) {
                curr = vptr->td_pool_dma[j];

                vptr->td_infos[j] = kmalloc(tsize, GFP_KERNEL);
                if (vptr->td_infos[j] == NULL) {
                        while (--j >= 0)
                                kfree(vptr->td_infos[j]);
                        return -ENOMEM;
                }
                memset(vptr->td_infos[j], 0, tsize);

                for (i = 0; i < vptr->options.numtx; i++, curr += sizeof(struct tx_desc)) {
                        td = &(vptr->td_rings[j][i]);
                        td_info = &(vptr->td_infos[j][i]);
                        td_info->buf = vptr->tx_bufs + (i + j) * PKT_BUF_SZ;
                        td_info->buf_dma = vptr->tx_bufs_dma + (i + j) * PKT_BUF_SZ;
                }
                vptr->td_tail[j] = vptr->td_curr[j] = vptr->td_used[j] = 0;
        }
        return 0;
}
/*
 *	FIXME: could we merge this with velocity_free_tx_buf ?
 */

static void velocity_free_td_ring_entry(struct velocity_info *vptr,
                                        int q, int n)
{
        struct velocity_td_info * td_info = &(vptr->td_infos[q][n]);
        int i;

        if (td_info == NULL)
                return;

        if (td_info->skb) {
                for (i = 0; i < td_info->nskb_dma; i++)
                {
                        if (td_info->skb_dma[i]) {
                                pci_unmap_single(vptr->pdev, td_info->skb_dma[i],
                                        td_info->skb->len, PCI_DMA_TODEVICE);
                                td_info->skb_dma[i] = (dma_addr_t) NULL;
                        }
                }
                dev_kfree_skb(td_info->skb);
                td_info->skb = NULL;
        }
}
/**
 *	velocity_free_td_ring	-	free td ring
 *	@vptr: velocity to free from
 *
 *	Free up the transmit ring for this particular velocity adapter.
 *	We free the ring contents but not the ring itself.
 */

static void velocity_free_td_ring(struct velocity_info *vptr)
{
        int i, j;

        for (j = 0; j < vptr->num_txq; j++) {
                if (vptr->td_infos[j] == NULL)
                        continue;
                for (i = 0; i < vptr->options.numtx; i++) {
                        velocity_free_td_ring_entry(vptr, j, i);
                }
                if (vptr->td_infos[j]) {
                        kfree(vptr->td_infos[j]);
                        vptr->td_infos[j] = NULL;
                }
        }
}
/**
 *	velocity_rx_srv		-	service RX interrupt
 *	@vptr: velocity
 *	@status: adapter status (unused)
 *
 *	Walk the receive ring of the velocity adapter and remove
 *	any received packets from the receive queue. Hand the ring
 *	slots back to the adapter for reuse.
 */

static int velocity_rx_srv(struct velocity_info *vptr, int status)
{
        struct rx_desc *rd;
        struct net_device_stats *stats = &vptr->stats;
        struct mac_regs * regs = vptr->mac_regs;
        int rd_curr = vptr->rd_curr;
        int works = 0;

        while (1) {
                rd = &(vptr->rd_ring[rd_curr]);

                if ((vptr->rd_info[rd_curr]).skb == NULL) {
                        if (velocity_alloc_rx_buf(vptr, rd_curr) < 0)
                                break;
                }

                if (rd->rdesc0.owner == OWNED_BY_NIC)
                        break;

                /*
                 *	Don't drop CE or RL error frame although RXOK is off
                 *	FIXME: need to handle copybreak
                 */
                if ((rd->rdesc0.RSR & RSR_RXOK) || (!(rd->rdesc0.RSR & RSR_RXOK) && (rd->rdesc0.RSR & (RSR_CE | RSR_RL)))) {
                        if (velocity_receive_frame(vptr, rd_curr) == 0) {
                                if (velocity_alloc_rx_buf(vptr, rd_curr) < 0) {
                                        VELOCITY_PRT(MSG_LEVEL_ERR, KERN_ERR "%s: can not allocate rx buf\n", vptr->dev->name);
                                        break;
                                }
                        } else {
                                stats->rx_dropped++;
                        }
                } else {
                        if (rd->rdesc0.RSR & RSR_CRC)
                                stats->rx_crc_errors++;
                        if (rd->rdesc0.RSR & RSR_FAE)
                                stats->rx_frame_errors++;

                        stats->rx_dropped++;
                }

                if (++vptr->rd_used >= 4) {
                        int i, rd_prev = rd_curr;
                        for (i = 0; i < 4; i++) {
                                if (--rd_prev < 0)
                                        rd_prev = vptr->options.numrx - 1;

                                rd = &(vptr->rd_ring[rd_prev]);
                                rd->rdesc0.owner = OWNED_BY_NIC;
                        }
                        /* Tell the NIC four more RX descriptors are available */
                        writew(4, &(regs->RBRDU));
                        vptr->rd_used -= 4;
                }

                vptr->dev->last_rx = jiffies;

                rd_curr++;
                if (rd_curr >= vptr->options.numrx)
                        rd_curr = 0;
                works++;
        }

        vptr->rd_curr = rd_curr;
        return works;
}
/**
 *	velocity_rx_csum	-	checksum process
 *	@rd: receive packet descriptor
 *	@skb: network layer packet buffer
 *
 *	Process the status bits for the received packet and determine
 *	if the checksum was computed and verified by the hardware.
 */

static inline void velocity_rx_csum(struct rx_desc *rd, struct sk_buff *skb)
{
        skb->ip_summed = CHECKSUM_NONE;

        if (rd->rdesc1.CSM & CSM_IPKT) {
                if (rd->rdesc1.CSM & CSM_IPOK) {
                        if ((rd->rdesc1.CSM & CSM_TCPKT) ||
                                        (rd->rdesc1.CSM & CSM_UDPKT)) {
                                if (!(rd->rdesc1.CSM & CSM_TUPOK)) {
                                        return;
                                }
                        }
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
                }
        }
}
/**
 *	velocity_receive_frame	-	received packet processor
 *	@vptr: velocity we are handling
 *	@idx: ring index
 *
 *	A packet has arrived. We process the packet and if appropriate
 *	pass the frame up the network stack.
 */

static int velocity_receive_frame(struct velocity_info *vptr, int idx)
{
        struct net_device_stats *stats = &vptr->stats;
        struct velocity_rd_info *rd_info = &(vptr->rd_info[idx]);
        struct rx_desc *rd = &(vptr->rd_ring[idx]);
        struct sk_buff *skb;

        if (rd->rdesc0.RSR & (RSR_STP | RSR_EDP)) {
                VELOCITY_PRT(MSG_LEVEL_VERBOSE, KERN_ERR " %s : the received frame spans multiple RDs.\n", vptr->dev->name);
                stats->rx_length_errors++;
                return -EINVAL;
        }

        if (rd->rdesc0.RSR & RSR_MAR)
                vptr->stats.multicast++;

        skb = rd_info->skb;
        skb->dev = vptr->dev;

        pci_unmap_single(vptr->pdev, rd_info->skb_dma, vptr->rx_buf_sz,
                         PCI_DMA_FROMDEVICE);
        rd_info->skb_dma = (dma_addr_t) NULL;
        rd_info->skb = NULL;

        /* FIXME - memmove ? */
        if (vptr->flags & VELOCITY_FLAGS_IP_ALIGN) {
                int i;
                for (i = rd->rdesc0.len + 4; i >= 0; i--)
                        *(skb->data + i + 2) = *(skb->data + i);
        }
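        /*
         * Sketch only (not from the original source), addressing the FIXME
         * above: the byte-by-byte shift is equivalent to a single
         * overlap-safe copy, e.g.
         *
         *      memmove(skb->data + 2, skb->data, rd->rdesc0.len + 5);
         *
         * which moves the frame up two bytes so the IP header following
         * the 14-byte Ethernet header lands on a DWORD boundary.
         */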
        skb_put(skb, (rd->rdesc0.len - 4));
        skb->protocol = eth_type_trans(skb, skb->dev);

        /*
         *	Drop frames not meeting IEEE 802.3
         */

        if (vptr->flags & VELOCITY_FLAGS_VAL_PKT_LEN) {
                if (rd->rdesc0.RSR & RSR_RL) {
                        stats->rx_length_errors++;
                        return -EINVAL;
                }
        }

        velocity_rx_csum(rd, skb);

        /*
         *	FIXME: need rx_copybreak handling
         */

        stats->rx_bytes += skb->len;
        netif_rx(skb);
        return 0;
}
/**
 *	velocity_alloc_rx_buf	-	allocate aligned receive buffer
 *	@vptr: velocity
 *	@idx: ring index
 *
 *	Allocate a new full sized buffer for the reception of a frame and
 *	map it into PCI space for the hardware to use. The hardware
 *	requires *64* byte alignment of the buffer which makes life
 *	less fun than would be ideal.
 */

static int velocity_alloc_rx_buf(struct velocity_info *vptr, int idx)
{
        struct rx_desc *rd = &(vptr->rd_ring[idx]);
        struct velocity_rd_info *rd_info = &(vptr->rd_info[idx]);

        rd_info->skb = dev_alloc_skb(vptr->rx_buf_sz + 64);
        if (rd_info->skb == NULL)
                return -ENOMEM;

        /*
         *	Do the gymnastics to get the buffer head for data at
         *	64 byte alignment. Reserving (64 - (tail & 63)) bytes
         *	rounds the data pointer up to the next 64 byte boundary;
         *	the extra 64 bytes allocated above leave room for this.
         */
        skb_reserve(rd_info->skb, 64 - ((unsigned long) rd_info->skb->tail & 63));
        rd_info->skb->dev = vptr->dev;
        rd_info->skb_dma = pci_map_single(vptr->pdev, rd_info->skb->tail, vptr->rx_buf_sz, PCI_DMA_FROMDEVICE);

        /*
         *	Fill in the descriptor to match
         */

        *((u32 *) &(rd->rdesc0)) = 0;
        rd->len = cpu_to_le32(vptr->rx_buf_sz);
        rd->pa_low = cpu_to_le32(rd_info->skb_dma);
        rd->pa_high = 0;
        return 0;
}
/**
 *	velocity_tx_srv		-	transmit interrupt service
 *	@vptr: velocity
 *	@status: adapter status (unused)
 *
 *	Scan the queues looking for transmitted packets that
 *	we can complete and clean up. Update any statistics as
 *	necessary.
 */

static int velocity_tx_srv(struct velocity_info *vptr, u32 status)
{
        struct tx_desc *td;
        int qnum;
        int full = 0;
        int idx;
        int works = 0;
        struct velocity_td_info *tdinfo;
        struct net_device_stats *stats = &vptr->stats;

        for (qnum = 0; qnum < vptr->num_txq; qnum++) {
                for (idx = vptr->td_tail[qnum]; vptr->td_used[qnum] > 0;
                        idx = (idx + 1) % vptr->options.numtx) {

                        /*
                         *	Get Tx Descriptor
                         */
                        td = &(vptr->td_rings[qnum][idx]);
                        tdinfo = &(vptr->td_infos[qnum][idx]);

                        if (td->tdesc0.owner == OWNED_BY_NIC)
                                break;

                        if (td->tdesc0.TSR & TSR0_TERR) {
                                stats->tx_errors++;
                                stats->tx_dropped++;
                                if (td->tdesc0.TSR & TSR0_CDH)
                                        stats->tx_heartbeat_errors++;
                                if (td->tdesc0.TSR & TSR0_CRS)
                                        stats->tx_carrier_errors++;
                                if (td->tdesc0.TSR & TSR0_ABT)
                                        stats->tx_aborted_errors++;
                                if (td->tdesc0.TSR & TSR0_OWC)
                                        stats->tx_window_errors++;
                        } else {
                                stats->tx_packets++;
                                stats->tx_bytes += tdinfo->skb->len;
                        }
                        velocity_free_tx_buf(vptr, tdinfo);
                        vptr->td_used[qnum]--;
                        works++;
                }
                vptr->td_tail[qnum] = idx;

                if (AVAIL_TD(vptr, qnum) < 1) {
                        full = 1;
                }
        }
        /*
         *	Look to see if we should kick the transmit network
         *	layer for more work.
         */
        if (netif_queue_stopped(vptr->dev) && (full == 0)
            && (!(vptr->mii_status & VELOCITY_LINK_FAIL))) {
                netif_wake_queue(vptr->dev);
        }
        return works;
}
/**
 *	velocity_print_link_status	-	link status reporting
 *	@vptr: velocity to report on
 *
 *	Turn the link status of the velocity card into a kernel log
 *	description of the new link state, detailing speed and duplex
 *	status.
 */

static void velocity_print_link_status(struct velocity_info *vptr)
{

        if (vptr->mii_status & VELOCITY_LINK_FAIL) {
                VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: failed to detect cable link\n", vptr->dev->name);
        } else if (vptr->options.spd_dpx == SPD_DPX_AUTO) {
                VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: Link autonegotiation", vptr->dev->name);

                if (vptr->mii_status & VELOCITY_SPEED_1000)
                        VELOCITY_PRT(MSG_LEVEL_INFO, " speed 1000M bps");
                else if (vptr->mii_status & VELOCITY_SPEED_100)
                        VELOCITY_PRT(MSG_LEVEL_INFO, " speed 100M bps");
                else
                        VELOCITY_PRT(MSG_LEVEL_INFO, " speed 10M bps");

                if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
                        VELOCITY_PRT(MSG_LEVEL_INFO, " full duplex\n");
                else
                        VELOCITY_PRT(MSG_LEVEL_INFO, " half duplex\n");
        } else {
                VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: Link forced", vptr->dev->name);
                switch (vptr->options.spd_dpx) {
                case SPD_DPX_100_HALF:
                        VELOCITY_PRT(MSG_LEVEL_INFO, " speed 100M bps half duplex\n");
                        break;
                case SPD_DPX_100_FULL:
                        VELOCITY_PRT(MSG_LEVEL_INFO, " speed 100M bps full duplex\n");
                        break;
                case SPD_DPX_10_HALF:
                        VELOCITY_PRT(MSG_LEVEL_INFO, " speed 10M bps half duplex\n");
                        break;
                case SPD_DPX_10_FULL:
                        VELOCITY_PRT(MSG_LEVEL_INFO, " speed 10M bps full duplex\n");
                        break;
                }
        }
}
/**
 *	velocity_error		-	handle error from controller
 *	@vptr: velocity
 *	@status: card status
 *
 *	Process an error report from the hardware and attempt to recover
 *	the card itself. At the moment we cannot recover from some
 *	theoretically impossible errors but this could be fixed using
 *	the pci_device_failed logic to bounce the hardware.
 */

static void velocity_error(struct velocity_info *vptr, int status)
{

        if (status & ISR_TXSTLI) {
                struct mac_regs * regs = vptr->mac_regs;

                printk(KERN_ERR "TD structure error TDindex=%hx\n", readw(&regs->TDIdx[0]));
                BYTE_REG_BITS_ON(TXESR_TDSTR, &regs->TXESR);
                writew(TRDCSR_RUN, &regs->TDCSRClr);
                netif_stop_queue(vptr->dev);

                /* FIXME: port over the pci_device_failed code and use it
                   here */
        }

        if (status & ISR_SRCI) {
                struct mac_regs * regs = vptr->mac_regs;
                int linked;

                if (vptr->options.spd_dpx == SPD_DPX_AUTO) {
                        vptr->mii_status = check_connection_type(regs);

                        /*
                         *	If it is a 3119, disable frame bursting in
                         *	halfduplex mode and enable it in fullduplex
                         *	mode
                         */
                        if (vptr->rev_id < REV_ID_VT3216_A0) {
                                if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
                                        BYTE_REG_BITS_ON(TCR_TB2BDIS, &regs->TCR);
                                else
                                        BYTE_REG_BITS_OFF(TCR_TB2BDIS, &regs->TCR);
                        }
                        /*
                         *	Only enable CD heart beat counter in 10HD mode
                         */
                        if (!(vptr->mii_status & VELOCITY_DUPLEX_FULL) && (vptr->mii_status & VELOCITY_SPEED_10)) {
                                BYTE_REG_BITS_OFF(TESTCFG_HBDIS, &regs->TESTCFG);
                        } else {
                                BYTE_REG_BITS_ON(TESTCFG_HBDIS, &regs->TESTCFG);
                        }
                }
                /*
                 *	Get link status from PHYSR0
                 */
                linked = readb(&regs->PHYSR0) & PHYSR0_LINKGD;

                if (linked) {
                        vptr->mii_status &= ~VELOCITY_LINK_FAIL;
                } else {
                        vptr->mii_status |= VELOCITY_LINK_FAIL;
                }

                velocity_print_link_status(vptr);
                enable_flow_control_ability(vptr);

                /*
                 *	Re-enable auto-polling because SRCI will disable
                 *	auto-polling
                 */

                enable_mii_autopoll(regs);

                if (vptr->mii_status & VELOCITY_LINK_FAIL)
                        netif_stop_queue(vptr->dev);
                else
                        netif_wake_queue(vptr->dev);
        }
        if (status & ISR_MIBFI)
                velocity_update_hw_mibs(vptr);
        if (status & ISR_LSTEI)
                mac_rx_queue_wake(vptr->mac_regs);
}
/**
 *	velocity_free_tx_buf	-	free transmit buffer
 *	@vptr: velocity
 *	@tdinfo: buffer
 *
 *	Release a transmit buffer. If the buffer was preallocated then
 *	recycle it, if not then unmap the buffer.
 */

static void velocity_free_tx_buf(struct velocity_info *vptr, struct velocity_td_info *tdinfo)
{
        struct sk_buff *skb = tdinfo->skb;
        int i;

        /*
         *	Don't unmap the pre-allocated tx_bufs
         */
        if (tdinfo->skb_dma && (tdinfo->skb_dma[0] != tdinfo->buf_dma)) {

                for (i = 0; i < tdinfo->nskb_dma; i++) {
#ifdef VELOCITY_ZERO_COPY_SUPPORT
                        pci_unmap_single(vptr->pdev, tdinfo->skb_dma[i], td->tdesc1.len, PCI_DMA_TODEVICE);
#else
                        pci_unmap_single(vptr->pdev, tdinfo->skb_dma[i], skb->len, PCI_DMA_TODEVICE);
#endif
                        tdinfo->skb_dma[i] = 0;
                }
        }
        dev_kfree_skb_irq(skb);
        tdinfo->skb = NULL;
}
/**
 *	velocity_open		-	interface activation callback
 *	@dev: network layer device to open
 *
 *	Called when the network layer brings the interface up. Returns
 *	a negative posix error code on failure, or zero on success.
 *
 *	All the ring allocation and set up is done on open for this
 *	adapter to minimise memory usage when inactive.
 */

static int velocity_open(struct net_device *dev)
{
        struct velocity_info *vptr = dev->priv;
        int ret;

        vptr->rx_buf_sz = (dev->mtu <= 1504 ? PKT_BUF_SZ : dev->mtu + 32);

        ret = velocity_init_rings(vptr);
        if (ret < 0)
                goto out;

        ret = velocity_init_rd_ring(vptr);
        if (ret < 0)
                goto err_free_desc_rings;

        ret = velocity_init_td_ring(vptr);
        if (ret < 0)
                goto err_free_rd_ring;

        /* Ensure chip is running */
        pci_set_power_state(vptr->pdev, 0);

        velocity_init_registers(vptr, VELOCITY_INIT_COLD);

        ret = request_irq(vptr->pdev->irq, &velocity_intr, SA_SHIRQ,
                          dev->name, dev);
        if (ret < 0) {
                /* Power down the chip */
                pci_set_power_state(vptr->pdev, 3);
                goto err_free_td_ring;
        }

        mac_enable_int(vptr->mac_regs);
        netif_start_queue(dev);
        vptr->flags |= VELOCITY_FLAGS_OPENED;
out:
        return ret;

err_free_td_ring:
        velocity_free_td_ring(vptr);
err_free_rd_ring:
        velocity_free_rd_ring(vptr);
err_free_desc_rings:
        velocity_free_rings(vptr);
        goto out;
}
/**
 *	velocity_change_mtu	-	MTU change callback
 *	@dev: network device
 *	@new_mtu: desired MTU
 *
 *	Handle requests from the networking layer for MTU change on
 *	this interface. It gets called on a change by the network layer.
 *	Return zero for success or negative posix error code.
 */

static int velocity_change_mtu(struct net_device *dev, int new_mtu)
{
        struct velocity_info *vptr = dev->priv;
        unsigned long flags;
        int oldmtu = dev->mtu;
        int ret = 0;

        if ((new_mtu < VELOCITY_MIN_MTU) || new_mtu > (VELOCITY_MAX_MTU)) {
                VELOCITY_PRT(MSG_LEVEL_ERR, KERN_NOTICE "%s: Invalid MTU.\n",
                                vptr->dev->name);
                return -EINVAL;
        }

        if (new_mtu != oldmtu) {
                spin_lock_irqsave(&vptr->lock, flags);

                netif_stop_queue(dev);
                velocity_shutdown(vptr);

                velocity_free_td_ring(vptr);
                velocity_free_rd_ring(vptr);

                dev->mtu = new_mtu;
                if (new_mtu > 8192)
                        vptr->rx_buf_sz = 9 * 1024;
                else if (new_mtu > 4096)
                        vptr->rx_buf_sz = 8192;
                else
                        vptr->rx_buf_sz = 4 * 1024;

                ret = velocity_init_rd_ring(vptr);
                if (ret < 0)
                        goto out_unlock;

                ret = velocity_init_td_ring(vptr);
                if (ret < 0)
                        goto out_unlock;

                velocity_init_registers(vptr, VELOCITY_INIT_COLD);

                mac_enable_int(vptr->mac_regs);
                netif_start_queue(dev);
out_unlock:
                spin_unlock_irqrestore(&vptr->lock, flags);
        }

        return ret;
}
/**
 *	velocity_shutdown	-	shut down the chip
 *	@vptr: velocity to deactivate
 *
 *	Shuts down the internal operations of the velocity and
 *	disables interrupts, autopolling, transmit and receive.
 */

static void velocity_shutdown(struct velocity_info *vptr)
{
        struct mac_regs * regs = vptr->mac_regs;
        mac_disable_int(regs);
        writel(CR0_STOP, &regs->CR0Set);
        writew(0xFFFF, &regs->TDCSRClr);
        writeb(0xFF, &regs->RDCSRClr);
        safe_disable_mii_autopoll(regs);
        mac_clear_isr(regs);
}
/**
 *	velocity_close		-	close adapter callback
 *	@dev: network device
 *
 *	Callback from the network layer when the velocity is being
 *	deactivated by the network layer.
 */

static int velocity_close(struct net_device *dev)
{
        struct velocity_info *vptr = dev->priv;

        netif_stop_queue(dev);
        velocity_shutdown(vptr);

        if (vptr->flags & VELOCITY_FLAGS_WOL_ENABLED)
                velocity_get_ip(vptr);
        if (dev->irq != 0)
                free_irq(dev->irq, dev);

        /* Power down the chip */
        pci_set_power_state(vptr->pdev, 3);

        /* Free the resources */
        velocity_free_td_ring(vptr);
        velocity_free_rd_ring(vptr);
        velocity_free_rings(vptr);

        vptr->flags &= (~VELOCITY_FLAGS_OPENED);
        return 0;
}
/**
 *	velocity_xmit		-	transmit packet callback
 *	@skb: buffer to transmit
 *	@dev: network device
 *
 *	Called by the network layer to request a packet is queued to
 *	the velocity. Returns zero on success.
 */

static int velocity_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct velocity_info *vptr = dev->priv;
        int qnum = 0;
        struct tx_desc *td_ptr;
        struct velocity_td_info *tdinfo;
        unsigned long flags;
        int index;

        int pktlen = skb->len;

        spin_lock_irqsave(&vptr->lock, flags);

        index = vptr->td_curr[qnum];
        td_ptr = &(vptr->td_rings[qnum][index]);
        tdinfo = &(vptr->td_infos[qnum][index]);

        td_ptr->tdesc1.TCPLS = TCPLS_NORMAL;
        td_ptr->tdesc1.TCR = TCR0_TIC;
        td_ptr->td_buf[0].queue = 0;

        /*
         *	Pad short frames.
         */
        if (pktlen < ETH_ZLEN) {
                /* Cannot occur until ZC support */
                if (skb_linearize(skb, GFP_ATOMIC)) {
                        spin_unlock_irqrestore(&vptr->lock, flags);
                        return 0;
                }
                pktlen = ETH_ZLEN;
                memcpy(tdinfo->buf, skb->data, skb->len);
                memset(tdinfo->buf + skb->len, 0, ETH_ZLEN - skb->len);
                tdinfo->skb = skb;
                tdinfo->skb_dma[0] = tdinfo->buf_dma;
                td_ptr->tdesc0.pktsize = pktlen;
                td_ptr->td_buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]);
                td_ptr->td_buf[0].pa_high = 0;
                td_ptr->td_buf[0].bufsize = td_ptr->tdesc0.pktsize;
                tdinfo->nskb_dma = 1;
                td_ptr->tdesc1.CMDZ = 2;
        } else
#ifdef VELOCITY_ZERO_COPY_SUPPORT
        if (skb_shinfo(skb)->nr_frags > 0) {
                int nfrags = skb_shinfo(skb)->nr_frags;
                tdinfo->skb = skb;
                if (nfrags > 6) {
                        skb_linearize(skb, GFP_ATOMIC);
                        memcpy(tdinfo->buf, skb->data, skb->len);
                        tdinfo->skb_dma[0] = tdinfo->buf_dma;
                        td_ptr->tdesc0.pktsize = pktlen;
                        td_ptr->td_buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]);
                        td_ptr->td_buf[0].pa_high = 0;
                        td_ptr->td_buf[0].bufsize = td_ptr->tdesc0.pktsize;
                        tdinfo->nskb_dma = 1;
                        td_ptr->tdesc1.CMDZ = 2;
                } else {
                        int i = 0;
                        tdinfo->nskb_dma = 0;
                        tdinfo->skb_dma[i] = pci_map_single(vptr->pdev, skb->data, skb->len - skb->data_len, PCI_DMA_TODEVICE);

                        td_ptr->tdesc0.pktsize = pktlen;

                        /* FIXME: support 48bit DMA later */
                        td_ptr->td_buf[i].pa_low = cpu_to_le32(tdinfo->skb_dma[i]);
                        td_ptr->td_buf[i].pa_high = 0;
                        td_ptr->td_buf[i].bufsize = skb->len - skb->data_len;

                        for (i = 0; i < nfrags; i++) {
                                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
                                void *addr = ((void *) page_address(frag->page)) + frag->page_offset;

                                tdinfo->skb_dma[i + 1] = pci_map_single(vptr->pdev, addr, frag->size, PCI_DMA_TODEVICE);

                                td_ptr->td_buf[i + 1].pa_low = cpu_to_le32(tdinfo->skb_dma[i + 1]);
                                td_ptr->td_buf[i + 1].pa_high = 0;
                                td_ptr->td_buf[i + 1].bufsize = frag->size;
                        }
                        tdinfo->nskb_dma = i - 1;
                        td_ptr->tdesc1.CMDZ = i;
                }
        } else
#endif
        {
                /*
                 *	Map the linear network buffer into PCI space and
                 *	add it to the transmit ring.
                 */
                tdinfo->skb = skb;
                tdinfo->skb_dma[0] = pci_map_single(vptr->pdev, skb->data, pktlen, PCI_DMA_TODEVICE);
                td_ptr->tdesc0.pktsize = pktlen;
                td_ptr->td_buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]);
                td_ptr->td_buf[0].pa_high = 0;
                td_ptr->td_buf[0].bufsize = td_ptr->tdesc0.pktsize;
                tdinfo->nskb_dma = 1;
                td_ptr->tdesc1.CMDZ = 2;
        }

        if (vptr->flags & VELOCITY_FLAGS_TAGGING) {
                td_ptr->tdesc1.pqinf.VID = (vptr->options.vid & 0xfff);
                td_ptr->tdesc1.pqinf.priority = 0;
                td_ptr->tdesc1.pqinf.CFI = 0;
                td_ptr->tdesc1.TCR |= TCR0_VETAG;
        }

        /*
         *	Handle hardware checksum
         */
        if ((vptr->flags & VELOCITY_FLAGS_TX_CSUM)
            && (skb->ip_summed == CHECKSUM_HW)) {
                struct iphdr *ip = skb->nh.iph;
                if (ip->protocol == IPPROTO_TCP)
                        td_ptr->tdesc1.TCR |= TCR0_TCPCK;
                else if (ip->protocol == IPPROTO_UDP)
                        td_ptr->tdesc1.TCR |= (TCR0_UDPCK);
                td_ptr->tdesc1.TCR |= TCR0_IPCK;
        }
        {
                int prev = index - 1;

                if (prev < 0)
                        prev = vptr->options.numtx - 1;
                td_ptr->tdesc0.owner = OWNED_BY_NIC;
                vptr->td_used[qnum]++;
                vptr->td_curr[qnum] = (index + 1) % vptr->options.numtx;

                if (AVAIL_TD(vptr, qnum) < 1)
                        netif_stop_queue(dev);

                td_ptr = &(vptr->td_rings[qnum][prev]);
                td_ptr->td_buf[0].queue = 1;
                mac_tx_queue_wake(vptr->mac_regs, qnum);
        }
        dev->trans_start = jiffies;
        spin_unlock_irqrestore(&vptr->lock, flags);
        return 0;
}
/**
 *	velocity_intr		-	interrupt callback
 *	@irq: interrupt number
 *	@dev_instance: interrupting device
 *	@pt_regs: CPU register state at interrupt
 *
 *	Called whenever an interrupt is generated by the velocity
 *	adapter IRQ line. We may not be the source of the interrupt
 *	and need to identify initially if we are, and if not exit as
 *	efficiently as possible.
 */

static int velocity_intr(int irq, void *dev_instance, struct pt_regs *regs)
{
        struct net_device *dev = dev_instance;
        struct velocity_info *vptr = dev->priv;
        u32 isr_status;
        int max_count = 0;

        spin_lock(&vptr->lock);
        isr_status = mac_read_isr(vptr->mac_regs);

        /* Not us ? */
        if (isr_status == 0) {
                spin_unlock(&vptr->lock);
                return IRQ_NONE;
        }

        mac_disable_int(vptr->mac_regs);

        /*
         *	Keep processing the ISR until we have completed
         *	processing and the isr_status becomes zero
         */

        while (isr_status != 0) {
                mac_write_isr(vptr->mac_regs, isr_status);
                if (isr_status & (~(ISR_PRXI | ISR_PPRXI | ISR_PTXI | ISR_PPTXI)))
                        velocity_error(vptr, isr_status);
                if (isr_status & (ISR_PRXI | ISR_PPRXI))
                        max_count += velocity_rx_srv(vptr, isr_status);
                if (isr_status & (ISR_PTXI | ISR_PPTXI))
                        max_count += velocity_tx_srv(vptr, isr_status);
                isr_status = mac_read_isr(vptr->mac_regs);
                if (max_count > vptr->options.int_works)
                {
                        printk(KERN_WARNING "%s: excessive work at interrupt.\n",
                                dev->name);
                        max_count = 0;
                }
        }
        spin_unlock(&vptr->lock);
        mac_enable_int(vptr->mac_regs);

        return IRQ_HANDLED;
}
/**
 *	ether_crc	-	ethernet CRC function
 *
 *	Compute an ethernet CRC hash of the data block provided. This
 *	is not performance optimised but is not needed in performance
 *	critical code paths.
 *
 *	FIXME: could we use shared code here ?
 */

static inline u32 ether_crc(int length, unsigned char *data)
{
        static unsigned const ethernet_polynomial = 0x04c11db7U;
        int crc = -1;

        while (--length >= 0) {
                unsigned char current_octet = *data++;
                int bit;
                for (bit = 0; bit < 8; bit++, current_octet >>= 1) {
                        crc = (crc << 1) ^ ((crc < 0) ^ (current_octet & 1) ? ethernet_polynomial : 0);
                }
        }
        return crc;
}
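
/*
 * A note on the FIXME above (not from the original source): 2.6 kernels
 * already provide an equivalent helper, ether_crc() in <linux/crc32.h>
 * (backed by lib/crc32.c), so this local copy could likely be dropped in
 * favour of the shared implementation, e.g.
 *
 *      #include <linux/crc32.h>
 *      u32 hash = ether_crc(ETH_ALEN, mclist->dmi_addr);
 *
 * assuming the crc32 library is available to the driver's configuration.
 */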
/**
 *	velocity_set_multi	-	filter list change callback
 *	@dev: network device
 *
 *	Called by the network layer when the filter lists need to change
 *	for a velocity adapter. Reload the CAMs with the new address
 *	filter ruleset.
 */

static void velocity_set_multi(struct net_device *dev)
{
        struct velocity_info *vptr = dev->priv;
        struct mac_regs * regs = vptr->mac_regs;
        u8 rx_mode;
        int i;
        struct dev_mc_list *mclist;

        if (dev->flags & IFF_PROMISC) {	/* Set promiscuous. */
                /* Unconditionally log net taps. */
                printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name);
                writel(0xffffffff, &regs->MARCAM[0]);
                writel(0xffffffff, &regs->MARCAM[4]);
                rx_mode = (RCR_AM | RCR_AB | RCR_PROM);
        } else if ((dev->mc_count > vptr->multicast_limit)
                   || (dev->flags & IFF_ALLMULTI)) {
                writel(0xffffffff, &regs->MARCAM[0]);
                writel(0xffffffff, &regs->MARCAM[4]);
                rx_mode = (RCR_AM | RCR_AB);
        } else {
                int offset = MCAM_SIZE - vptr->multicast_limit;
                mac_get_cam_mask(regs, vptr->mCAMmask, VELOCITY_MULTICAST_CAM);

                for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count; i++, mclist = mclist->next) {
                        mac_set_cam(regs, i + offset, mclist->dmi_addr, VELOCITY_MULTICAST_CAM);
                        /* CAM entry (offset + i) maps to bit ((offset + i) & 7)
                           of mask byte ((offset + i) / 8) */
                        vptr->mCAMmask[(offset + i) / 8] |= 1 << ((offset + i) & 7);
                }

                mac_set_cam_mask(regs, vptr->mCAMmask, VELOCITY_MULTICAST_CAM);
                rx_mode = (RCR_AM | RCR_AB);
        }
        if (dev->mtu > 1500)
                rx_mode |= RCR_AL;

        BYTE_REG_BITS_ON(rx_mode, &regs->RCR);
}
/**
 *	velocity_get_stats	-	statistics callback
 *	@dev: network device
 *
 *	Callback from the network layer to allow driver statistics
 *	to be resynchronized with hardware collected state. In the
 *	case of the velocity we need to pull the MIB counters from
 *	the hardware into the counters before letting the network
 *	layer display them.
 */

static struct net_device_stats *velocity_get_stats(struct net_device *dev)
{
        struct velocity_info *vptr = dev->priv;

        /* If the hardware is down, don't touch MII */
        if (!netif_running(dev))
                return &vptr->stats;

        spin_lock_irq(&vptr->lock);
        velocity_update_hw_mibs(vptr);
        spin_unlock_irq(&vptr->lock);

        vptr->stats.rx_packets = vptr->mib_counter[HW_MIB_ifRxAllPkts];
        vptr->stats.rx_errors = vptr->mib_counter[HW_MIB_ifRxErrorPkts];
        vptr->stats.rx_length_errors = vptr->mib_counter[HW_MIB_ifInRangeLengthErrors];

//  unsigned long   rx_dropped;         /* no space in linux buffers    */
        vptr->stats.collisions = vptr->mib_counter[HW_MIB_ifTxEtherCollisions];
        /* detailed rx_errors: */
//  unsigned long   rx_length_errors;
//  unsigned long   rx_over_errors;     /* receiver ring buff overflow  */
        vptr->stats.rx_crc_errors = vptr->mib_counter[HW_MIB_ifRxPktCRCE];
//  unsigned long   rx_frame_errors;    /* recv'd frame alignment error */
//  unsigned long   rx_fifo_errors;     /* recv'r fifo overrun          */
//  unsigned long   rx_missed_errors;   /* receiver missed packet       */

        /* detailed tx_errors */
//  unsigned long   tx_fifo_errors;

        return &vptr->stats;
}
/**
 *	velocity_ioctl		-	ioctl entry point
 *	@dev: network device
 *	@rq: interface request ioctl
 *	@cmd: command code
 *
 *	Called when the user issues an ioctl request to the network
 *	device in question. The velocity interface supports MII.
 */

static int velocity_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
        struct velocity_info *vptr = dev->priv;
        int ret;

        /* If we are asked for information and the device is power
           saving then we need to bring the device back up to talk to it */

        if (!netif_running(dev))
                pci_set_power_state(vptr->pdev, 0);

        switch (cmd) {
        case SIOCGMIIPHY:	/* Get address of MII PHY in use. */
        case SIOCGMIIREG:	/* Read MII PHY register. */
        case SIOCSMIIREG:	/* Write to MII PHY register. */
                ret = velocity_mii_ioctl(dev, rq, cmd);
                break;
        default:
                ret = -EOPNOTSUPP;
        }
        if (!netif_running(dev))
                pci_set_power_state(vptr->pdev, 3);

        return ret;
}
/*
 *	Definition for our device driver. The PCI layer interface
 *	uses this to handle all our card discovery and plugging.
 */

static struct pci_driver velocity_driver = {
        name:		VELOCITY_NAME,
        id_table:	velocity_id_table,
        probe:		velocity_found1,
        remove:		velocity_remove1,
#ifdef CONFIG_PM
        suspend:	velocity_suspend,
        resume:		velocity_resume,
#endif
};
/**
 *	velocity_init_module	-	load time function
 *
 *	Called when the velocity module is loaded. The PCI driver
 *	is registered with the PCI layer, and in turn will call
 *	the probe functions for each velocity adapter installed
 *	in the system.
 */

static int __init velocity_init_module(void)
{
        int ret;

        ret = pci_module_init(&velocity_driver);

#ifdef CONFIG_PM
        register_inetaddr_notifier(&velocity_inetaddr_notifier);
#endif
        return ret;
}
/**
 *	velocity_cleanup	-	module unload
 *
 *	When the velocity hardware is unloaded this function is called.
 *	It will clean up the notifiers and then unregister the PCI
 *	driver interface for this hardware. This in turn cleans up
 *	all discovered interfaces before returning from the function.
 */

static void __exit velocity_cleanup_module(void)
{
#ifdef CONFIG_PM
        unregister_inetaddr_notifier(&velocity_inetaddr_notifier);
#endif
        pci_unregister_driver(&velocity_driver);
}

module_init(velocity_init_module);
module_exit(velocity_cleanup_module);
/*
 *	MII access, media link mode setting functions
 */

/**
 *	mii_init	-	set up MII
 *	@vptr: velocity adapter
 *	@mii_status: link status
 *
 *	Set up the PHY for the current link state.
 */

static void mii_init(struct velocity_info *vptr, u32 mii_status)
{
        u16 BMCR;

        switch (PHYID_GET_PHY_ID(vptr->phy_id)) {
        case PHYID_CICADA_CS8201:
                /*
                 *	Reset to hardware default
                 */
                MII_REG_BITS_OFF((ANAR_ASMDIR | ANAR_PAUSE), MII_REG_ANAR, vptr->mac_regs);
                /*
                 *	Turn on the ECHODIS bit in NWay-forced full mode and
                 *	turn it off in NWay-forced half mode to work around
                 *	the NWay-forced vs. legacy-forced issue.
                 */
                if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
                        MII_REG_BITS_ON(TCSR_ECHODIS, MII_REG_TCSR, vptr->mac_regs);
                else
                        MII_REG_BITS_OFF(TCSR_ECHODIS, MII_REG_TCSR, vptr->mac_regs);
                /*
                 *	Turn on the Link/Activity LED enable bit for CIS8201
                 */
                MII_REG_BITS_ON(PLED_LALBE, MII_REG_PLED, vptr->mac_regs);
                break;
        case PHYID_VT3216_32BIT:
        case PHYID_VT3216_64BIT:
                /*
                 *	Reset to hardware default
                 */
                MII_REG_BITS_ON((ANAR_ASMDIR | ANAR_PAUSE), MII_REG_ANAR, vptr->mac_regs);
                /*
                 *	Turn on the ECHODIS bit in NWay-forced full mode and
                 *	turn it off in NWay-forced half mode to work around
                 *	the NWay-forced vs. legacy-forced issue.
                 */
                if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
                        MII_REG_BITS_ON(TCSR_ECHODIS, MII_REG_TCSR, vptr->mac_regs);
                else
                        MII_REG_BITS_OFF(TCSR_ECHODIS, MII_REG_TCSR, vptr->mac_regs);
                break;

        case PHYID_MARVELL_1000:
        case PHYID_MARVELL_1000S:
                /*
                 *	Assert CRS on Transmit
                 */
                MII_REG_BITS_ON(PSCR_ACRSTX, MII_REG_PSCR, vptr->mac_regs);
                /*
                 *	Reset to hardware default
                 */
                MII_REG_BITS_ON((ANAR_ASMDIR | ANAR_PAUSE), MII_REG_ANAR, vptr->mac_regs);
                break;
        default:
                ;
        }
        velocity_mii_read(vptr->mac_regs, MII_REG_BMCR, &BMCR);
        if (BMCR & BMCR_ISO) {
                BMCR &= ~BMCR_ISO;
                velocity_mii_write(vptr->mac_regs, MII_REG_BMCR, BMCR);
        }
}
/**
 * safe_disable_mii_autopoll - autopoll off
 * @regs: velocity registers
 *
 * Turn off the autopoll and wait for it to disable on the chip.
 */
static void safe_disable_mii_autopoll(struct mac_regs *regs)
{
	u16 ww;

	/* turn off MAUTO */
	writeb(0, &regs->MIICR);
	for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
		udelay(1);
		if (BYTE_REG_BITS_IS_ON(MIISR_MIDLE, &regs->MIISR))
			break;
	}
}
/**
 * enable_mii_autopoll - turn on autopolling
 * @regs: velocity registers
 *
 * Enable the MII link status autopoll feature on the Velocity
 * hardware. Wait for it to enable.
 */
static void enable_mii_autopoll(struct mac_regs *regs)
{
	int ii;

	writeb(0, &regs->MIICR);
	writeb(MIIADR_SWMPL, &regs->MIIADR);

	for (ii = 0; ii < W_MAX_TIMEOUT; ii++) {
		udelay(1);
		if (BYTE_REG_BITS_IS_ON(MIISR_MIDLE, &regs->MIISR))
			break;
	}

	writeb(MIICR_MAUTO, &regs->MIICR);

	for (ii = 0; ii < W_MAX_TIMEOUT; ii++) {
		udelay(1);
		if (!BYTE_REG_BITS_IS_ON(MIISR_MIDLE, &regs->MIISR))
			break;
	}
}
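/*
 * Note on the two wait loops above: MIISR_MIDLE reads as set while the
 * MII management interface is idle. The first loop waits for the
 * interface to go idle before MAUTO is turned on; the second waits for
 * the bit to drop again, confirming that autopolling has actually
 * restarted. Both loops, like the one in safe_disable_mii_autopoll(),
 * simply give up silently after W_MAX_TIMEOUT iterations.
 */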
/**
 * velocity_mii_read - read MII data
 * @regs: velocity registers
 * @index: MII register index
 * @data: buffer for received data
 *
 * Perform a single read of an MII 16bit register. Returns zero
 * on success or -ETIMEDOUT if the PHY did not respond.
 */
static int velocity_mii_read(struct mac_regs *regs, u8 index, u16 *data)
{
	u16 ww;

	/*
	 * Disable MIICR_MAUTO, so that mii addr can be set normally
	 */
	safe_disable_mii_autopoll(regs);

	writeb(index, &regs->MIIADR);

	BYTE_REG_BITS_ON(MIICR_RCMD, &regs->MIICR);

	for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
		if (!(readb(&regs->MIICR) & MIICR_RCMD))
			break;
	}

	*data = readw(&regs->MIIDATA);

	enable_mii_autopoll(regs);
	if (ww == W_MAX_TIMEOUT)
		return -ETIMEDOUT;

	return 0;
}
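/*
 * Usage sketch (driver context): polling the PHY's basic mode status
 * register through the helper above. MII_REG_BMSR and BMSR_LNK are the
 * register index and link bit already used elsewhere in this file.
 *
 *	u16 bmsr;
 *
 *	if (velocity_mii_read(vptr->mac_regs, MII_REG_BMSR, &bmsr) == 0 &&
 *	    (bmsr & BMSR_LNK))
 *		netif_carrier_on(vptr->dev);	// link is up
 */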
/**
 * velocity_mii_write - write MII data
 * @regs: velocity registers
 * @mii_addr: MII register index
 * @data: 16bit data for the MII register
 *
 * Perform a single write to an MII 16bit register. Returns zero
 * on success or -ETIMEDOUT if the PHY did not respond.
 */
static int velocity_mii_write(struct mac_regs *regs, u8 mii_addr, u16 data)
{
	u16 ww;

	/*
	 * Disable MIICR_MAUTO, so that mii addr can be set normally
	 */
	safe_disable_mii_autopoll(regs);

	/* MII reg offset */
	writeb(mii_addr, &regs->MIIADR);
	/* MII 16bit data */
	writew(data, &regs->MIIDATA);

	/* turn on MIICR_WCMD */
	BYTE_REG_BITS_ON(MIICR_WCMD, &regs->MIICR);

	/* W_MAX_TIMEOUT is the timeout period */
	for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
		udelay(5);
		if (!(readb(&regs->MIICR) & MIICR_WCMD))
			break;
	}
	enable_mii_autopoll(regs);

	if (ww == W_MAX_TIMEOUT)
		return -ETIMEDOUT;

	return 0;
}
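/*
 * The MII_REG_BITS_ON/OFF macros used throughout this file are, in
 * effect, read-modify-write cycles built from the two helpers above.
 * An open-coded equivalent of MII_REG_BITS_ON(ANAR_PAUSE, MII_REG_ANAR,
 * regs), as a sketch:
 *
 *	u16 tmp;
 *
 *	velocity_mii_read(regs, MII_REG_ANAR, &tmp);
 *	tmp |= ANAR_PAUSE;
 *	velocity_mii_write(regs, MII_REG_ANAR, tmp);
 */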
/**
 * velocity_get_opt_media_mode - get media selection
 * @vptr: velocity adapter
 *
 * Get the media mode stored in EEPROM or module options and load
 * mii_status accordingly. The requested link state information
 * is also returned.
 */
static u32 velocity_get_opt_media_mode(struct velocity_info *vptr)
{
	u32 status = 0;

	switch (vptr->options.spd_dpx) {
	case SPD_DPX_AUTO:
		status = VELOCITY_AUTONEG_ENABLE;
		break;
	case SPD_DPX_100_FULL:
		status = VELOCITY_SPEED_100 | VELOCITY_DUPLEX_FULL;
		break;
	case SPD_DPX_10_FULL:
		status = VELOCITY_SPEED_10 | VELOCITY_DUPLEX_FULL;
		break;
	case SPD_DPX_100_HALF:
		status = VELOCITY_SPEED_100;
		break;
	case SPD_DPX_10_HALF:
		status = VELOCITY_SPEED_10;
		break;
	}
	vptr->mii_status = status;
	return status;
}
/**
 * mii_set_auto_on - autonegotiate on
 * @vptr: velocity interface
 *
 * Enable autonegotiation on this interface
 */

static void mii_set_auto_on(struct velocity_info *vptr)
{
	if (MII_REG_BITS_IS_ON(BMCR_AUTO, MII_REG_BMCR, vptr->mac_regs))
		MII_REG_BITS_ON(BMCR_REAUTO, MII_REG_BMCR, vptr->mac_regs);
	else
		MII_REG_BITS_ON(BMCR_AUTO, MII_REG_BMCR, vptr->mac_regs);
}
static void mii_set_auto_off(struct velocity_info *vptr)
{
	MII_REG_BITS_OFF(BMCR_AUTO, MII_REG_BMCR, vptr->mac_regs);
}
/**
 * set_mii_flow_control - flow control setup
 * @vptr: velocity interface
 *
 * Set up the flow control on this interface according to
 * the supplied user/eeprom options.
 */
static void set_mii_flow_control(struct velocity_info *vptr)
{
	/* Enable or Disable PAUSE in ANAR */
	switch (vptr->options.flow_cntl) {
	case FLOW_CNTL_TX:
		MII_REG_BITS_OFF(ANAR_PAUSE, MII_REG_ANAR, vptr->mac_regs);
		MII_REG_BITS_ON(ANAR_ASMDIR, MII_REG_ANAR, vptr->mac_regs);
		break;

	case FLOW_CNTL_RX:
		MII_REG_BITS_ON(ANAR_PAUSE, MII_REG_ANAR, vptr->mac_regs);
		MII_REG_BITS_ON(ANAR_ASMDIR, MII_REG_ANAR, vptr->mac_regs);
		break;

	case FLOW_CNTL_TX_RX:
		MII_REG_BITS_ON(ANAR_PAUSE, MII_REG_ANAR, vptr->mac_regs);
		MII_REG_BITS_ON(ANAR_ASMDIR, MII_REG_ANAR, vptr->mac_regs);
		break;

	case FLOW_CNTL_DISABLE:
		MII_REG_BITS_OFF(ANAR_PAUSE, MII_REG_ANAR, vptr->mac_regs);
		MII_REG_BITS_OFF(ANAR_ASMDIR, MII_REG_ANAR, vptr->mac_regs);
		break;
	default:
		break;
	}
}
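/*
 * For reference, the (PAUSE, ASM_DIR) advertisement bits set above
 * follow the usual IEEE 802.3 annex 28B encoding:
 *
 *	PAUSE	ASM_DIR		advertised capability
 *	  0	   0		no pause
 *	  0	   1		asymmetric pause, transmit only
 *	  1	   0		symmetric pause
 *	  1	   1		symmetric, or receive-only asymmetric
 */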
/**
 * velocity_set_media_mode - set media mode
 * @vptr: velocity adapter
 * @mii_status: old MII link state
 *
 * Check the media link state and configure the flow control
 * PHY and also velocity hardware setup accordingly. In particular
 * we need to set up CD polling and frame bursting.
 */
static int velocity_set_media_mode(struct velocity_info *vptr, u32 mii_status)
{
	u32 curr_status;
	struct mac_regs *regs = vptr->mac_regs;

	vptr->mii_status = mii_check_media_mode(vptr->mac_regs);
	curr_status = vptr->mii_status & (~VELOCITY_LINK_FAIL);

	/* Set mii link status */
	set_mii_flow_control(vptr);

	/*
	 * Check if the new status is consistent with the current status:
	 *
	 *	if (((mii_status & curr_status) & VELOCITY_AUTONEG_ENABLE) ||
	 *	    (mii_status == curr_status)) {
	 *		vptr->mii_status = mii_check_media_mode(vptr->mac_regs);
	 *		vptr->mii_status = check_connection_type(vptr->mac_regs);
	 *		VELOCITY_PRT(MSG_LEVEL_INFO, "Velocity link no change\n");
	 *		return 0;
	 *	}
	 */

	if (PHYID_GET_PHY_ID(vptr->phy_id) == PHYID_CICADA_CS8201) {
		MII_REG_BITS_ON(AUXCR_MDPPS, MII_REG_AUXCR, vptr->mac_regs);
	}

	/*
	 * If connection type is AUTO
	 */
	if (mii_status & VELOCITY_AUTONEG_ENABLE) {
		VELOCITY_PRT(MSG_LEVEL_INFO, "Velocity is AUTO mode\n");
		/* clear force MAC mode bit */
		BYTE_REG_BITS_OFF(CHIPGCR_FCMODE, &regs->CHIPGCR);
		/* set duplex mode of MAC according to duplex mode of MII */
		MII_REG_BITS_ON(ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10, MII_REG_ANAR, vptr->mac_regs);
		MII_REG_BITS_ON(G1000CR_1000FD | G1000CR_1000, MII_REG_G1000CR, vptr->mac_regs);
		MII_REG_BITS_ON(BMCR_SPEED1G, MII_REG_BMCR, vptr->mac_regs);

		/* enable AUTO-NEGO mode */
		mii_set_auto_on(vptr);
	} else {
		u16 ANAR;
		u8 CHIPGCR;

		/*
		 * 1. if it's 3119, disable frame bursting in halfduplex mode
		 *    and enable it in fullduplex mode
		 * 2. set correct MII/GMII and half/full duplex mode in CHIPGCR
		 * 3. only enable CD heart beat counter in 10HD mode
		 */

		/* set force MAC mode bit */
		BYTE_REG_BITS_ON(CHIPGCR_FCMODE, &regs->CHIPGCR);

		CHIPGCR = readb(&regs->CHIPGCR);
		CHIPGCR &= ~CHIPGCR_FCGMII;

		if (mii_status & VELOCITY_DUPLEX_FULL) {
			CHIPGCR |= CHIPGCR_FCFDX;
			writeb(CHIPGCR, &regs->CHIPGCR);
			VELOCITY_PRT(MSG_LEVEL_INFO, "set Velocity to forced full mode\n");
			if (vptr->rev_id < REV_ID_VT3216_A0)
				BYTE_REG_BITS_OFF(TCR_TB2BDIS, &regs->TCR);
		} else {
			CHIPGCR &= ~CHIPGCR_FCFDX;
			VELOCITY_PRT(MSG_LEVEL_INFO, "set Velocity to forced half mode\n");
			writeb(CHIPGCR, &regs->CHIPGCR);
			if (vptr->rev_id < REV_ID_VT3216_A0)
				BYTE_REG_BITS_ON(TCR_TB2BDIS, &regs->TCR);
		}

		MII_REG_BITS_OFF(G1000CR_1000FD | G1000CR_1000, MII_REG_G1000CR, vptr->mac_regs);

		if (!(mii_status & VELOCITY_DUPLEX_FULL) && (mii_status & VELOCITY_SPEED_10))
			BYTE_REG_BITS_OFF(TESTCFG_HBDIS, &regs->TESTCFG);
		else
			BYTE_REG_BITS_ON(TESTCFG_HBDIS, &regs->TESTCFG);

		/* MII_REG_BITS_OFF(BMCR_SPEED1G, MII_REG_BMCR, vptr->mac_regs); */
		velocity_mii_read(vptr->mac_regs, MII_REG_ANAR, &ANAR);
		ANAR &= (~(ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10));
		if (mii_status & VELOCITY_SPEED_100) {
			if (mii_status & VELOCITY_DUPLEX_FULL)
				ANAR |= ANAR_TXFD;
			else
				ANAR |= ANAR_TX;
		} else {
			if (mii_status & VELOCITY_DUPLEX_FULL)
				ANAR |= ANAR_10FD;
			else
				ANAR |= ANAR_10;
		}
		velocity_mii_write(vptr->mac_regs, MII_REG_ANAR, ANAR);
		/* enable AUTO-NEGO mode */
		mii_set_auto_on(vptr);
		/* MII_REG_BITS_ON(BMCR_AUTO, MII_REG_BMCR, vptr->mac_regs); */
	}
	/* vptr->mii_status = mii_check_media_mode(vptr->mac_regs); */
	/* vptr->mii_status = check_connection_type(vptr->mac_regs); */
	return VELOCITY_LINK_CHANGE;
}
/**
 * mii_check_media_mode - check media state
 * @regs: velocity registers
 *
 * Check the current MII status and determine the link status
 * accordingly.
 */
static u32 mii_check_media_mode(struct mac_regs *regs)
{
	u32 status = 0;
	u16 ANAR;

	if (!MII_REG_BITS_IS_ON(BMSR_LNK, MII_REG_BMSR, regs))
		status |= VELOCITY_LINK_FAIL;

	if (MII_REG_BITS_IS_ON(G1000CR_1000FD, MII_REG_G1000CR, regs))
		status |= VELOCITY_SPEED_1000 | VELOCITY_DUPLEX_FULL;
	else if (MII_REG_BITS_IS_ON(G1000CR_1000, MII_REG_G1000CR, regs))
		status |= VELOCITY_SPEED_1000;
	else {
		velocity_mii_read(regs, MII_REG_ANAR, &ANAR);
		if (ANAR & ANAR_TXFD)
			status |= (VELOCITY_SPEED_100 | VELOCITY_DUPLEX_FULL);
		else if (ANAR & ANAR_TX)
			status |= VELOCITY_SPEED_100;
		else if (ANAR & ANAR_10FD)
			status |= (VELOCITY_SPEED_10 | VELOCITY_DUPLEX_FULL);
		else
			status |= VELOCITY_SPEED_10;
	}

	if (MII_REG_BITS_IS_ON(BMCR_AUTO, MII_REG_BMCR, regs)) {
		velocity_mii_read(regs, MII_REG_ANAR, &ANAR);
		if ((ANAR & (ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10))
		    == (ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10)) {
			if (MII_REG_BITS_IS_ON(G1000CR_1000 | G1000CR_1000FD, MII_REG_G1000CR, regs))
				status |= VELOCITY_AUTONEG_ENABLE;
		}
	}

	return status;
}
static u32 check_connection_type(struct mac_regs *regs)
{
	u32 status = 0;
	u8 PHYSR0;
	u16 ANAR;

	PHYSR0 = readb(&regs->PHYSR0);

	/*
	 *	if (!(PHYSR0 & PHYSR0_LINKGD))
	 *		status |= VELOCITY_LINK_FAIL;
	 */

	if (PHYSR0 & PHYSR0_FDPX)
		status |= VELOCITY_DUPLEX_FULL;

	if (PHYSR0 & PHYSR0_SPDG)
		status |= VELOCITY_SPEED_1000;
	if (PHYSR0 & PHYSR0_SPD10)
		status |= VELOCITY_SPEED_10;
	else
		status |= VELOCITY_SPEED_100;

	if (MII_REG_BITS_IS_ON(BMCR_AUTO, MII_REG_BMCR, regs)) {
		velocity_mii_read(regs, MII_REG_ANAR, &ANAR);
		if ((ANAR & (ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10))
		    == (ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10)) {
			if (MII_REG_BITS_IS_ON(G1000CR_1000 | G1000CR_1000FD, MII_REG_G1000CR, regs))
				status |= VELOCITY_AUTONEG_ENABLE;
		}
	}

	return status;
}
/**
 * enable_flow_control_ability - flow control
 * @vptr: velocity to configure
 *
 * Set up flow control according to the flow control options
 * determined by the eeprom/configuration.
 */
static void enable_flow_control_ability(struct velocity_info *vptr)
{
	struct mac_regs *regs = vptr->mac_regs;

	switch (vptr->options.flow_cntl) {

	case FLOW_CNTL_DEFAULT:
		if (BYTE_REG_BITS_IS_ON(PHYSR0_RXFLC, &regs->PHYSR0))
			writel(CR0_FDXRFCEN, &regs->CR0Set);
		else
			writel(CR0_FDXRFCEN, &regs->CR0Clr);

		if (BYTE_REG_BITS_IS_ON(PHYSR0_TXFLC, &regs->PHYSR0))
			writel(CR0_FDXTFCEN, &regs->CR0Set);
		else
			writel(CR0_FDXTFCEN, &regs->CR0Clr);
		break;

	case FLOW_CNTL_TX:
		writel(CR0_FDXTFCEN, &regs->CR0Set);
		writel(CR0_FDXRFCEN, &regs->CR0Clr);
		break;

	case FLOW_CNTL_RX:
		writel(CR0_FDXRFCEN, &regs->CR0Set);
		writel(CR0_FDXTFCEN, &regs->CR0Clr);
		break;

	case FLOW_CNTL_TX_RX:
		writel(CR0_FDXTFCEN, &regs->CR0Set);
		writel(CR0_FDXRFCEN, &regs->CR0Set);
		break;

	case FLOW_CNTL_DISABLE:
		writel(CR0_FDXRFCEN, &regs->CR0Clr);
		writel(CR0_FDXTFCEN, &regs->CR0Clr);
		break;

	default:
		break;
	}
}
/**
 * velocity_ethtool_up - pre hook for ethtool
 * @dev: network device
 *
 * Called before an ethtool operation. We need to make sure the
 * chip is out of D3 state before we poke at it.
 */
static int velocity_ethtool_up(struct net_device *dev)
{
	struct velocity_info *vptr = dev->priv;

	if (!netif_running(dev))
		pci_set_power_state(vptr->pdev, 0);
	return 0;
}
/**
 * velocity_ethtool_down - post hook for ethtool
 * @dev: network device
 *
 * Called after an ethtool operation. Put the chip back into D3
 * state if it isn't running.
 */
static void velocity_ethtool_down(struct net_device *dev)
{
	struct velocity_info *vptr = dev->priv;

	if (!netif_running(dev))
		pci_set_power_state(vptr->pdev, 3);
}
static int velocity_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct velocity_info *vptr = dev->priv;
	struct mac_regs *regs = vptr->mac_regs;
	u32 status;

	status = check_connection_type(vptr->mac_regs);

	cmd->supported = SUPPORTED_TP | SUPPORTED_Autoneg |
			 SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
			 SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
			 SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full;
	if (status & VELOCITY_SPEED_100)
		cmd->speed = SPEED_100;
	else
		cmd->speed = SPEED_10;
	cmd->autoneg = (status & VELOCITY_AUTONEG_ENABLE) ? AUTONEG_ENABLE : AUTONEG_DISABLE;
	cmd->port = PORT_TP;
	cmd->transceiver = XCVR_INTERNAL;
	cmd->phy_address = readb(&regs->MIIADR) & 0x1F;

	if (status & VELOCITY_DUPLEX_FULL)
		cmd->duplex = DUPLEX_FULL;
	else
		cmd->duplex = DUPLEX_HALF;

	return 0;
}
static int velocity_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct velocity_info *vptr = dev->priv;
	u32 curr_status;
	u32 new_status = 0;
	int ret = 0;

	curr_status = check_connection_type(vptr->mac_regs);
	curr_status &= (~VELOCITY_LINK_FAIL);

	new_status |= ((cmd->autoneg) ? VELOCITY_AUTONEG_ENABLE : 0);
	new_status |= ((cmd->speed == SPEED_100) ? VELOCITY_SPEED_100 : 0);
	new_status |= ((cmd->speed == SPEED_10) ? VELOCITY_SPEED_10 : 0);
	new_status |= ((cmd->duplex == DUPLEX_FULL) ? VELOCITY_DUPLEX_FULL : 0);

	if ((new_status & VELOCITY_AUTONEG_ENABLE) && (new_status != (curr_status | VELOCITY_AUTONEG_ENABLE)))
		ret = -EINVAL;
	else
		velocity_set_media_mode(vptr, new_status);

	return ret;
}
static u32 velocity_get_link(struct net_device *dev)
{
	struct velocity_info *vptr = dev->priv;
	struct mac_regs *regs = vptr->mac_regs;

	/* PHYSR0_LINKGD is set while the link is good */
	return BYTE_REG_BITS_IS_ON(PHYSR0_LINKGD, &regs->PHYSR0) ? 1 : 0;
}
static void velocity_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct velocity_info *vptr = dev->priv;

	strcpy(info->driver, VELOCITY_NAME);
	strcpy(info->version, VELOCITY_VERSION);
	strcpy(info->bus_info, vptr->pdev->slot_name);
}
static void velocity_ethtool_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct velocity_info *vptr = dev->priv;

	wol->supported = WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_ARP;
	wol->wolopts |= WAKE_MAGIC;
	/*
	 *	if (vptr->wol_opts & VELOCITY_WOL_PHY)
	 *		wol->wolopts |= WAKE_PHY;
	 */
	if (vptr->wol_opts & VELOCITY_WOL_UCAST)
		wol->wolopts |= WAKE_UCAST;
	if (vptr->wol_opts & VELOCITY_WOL_ARP)
		wol->wolopts |= WAKE_ARP;
	memcpy(&wol->sopass, vptr->wol_passwd, 6);
}
static int velocity_ethtool_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct velocity_info *vptr = dev->priv;

	if (!(wol->wolopts & (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_ARP)))
		return -EFAULT;
	vptr->wol_opts = VELOCITY_WOL_MAGIC;

	/*
	 *	if (wol->wolopts & WAKE_PHY) {
	 *		vptr->wol_opts |= VELOCITY_WOL_PHY;
	 *		vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED;
	 *	}
	 */

	if (wol->wolopts & WAKE_MAGIC) {
		vptr->wol_opts |= VELOCITY_WOL_MAGIC;
		vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED;
	}
	if (wol->wolopts & WAKE_UCAST) {
		vptr->wol_opts |= VELOCITY_WOL_UCAST;
		vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED;
	}
	if (wol->wolopts & WAKE_ARP) {
		vptr->wol_opts |= VELOCITY_WOL_ARP;
		vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED;
	}
	memcpy(vptr->wol_passwd, wol->sopass, 6);
	return 0;
}
static u32 velocity_get_msglevel(struct net_device *dev)
{
	return msglevel;
}

static void velocity_set_msglevel(struct net_device *dev, u32 value)
{
	msglevel = value;
}
static struct ethtool_ops velocity_ethtool_ops = {
	.get_settings	= velocity_get_settings,
	.set_settings	= velocity_set_settings,
	.get_drvinfo	= velocity_get_drvinfo,
	.get_wol	= velocity_ethtool_get_wol,
	.set_wol	= velocity_ethtool_set_wol,
	.get_msglevel	= velocity_get_msglevel,
	.set_msglevel	= velocity_set_msglevel,
	.get_link	= velocity_get_link,
	.begin		= velocity_ethtool_up,
	.complete	= velocity_ethtool_down
};
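/*
 * With this table wired up to the net device (done at probe time,
 * outside this excerpt), the standard ethtool utility exercises the
 * handlers above, with every operation bracketed by the begin/complete
 * hooks so the chip is brought out of D3 first. An illustrative shell
 * session, not driver code:
 *
 *	ethtool eth0			# get_settings / get_link
 *	ethtool -s eth0 speed 100 duplex full autoneg off
 *	ethtool -s eth0 wol g		# set_wol: wake on magic packet
 */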
/**
 * velocity_mii_ioctl - MII ioctl handler
 * @dev: network device
 * @ifr: the ifreq block for the ioctl
 * @cmd: the command code
 *
 * Process MII requests made via ioctl from the network layer. These
 * are used by tools like kudzu to interrogate the link state of the
 * hardware.
 */
static int velocity_mii_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct velocity_info *vptr = dev->priv;
	struct mac_regs *regs = vptr->mac_regs;
	unsigned long flags;
	struct mii_ioctl_data *miidata = (struct mii_ioctl_data *) &(ifr->ifr_data);
	int err;

	switch (cmd) {
	case SIOCGMIIPHY:
		miidata->phy_id = readb(&regs->MIIADR) & 0x1f;
		break;
	case SIOCGMIIREG:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (velocity_mii_read(vptr->mac_regs, miidata->reg_num & 0x1f, &(miidata->val_out)) < 0)
			return -ETIMEDOUT;
		break;
	case SIOCSMIIREG:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		spin_lock_irqsave(&vptr->lock, flags);
		err = velocity_mii_write(vptr->mac_regs, miidata->reg_num & 0x1f, miidata->val_in);
		spin_unlock_irqrestore(&vptr->lock, flags);
		check_connection_type(vptr->mac_regs);
		if (err)
			return err;
		break;
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}
/**
 * velocity_save_context - save registers
 * @vptr: velocity adapter
 * @context: buffer for stored context
 *
 * Retrieve the current configuration from the velocity hardware
 * and stash it in the context structure, for use by the context
 * restore functions. This allows us to save things we need across
 * a suspend/resume cycle.
 */
static void velocity_save_context(struct velocity_info *vptr, struct velocity_context *context)
{
	struct mac_regs *regs = vptr->mac_regs;
	u16 i;
	u8 *ptr = (u8 *) regs;

	for (i = MAC_REG_PAR; i < MAC_REG_CR0_CLR; i += 4)
		*((u32 *) (context->mac_reg + i)) = readl(ptr + i);

	for (i = MAC_REG_MAR; i < MAC_REG_TDCSR_CLR; i += 4)
		*((u32 *) (context->mac_reg + i)) = readl(ptr + i);

	for (i = MAC_REG_RDBASE_LO; i < MAC_REG_FIFO_TEST0; i += 4)
		*((u32 *) (context->mac_reg + i)) = readl(ptr + i);
}
/**
 * velocity_restore_context - restore registers
 * @vptr: velocity adapter
 * @context: buffer for stored context
 *
 * Reload the register configuration from the velocity context
 * created by velocity_save_context.
 */
static void velocity_restore_context(struct velocity_info *vptr, struct velocity_context *context)
{
	struct mac_regs *regs = vptr->mac_regs;
	int i;
	u8 *ptr = (u8 *) regs;

	for (i = MAC_REG_PAR; i < MAC_REG_CR0_SET; i += 4) {
		writel(*((u32 *) (context->mac_reg + i)), ptr + i);
	}

	/* Just skip cr0 */
	for (i = MAC_REG_CR1_SET; i < MAC_REG_CR0_CLR; i++) {
		/* Clear */
		writeb(~(*((u8 *) (context->mac_reg + i))), ptr + i + 4);
		/* Set */
		writeb(*((u8 *) (context->mac_reg + i)), ptr + i);
	}

	for (i = MAC_REG_MAR; i < MAC_REG_IMR; i += 4) {
		writel(*((u32 *) (context->mac_reg + i)), ptr + i);
	}

	for (i = MAC_REG_RDBASE_LO; i < MAC_REG_FIFO_TEST0; i += 4) {
		writel(*((u32 *) (context->mac_reg + i)), ptr + i);
	}

	for (i = MAC_REG_TDCSR_SET; i <= MAC_REG_RDCSR_SET; i++) {
		writeb(*((u8 *) (context->mac_reg + i)), ptr + i);
	}
}
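/*
 * A note on the CR1 loop above: the hardware exposes paired SET/CLR
 * registers four bytes apart, so a byte is restored by clearing the
 * complement through the CLR register and then setting the saved bits
 * through the SET register. The idiom for one byte, as a sketch (the
 * offsets follow the loop above):
 *
 *	u8 saved = *((u8 *) (context->mac_reg + i));
 *
 *	writeb(~saved, ptr + i + 4);	// CLR register: clear bits saved as 0
 *	writeb(saved, ptr + i);		// SET register: set bits saved as 1
 */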
static int velocity_suspend(struct pci_dev *pdev, u32 state)
{
	struct velocity_info *vptr = pci_get_drvdata(pdev);
	unsigned long flags;

	if (!netif_running(vptr->dev))
		return 0;

	netif_device_detach(vptr->dev);

	spin_lock_irqsave(&vptr->lock, flags);
	pci_save_state(pdev, vptr->pci_state);
#ifdef ETHTOOL_GWOL
	if (vptr->flags & VELOCITY_FLAGS_WOL_ENABLED) {
		velocity_get_ip(vptr);
		velocity_save_context(vptr, &vptr->context);
		velocity_shutdown(vptr);
		velocity_set_wol(vptr);
		pci_enable_wake(pdev, 3, 1);
		pci_set_power_state(pdev, 3);
	} else {
		velocity_save_context(vptr, &vptr->context);
		velocity_shutdown(vptr);
		pci_disable_device(pdev);
		pci_set_power_state(pdev, state);
	}
#else
	pci_set_power_state(pdev, state);
#endif
	spin_unlock_irqrestore(&vptr->lock, flags);
	return 0;
}
static int velocity_resume(struct pci_dev *pdev)
{
	struct velocity_info *vptr = pci_get_drvdata(pdev);
	unsigned long flags;
	int i;

	if (!netif_running(vptr->dev))
		return 0;

	pci_set_power_state(pdev, 0);
	pci_enable_wake(pdev, 0, 0);
	pci_restore_state(pdev, vptr->pci_state);

	mac_wol_reset(vptr->mac_regs);

	spin_lock_irqsave(&vptr->lock, flags);
	velocity_restore_context(vptr, &vptr->context);
	velocity_init_registers(vptr, VELOCITY_INIT_WOL);
	mac_disable_int(vptr->mac_regs);

	velocity_tx_srv(vptr, 0);

	for (i = 0; i < vptr->num_txq; i++) {
		if (vptr->td_used[i]) {
			mac_tx_queue_wake(vptr->mac_regs, i);
		}
	}

	mac_enable_int(vptr->mac_regs);
	spin_unlock_irqrestore(&vptr->lock, flags);
	netif_device_attach(vptr->dev);

	return 0;
}
static int velocity_netdev_event(struct notifier_block *nb, unsigned long notification, void *ptr)
{
	struct in_ifaddr *ifa = (struct in_ifaddr *) ptr;
	struct net_device *dev;
	struct velocity_info *vptr;

	if (ifa) {
		dev = ifa->ifa_dev->dev;
		vptr = dev->priv;
		velocity_get_ip(vptr);
	}
	return NOTIFY_DONE;
}
/*
 * Purpose: Functions to set WOL.
 */
static const unsigned short crc16_tab[256] = {
	0x0000, 0x1189, 0x2312, 0x329b, 0x4624, 0x57ad, 0x6536, 0x74bf,
	0x8c48, 0x9dc1, 0xaf5a, 0xbed3, 0xca6c, 0xdbe5, 0xe97e, 0xf8f7,
	0x1081, 0x0108, 0x3393, 0x221a, 0x56a5, 0x472c, 0x75b7, 0x643e,
	0x9cc9, 0x8d40, 0xbfdb, 0xae52, 0xdaed, 0xcb64, 0xf9ff, 0xe876,
	0x2102, 0x308b, 0x0210, 0x1399, 0x6726, 0x76af, 0x4434, 0x55bd,
	0xad4a, 0xbcc3, 0x8e58, 0x9fd1, 0xeb6e, 0xfae7, 0xc87c, 0xd9f5,
	0x3183, 0x200a, 0x1291, 0x0318, 0x77a7, 0x662e, 0x54b5, 0x453c,
	0xbdcb, 0xac42, 0x9ed9, 0x8f50, 0xfbef, 0xea66, 0xd8fd, 0xc974,
	0x4204, 0x538d, 0x6116, 0x709f, 0x0420, 0x15a9, 0x2732, 0x36bb,
	0xce4c, 0xdfc5, 0xed5e, 0xfcd7, 0x8868, 0x99e1, 0xab7a, 0xbaf3,
	0x5285, 0x430c, 0x7197, 0x601e, 0x14a1, 0x0528, 0x37b3, 0x263a,
	0xdecd, 0xcf44, 0xfddf, 0xec56, 0x98e9, 0x8960, 0xbbfb, 0xaa72,
	0x6306, 0x728f, 0x4014, 0x519d, 0x2522, 0x34ab, 0x0630, 0x17b9,
	0xef4e, 0xfec7, 0xcc5c, 0xddd5, 0xa96a, 0xb8e3, 0x8a78, 0x9bf1,
	0x7387, 0x620e, 0x5095, 0x411c, 0x35a3, 0x242a, 0x16b1, 0x0738,
	0xffcf, 0xee46, 0xdcdd, 0xcd54, 0xb9eb, 0xa862, 0x9af9, 0x8b70,
	0x8408, 0x9581, 0xa71a, 0xb693, 0xc22c, 0xd3a5, 0xe13e, 0xf0b7,
	0x0840, 0x19c9, 0x2b52, 0x3adb, 0x4e64, 0x5fed, 0x6d76, 0x7cff,
	0x9489, 0x8500, 0xb79b, 0xa612, 0xd2ad, 0xc324, 0xf1bf, 0xe036,
	0x18c1, 0x0948, 0x3bd3, 0x2a5a, 0x5ee5, 0x4f6c, 0x7df7, 0x6c7e,
	0xa50a, 0xb483, 0x8618, 0x9791, 0xe32e, 0xf2a7, 0xc03c, 0xd1b5,
	0x2942, 0x38cb, 0x0a50, 0x1bd9, 0x6f66, 0x7eef, 0x4c74, 0x5dfd,
	0xb58b, 0xa402, 0x9699, 0x8710, 0xf3af, 0xe226, 0xd0bd, 0xc134,
	0x39c3, 0x284a, 0x1ad1, 0x0b58, 0x7fe7, 0x6e6e, 0x5cf5, 0x4d7c,
	0xc60c, 0xd785, 0xe51e, 0xf497, 0x8028, 0x91a1, 0xa33a, 0xb2b3,
	0x4a44, 0x5bcd, 0x6956, 0x78df, 0x0c60, 0x1de9, 0x2f72, 0x3efb,
	0xd68d, 0xc704, 0xf59f, 0xe416, 0x90a9, 0x8120, 0xb3bb, 0xa232,
	0x5ac5, 0x4b4c, 0x79d7, 0x685e, 0x1ce1, 0x0d68, 0x3ff3, 0x2e7a,
	0xe70e, 0xf687, 0xc41c, 0xd595, 0xa12a, 0xb0a3, 0x8238, 0x93b1,
	0x6b46, 0x7acf, 0x4854, 0x59dd, 0x2d62, 0x3ceb, 0x0e70, 0x1ff9,
	0xf78f, 0xe606, 0xd49d, 0xc514, 0xb1ab, 0xa022, 0x92b9, 0x8330,
	0x7bc7, 0x6a4e, 0x58d5, 0x495c, 0x3de3, 0x2c6a, 0x1ef1, 0x0f78
};
static u32 mask_pattern[2][4] = {
	{0x00203000, 0x000003C0, 0x00000000, 0x00000000},	/* ARP */
	{0xfffff000, 0xffffffff, 0xffffffff, 0x0000ffff}	/* Magic Packet */
};
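/*
 * Each mask is 4 x 32 bits, i.e. one bit per byte of a 128 byte pattern
 * window: a set bit means the corresponding byte takes part in the WOL
 * pattern CRC, a clear bit marks the byte as don't-care (see
 * wol_calc_crc() below, which skips masked-out bytes).
 */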
/**
 * ether_crc16 - compute ethernet CRC
 * @len: buffer length
 * @cp: buffer
 * @crc16: initial CRC
 *
 * Compute a CRC value for a block of data.
 * FIXME: can we use generic functions ?
 */
static u16 ether_crc16(int len, u8 *cp, u16 crc16)
{
	while (len--)
		crc16 = (crc16 >> 8) ^ crc16_tab[(crc16 ^ *cp++) & 0xff];
	return crc16;
}
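/*
 * For reference, a table-free equivalent (a sketch; the table above
 * encodes the reflected CRC-16/CCITT polynomial 0x8408, byte at a time):
 *
 *	static u16 ether_crc16_bitwise(int len, u8 *cp, u16 crc)
 *	{
 *		int i;
 *
 *		while (len--) {
 *			crc ^= *cp++;
 *			for (i = 0; i < 8; i++)
 *				crc = (crc >> 1) ^ ((crc & 1) ? 0x8408 : 0);
 *		}
 *		return crc;
 *	}
 */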
/**
 * bit_reverse - 16bit reverse
 * @data: 16bit data to reverse
 *
 * Reverse the order of a 16bit value and return the reversed bits.
 */
static u16 bit_reverse(u16 data)
{
	u32 new = 0x00000000;
	int ii;

	for (ii = 0; ii < 16; ii++) {
		new |= ((u32) (data & 1) << (31 - ii));
		data >>= 1;
	}

	return (u16) (new >> 16);
}
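/*
 * Equivalent branch-free formulation (a sketch), swapping ever larger
 * groups instead of moving one bit per iteration:
 *
 *	data = ((data & 0x5555) << 1) | ((data >> 1) & 0x5555);
 *	data = ((data & 0x3333) << 2) | ((data >> 2) & 0x3333);
 *	data = ((data & 0x0f0f) << 4) | ((data >> 4) & 0x0f0f);
 *	return (u16) ((data << 8) | (data >> 8));
 */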
/**
 * wol_calc_crc - WOL CRC
 * @size: pattern size in 8 byte blocks
 * @pattern: data pattern
 * @mask_pattern: mask
 *
 * Compute the wake on lan crc hashes for the packet header
 * we are interested in.
 */
u16 wol_calc_crc(int size, u8 *pattern, u8 *mask_pattern)
{
	u16 crc = 0xFFFF;
	u8 mask;
	int i, j;

	for (i = 0; i < size; i++) {
		mask = mask_pattern[i];

		/* Skip this loop if the mask equals to zero */
		if (mask == 0x00)
			continue;

		for (j = 0; j < 8; j++, mask >>= 1) {
			if ((mask & 0x01) == 0)
				continue;
			crc = ether_crc16(1, &(pattern[i * 8 + j]), crc);
		}
	}
	/* Finally, invert the result once to get the correct data */
	crc = ~crc;
	return bit_reverse(crc);
}
/**
 * velocity_set_wol - set up for wake on lan
 * @vptr: velocity to set WOL status on
 *
 * Set a card up for wake on lan either by unicast or by
 * ARP packet.
 *
 * FIXME: check static buffer is safe here
 */
static int velocity_set_wol(struct velocity_info *vptr)
{
	struct mac_regs *regs = vptr->mac_regs;
	static u8 buf[256];
	int i;

	writew(0xFFFF, &regs->WOLCRClr);
	writeb(WOLCFG_SAB | WOLCFG_SAM, &regs->WOLCFGSet);
	writew(WOLCR_MAGIC_EN, &regs->WOLCRSet);

	/*
	 *	if (vptr->wol_opts & VELOCITY_WOL_PHY)
	 *		writew((WOLCR_LINKON_EN | WOLCR_LINKOFF_EN), &regs->WOLCRSet);
	 */

	if (vptr->wol_opts & VELOCITY_WOL_UCAST) {
		writew(WOLCR_UNICAST_EN, &regs->WOLCRSet);
	}

	if (vptr->wol_opts & VELOCITY_WOL_ARP) {
		struct arp_packet *arp = (struct arp_packet *) buf;
		u16 crc;

		memset(buf, 0, sizeof(struct arp_packet) + 7);

		for (i = 0; i < 4; i++)
			writel(mask_pattern[0][i], &regs->ByteMask[0][i]);

		arp->type = htons(ETH_P_ARP);
		arp->ar_op = htons(1);

		memcpy(arp->ar_tip, vptr->ip_addr, 4);

		crc = wol_calc_crc((sizeof(struct arp_packet) + 7) / 8, buf, (u8 *) &mask_pattern[0][0]);

		writew(crc, &regs->PatternCRC[0]);
		writew(WOLCR_ARP_EN, &regs->WOLCRSet);
	}

	BYTE_REG_BITS_ON(PWCFG_WOLTYPE, &regs->PWCFGSet);
	BYTE_REG_BITS_ON(PWCFG_LEGACY_WOLEN, &regs->PWCFGSet);

	writew(0x0FFF, &regs->WOLSRClr);

	if (vptr->mii_status & VELOCITY_AUTONEG_ENABLE) {
		if (PHYID_GET_PHY_ID(vptr->phy_id) == PHYID_CICADA_CS8201)
			MII_REG_BITS_ON(AUXCR_MDPPS, MII_REG_AUXCR, vptr->mac_regs);

		MII_REG_BITS_OFF(G1000CR_1000FD | G1000CR_1000, MII_REG_G1000CR, vptr->mac_regs);
	}

	if (vptr->mii_status & VELOCITY_SPEED_1000)
		MII_REG_BITS_ON(BMCR_REAUTO, MII_REG_BMCR, vptr->mac_regs);

	BYTE_REG_BITS_ON(CHIPGCR_FCMODE, &regs->CHIPGCR);

	{
		u8 GCR;

		GCR = readb(&regs->CHIPGCR);
		GCR = (GCR & ~CHIPGCR_FCGMII) | CHIPGCR_FCFDX;
		writeb(GCR, &regs->CHIPGCR);
	}

	BYTE_REG_BITS_OFF(ISR_PWEI, &regs->ISR);
	/* Turn on SWPTAG just before entering power mode */
	BYTE_REG_BITS_ON(STICKHW_SWPTAG, &regs->STICKHW);
	/* Go to bed ..... */
	BYTE_REG_BITS_ON((STICKHW_DS1 | STICKHW_DS0), &regs->STICKHW);