3 * linux/drivers/s390/net/qeth_main.c ($Revision: 1.145 $)
5 * Linux on zSeries OSA Express and HiperSockets support
7 * Copyright 2000,2003 IBM Corporation
9 * Author(s): Original Code written by
10 * Utz Bacher (utz.bacher@de.ibm.com)
12 * Frank Pavlic (pavlic@de.ibm.com) and
13 * Thomas Spatzier <tspat@de.ibm.com>
15 * $Revision: 1.145 $ $Date: 2004/10/08 15:08:40 $
17 * This program is free software; you can redistribute it and/or modify
18 * it under the terms of the GNU General Public License as published by
19 * the Free Software Foundation; either version 2, or (at your option)
22 * This program is distributed in the hope that it will be useful,
23 * but WITHOUT ANY WARRANTY; without even the implied warranty of
24 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
25 * GNU General Public License for more details.
27 * You should have received a copy of the GNU General Public License
28 * along with this program; if not, write to the Free Software
29 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
34 * eye catcher; just for debugging purposes
42 #include <linux/config.h>
43 #include <linux/module.h>
44 #include <linux/moduleparam.h>
46 #include <linux/string.h>
47 #include <linux/errno.h>
51 #include <asm/ebcdic.h>
52 #include <linux/ctype.h>
53 #include <asm/semaphore.h>
54 #include <asm/timex.h>
56 #include <linux/inetdevice.h>
57 #include <linux/netdevice.h>
58 #include <linux/sched.h>
59 #include <linux/workqueue.h>
60 #include <linux/kernel.h>
61 #include <linux/slab.h>
62 #include <linux/interrupt.h>
63 #include <linux/tcp.h>
64 #include <linux/icmp.h>
65 #include <linux/skbuff.h>
66 #include <net/route.h>
69 #include <linux/igmp.h>
71 #include <asm/uaccess.h>
72 #include <linux/init.h>
73 #include <linux/reboot.h>
75 #include <linux/mii.h>
76 #include <linux/rcupdate.h>
/* Driver version string and banner printed at module load. */
82 #define VERSION_QETH_C "$Revision: 1.145 $"
83 static const char *version = "qeth S/390 OSA-Express driver";
/* s390 debug feature (s390dbf) areas used by the QETH_DBF_* macros. */
86 * Debug Facility Stuff
88 static debug_info_t *qeth_dbf_setup = NULL;
89 static debug_info_t *qeth_dbf_data = NULL;
90 static debug_info_t *qeth_dbf_misc = NULL;
91 static debug_info_t *qeth_dbf_control = NULL;
92 static debug_info_t *qeth_dbf_trace = NULL;
93 static debug_info_t *qeth_dbf_sense = NULL;
94 static debug_info_t *qeth_dbf_qerr = NULL;
/* Per-CPU scratch buffer for formatting debug text entries. */
96 DEFINE_PER_CPU(char[256], qeth_dbf_txt_buf);
99 * some more definitions and declarations
/* Table of recognized device type/model tuples; see qeth_determine_card_type(). */
101 static unsigned int known_devices[][10] = QETH_MODELLIST_ARRAY;
103 /* list of our cards */
104 struct qeth_card_list_struct qeth_card_list;
/* Lock and list for user-space tasks that registered for device-state signals. */
105 /* processes that want to be notified */
106 spinlock_t qeth_notify_lock;
107 struct list_head qeth_notify_list;
109 static void qeth_send_control_data_cb(struct qeth_channel *,
110 struct qeth_cmd_buffer *);
/*
 * Forward declarations for functions defined later in this file.
 * NOTE(review): the return-type lines of these prototypes are not visible in
 * this extract; each declaration continues from a preceding (missing) line.
 */
116 qeth_init_qdio_info(struct qeth_card *card);
119 qeth_init_qdio_queues(struct qeth_card *card);
122 qeth_alloc_qdio_buffers(struct qeth_card *card);
125 qeth_free_qdio_buffers(struct qeth_card *);
128 qeth_clear_qdio_buffers(struct qeth_card *);
131 qeth_clear_ip_list(struct qeth_card *, int, int);
134 qeth_clear_ipacmd_list(struct qeth_card *);
137 qeth_qdio_clear_card(struct qeth_card *, int);
140 qeth_clear_working_pool_list(struct qeth_card *);
143 qeth_clear_cmd_buffers(struct qeth_channel *);
146 qeth_stop(struct net_device *);
149 qeth_clear_ipato_list(struct qeth_card *);
152 qeth_is_addr_covered_by_ipato(struct qeth_card *, struct qeth_ipaddr *);
155 qeth_irq_tasklet(unsigned long);
158 qeth_set_online(struct ccwgroup_device *);
160 static struct qeth_ipaddr *
161 qeth_get_addr_buffer(enum qeth_prot_versions);
164 qeth_set_multicast_list(struct net_device *);
/*
 * Send the registered signal to every task on qeth_notify_list, informing
 * user-space processes of a device state change (called e.g. from
 * qeth_set_offline()). Holds qeth_notify_lock while walking the list.
 */
167 qeth_notify_processes(void)
169 /* notify all registered processes */
170 struct qeth_notify_list_struct *n_entry;
172 QETH_DBF_TEXT(trace,3,"procnoti");
173 spin_lock(&qeth_notify_lock);
174 list_for_each_entry(n_entry, &qeth_notify_list, list) {
175 send_sig(n_entry->signum, n_entry->task, 1);
177 spin_unlock(&qeth_notify_lock);
/*
 * Remove task @p from the notification list. Uses the _safe list iterator
 * because the matching entry is deleted while walking.
 * NOTE(review): the kfree() of the removed entry is not visible in this
 * extract — presumably it follows list_del(); confirm against full source.
 */
181 qeth_notifier_unregister(struct task_struct *p)
183 struct qeth_notify_list_struct *n_entry, *tmp;
185 QETH_DBF_TEXT(trace, 2, "notunreg");
186 spin_lock(&qeth_notify_lock);
187 list_for_each_entry_safe(n_entry, tmp, &qeth_notify_list, list) {
188 if (n_entry->task == p) {
189 list_del(&n_entry->list);
195 spin_unlock(&qeth_notify_lock);
/*
 * Register task @p to receive @signum on device state changes. If the task
 * is already registered, only its signal number is updated; otherwise a new
 * list entry is allocated (GFP_KERNEL) and added under qeth_notify_lock.
 */
199 qeth_notifier_register(struct task_struct *p, int signum)
201 struct qeth_notify_list_struct *n_entry;
203 QETH_DBF_TEXT(trace, 2, "notreg");
204 /* check first if entry already exists */
205 spin_lock(&qeth_notify_lock);
206 list_for_each_entry(n_entry, &qeth_notify_list, list) {
207 if (n_entry->task == p) {
208 n_entry->signum = signum;
209 spin_unlock(&qeth_notify_lock);
213 spin_unlock(&qeth_notify_lock);
/* Not found: allocate a fresh entry outside the spinlock. */
215 n_entry = (struct qeth_notify_list_struct *)
216 kmalloc(sizeof(struct qeth_notify_list_struct),GFP_KERNEL);
220 n_entry->signum = signum;
221 spin_lock(&qeth_notify_lock);
222 list_add(&n_entry->list,&qeth_notify_list);
223 spin_unlock(&qeth_notify_lock);
229 * free channel command buffers
/* Free the data buffer of every command buffer on @channel. */
232 qeth_clean_channel(struct qeth_channel *channel)
236 QETH_DBF_TEXT(setup, 2, "freech");
237 for (cnt = 0; cnt < QETH_CMD_BUFFER_NO; cnt++)
238 kfree(channel->iob[cnt].data);
/*
 * Release all resources of @card: both control channels' command buffers,
 * the net_device, the IP and IPATO lists, and the QDIO buffers.
 * NOTE(review): the final kfree(card) is not visible in this extract.
 */
245 qeth_free_card(struct qeth_card *card)
248 QETH_DBF_TEXT(setup, 2, "freecrd");
249 QETH_DBF_HEX(setup, 2, &card, sizeof(void *));
250 qeth_clean_channel(&card->read);
251 qeth_clean_channel(&card->write);
253 free_netdev(card->dev);
254 qeth_clear_ip_list(card, 0, 0);
255 qeth_clear_ipato_list(card);
256 qeth_free_qdio_buffers(card);
261 * alloc memory for command buffer per channel
/*
 * Allocate and initialize the QETH_CMD_BUFFER_NO command buffers of
 * @channel. Buffers are DMA-capable (GFP_DMA) because they are handed to
 * channel I/O. On partial allocation failure the already-allocated buffers
 * are freed again (the cnt < QETH_CMD_BUFFER_NO branch below).
 */
264 qeth_setup_channel(struct qeth_channel *channel)
268 QETH_DBF_TEXT(setup, 2, "setupch");
269 for (cnt=0; cnt < QETH_CMD_BUFFER_NO; cnt++) {
270 channel->iob[cnt].data = (char *)
271 kmalloc(QETH_BUFSIZE, GFP_DMA|GFP_KERNEL);
272 if (channel->iob[cnt].data == NULL)
274 channel->iob[cnt].state = BUF_STATE_FREE;
275 channel->iob[cnt].channel = channel;
276 channel->iob[cnt].callback = qeth_send_control_data_cb;
277 channel->iob[cnt].rc = 0;
/* Loop ended early => allocation failed: unwind buffers 0..cnt. */
279 if (cnt < QETH_CMD_BUFFER_NO) {
281 kfree(channel->iob[cnt].data);
285 channel->io_buf_no = 0;
286 atomic_set(&channel->irq_pending, 0);
287 spin_lock_init(&channel->iob_lock);
289 init_waitqueue_head(&channel->wait_q);
290 channel->irq_tasklet.data = (unsigned long) channel;
291 channel->irq_tasklet.func = qeth_irq_tasklet;
296 * alloc memory for card structure
/*
 * Allocate and zero a qeth_card and set up its read and write channels.
 * If the write channel setup fails, the read channel is cleaned up again.
 * Returns the new card, or (per the visible error paths) NULL on failure.
 */
298 static struct qeth_card *
299 qeth_alloc_card(void)
301 struct qeth_card *card;
303 QETH_DBF_TEXT(setup, 2, "alloccrd");
304 card = (struct qeth_card *) kmalloc(sizeof(struct qeth_card),
308 QETH_DBF_HEX(setup, 2, &card, sizeof(void *));
309 memset(card, 0, sizeof(struct qeth_card));
310 if (qeth_setup_channel(&card->read)) {
314 if (qeth_setup_channel(&card->write)) {
315 qeth_clean_channel(&card->read);
/*
 * Check whether @irb is actually an ERR_PTR-encoded error from the common
 * I/O layer rather than a real interrupt response block. Logs the error
 * class (-EIO, -ETIMEDOUT, or unknown) to the trace debug area.
 */
323 __qeth_check_irb_error(struct ccw_device *cdev, struct irb *irb)
328 switch (PTR_ERR(irb)) {
330 PRINT_WARN("i/o-error on device %s\n", cdev->dev.bus_id);
331 QETH_DBF_TEXT(trace, 2, "ckirberr");
332 QETH_DBF_TEXT_(trace, 2, " rc%d", -EIO);
335 PRINT_WARN("timeout on device %s\n", cdev->dev.bus_id);
336 QETH_DBF_TEXT(trace, 2, "ckirberr");
337 QETH_DBF_TEXT_(trace, 2, " rc%d", -ETIMEDOUT);
340 PRINT_WARN("unknown error %ld on device %s\n", PTR_ERR(irb),
342 QETH_DBF_TEXT(trace, 2, "ckirberr");
343 QETH_DBF_TEXT(trace, 2, " rc???");
/*
 * Inspect channel status (cstat), device status (dstat) and sense data of
 * @irb to classify an I/O problem. Dumps the raw IRB on channel checks and
 * distinguishes several unit-check sense patterns (resetting event, command
 * reject, the 0xaf/0xfe "AFFE" pattern, and all-zero sense).
 */
349 qeth_get_problem(struct ccw_device *cdev, struct irb *irb)
354 sense = (char *) irb->ecw;
355 cstat = irb->scsw.cstat;
356 dstat = irb->scsw.dstat;
/* Any channel-check condition: log and dump the first 64 bytes of the IRB. */
358 if (cstat & (SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK |
359 SCHN_STAT_CHN_DATA_CHK | SCHN_STAT_CHAIN_CHECK |
360 SCHN_STAT_PROT_CHECK | SCHN_STAT_PROG_CHECK)) {
361 QETH_DBF_TEXT(trace,2, "CGENCHK");
362 PRINT_WARN("check on device %s, dstat=x%x, cstat=x%x ",
363 cdev->dev.bus_id, dstat, cstat);
364 HEXDUMP16(WARN, "irb: ", irb);
365 HEXDUMP16(WARN, "irb: ", ((char *) irb) + 32);
/* Unit check: classify by sense bytes. */
369 if (dstat & DEV_STAT_UNIT_CHECK) {
370 if (sense[SENSE_RESETTING_EVENT_BYTE] &
371 SENSE_RESETTING_EVENT_FLAG) {
372 QETH_DBF_TEXT(trace,2,"REVIND");
375 if (sense[SENSE_COMMAND_REJECT_BYTE] &
376 SENSE_COMMAND_REJECT_FLAG) {
377 QETH_DBF_TEXT(trace,2,"CMDREJi");
380 if ((sense[2] == 0xaf) && (sense[3] == 0xfe)) {
381 QETH_DBF_TEXT(trace,2,"AFFE");
384 if ((!sense[0]) && (!sense[1]) && (!sense[2]) && (!sense[3])) {
385 QETH_DBF_TEXT(trace,2,"ZEROSEN");
388 QETH_DBF_TEXT(trace,2,"DGENCHK");
393 static int qeth_issue_next_read(struct qeth_card *);
/*
 * Interrupt handler shared by all three CCW devices (read/write/data) of a
 * qeth card. Identifies the channel from @cdev, tracks clear/halt function
 * completion in the channel state, handles error conditions (scheduling
 * recovery via qeth_schedule_recovery()), marks the completed command
 * buffer (passed back as @intparm) PROCESSED, kicks the next read on the
 * read channel, and schedules the channel's tasklet for callback delivery.
 */
399 qeth_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
403 struct qeth_cmd_buffer *buffer;
404 struct qeth_channel *channel;
405 struct qeth_card *card;
407 QETH_DBF_TEXT(trace,5,"irq");
/* Bail out early if the "irb" is really an ERR_PTR from the CIO layer. */
409 if (__qeth_check_irb_error(cdev, irb))
411 cstat = irb->scsw.cstat;
412 dstat = irb->scsw.dstat;
414 card = CARD_FROM_CDEV(cdev);
/* Map the interrupting ccw_device to one of the card's three channels. */
418 if (card->read.ccwdev == cdev){
419 channel = &card->read;
420 QETH_DBF_TEXT(trace,5,"read");
421 } else if (card->write.ccwdev == cdev) {
422 channel = &card->write;
423 QETH_DBF_TEXT(trace,5,"write");
425 channel = &card->data;
426 QETH_DBF_TEXT(trace,5,"data");
428 atomic_set(&channel->irq_pending, 0);
430 if (irb->scsw.fctl & (SCSW_FCTL_CLEAR_FUNC))
431 channel->state = CH_STATE_STOPPED;
433 if (irb->scsw.fctl & (SCSW_FCTL_HALT_FUNC))
434 channel->state = CH_STATE_HALTED;
436 /* let's wake up immediately on data channel */
437 if ((channel == &card->data) && (intparm != 0))
440 if (intparm == QETH_CLEAR_CHANNEL_PARM) {
441 QETH_DBF_TEXT(trace, 6, "clrchpar");
442 /* we don't have to handle this further */
445 if (intparm == QETH_HALT_CHANNEL_PARM) {
446 QETH_DBF_TEXT(trace, 6, "hltchpar");
447 /* we don't have to handle this further */
/* Unit exception/check or channel-status error: dump sense, classify, recover. */
450 if ((dstat & DEV_STAT_UNIT_EXCEP) ||
451 (dstat & DEV_STAT_UNIT_CHECK) ||
453 if (irb->esw.esw0.erw.cons) {
454 /* TODO: we should make this s390dbf */
455 PRINT_WARN("sense data available on channel %s.\n",
456 CHANNEL_ID(channel));
457 PRINT_WARN(" cstat 0x%X\n dstat 0x%X\n", cstat, dstat);
458 HEXDUMP16(WARN,"irb: ",irb);
459 HEXDUMP16(WARN,"sense data: ",irb->ecw);
461 rc = qeth_get_problem(cdev,irb);
463 qeth_schedule_recovery(card);
/* intparm carries the (physical) address of the completed command buffer. */
469 buffer = (struct qeth_cmd_buffer *) __va((addr_t)intparm);
470 buffer->state = BUF_STATE_PROCESSED;
472 if (channel == &card->data)
475 if (channel == &card->read &&
476 channel->state == CH_STATE_UP)
477 qeth_issue_next_read(card);
479 tasklet_schedule(&channel->irq_tasklet);
482 wake_up(&card->wait_q);
486 * tasklet function scheduled from irq handler
/*
 * Deliver callbacks for all command buffers that the IRQ handler marked
 * PROCESSED, in ring order starting at channel->buf_no, then wake waiters.
 * NOTE(review): the line initializing iob (presumably iob = channel->iob)
 * is not visible in this extract.
 */
489 qeth_irq_tasklet(unsigned long data)
491 struct qeth_card *card;
492 struct qeth_channel *channel;
493 struct qeth_cmd_buffer *iob;
496 QETH_DBF_TEXT(trace,5,"irqtlet");
497 channel = (struct qeth_channel *) data;
499 index = channel->buf_no;
500 card = CARD_FROM_CDEV(channel->ccwdev);
501 while (iob[index].state == BUF_STATE_PROCESSED) {
502 if (iob[index].callback !=NULL) {
503 iob[index].callback(channel,iob + index);
505 index = (index + 1) % QETH_CMD_BUFFER_NO;
507 channel->buf_no = index;
508 wake_up(&card->wait_q);
511 static int qeth_stop_card(struct qeth_card *);
/*
 * ccwgroup "set offline" handler: stop the card, take all three CCW
 * devices offline (data, write, read — in that order), remember that a
 * previously-UP card should be recovered on the next online, and notify
 * registered user-space processes.
 */
514 qeth_set_offline(struct ccwgroup_device *cgdev)
516 struct qeth_card *card = (struct qeth_card *) cgdev->dev.driver_data;
517 enum qeth_card_states recover_flag;
519 QETH_DBF_TEXT(setup, 3, "setoffl");
520 QETH_DBF_HEX(setup, 3, &card, sizeof(void *));
522 recover_flag = card->state;
523 if (qeth_stop_card(card) == -ERESTARTSYS){
524 PRINT_WARN("Stopping card %s interrupted by user!\n",
528 ccw_device_set_offline(CARD_DDEV(card));
529 ccw_device_set_offline(CARD_WDEV(card));
530 ccw_device_set_offline(CARD_RDEV(card));
531 if (recover_flag == CARD_STATE_UP)
532 card->state = CARD_STATE_RECOVER;
533 qeth_notify_processes();
/*
 * ccwgroup removal handler: hard-stop an online card, unlink it from the
 * global card list, unregister its net_device, drop sysfs attributes, free
 * the card structure and release the device reference taken at probe time.
 */
538 qeth_remove_device(struct ccwgroup_device *cgdev)
540 struct qeth_card *card = (struct qeth_card *) cgdev->dev.driver_data;
543 QETH_DBF_TEXT(setup, 3, "rmdev");
544 QETH_DBF_HEX(setup, 3, &card, sizeof(void *));
549 if (cgdev->state == CCWGROUP_ONLINE){
550 card->use_hard_stop = 1;
551 qeth_set_offline(cgdev);
553 /* remove from our internal list */
554 write_lock_irqsave(&qeth_card_list.rwlock, flags);
555 list_del(&card->list);
556 write_unlock_irqrestore(&qeth_card_list.rwlock, flags);
558 unregister_netdev(card->dev);
559 qeth_remove_device_attributes(&cgdev->dev);
560 qeth_free_card(card);
561 cgdev->dev.driver_data = NULL;
562 put_device(&cgdev->dev);
566 qeth_register_addr_entry(struct qeth_card *, struct qeth_ipaddr *);
568 qeth_deregister_addr_entry(struct qeth_card *, struct qeth_ipaddr *);
571 * Add/remove address to/from card's ip list, i.e. try to add or remove
572 * reference to/from an IP address that is already registered on the card.
574 * 0 address was on card and its reference count has been adjusted,
575 * but is still > 0, so nothing has to be done
576 * also returns 0 if address was not on card and the todo was to delete
577 * the address -> there is also nothing to be done
578 * 1 address was not on card and the todo is to add it to the card's ip
580 * -1 address was on card and its reference count has been decremented
581 * to <= 0 by the todo -> address must be removed from card
/* Caller must hold card->ip_lock (presumably — confirm against callers). */
584 __qeth_ref_ip_on_card(struct qeth_card *card, struct qeth_ipaddr *todo,
585 struct qeth_ipaddr **__addr)
587 struct qeth_ipaddr *addr;
/* Look for an existing entry matching @todo (IPv4: addr+mask, IPv6: addr+pfxlen). */
590 list_for_each_entry(addr, &card->ip_list, entry) {
591 if ((addr->proto == QETH_PROT_IPV4) &&
592 (todo->proto == QETH_PROT_IPV4) &&
593 (addr->type == todo->type) &&
594 (addr->u.a4.addr == todo->u.a4.addr) &&
595 (addr->u.a4.mask == todo->u.a4.mask) ){
599 if ((addr->proto == QETH_PROT_IPV6) &&
600 (todo->proto == QETH_PROT_IPV6) &&
601 (addr->type == todo->type) &&
602 (addr->u.a6.pfxlen == todo->u.a6.pfxlen) &&
603 (memcmp(&addr->u.a6.addr, &todo->u.a6.addr,
604 sizeof(struct in6_addr)) == 0)) {
/* Found: fold the todo's refcount delta into the on-card entry. */
610 addr->users += todo->users;
611 if (addr->users <= 0){
615 /* for VIPA and RXIP limit refcount to 1 */
616 if (addr->type != QETH_IP_TYPE_NORMAL)
621 if (todo->users > 0){
622 /* for VIPA and RXIP limit refcount to 1 */
623 if (todo->type != QETH_IP_TYPE_NORMAL)
/*
 * Test whether @addr already occurs in @list. With @same_type set, a match
 * requires an identical address type; with it clear, a match requires a
 * DIFFERENT type (used to detect type conflicts for the same address).
 */
631 __qeth_address_exists_in_list(struct list_head *list, struct qeth_ipaddr *addr,
634 struct qeth_ipaddr *tmp;
636 list_for_each_entry(tmp, list, entry) {
637 if ((tmp->proto == QETH_PROT_IPV4) &&
638 (addr->proto == QETH_PROT_IPV4) &&
639 ((same_type && (tmp->type == addr->type)) ||
640 (!same_type && (tmp->type != addr->type)) ) &&
641 (tmp->u.a4.addr == addr->u.a4.addr) ){
644 if ((tmp->proto == QETH_PROT_IPV6) &&
645 (addr->proto == QETH_PROT_IPV6) &&
646 ((same_type && (tmp->type == addr->type)) ||
647 (!same_type && (tmp->type != addr->type)) ) &&
648 (memcmp(&tmp->u.a6.addr, &addr->u.a6.addr,
649 sizeof(struct in6_addr)) == 0) ) {
657 * Add IP to be added to todo list. If there is already an "add todo"
658 * in this list we just increment the reference count.
659 * Returns 0 if we just incremented reference count.
/* Caller must hold card->ip_lock (see qeth_add_ip()/qeth_delete_ip()). */
662 __qeth_insert_ip_todo(struct qeth_card *card, struct qeth_ipaddr *addr, int add)
664 struct qeth_ipaddr *tmp, *t;
/* First: merge into an already-queued todo entry for the same address. */
667 list_for_each_entry_safe(tmp, t, card->ip_tbd_list, entry) {
668 if ((addr->type == QETH_IP_TYPE_DEL_ALL_MC) &&
669 (tmp->type == QETH_IP_TYPE_DEL_ALL_MC))
671 if ((tmp->proto == QETH_PROT_IPV4) &&
672 (addr->proto == QETH_PROT_IPV4) &&
673 (tmp->type == addr->type) &&
674 (tmp->is_multicast == addr->is_multicast) &&
675 (tmp->u.a4.addr == addr->u.a4.addr) &&
676 (tmp->u.a4.mask == addr->u.a4.mask) ){
680 if ((tmp->proto == QETH_PROT_IPV6) &&
681 (addr->proto == QETH_PROT_IPV6) &&
682 (tmp->type == addr->type) &&
683 (tmp->is_multicast == addr->is_multicast) &&
684 (tmp->u.a6.pfxlen == addr->u.a6.pfxlen) &&
685 (memcmp(&tmp->u.a6.addr, &addr->u.a6.addr,
686 sizeof(struct in6_addr)) == 0) ){
/* Merge refcounts; a delta that cancels to zero removes the todo entirely. */
692 if (addr->users != 0)
693 tmp->users += addr->users;
695 tmp->users += add? 1:-1;
696 if (tmp->users == 0){
697 list_del(&tmp->entry);
/* No existing todo: queue @addr itself (DEL_ALL_MC goes to the list head). */
702 if (addr->type == QETH_IP_TYPE_DEL_ALL_MC)
703 list_add(&addr->entry, card->ip_tbd_list);
705 if (addr->users == 0)
706 addr->users += add? 1:-1;
707 if (add && (addr->type == QETH_IP_TYPE_NORMAL) &&
708 qeth_is_addr_covered_by_ipato(card, addr)){
709 QETH_DBF_TEXT(trace, 2, "tkovaddr");
710 addr->set_flags |= QETH_IPA_SETIP_TAKEOVER_FLAG;
712 list_add_tail(&addr->entry, card->ip_tbd_list);
719 * Remove IP address from list
/* Queue a "delete" todo for @addr under card->ip_lock. */
722 qeth_delete_ip(struct qeth_card *card, struct qeth_ipaddr *addr)
727 QETH_DBF_TEXT(trace,4,"delip");
728 if (addr->proto == QETH_PROT_IPV4)
729 QETH_DBF_HEX(trace,4,&addr->u.a4.addr,4);
731 QETH_DBF_HEX(trace,4,&addr->u.a6.addr,8);
732 QETH_DBF_HEX(trace,4,((char *)&addr->u.a6.addr)+8,8);
734 spin_lock_irqsave(&card->ip_lock, flags);
735 rc = __qeth_insert_ip_todo(card, addr, 0);
736 spin_unlock_irqrestore(&card->ip_lock, flags);
/* Queue an "add" todo for @addr under card->ip_lock. */
741 qeth_add_ip(struct qeth_card *card, struct qeth_ipaddr *addr)
746 QETH_DBF_TEXT(trace,4,"addip");
747 if (addr->proto == QETH_PROT_IPV4)
748 QETH_DBF_HEX(trace,4,&addr->u.a4.addr,4);
750 QETH_DBF_HEX(trace,4,&addr->u.a6.addr,8);
751 QETH_DBF_HEX(trace,4,((char *)&addr->u.a6.addr)+8,8);
753 spin_lock_irqsave(&card->ip_lock, flags);
754 rc = __qeth_insert_ip_todo(card, addr, 1);
755 spin_unlock_irqrestore(&card->ip_lock, flags);
/*
 * Deregister and unlink every multicast address on card->ip_list.
 * Called with card->ip_lock held; the lock is dropped around the
 * (sleeping/IO) deregister call and re-taken afterwards, hence the
 * flags pointer parameter and the _safe iterator.
 */
760 __qeth_delete_all_mc(struct qeth_card *card, unsigned long *flags)
762 struct qeth_ipaddr *addr, *tmp;
765 list_for_each_entry_safe(addr, tmp, &card->ip_list, entry) {
766 if (addr->is_multicast) {
767 spin_unlock_irqrestore(&card->ip_lock, *flags);
768 rc = qeth_deregister_addr_entry(card, addr);
769 spin_lock_irqsave(&card->ip_lock, *flags);
771 list_del(&addr->entry);
/*
 * Drain the card's IP "to be done" list: swap in a fresh empty tbd list,
 * then process each todo entry — deleting all multicast addresses, adding
 * new addresses to the card (register + link into ip_list), or removing
 * on-card addresses (unlink + deregister). The ip_lock is dropped around
 * the register/deregister calls, which may sleep/do I/O.
 */
779 qeth_set_ip_addr_list(struct qeth_card *card)
781 struct list_head *tbd_list;
782 struct qeth_ipaddr *todo, *addr;
786 QETH_DBF_TEXT(trace, 2, "sdiplist");
787 QETH_DBF_HEX(trace, 2, &card, sizeof(void *));
789 spin_lock_irqsave(&card->ip_lock, flags);
790 tbd_list = card->ip_tbd_list;
/* GFP_ATOMIC: we are under a spinlock here. On failure, keep the old list. */
791 card->ip_tbd_list = kmalloc(sizeof(struct list_head), GFP_ATOMIC);
792 if (!card->ip_tbd_list) {
793 QETH_DBF_TEXT(trace, 0, "silnomem");
794 card->ip_tbd_list = tbd_list;
795 spin_unlock_irqrestore(&card->ip_lock, flags);
798 INIT_LIST_HEAD(card->ip_tbd_list);
800 while (!list_empty(tbd_list)){
801 todo = list_entry(tbd_list->next, struct qeth_ipaddr, entry);
802 list_del(&todo->entry);
803 if (todo->type == QETH_IP_TYPE_DEL_ALL_MC){
804 __qeth_delete_all_mc(card, &flags);
808 rc = __qeth_ref_ip_on_card(card, todo, &addr);
810 /* nothing to be done; only adjusted refcount */
812 } else if (rc == 1) {
813 /* new entry to be added to on-card list */
814 spin_unlock_irqrestore(&card->ip_lock, flags);
815 rc = qeth_register_addr_entry(card, todo);
816 spin_lock_irqsave(&card->ip_lock, flags);
818 list_add_tail(&todo->entry, &card->ip_list);
821 } else if (rc == -1) {
822 /* on-card entry to be removed */
823 list_del_init(&addr->entry);
824 spin_unlock_irqrestore(&card->ip_lock, flags);
825 rc = qeth_deregister_addr_entry(card, addr);
826 spin_lock_irqsave(&card->ip_lock, flags);
/* NOTE(review): deregister failure apparently re-links the entry — confirm. */
830 list_add_tail(&addr->entry, &card->ip_list);
834 spin_unlock_irqrestore(&card->ip_lock, flags);
838 static void qeth_delete_mc_addresses(struct qeth_card *);
839 static void qeth_add_multicast_ipv4(struct qeth_card *);
840 #ifdef CONFIG_QETH_IPV6
841 static void qeth_add_multicast_ipv6(struct qeth_card *);
/*
 * Request that the thread identified by bit @thread be started. Fails when
 * the thread is not allowed or already requested. All thread masks are
 * protected by card->thread_mask_lock.
 */
845 qeth_set_thread_start_bit(struct qeth_card *card, unsigned long thread)
849 spin_lock_irqsave(&card->thread_mask_lock, flags);
850 if ( !(card->thread_allowed_mask & thread) ||
851 (card->thread_start_mask & thread) ) {
852 spin_unlock_irqrestore(&card->thread_mask_lock, flags);
855 card->thread_start_mask |= thread;
856 spin_unlock_irqrestore(&card->thread_mask_lock, flags);
/* Withdraw a pending start request for @thread and wake any waiters. */
861 qeth_clear_thread_start_bit(struct qeth_card *card, unsigned long thread)
865 spin_lock_irqsave(&card->thread_mask_lock, flags);
866 card->thread_start_mask &= ~thread;
867 spin_unlock_irqrestore(&card->thread_mask_lock, flags);
868 wake_up(&card->wait_q);
/* Mark @thread as no longer running and wake any waiters. */
872 qeth_clear_thread_running_bit(struct qeth_card *card, unsigned long thread)
876 spin_lock_irqsave(&card->thread_mask_lock, flags);
877 card->thread_running_mask &= ~thread;
878 spin_unlock_irqrestore(&card->thread_mask_lock, flags);
879 wake_up(&card->wait_q);
/*
 * Try to transition @thread from "start requested" to "running": succeeds
 * only if the thread is allowed and not already running. Returns a value
 * >= 0 once a decision can be made (used as the wait_event condition in
 * qeth_do_run_thread() below).
 */
883 __qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
888 spin_lock_irqsave(&card->thread_mask_lock, flags);
889 if (card->thread_start_mask & thread){
890 if ((card->thread_allowed_mask & thread) &&
891 !(card->thread_running_mask & thread)){
893 card->thread_start_mask &= ~thread;
894 card->thread_running_mask |= thread;
898 spin_unlock_irqrestore(&card->thread_mask_lock, flags);
/* Block until __qeth_do_run_thread() decides whether @thread may run. */
903 qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
907 wait_event(card->wait_q,
908 (rc = __qeth_do_run_thread(card, thread)) >= 0);
/*
 * Kernel-thread entry point (started via kernel_thread()): daemonize, wait
 * for permission to run as the SET_IP thread, flush the card's pending IP
 * todo list, then clear the running bit.
 */
913 qeth_register_ip_addresses(void *ptr)
915 struct qeth_card *card;
917 card = (struct qeth_card *) ptr;
918 daemonize("qeth_reg_ip");
919 QETH_DBF_TEXT(trace,4,"regipth1");
920 if (!qeth_do_run_thread(card, QETH_SET_IP_THREAD))
922 QETH_DBF_TEXT(trace,4,"regipth2");
923 qeth_set_ip_addr_list(card);
924 qeth_clear_thread_running_bit(card, QETH_SET_IP_THREAD);
/*
 * Kernel-thread entry point for device recovery: hard-stop the card by
 * setting it offline, then bring it online again, logging success or
 * failure. Clears both the start and running bits on exit so a recovery
 * scheduled while this one ran is not re-executed.
 */
929 qeth_recover(void *ptr)
931 struct qeth_card *card;
934 card = (struct qeth_card *) ptr;
935 daemonize("qeth_recover");
936 QETH_DBF_TEXT(trace,2,"recover1");
937 QETH_DBF_HEX(trace, 2, &card, sizeof(void *));
938 if (!qeth_do_run_thread(card, QETH_RECOVER_THREAD))
940 QETH_DBF_TEXT(trace,2,"recover2");
941 PRINT_WARN("Recovery of device %s started ...\n",
943 card->use_hard_stop = 1;
944 qeth_set_offline(card->gdev);
945 rc = qeth_set_online(card->gdev);
947 PRINT_INFO("Device %s successfully recovered!\n",
950 PRINT_INFO("Device %s could not be recovered!\n",
952 /* don't run another scheduled recovery */
953 qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
954 qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD);
/*
 * Request asynchronous recovery of @card: set the recovery start bit and,
 * if that succeeded, schedule the workqueue item that spawns the threads.
 */
959 qeth_schedule_recovery(struct qeth_card *card)
961 QETH_DBF_TEXT(trace,2,"startrec");
963 if (qeth_set_thread_start_bit(card, QETH_RECOVER_THREAD) == 0)
964 schedule_work(&card->kernel_thread_starter);
/*
 * Return nonzero if a start request for @thread is pending (read under
 * thread_mask_lock); also traces the three thread masks.
 */
968 qeth_do_start_thread(struct qeth_card *card, unsigned long thread)
973 spin_lock_irqsave(&card->thread_mask_lock, flags);
974 QETH_DBF_TEXT_(trace, 4, " %02x%02x%02x",
975 (u8) card->thread_start_mask,
976 (u8) card->thread_allowed_mask,
977 (u8) card->thread_running_mask);
978 rc = (card->thread_start_mask & thread);
979 spin_unlock_irqrestore(&card->thread_mask_lock, flags);
/*
 * Workqueue handler (card->kernel_thread_starter): spawn the requested
 * kernel threads, but only while at least one control channel is up.
 */
984 qeth_start_kernel_thread(struct qeth_card *card)
986 QETH_DBF_TEXT(trace , 2, "strthrd");
988 if (card->read.state != CH_STATE_UP &&
989 card->write.state != CH_STATE_UP)
992 if (qeth_do_start_thread(card, QETH_SET_IP_THREAD))
993 kernel_thread(qeth_register_ip_addresses, (void *)card,SIGCHLD);
994 if (qeth_do_start_thread(card, QETH_RECOVER_THREAD))
995 kernel_thread(qeth_recover, (void *) card, SIGCHLD);
/* Set the driver-default option values for a freshly allocated card.
 * (Function name keeps the original's "intial" spelling for ABI/grep
 * consistency with its caller in qeth_setup_card().) */
1000 qeth_set_intial_options(struct qeth_card *card)
1002 card->options.route4.type = NO_ROUTER;
1003 #ifdef CONFIG_QETH_IPV6
1004 card->options.route6.type = NO_ROUTER;
1005 #endif /* QETH_IPV6 */
1006 card->options.checksum_type = QETH_CHECKSUM_DEFAULT;
1007 card->options.broadcast_mode = QETH_TR_BROADCAST_ALLRINGS;
1008 card->options.macaddr_mode = QETH_TR_MACADDR_NONCANONICAL;
1009 card->options.fake_broadcast = 0;
1010 card->options.add_hhlen = DEFAULT_ADD_HHLEN;
1011 card->options.fake_ll = 0;
1015 * initialize channels ,card and all state machines
/*
 * Initialize a freshly allocated card: channel/card state machines, locks,
 * thread masks, the workqueue item that spawns kernel threads, the IP and
 * IPATO lists, default options, and the QDIO bookkeeping. Allocates the
 * ip_tbd_list head with GFP_KERNEL (may sleep).
 */
1018 qeth_setup_card(struct qeth_card *card)
1021 QETH_DBF_TEXT(setup, 2, "setupcrd");
1022 QETH_DBF_HEX(setup, 2, &card, sizeof(void *));
1024 card->read.state = CH_STATE_DOWN;
1025 card->write.state = CH_STATE_DOWN;
1026 card->data.state = CH_STATE_DOWN;
1027 card->state = CARD_STATE_DOWN;
1028 card->lan_online = 0;
1029 card->use_hard_stop = 0;
1031 #ifdef CONFIG_QETH_VLAN
1032 spin_lock_init(&card->vlanlock);
1033 card->vlangrp = NULL;
1035 spin_lock_init(&card->ip_lock);
1036 spin_lock_init(&card->thread_mask_lock);
1037 card->thread_start_mask = 0;
1038 card->thread_allowed_mask = 0;
1039 card->thread_running_mask = 0;
1040 INIT_WORK(&card->kernel_thread_starter,
1041 (void *)qeth_start_kernel_thread,card);
1042 INIT_LIST_HEAD(&card->ip_list);
1043 card->ip_tbd_list = kmalloc(sizeof(struct list_head), GFP_KERNEL);
1044 if (!card->ip_tbd_list) {
1045 QETH_DBF_TEXT(setup, 0, "iptbdnom");
1048 INIT_LIST_HEAD(card->ip_tbd_list);
1049 INIT_LIST_HEAD(&card->cmd_waiter_list);
1050 init_waitqueue_head(&card->wait_q);
1051 /* initial options */
1052 qeth_set_intial_options(card);
1053 /* IP address takeover */
1054 INIT_LIST_HEAD(&card->ipato.entries);
1055 card->ipato.enabled = 0;
1056 card->ipato.invert4 = 0;
1057 card->ipato.invert6 = 0;
1058 /* init QDIO stuff */
1059 qeth_init_qdio_info(card);
/*
 * Look up the read device's type/model in the known_devices table
 * (columns: [2]=dev_type, [3]=dev_model, [4]=card type, [8]=number of
 * output queues, [9]=multicast-different flag) and fill in card->info.
 * Falls through to QETH_CARD_TYPE_UNKNOWN if no entry matches.
 */
1064 qeth_determine_card_type(struct qeth_card *card)
1068 QETH_DBF_TEXT(setup, 2, "detcdtyp");
1070 while (known_devices[i][4]) {
1071 if ((CARD_RDEV(card)->id.dev_type == known_devices[i][2]) &&
1072 (CARD_RDEV(card)->id.dev_model == known_devices[i][3])) {
1073 card->info.type = known_devices[i][4];
1074 card->qdio.no_out_queues = known_devices[i][8];
1075 card->info.is_multicast_different = known_devices[i][9];
1080 card->info.type = QETH_CARD_TYPE_UNKNOWN;
1081 PRINT_ERR("unknown card type on device %s\n", CARD_BUS_ID(card));
/*
 * ccwgroup probe handler: take a device reference, allocate and set up the
 * card, wire qeth_irq() into all three CCW subdevices, create sysfs
 * attributes, determine the card type, and link the card into the global
 * list. Each visible failure branch frees the card again.
 */
1086 qeth_probe_device(struct ccwgroup_device *gdev)
1088 struct qeth_card *card;
1090 unsigned long flags;
1093 QETH_DBF_TEXT(setup, 2, "probedev");
1096 if (!get_device(dev))
1099 card = qeth_alloc_card();
1102 QETH_DBF_TEXT_(setup, 2, "1err%d", -ENOMEM);
1105 if ((rc = qeth_setup_card(card))){
1106 QETH_DBF_TEXT_(setup, 2, "2err%d", rc);
1108 qeth_free_card(card);
1111 gdev->dev.driver_data = card;
/* cdev[0]=read, cdev[1]=write, cdev[2]=data — all share one IRQ handler. */
1113 gdev->cdev[0]->handler = qeth_irq;
1114 gdev->cdev[1]->handler = qeth_irq;
1115 gdev->cdev[2]->handler = qeth_irq;
1117 rc = qeth_create_device_attributes(dev);
1120 qeth_free_card(card);
1123 card->read.ccwdev = gdev->cdev[0];
1124 card->write.ccwdev = gdev->cdev[1];
1125 card->data.ccwdev = gdev->cdev[2];
1126 if ((rc = qeth_determine_card_type(card))){
1127 PRINT_WARN("%s: not a valid card type\n", __func__);
1128 QETH_DBF_TEXT_(setup, 2, "3err%d", rc);
1130 qeth_free_card(card);
1133 /* insert into our internal list */
1134 write_lock_irqsave(&qeth_card_list.rwlock, flags);
1135 list_add_tail(&card->list, &qeth_card_list.list);
1136 write_unlock_irqrestore(&qeth_card_list.rwlock, flags);
/*
 * Read the configuration data record of the data device and extract unit
 * addressing info (chpid, unit_addr2, cula) plus the guest-LAN indicator
 * (EBCDIC "VM" at offsets 0x10/0x11) into card->info.
 */
1142 qeth_get_unitaddr(struct qeth_card *card)
1148 QETH_DBF_TEXT(setup, 2, "getunit");
1149 rc = read_conf_data(CARD_DDEV(card), (void **) &prcd, &length);
1151 PRINT_ERR("read_conf_data for device %s returned %i\n",
1152 CARD_DDEV_ID(card), rc);
1155 card->info.chpid = prcd[30];
1156 card->info.unit_addr2 = prcd[31];
1157 card->info.cula = prcd[63];
1158 card->info.guestlan = ((prcd[0x10] == _ascebc['V']) &&
1159 (prcd[0x11] == _ascebc['M']));
/* Initialize the well-known MPC protocol token values used during
 * CM/ULP connection setup. */
1164 qeth_init_tokens(struct qeth_card *card)
1166 card->token.issuer_rm_w = 0x00010103UL;
1167 card->token.cm_filter_w = 0x00010108UL;
1168 card->token.cm_connection_w = 0x0001010aUL;
1169 card->token.ulp_filter_w = 0x0001010bUL;
1170 card->token.ulp_connection_w = 0x0001010dUL;
/* Parse the last 4 hex digits of a bus id ("0.0.xxxx") as the raw devno. */
1174 raw_devno_from_bus_id(char *id)
1176 id += (strlen(id) - 4);
1177 return (__u16) simple_strtoul(id, &id, 16);
/*
 * Prepare @channel's CCW for the next I/O: choose the READ or WRITE CCW
 * template depending on which channel this is, set the transfer length,
 * and point the data address at @iob (physical address via __pa).
 */
1183 qeth_setup_ccw(struct qeth_channel *channel,unsigned char *iob, __u32 len)
1185 struct qeth_card *card;
1187 QETH_DBF_TEXT(trace, 4, "setupccw");
1188 card = CARD_FROM_CDEV(channel->ccwdev);
1189 if (channel == &card->read)
1190 memcpy(&channel->ccw, READ_CCW, sizeof(struct ccw1));
1192 memcpy(&channel->ccw, WRITE_CCW, sizeof(struct ccw1));
1193 channel->ccw.count = len;
1194 channel->ccw.cda = (__u32) __pa(iob);
1198 * get free buffer for ccws (IDX activation, lancmds,ipassists...)
/*
 * Scan the command-buffer ring starting at io_buf_no for a FREE buffer;
 * lock it, zero its data, advance io_buf_no and return it. Returns NULL
 * (per the visible loop-exhaustion path) if a full cycle finds none.
 * Caller must hold channel->iob_lock (see qeth_get_buffer()).
 */
1200 static struct qeth_cmd_buffer *
1201 __qeth_get_buffer(struct qeth_channel *channel)
1205 QETH_DBF_TEXT(trace, 6, "getbuff");
1206 index = channel->io_buf_no;
1208 if (channel->iob[index].state == BUF_STATE_FREE) {
1209 channel->iob[index].state = BUF_STATE_LOCKED;
1210 channel->io_buf_no = (channel->io_buf_no + 1) %
1212 memset(channel->iob[index].data, 0, QETH_BUFSIZE);
1213 return channel->iob + index;
1215 index = (index + 1) % QETH_CMD_BUFFER_NO;
1216 } while(index != channel->io_buf_no);
1222 * release command buffer
/*
 * Return @iob to the FREE state: zero its data, restore the default
 * callback, all under the channel's iob_lock.
 * NOTE(review): the wake_up of channel->wait_q presumably follows the
 * unlock but is not visible in this extract.
 */
1225 qeth_release_buffer(struct qeth_channel *channel, struct qeth_cmd_buffer *iob)
1227 unsigned long flags;
1229 QETH_DBF_TEXT(trace, 6, "relbuff");
1230 spin_lock_irqsave(&channel->iob_lock, flags);
1231 memset(iob->data, 0, QETH_BUFSIZE);
1232 iob->state = BUF_STATE_FREE;
1233 iob->callback = qeth_send_control_data_cb;
1235 spin_unlock_irqrestore(&channel->iob_lock, flags);
/* Locked wrapper around __qeth_get_buffer(); may return NULL. */
1238 static struct qeth_cmd_buffer *
1239 qeth_get_buffer(struct qeth_channel *channel)
1241 struct qeth_cmd_buffer *buffer = NULL;
1242 unsigned long flags;
1244 spin_lock_irqsave(&channel->iob_lock, flags);
1245 buffer = __qeth_get_buffer(channel);
1246 spin_unlock_irqrestore(&channel->iob_lock, flags);
/* Sleep on channel->wait_q until a command buffer becomes available. */
1250 static struct qeth_cmd_buffer *
1251 qeth_wait_for_buffer(struct qeth_channel *channel)
1253 struct qeth_cmd_buffer *buffer;
1254 wait_event(channel->wait_q,
1255 ((buffer = qeth_get_buffer(channel)) != NULL));
/* Release every command buffer of @channel and reset both ring indices. */
1260 qeth_clear_cmd_buffers(struct qeth_channel *channel)
1264 for (cnt=0; cnt < QETH_CMD_BUFFER_NO; cnt++)
1265 qeth_release_buffer(channel,&channel->iob[cnt]);
1266 channel->buf_no = 0;
1267 channel->io_buf_no = 0;
1271 * start IDX for read and write channel
/*
 * Issue the read CCW that collects the peer's answer to an IDX ACTIVATE
 * and wait (interruptibly, bounded by QETH_TIMEOUT) until the channel
 * reaches CH_STATE_UP. @idx_reply_cb parses the reply buffer. On
 * ccw_device_start failure the irq_pending flag is reset and waiters are
 * woken so the channel is usable again.
 */
1274 qeth_idx_activate_get_answer(struct qeth_channel *channel,
1275 void (*idx_reply_cb)(struct qeth_channel *,
1276 struct qeth_cmd_buffer *))
1278 struct qeth_cmd_buffer *iob;
1279 unsigned long flags;
1281 struct qeth_card *card;
1283 QETH_DBF_TEXT(setup, 2, "idxanswr");
1284 card = CARD_FROM_CDEV(channel->ccwdev);
1285 iob = qeth_get_buffer(channel);
1286 iob->callback = idx_reply_cb;
1287 memcpy(&channel->ccw, READ_CCW, sizeof(struct ccw1));
1288 channel->ccw.count = QETH_BUFSIZE;
1289 channel->ccw.cda = (__u32) __pa(iob->data);
/* Claim the channel: wait until we atomically flip irq_pending 0 -> 1. */
1291 wait_event(card->wait_q,
1292 atomic_compare_and_swap(0,1,&channel->irq_pending) == 0);
1293 QETH_DBF_TEXT(setup, 6, "noirqpnd");
1294 spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
1295 rc = ccw_device_start(channel->ccwdev,
1296 &channel->ccw,(addr_t) iob, 0, 0);
1297 spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
1300 PRINT_ERR("qeth: Error2 in activating channel rc=%d\n",rc);
1301 QETH_DBF_TEXT_(setup, 2, "2err%d", rc);
1302 atomic_set(&channel->irq_pending, 0);
1303 wake_up(&card->wait_q);
1306 rc = wait_event_interruptible_timeout(card->wait_q,
1307 channel->state == CH_STATE_UP, QETH_TIMEOUT);
1308 if (rc == -ERESTARTSYS)
1310 if (channel->state != CH_STATE_UP){
1312 QETH_DBF_TEXT_(setup, 2, "3err%d", rc);
/*
 * Send an IDX ACTIVATE on @channel (write or read variant of the template,
 * chosen by identity against card->write), filling in the transport-header
 * sequence number, the issuer token, the function level, and the QDIO
 * device's devno and real address. Waits (interruptibly, bounded by
 * QETH_TIMEOUT) for the channel to reach CH_STATE_ACTIVATING, then chains
 * into qeth_idx_activate_get_answer() to read the peer's reply.
 */
1319 qeth_idx_activate_channel(struct qeth_channel *channel,
1320 void (*idx_reply_cb)(struct qeth_channel *,
1321 struct qeth_cmd_buffer *))
1323 struct qeth_card *card;
1324 struct qeth_cmd_buffer *iob;
1325 unsigned long flags;
1329 card = CARD_FROM_CDEV(channel->ccwdev);
1331 QETH_DBF_TEXT(setup, 2, "idxactch");
1333 iob = qeth_get_buffer(channel);
1334 iob->callback = idx_reply_cb;
1335 memcpy(&channel->ccw, WRITE_CCW, sizeof(struct ccw1));
1336 channel->ccw.count = IDX_ACTIVATE_SIZE;
1337 channel->ccw.cda = (__u32) __pa(iob->data);
/* Write channel increments the sequence number; read channel reuses it. */
1338 if (channel == &card->write) {
1339 memcpy(iob->data, IDX_ACTIVATE_WRITE, IDX_ACTIVATE_SIZE);
1340 memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data),
1341 &card->seqno.trans_hdr, QETH_SEQ_NO_LENGTH);
1342 card->seqno.trans_hdr++;
1344 memcpy(iob->data, IDX_ACTIVATE_READ, IDX_ACTIVATE_SIZE);
1345 memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data),
1346 &card->seqno.trans_hdr, QETH_SEQ_NO_LENGTH);
1348 memcpy(QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data),
1349 &card->token.issuer_rm_w,QETH_MPC_TOKEN_LENGTH);
1350 memcpy(QETH_IDX_ACT_FUNC_LEVEL(iob->data),
1351 &card->info.func_level,sizeof(__u16));
1352 temp = raw_devno_from_bus_id(CARD_DDEV_ID(card));
1353 memcpy(QETH_IDX_ACT_QDIO_DEV_CUA(iob->data), &temp, 2);
1354 temp = (card->info.cula << 8) + card->info.unit_addr2;
1355 memcpy(QETH_IDX_ACT_QDIO_DEV_REALADDR(iob->data), &temp, 2);
/* Claim the channel, then start the CCW under the ccwdev lock. */
1357 wait_event(card->wait_q,
1358 atomic_compare_and_swap(0,1,&channel->irq_pending) == 0);
1359 QETH_DBF_TEXT(setup, 6, "noirqpnd");
1360 spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
1361 rc = ccw_device_start(channel->ccwdev,
1362 &channel->ccw,(addr_t) iob, 0, 0);
1363 spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
1366 PRINT_ERR("qeth: Error1 in activating channel. rc=%d\n",rc);
1367 QETH_DBF_TEXT_(setup, 2, "1err%d", rc);
1368 atomic_set(&channel->irq_pending, 0);
1369 wake_up(&card->wait_q);
1372 rc = wait_event_interruptible_timeout(card->wait_q,
1373 channel->state == CH_STATE_ACTIVATING, QETH_TIMEOUT);
1374 if (rc == -ERESTARTSYS)
1376 if (channel->state != CH_STATE_ACTIVATING) {
1377 PRINT_WARN("qeth: IDX activate timed out!\n");
1378 QETH_DBF_TEXT_(setup, 2, "2err%d", -ETIME);
1381 return qeth_idx_activate_get_answer(channel,idx_reply_cb);
/*
 * Derive the expected peer function level from our own function level.
 * NOTE(review): the fall-through return for other values is among the
 * lines missing from this excerpt.
 */
1385 qeth_peer_func_level(int level)
1387 if ((level & 0xff) == 8)
1388 return (level & 0xff) + 0x400;
1389 if (((level >> 8) & 3) == 1)
1390 return (level & 0xff) + 0x200;
/*
 * IDX ACTIVATE reply callback for the write channel: validate the
 * positive reply and the peer function level, then mark the channel UP.
 */
1395 qeth_idx_write_cb(struct qeth_channel *channel, struct qeth_cmd_buffer *iob)
1397 struct qeth_card *card;
1400 QETH_DBF_TEXT(setup ,2, "idxwrcb");
/* first invocation only moves the channel out of DOWN state */
1402 if (channel->state == CH_STATE_DOWN) {
1403 channel->state = CH_STATE_ACTIVATING;
1406 card = CARD_FROM_CDEV(channel->ccwdev);
1408 if (!(QETH_IS_IDX_ACT_POS_REPLY(iob->data))) {
1409 PRINT_ERR("IDX_ACTIVATE on write channel device %s: negative "
1410 "reply\n", CARD_WDEV_ID(card));
/* bit 0x0100 of the received level is masked out before comparing */
1413 memcpy(&temp, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
1414 if ((temp & ~0x0100) != qeth_peer_func_level(card->info.func_level)) {
1415 PRINT_WARN("IDX_ACTIVATE on write channel device %s: "
1416 "function level mismatch "
1417 "(sent: 0x%x, received: 0x%x)\n",
1418 CARD_WDEV_ID(card), card->info.func_level, temp);
1421 channel->state = CH_STATE_UP;
1423 qeth_release_buffer(channel, iob);
/*
 * Check a received control buffer for an IDX TERMINATE indication and
 * log its cause code (0x22 hints at a wrong portname).
 * NOTE(review): the return statements are among the missing lines.
 */
1427 qeth_check_idx_response(unsigned char *buffer)
1432 QETH_DBF_HEX(control, 2, buffer, QETH_DBF_CONTROL_LEN);
/* 0xc0 in byte 2 marks an IDX TERMINATE */
1433 if ((buffer[2] & 0xc0) == 0xc0) {
1434 PRINT_WARN("received an IDX TERMINATE "
1435 "with cause code 0x%02x%s\n",
1437 ((buffer[4] == 0x22) ?
1438 " -- try another portname" : ""));
1439 QETH_DBF_TEXT(trace, 2, "ckidxres");
1440 QETH_DBF_TEXT(trace, 2, " idxterm");
1441 QETH_DBF_TEXT_(trace, 2, " rc%d", -EIO);
/*
 * IDX ACTIVATE reply callback for the read channel: validate the reply,
 * record the portname requirement, peer token and microcode level, then
 * mark the channel UP.
 */
1448 qeth_idx_read_cb(struct qeth_channel *channel, struct qeth_cmd_buffer *iob)
1450 struct qeth_card *card;
1453 QETH_DBF_TEXT(setup , 2, "idxrdcb");
/* first invocation only moves the channel out of DOWN state */
1454 if (channel->state == CH_STATE_DOWN) {
1455 channel->state = CH_STATE_ACTIVATING;
1459 card = CARD_FROM_CDEV(channel->ccwdev);
1460 if (qeth_check_idx_response(iob->data)) {
1463 if (!(QETH_IS_IDX_ACT_POS_REPLY(iob->data))) {
1464 PRINT_ERR("IDX_ACTIVATE on read channel device %s: negative "
1465 "reply\n", CARD_RDEV_ID(card));
1470 * temporary fix for microcode bug
1471 * to revert it, replace OR with AND
1473 if ( (!QETH_IDX_NO_PORTNAME_REQUIRED(iob->data)) ||
1474 (card->info.type == QETH_CARD_TYPE_OSAE) )
1475 card->info.portname_required = 1;
1477 memcpy(&temp, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
1478 if (temp != qeth_peer_func_level(card->info.func_level)) {
1479 PRINT_WARN("IDX_ACTIVATE on read channel device %s: function "
1480 "level mismatch (sent: 0x%x, received: 0x%x)\n",
1481 CARD_RDEV_ID(card), card->info.func_level, temp);
/* remember the peer's issuer token and microcode (MCL) level */
1484 memcpy(&card->token.issuer_rm_r,
1485 QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data),
1486 QETH_MPC_TOKEN_LENGTH);
1487 memcpy(&card->info.mcl_level[0],
1488 QETH_IDX_REPLY_LEVEL(iob->data), QETH_MCL_LENGTH);
1489 channel->state = CH_STATE_UP;
1491 qeth_release_buffer(channel,iob);
/*
 * Start the next read CCW on the control (read) channel so further
 * replies from the card can be received; schedules recovery if the
 * CCW cannot be started.
 */
1495 qeth_issue_next_read(struct qeth_card *card)
1498 struct qeth_cmd_buffer *iob;
1500 QETH_DBF_TEXT(trace,5,"issnxrd");
1501 if (card->read.state != CH_STATE_UP)
1503 iob = qeth_get_buffer(&card->read);
1505 PRINT_WARN("issue_next_read failed: no iob available!\n");
1508 qeth_setup_ccw(&card->read, iob->data, QETH_BUFSIZE);
/* claim the read channel before starting the CCW */
1509 wait_event(card->wait_q,
1510 atomic_compare_and_swap(0,1,&card->read.irq_pending) == 0);
1511 QETH_DBF_TEXT(trace, 6, "noirqpnd");
1512 rc = ccw_device_start(card->read.ccwdev, &card->read.ccw,
1513 (addr_t) iob, 0, 0);
/* on failure release the claim and let recovery restart the channel */
1515 PRINT_ERR("Error in starting next read ccw! rc=%i\n", rc);
1516 atomic_set(&card->read.irq_pending, 0);
1517 qeth_schedule_recovery(card);
1518 wake_up(&card->wait_q);
/*
 * Allocate and zero a refcounted qeth_reply; the refcount starts at 1.
 * GFP_ATOMIC: may be called from a non-sleeping context.
 */
1523 static struct qeth_reply *
1524 qeth_alloc_reply(struct qeth_card *card)
1526 struct qeth_reply *reply;
1528 reply = kmalloc(sizeof(struct qeth_reply), GFP_ATOMIC);
1530 memset(reply, 0, sizeof(struct qeth_reply));
1531 atomic_set(&reply->refcnt, 1);
/* Take an additional reference on a reply. */
1538 qeth_get_reply(struct qeth_reply *reply)
1540 WARN_ON(atomic_read(&reply->refcnt) <= 0);
1541 atomic_inc(&reply->refcnt);
/* Drop a reference; the reply is freed when the last one goes away. */
1545 qeth_put_reply(struct qeth_reply *reply)
1547 WARN_ON(atomic_read(&reply->refcnt) <= 0);
1548 if (atomic_dec_and_test(&reply->refcnt))
/*
 * Timer callback for a pending control command: if the reply is still
 * on the card's cmd_waiter_list, unlink it and wake up the waiter.
 */
1553 qeth_cmd_timeout(unsigned long data)
1555 struct qeth_reply *reply, *list_reply, *r;
1556 unsigned long flags;
1558 reply = (struct qeth_reply *) data;
1559 spin_lock_irqsave(&reply->card->lock, flags);
1560 list_for_each_entry_safe(list_reply, r,
1561 &reply->card->cmd_waiter_list, list) {
1562 if (reply == list_reply){
/* hold a reference while we wake the sleeper */
1563 qeth_get_reply(reply);
1564 list_del_init(&reply->list);
/* drop the lock before waking up the waiter */
1565 spin_unlock_irqrestore(&reply->card->lock, flags);
1567 reply->received = 1;
1568 wake_up(&reply->wait_q);
1569 qeth_put_reply(reply);
1573 spin_unlock_irqrestore(&reply->card->lock, flags);
/* Flush the card's IP address list and trigger re-registration. */
1577 qeth_reset_ip_addresses(struct qeth_card *card)
1579 QETH_DBF_TEXT(trace, 2, "rstipadd");
1581 qeth_clear_ip_list(card, 0, 1);
1582 /* this function will also schedule the SET_IP_THREAD */
1583 qeth_set_multicast_list(card->dev);
/*
 * Inspect a received control buffer: if it carries an IPA command,
 * return the command for reply matching; unsolicited commands
 * (STOPLAN/STARTLAN link events, address (un)registration) are handled
 * here directly.
 * NOTE(review): several break/return lines are missing from this view.
 */
1586 static struct qeth_ipa_cmd *
1587 qeth_check_ipa_data(struct qeth_card *card, struct qeth_cmd_buffer *iob)
1589 struct qeth_ipa_cmd *cmd = NULL;
1591 QETH_DBF_TEXT(trace,5,"chkipad");
1592 if (IS_IPA(iob->data)){
1593 cmd = (struct qeth_ipa_cmd *) PDU_ENCAPSULATION(iob->data);
1594 if (IS_IPA_REPLY(cmd))
1597 switch (cmd->hdr.command) {
/* link went down: report carrier off and stop the tx queue */
1598 case IPA_CMD_STOPLAN:
1599 PRINT_WARN("Link failure on %s (CHPID 0x%X) - "
1600 "there is a network problem or "
1601 "someone pulled the cable or "
1602 "disabled the port.\n",
1605 card->lan_online = 0;
1606 if (netif_carrier_ok(card->dev)) {
1607 netif_carrier_off(card->dev);
1608 netif_stop_queue(card->dev);
/* link came back: report carrier on and re-register IP addresses */
1611 case IPA_CMD_STARTLAN:
1612 PRINT_INFO("Link reestablished on %s "
1613 "(CHPID 0x%X). Scheduling "
1614 "IP address reset.\n",
1617 card->lan_online = 1;
1618 if (!netif_carrier_ok(card->dev)) {
1619 netif_carrier_on(card->dev);
1620 netif_wake_queue(card->dev);
1622 qeth_reset_ip_addresses(card);
1624 case IPA_CMD_REGISTER_LOCAL_ADDR:
1625 QETH_DBF_TEXT(trace,3, "irla");
1627 case IPA_CMD_UNREGISTER_LOCAL_ADDR:
1628 PRINT_WARN("probably problem on %s: "
1629 "received IPA command 0x%X\n",
1634 PRINT_WARN("Received data is IPA "
1635 "but not a reply!\n");
1644 * wake all waiting ipa commands
1647 qeth_clear_ipacmd_list(struct qeth_card *card)
1649 struct qeth_reply *reply, *r;
1650 unsigned long flags;
1652 QETH_DBF_TEXT(trace, 4, "clipalst");
1654 spin_lock_irqsave(&card->lock, flags);
/* unlink and wake every pending waiter so nobody blocks forever */
1655 list_for_each_entry_safe(reply, r, &card->cmd_waiter_list, list) {
1656 qeth_get_reply(reply);
1658 reply->received = 1;
1659 list_del_init(&reply->list);
1660 wake_up(&reply->wait_q);
1661 qeth_put_reply(reply);
1663 spin_unlock_irqrestore(&card->lock, flags);
/*
 * Completion callback for control data: match the received buffer
 * against the pending replies on cmd_waiter_list (by IPA seqno, or the
 * IDX wildcard seqno), run the registered reply callback and wake the
 * waiter; finally acknowledge the PDU sequence number.
 * NOTE(review): excerpt is missing lines (e.g. keep_reply declaration
 * and some braces/returns).
 */
1667 qeth_send_control_data_cb(struct qeth_channel *channel,
1668 struct qeth_cmd_buffer *iob)
1670 struct qeth_card *card;
1671 struct qeth_reply *reply, *r;
1672 struct qeth_ipa_cmd *cmd;
1673 unsigned long flags;
1676 QETH_DBF_TEXT(trace,4,"sndctlcb");
1678 card = CARD_FROM_CDEV(channel->ccwdev);
/* an IDX TERMINATE aborts all pending commands and triggers recovery */
1679 if (qeth_check_idx_response(iob->data)) {
1680 qeth_clear_ipacmd_list(card);
1681 qeth_schedule_recovery(card);
1685 cmd = qeth_check_ipa_data(card, iob);
1686 if ((cmd == NULL) && (card->state != CARD_STATE_DOWN))
1689 spin_lock_irqsave(&card->lock, flags);
1690 list_for_each_entry_safe(reply, r, &card->cmd_waiter_list, list) {
1691 if ((reply->seqno == QETH_IDX_COMMAND_SEQNO) ||
1692 ((cmd) && (reply->seqno == cmd->hdr.seqno))) {
1693 qeth_get_reply(reply);
1694 list_del_init(&reply->list);
1695 spin_unlock_irqrestore(&card->lock, flags);
/* callback decides whether to keep waiting for more replies */
1697 if (reply->callback != NULL) {
1699 reply->offset = (__u16)((char*)cmd -
1701 keep_reply = reply->callback(card,
1703 (unsigned long)cmd);
1706 keep_reply = reply->callback(card,
1708 (unsigned long)iob);
1711 reply->rc = (s16) cmd->hdr.return_code;
1713 reply->rc = iob->rc;
/* re-queue the reply if the callback wants more data */
1715 spin_lock_irqsave(&card->lock, flags);
1716 list_add_tail(&reply->list,
1717 &card->cmd_waiter_list);
1718 spin_unlock_irqrestore(&card->lock, flags);
1720 reply->received = 1;
1721 wake_up(&reply->wait_q);
1723 qeth_put_reply(reply);
1727 spin_unlock_irqrestore(&card->lock, flags);
/* acknowledge the peer's PDU header sequence number */
1729 memcpy(&card->seqno.pdu_hdr_ack,
1730 QETH_PDU_HEADER_SEQ_NO(iob->data),
1731 QETH_SEQ_NO_LENGTH);
1732 qeth_release_buffer(channel,iob);
/*
 * Send a control command on the write channel and wait (with timeout
 * timer) for the matching reply.  Sequence numbers are stamped into the
 * buffer, a qeth_reply is queued on cmd_waiter_list for the completion
 * callback, and the function blocks until reply->received is set.
 * NOTE(review): excerpt has gaps (rc declaration, some if/return lines,
 * add_timer call are not visible).
 */
1736 qeth_send_control_data(struct qeth_card *card, int len,
1737 struct qeth_cmd_buffer *iob,
1739 (struct qeth_card *, struct qeth_reply*, unsigned long),
1744 unsigned long flags;
1745 struct qeth_reply *reply;
1746 struct timer_list timer;
1748 QETH_DBF_TEXT(trace, 2, "sendctl");
1750 qeth_setup_ccw(&card->write,iob->data,len);
/* stamp transport- and PDU-level sequence numbers into the request */
1752 memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data),
1753 &card->seqno.trans_hdr, QETH_SEQ_NO_LENGTH);
1754 card->seqno.trans_hdr++;
1756 memcpy(QETH_PDU_HEADER_SEQ_NO(iob->data),
1757 &card->seqno.pdu_hdr, QETH_SEQ_NO_LENGTH);
1758 card->seqno.pdu_hdr++;
1759 memcpy(QETH_PDU_HEADER_ACK_SEQ_NO(iob->data),
1760 &card->seqno.pdu_hdr_ack, QETH_SEQ_NO_LENGTH);
1761 iob->callback = qeth_release_buffer;
1763 reply = qeth_alloc_reply(card);
/* NOTE(review): message typo -- "Could no" should read "Could not" */
1765 PRINT_WARN("Could no alloc qeth_reply!\n");
1768 reply->callback = reply_cb;
1769 reply->param = reply_param;
/* before the card is up, replies are matched by the IDX wildcard seqno */
1770 if (card->state == CARD_STATE_DOWN)
1771 reply->seqno = QETH_IDX_COMMAND_SEQNO;
1773 reply->seqno = card->seqno.ipa++;
/* arm a timeout timer; IPA commands get a longer timeout */
1775 timer.function = qeth_cmd_timeout;
1776 timer.data = (unsigned long) reply;
1777 if (IS_IPA(iob->data))
1778 timer.expires = jiffies + QETH_IPA_TIMEOUT;
1780 timer.expires = jiffies + QETH_TIMEOUT;
1781 init_waitqueue_head(&reply->wait_q);
1782 spin_lock_irqsave(&card->lock, flags);
1783 list_add_tail(&reply->list, &card->cmd_waiter_list);
1784 spin_unlock_irqrestore(&card->lock, flags);
1785 QETH_DBF_HEX(control, 2, iob->data, QETH_DBF_CONTROL_LEN);
/* claim the write channel, then start the CCW under the ccwdev lock */
1786 wait_event(card->wait_q,
1787 atomic_compare_and_swap(0,1,&card->write.irq_pending) == 0);
1788 QETH_DBF_TEXT(trace, 6, "noirqpnd");
1789 spin_lock_irqsave(get_ccwdev_lock(card->write.ccwdev), flags);
1790 rc = ccw_device_start(card->write.ccwdev, &card->write.ccw,
1791 (addr_t) iob, 0, 0);
1792 spin_unlock_irqrestore(get_ccwdev_lock(card->write.ccwdev), flags);
/* on start failure undo everything: unlink reply, release buffer/claim */
1794 PRINT_WARN("qeth_send_control_data: "
1795 "ccw_device_start rc = %i\n", rc);
1796 QETH_DBF_TEXT_(trace, 2, " err%d", rc);
1797 spin_lock_irqsave(&card->lock, flags);
1798 list_del_init(&reply->list);
1799 qeth_put_reply(reply);
1800 spin_unlock_irqrestore(&card->lock, flags);
1801 qeth_release_buffer(iob->channel, iob);
1802 atomic_set(&card->write.irq_pending, 0);
1803 wake_up(&card->wait_q);
/* block until the completion callback (or the timer) wakes us */
1807 wait_event(reply->wait_q, reply->received);
1808 del_timer_sync(&timer);
1810 qeth_put_reply(reply);
/*
 * Wrap an IPA command into the standard PDU header (with the ULP
 * connection token as destination) and send it as control data.
 */
1815 qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
1817 (struct qeth_card *,struct qeth_reply*, unsigned long),
1822 QETH_DBF_TEXT(trace,4,"sendipa");
1824 memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE);
1825 memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data),
1826 &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
1828 rc = qeth_send_control_data(card, IPA_CMD_LENGTH, iob,
1829 reply_cb, reply_param);
/* CM ENABLE reply callback: store the peer's filter token. */
1835 qeth_cm_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
1838 struct qeth_cmd_buffer *iob;
1840 QETH_DBF_TEXT(setup, 2, "cmenblcb");
1842 iob = (struct qeth_cmd_buffer *) data;
1843 memcpy(&card->token.cm_filter_r,
1844 QETH_CM_ENABLE_RESP_FILTER_TOKEN(iob->data),
1845 QETH_MPC_TOKEN_LENGTH);
1846 QETH_DBF_TEXT_(setup, 2, " rc%d", iob->rc);
/*
 * Send the CM ENABLE request carrying our issuer and filter tokens;
 * the reply is handled by qeth_cm_enable_cb().
 */
1851 qeth_cm_enable(struct qeth_card *card)
1854 struct qeth_cmd_buffer *iob;
1856 QETH_DBF_TEXT(setup,2,"cmenable");
1858 iob = qeth_wait_for_buffer(&card->write);
1859 memcpy(iob->data, CM_ENABLE, CM_ENABLE_SIZE);
1860 memcpy(QETH_CM_ENABLE_ISSUER_RM_TOKEN(iob->data),
1861 &card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH);
1862 memcpy(QETH_CM_ENABLE_FILTER_TOKEN(iob->data),
1863 &card->token.cm_filter_w, QETH_MPC_TOKEN_LENGTH);
1865 rc = qeth_send_control_data(card, CM_ENABLE_SIZE, iob,
1866 qeth_cm_enable_cb, NULL);
/* CM SETUP reply callback: store the peer's connection token. */
1871 qeth_cm_setup_cb(struct qeth_card *card, struct qeth_reply *reply,
1875 struct qeth_cmd_buffer *iob;
1877 QETH_DBF_TEXT(setup, 2, "cmsetpcb");
1879 iob = (struct qeth_cmd_buffer *) data;
1880 memcpy(&card->token.cm_connection_r,
1881 QETH_CM_SETUP_RESP_DEST_ADDR(iob->data),
1882 QETH_MPC_TOKEN_LENGTH);
1883 QETH_DBF_TEXT_(setup, 2, " rc%d", iob->rc);
/*
 * Send the CM SETUP request carrying our issuer, connection and filter
 * tokens; the reply is handled by qeth_cm_setup_cb().
 */
1888 qeth_cm_setup(struct qeth_card *card)
1891 struct qeth_cmd_buffer *iob;
1893 QETH_DBF_TEXT(setup,2,"cmsetup");
1895 iob = qeth_wait_for_buffer(&card->write);
1896 memcpy(iob->data, CM_SETUP, CM_SETUP_SIZE);
1897 memcpy(QETH_CM_SETUP_DEST_ADDR(iob->data),
1898 &card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH);
1899 memcpy(QETH_CM_SETUP_CONNECTION_TOKEN(iob->data),
1900 &card->token.cm_connection_w, QETH_MPC_TOKEN_LENGTH);
1901 memcpy(QETH_CM_SETUP_FILTER_TOKEN(iob->data),
1902 &card->token.cm_filter_r, QETH_MPC_TOKEN_LENGTH);
1903 rc = qeth_send_control_data(card, CM_SETUP_SIZE, iob,
1904 qeth_cm_setup_cb, NULL);
/*
 * ULP ENABLE reply callback: store the ULP filter token and derive the
 * MTU (either from the framesize reported in the reply or from
 * card-type defaults) plus the link type.
 */
1910 qeth_ulp_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
1914 __u16 mtu, framesize;
1917 struct qeth_cmd_buffer *iob;
1919 QETH_DBF_TEXT(setup, 2, "ulpenacb");
1921 iob = (struct qeth_cmd_buffer *) data;
1922 memcpy(&card->token.ulp_filter_r,
1923 QETH_ULP_ENABLE_RESP_FILTER_TOKEN(iob->data),
1924 QETH_MPC_TOKEN_LENGTH);
/* some card types report the usable frame size in the reply */
1925 if (qeth_get_mtu_out_of_mpc(card->info.type)) {
1926 memcpy(&framesize, QETH_ULP_ENABLE_RESP_MAX_MTU(iob->data), 2);
1927 mtu = qeth_get_mtu_outof_framesize(framesize);
1930 QETH_DBF_TEXT_(setup, 2, " rc%d", iob->rc);
1933 card->info.max_mtu = mtu;
1934 card->info.initial_mtu = mtu;
/* inbound buffer must hold the MTU plus some header slack */
1935 card->qdio.in_buf_size = mtu + 2 * PAGE_SIZE;
1937 card->info.initial_mtu = qeth_get_initial_mtu_for_card(card);
1938 card->info.max_mtu = qeth_get_max_mtu_for_card(card->info.type);
1939 card->qdio.in_buf_size = QETH_IN_BUF_SIZE_DEFAULT;
/* the link type is only present with a long-enough DIFINFO area */
1942 memcpy(&len, QETH_ULP_ENABLE_RESP_DIFINFO_LEN(iob->data), 2);
1943 if (len >= QETH_MPC_DIFINFO_LEN_INDICATES_LINK_TYPE) {
1945 QETH_ULP_ENABLE_RESP_LINK_TYPE(iob->data), 1);
1946 card->info.link_type = link_type;
1948 card->info.link_type = 0;
1949 QETH_DBF_TEXT_(setup, 2, " rc%d", iob->rc);
/*
 * Send the ULP ENABLE request with our port number, tokens and
 * portname; the reply is handled by qeth_ulp_enable_cb().
 */
1954 qeth_ulp_enable(struct qeth_card *card)
1957 struct qeth_cmd_buffer *iob;
1959 /*FIXME: trace view callbacks*/
1960 QETH_DBF_TEXT(setup,2,"ulpenabl");
1962 iob = qeth_wait_for_buffer(&card->write);
1963 memcpy(iob->data, ULP_ENABLE, ULP_ENABLE_SIZE);
1965 *(QETH_ULP_ENABLE_LINKNUM(iob->data)) =
1966 (__u8) card->info.portno;
1968 memcpy(QETH_ULP_ENABLE_DEST_ADDR(iob->data),
1969 &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
1970 memcpy(QETH_ULP_ENABLE_FILTER_TOKEN(iob->data),
1971 &card->token.ulp_filter_w, QETH_MPC_TOKEN_LENGTH);
1972 memcpy(QETH_ULP_ENABLE_PORTNAME_AND_LL(iob->data),
1973 card->info.portname, 9);
1974 rc = qeth_send_control_data(card, ULP_ENABLE_SIZE, iob,
1975 qeth_ulp_enable_cb, NULL);
/*
 * Parse the device number from the last four hex digits of a ccw bus
 * id string (e.g. "0.0.4711" -> 0x4711).
 */
1981 __raw_devno_from_bus_id(char *id)
1983 id += (strlen(id) - 4);
1984 return (__u16) simple_strtoul(id, &id, 16);
/* ULP SETUP reply callback: store the peer's ULP connection token. */
1988 qeth_ulp_setup_cb(struct qeth_card *card, struct qeth_reply *reply,
1991 struct qeth_cmd_buffer *iob;
1993 QETH_DBF_TEXT(setup, 2, "ulpstpcb");
1995 iob = (struct qeth_cmd_buffer *) data;
1996 memcpy(&card->token.ulp_connection_r,
1997 QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(iob->data),
1998 QETH_MPC_TOKEN_LENGTH);
1999 QETH_DBF_TEXT_(setup, 2, " rc%d", iob->rc);
/*
 * Send the ULP SETUP request with our tokens and the data device's
 * CUA / real address; the reply is handled by qeth_ulp_setup_cb().
 */
2004 qeth_ulp_setup(struct qeth_card *card)
2008 struct qeth_cmd_buffer *iob;
2010 QETH_DBF_TEXT(setup,2,"ulpsetup");
2012 iob = qeth_wait_for_buffer(&card->write);
2013 memcpy(iob->data, ULP_SETUP, ULP_SETUP_SIZE);
2015 memcpy(QETH_ULP_SETUP_DEST_ADDR(iob->data),
2016 &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
2017 memcpy(QETH_ULP_SETUP_CONNECTION_TOKEN(iob->data),
2018 &card->token.ulp_connection_w, QETH_MPC_TOKEN_LENGTH);
2019 memcpy(QETH_ULP_SETUP_FILTER_TOKEN(iob->data),
2020 &card->token.ulp_filter_r, QETH_MPC_TOKEN_LENGTH);
/* patch the data device number and real address into the request */
2022 temp = __raw_devno_from_bus_id(CARD_DDEV_ID(card));
2023 memcpy(QETH_ULP_SETUP_CUA(iob->data), &temp, 2);
2024 temp = (card->info.cula << 8) + card->info.unit_addr2;
2025 memcpy(QETH_ULP_SETUP_REAL_DEVADDR(iob->data), &temp, 2);
2026 rc = qeth_send_control_data(card, ULP_SETUP_SIZE, iob,
2027 qeth_ulp_setup_cb, NULL);
/*
 * Check an inbound QDIO buffer for qdio/siga errors and log the SBAL
 * flag bytes (elements 14/15) for later analysis.
 * NOTE(review): the return statement(s) are among the missing lines.
 */
2032 qeth_check_for_inbound_error(struct qeth_qdio_buffer *buf,
2033 unsigned int qdio_error,
2034 unsigned int siga_error)
2038 if (qdio_error || siga_error) {
2039 QETH_DBF_TEXT(trace, 2, "qdinerr");
2040 QETH_DBF_TEXT(qerr, 2, "qdinerr");
2041 QETH_DBF_TEXT_(qerr, 2, " F15=%02X",
2042 buf->buffer->element[15].flags & 0xff);
2043 QETH_DBF_TEXT_(qerr, 2, " F14=%02X",
2044 buf->buffer->element[14].flags & 0xff);
2045 QETH_DBF_TEXT_(qerr, 2, " qerr=%X", qdio_error);
2046 QETH_DBF_TEXT_(qerr, 2, " serr=%X", siga_error);
/*
 * Allocate an skb for a received packet; with VLAN support enabled,
 * reserve VLAN_HLEN headroom so a VLAN tag can be pushed later.
 */
2052 static inline struct sk_buff *
2053 qeth_get_skb(unsigned int length)
2055 struct sk_buff* skb;
2056 #ifdef CONFIG_QETH_VLAN
2057 if ((skb = dev_alloc_skb(length + VLAN_HLEN)))
2058 skb_reserve(skb, VLAN_HLEN);
2060 skb = dev_alloc_skb(length);
/*
 * Extract the next packet from a QDIO inbound buffer: locate the qeth
 * header at *__offset within *__element, allocate an skb and copy the
 * packet data, following SBAL element chains when a packet spans
 * elements.  Updates *__element/*__offset for the next call and
 * returns NULL at end of buffer or on error.
 * NOTE(review): excerpt has gaps (loop headers, element++ advance and
 * some returns are not visible).
 */
2065 static inline struct sk_buff *
2066 qeth_get_next_skb(struct qeth_card *card, struct qdio_buffer *buffer,
2067 struct qdio_buffer_element **__element, int *__offset,
2068 struct qeth_hdr **hdr)
2070 struct qdio_buffer_element *element = *__element;
2071 int offset = *__offset;
2072 struct sk_buff *skb = NULL;
2077 QETH_DBF_TEXT(trace,6,"nextskb");
2078 /* qeth_hdr must not cross element boundaries */
2079 if (element->length < offset + sizeof(struct qeth_hdr)){
2080 if (qeth_is_last_sbale(element))
2084 if (element->length < sizeof(struct qeth_hdr))
2087 *hdr = element->addr + offset;
2089 offset += sizeof(struct qeth_hdr);
2090 skb_len = (*hdr)->length;
/* with fake_ll, reserve room for a synthetic LL header up front */
2093 if (card->options.fake_ll){
2094 if (!(skb = qeth_get_skb(skb_len + QETH_FAKE_LL_LEN)))
2096 skb_pull(skb, QETH_FAKE_LL_LEN);
2097 } else if (!(skb = qeth_get_skb(skb_len)))
2099 data_ptr = element->addr + offset;
/* copy as much as fits in the current SBAL element, then chain on */
2101 data_len = min(skb_len, (int)(element->length - offset));
2103 memcpy(skb_put(skb, data_len), data_ptr, data_len);
2104 skb_len -= data_len;
/* premature end of buffer while packet data remains: count rx error */
2106 if (qeth_is_last_sbale(element)){
2107 QETH_DBF_TEXT(trace,4,"unexeob");
2108 QETH_DBF_TEXT_(trace,4,"%s",CARD_BUS_ID(card));
2109 QETH_DBF_TEXT(qerr,2,"unexeob");
2110 QETH_DBF_TEXT_(qerr,2,"%s",CARD_BUS_ID(card));
2111 QETH_DBF_HEX(misc,4,buffer,sizeof(*buffer));
2112 dev_kfree_skb_any(skb);
2113 card->stats.rx_errors++;
2118 data_ptr = element->addr;
2123 *__element = element;
/* allocation failure path: rate-limited warning, count rx drop */
2127 if (net_ratelimit()){
2128 PRINT_WARN("No memory for packet received on %s.\n",
2129 card->info.if_name);
2130 QETH_DBF_TEXT(trace,2,"noskbmem");
2131 QETH_DBF_TEXT_(trace,2,"%s",CARD_BUS_ID(card));
2133 card->stats.rx_dropped++;
/*
 * Determine skb->protocol and skb->pkt_type for a received frame,
 * analogous to eth_type_trans(); token ring links are delegated to
 * tr_type_trans().
 */
2137 static inline unsigned short
2138 qeth_type_trans(struct sk_buff *skb, struct net_device *dev)
2141 struct qeth_card *card;
2143 QETH_DBF_TEXT(trace,5,"typtrans");
2145 card = (struct qeth_card *)dev->priv;
2147 if ((card->info.link_type == QETH_LINK_TYPE_HSTR) ||
2148 (card->info.link_type == QETH_LINK_TYPE_LANE_TR))
2149 return tr_type_trans(skb,dev);
2150 #endif /* CONFIG_TR */
2152 skb->mac.raw = skb->data;
2153 skb_pull(skb, ETH_ALEN * 2 + sizeof (short));
/* multicast bit set in the destination address? */
2156 if (*eth->h_dest & 1) {
2157 if (memcmp(eth->h_dest, dev->broadcast, ETH_ALEN) == 0)
2158 skb->pkt_type = PACKET_BROADCAST;
2160 skb->pkt_type = PACKET_MULTICAST;
2162 skb->pkt_type = PACKET_OTHERHOST;
/* values >= 1536 are EtherType, below that it is a length field */
2164 if (ntohs(eth->h_proto) >= 1536)
2165 return eth->h_proto;
2166 if (*(unsigned short *) (skb->data) == 0xFFFF)
2167 return htons(ETH_P_802_3);
2168 return htons(ETH_P_802_2);
/*
 * Synthesize a fake ethernet (link-layer) header in the headroom of a
 * received layer-3 skb so applications that expect an LL header (e.g.
 * packet sockets) see one.
 */
2172 qeth_rebuild_skb_fake_ll(struct qeth_card *card, struct sk_buff *skb,
2173 struct qeth_hdr *hdr)
2175 struct ethhdr *fake_hdr;
2176 struct iphdr *ip_hdr;
2178 QETH_DBF_TEXT(trace,5,"skbfake");
2179 skb->mac.raw = skb->data - QETH_FAKE_LL_LEN;
2180 /* this is a fake ethernet header */
2181 fake_hdr = (struct ethhdr *) skb->mac.raw;
2183 /* the destination MAC address */
2184 switch (skb->pkt_type){
/* multicast: map the IP/IPv6 destination to a multicast MAC */
2185 case PACKET_MULTICAST:
2186 switch (skb->protocol){
2187 #ifdef CONFIG_QETH_IPV6
2188 case __constant_htons(ETH_P_IPV6):
2189 ndisc_mc_map((struct in6_addr *)
2190 skb->data + QETH_FAKE_LL_V6_ADDR_POS,
2191 fake_hdr->h_dest, card->dev, 0);
2193 #endif /* CONFIG_QETH_IPV6 */
2194 case __constant_htons(ETH_P_IP):
2195 ip_hdr = (struct iphdr *)skb->data;
2196 if (card->dev->type == ARPHRD_IEEE802_TR)
2197 ip_tr_mc_map(ip_hdr->daddr, fake_hdr->h_dest);
2199 ip_eth_mc_map(ip_hdr->daddr, fake_hdr->h_dest);
2202 memcpy(fake_hdr->h_dest, card->dev->dev_addr, ETH_ALEN);
2205 case PACKET_BROADCAST:
2206 memset(fake_hdr->h_dest, 0xff, ETH_ALEN);
/* default: use our own device address as destination */
2209 memcpy(fake_hdr->h_dest, card->dev->dev_addr, ETH_ALEN);
2211 /* the source MAC address */
2212 if (hdr->ext_flags & QETH_HDR_EXT_SRC_MAC_ADDR)
2213 memcpy(fake_hdr->h_source, &hdr->dest_addr[2], ETH_ALEN);
2215 memset(fake_hdr->h_source, 0, ETH_ALEN);
2217 fake_hdr->h_proto = skb->protocol;
/*
 * If the qeth header flags the frame as VLAN-tagged, push a VLAN tag
 * (id from the qeth header) in front of the packet and switch the
 * protocol to 802.1Q.
 */
2221 qeth_rebuild_skb_vlan(struct qeth_card *card, struct sk_buff *skb,
2222 struct qeth_hdr *hdr)
2224 #ifdef CONFIG_QETH_VLAN
2227 if (hdr->ext_flags & QETH_HDR_EXT_VLAN_FRAME) {
2228 vlan_tag = (u16 *) skb_push(skb, VLAN_HLEN);
2229 *vlan_tag = hdr->vlan_id;
2230 *(vlan_tag + 1) = skb->protocol;
2231 skb->protocol = __constant_htons(ETH_P_8021Q);
2233 #endif /* CONFIG_QETH_VLAN */
/*
 * Fill in protocol, packet type, VLAN/fake-LL headers and checksum
 * status of a received skb from the qeth header flags.
 */
2237 qeth_rebuild_skb(struct qeth_card *card, struct sk_buff *skb,
2238 struct qeth_hdr *hdr)
2240 #ifdef CONFIG_QETH_IPV6
/* passthru frames carry their own LL header; classify like ethernet */
2241 if (hdr->flags & QETH_HDR_PASSTHRU){
2242 skb->protocol = qeth_type_trans(skb, card->dev);
2245 #endif /* CONFIG_QETH_IPV6 */
2246 skb->protocol = htons((hdr->flags & QETH_HDR_IPV6)? ETH_P_IPV6 :
2248 switch (hdr->flags & QETH_HDR_CAST_MASK){
2249 case QETH_CAST_UNICAST:
2250 skb->pkt_type = PACKET_HOST;
2252 case QETH_CAST_MULTICAST:
2253 skb->pkt_type = PACKET_MULTICAST;
2254 card->stats.multicast++;
2256 case QETH_CAST_BROADCAST:
2257 skb->pkt_type = PACKET_BROADCAST;
2258 card->stats.multicast++;
2260 case QETH_CAST_ANYCAST:
2261 case QETH_CAST_NOCAST:
2263 skb->pkt_type = PACKET_HOST;
2265 qeth_rebuild_skb_vlan(card, skb, hdr);
2266 if (card->options.fake_ll)
2267 qeth_rebuild_skb_fake_ll(card, skb, hdr);
2269 skb->mac.raw = skb->data;
/* with HW checksumming, trust the card only if both bits are set */
2270 skb->ip_summed = card->options.checksum_type;
2271 if (card->options.checksum_type == HW_CHECKSUMMING){
2272 if ( (hdr->ext_flags &
2273 (QETH_HDR_EXT_CSUM_HDR_REQ |
2274 QETH_HDR_EXT_CSUM_TRANSP_REQ)) ==
2275 (QETH_HDR_EXT_CSUM_HDR_REQ |
2276 QETH_HDR_EXT_CSUM_TRANSP_REQ) )
2277 skb->ip_summed = CHECKSUM_UNNECESSARY;
2279 skb->ip_summed = SW_CHECKSUMMING;
/*
 * Walk one inbound QDIO buffer, extract each packet via
 * qeth_get_next_skb(), rebuild its metadata and hand it to the network
 * stack with netif_rx(); packets arriving while the device is down are
 * dropped.
 */
2284 qeth_process_inbound_buffer(struct qeth_card *card,
2285 struct qeth_qdio_buffer *buf, int index)
2287 struct qdio_buffer_element *element;
2289 struct sk_buff *skb;
2290 struct qeth_hdr *hdr;
2293 /* get first element of current buffer */
2294 element = (struct qdio_buffer_element *)&buf->buffer->element[0];
2296 #ifdef CONFIG_QETH_PERF_STATS
2297 card->perf_stats.bufs_rec++;
2299 while((skb = qeth_get_next_skb(card, buf->buffer, &element,
2301 qeth_rebuild_skb(card, skb, hdr);
2302 /* is device UP ? */
2303 if (!(card->dev->flags & IFF_UP)){
2304 dev_kfree_skb_any(skb);
2307 skb->dev = card->dev;
2308 rxrc = netif_rx(skb);
2309 card->dev->last_rx = jiffies;
2310 card->stats.rx_packets++;
2311 card->stats.rx_bytes += skb->len;
/*
 * Take the next free entry off the card's inbound buffer pool list;
 * the entry is unlinked before it is returned.
 */
2315 static inline struct qeth_buffer_pool_entry *
2316 qeth_get_buffer_pool_entry(struct qeth_card *card)
2318 struct qeth_buffer_pool_entry *entry;
2320 QETH_DBF_TEXT(trace, 6, "gtbfplen");
2321 if (!list_empty(&card->qdio.in_buf_pool.entry_list)) {
2322 entry = list_entry(card->qdio.in_buf_pool.entry_list.next,
2323 struct qeth_buffer_pool_entry, list);
2324 list_del_init(&entry->list);
/*
 * Prepare an inbound QDIO buffer for requeueing: attach a pool entry
 * and point every SBAL element at one page of it, flagging the last
 * element.
 */
2331 qeth_init_input_buffer(struct qeth_card *card, struct qeth_qdio_buffer *buf)
2333 struct qeth_buffer_pool_entry *pool_entry;
2336 pool_entry = qeth_get_buffer_pool_entry(card);
2338 * since the buffer is accessed only from the input_tasklet
2339 * there shouldn't be a need to synchronize; also, since we use
2340 * the QETH_IN_BUF_REQUEUE_THRESHOLD we should never run out of
2343 BUG_ON(!pool_entry);
2345 buf->pool_entry = pool_entry;
2346 for(i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i){
2347 buf->buffer->element[i].length = PAGE_SIZE;
2348 buf->buffer->element[i].addr = pool_entry->elements[i];
2349 if (i == QETH_MAX_BUFFER_ELEMENTS(card) - 1)
2350 buf->buffer->element[i].flags = SBAL_FLAGS_LAST_ENTRY;
2352 buf->buffer->element[i].flags = 0;
2354 buf->state = QETH_QDIO_BUF_EMPTY;
/*
 * Reset an outbound QDIO buffer after transmission: drop the PCI-flag
 * accounting, free all queued skbs, clear the SBAL elements and mark
 * the buffer EMPTY.
 */
2358 qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
2359 struct qeth_qdio_out_buffer *buf)
2362 struct sk_buff *skb;
2364 /* is PCI flag set on buffer? */
2365 if (buf->buffer->element[0].flags & 0x40)
2366 atomic_dec(&queue->set_pci_flags_count)
;
2368 for(i = 0; i < QETH_MAX_BUFFER_ELEMENTS(queue->card); ++i){
2369 buf->buffer->element[i].length = 0;
2370 buf->buffer->element[i].addr = NULL;
2371 buf->buffer->element[i].flags = 0;
/* release all skbs attached to this buffer */
2372 while ((skb = skb_dequeue(&buf->skb_list))){
2373 atomic_dec(&skb->users);
2374 dev_kfree_skb_any(skb);
2377 buf->next_element_to_fill = 0;
2378 atomic_set(&buf->state, QETH_QDIO_BUF_EMPTY);
/*
 * Give processed inbound buffers back to the hardware.  Buffers are
 * collected and only requeued via do_QDIO() once a threshold is
 * reached, to reduce the number of SIGA instructions.
 */
2382 qeth_queue_input_buffer(struct qeth_card *card, int index)
2384 struct qeth_qdio_q *queue = card->qdio.in_q;
2389 QETH_DBF_TEXT(trace,6,"queinbuf");
/* number of buffers processed since the last requeue (ring wraps) */
2390 count = (index < queue->next_buf_to_init)?
2391 card->qdio.in_buf_pool.buf_count -
2392 (queue->next_buf_to_init - index) :
2393 card->qdio.in_buf_pool.buf_count -
2394 (queue->next_buf_to_init + QDIO_MAX_BUFFERS_PER_Q - index);
2395 /* only requeue at a certain threshold to avoid SIGAs */
2396 if (count >= QETH_IN_BUF_REQUEUE_THRESHOLD(card)){
2397 for (i = queue->next_buf_to_init;
2398 i < queue->next_buf_to_init + count; ++i)
2399 qeth_init_input_buffer(card,
2400 &queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q]);
2402 * according to old code it should be avoided to requeue all
2403 * 128 buffers in order to benefit from PCI avoidance.
2404 * this function keeps at least one buffer (the buffer at
2405 * 'index') un-requeued -> this buffer is the first buffer that
2406 * will be requeued the next time
2408 #ifdef CONFIG_QETH_PERF_STATS
2409 card->perf_stats.inbound_do_qdio_cnt++;
2410 card->perf_stats.inbound_do_qdio_start_time = qeth_get_micros();
2412 rc = do_QDIO(CARD_DDEV(card),
2413 QDIO_FLAG_SYNC_INPUT | QDIO_FLAG_UNDER_INTERRUPT,
2414 0, queue->next_buf_to_init, count, NULL);
2415 #ifdef CONFIG_QETH_PERF_STATS
2416 card->perf_stats.inbound_do_qdio_time += qeth_get_micros() -
2417 card->perf_stats.inbound_do_qdio_start_time;
2420 PRINT_WARN("qeth_queue_input_buffer's do_QDIO "
2421 "return %i (device %s).\n",
2422 rc, CARD_DDEV_ID(card));
2423 QETH_DBF_TEXT(trace,2,"qinberr");
2424 QETH_DBF_TEXT_(trace,2,"%s",CARD_BUS_ID(card));
2426 queue->next_buf_to_init = (queue->next_buf_to_init + count) %
2427 QDIO_MAX_BUFFERS_PER_Q;
/* Return a pool entry to the card's inbound buffer pool free list. */
2432 qeth_put_buffer_pool_entry(struct qeth_card *card,
2433 struct qeth_buffer_pool_entry *entry)
2435 QETH_DBF_TEXT(trace, 6, "ptbfplen");
2436 list_add_tail(&entry->list, &card->qdio.in_buf_pool.entry_list);
/*
 * QDIO inbound interrupt handler: on an activate-check condition
 * schedule recovery; otherwise process every indicated buffer, return
 * its pool entry and requeue it to the hardware.
 */
2440 qeth_qdio_input_handler(struct ccw_device * ccwdev, unsigned int status,
2441 unsigned int qdio_err, unsigned int siga_err,
2442 unsigned int queue, int first_element, int count,
2443 unsigned long card_ptr)
2445 struct net_device *net_dev;
2446 struct qeth_card *card;
2447 struct qeth_qdio_buffer *buffer;
2451 QETH_DBF_TEXT(trace, 6, "qdinput");
2452 card = (struct qeth_card *) card_ptr;
2453 net_dev = card->dev;
2454 #ifdef CONFIG_QETH_PERF_STATS
2455 card->perf_stats.inbound_cnt++;
2456 card->perf_stats.inbound_start_time = qeth_get_micros();
2458 if (status & QDIO_STATUS_LOOK_FOR_ERROR) {
2459 if (status & QDIO_STATUS_ACTIVATE_CHECK_CONDITION){
2460 QETH_DBF_TEXT(trace, 1,"qdinchk");
2461 QETH_DBF_TEXT_(trace,1,"%s",CARD_BUS_ID(card));
2462 QETH_DBF_TEXT_(trace,1,"%04X%04X",first_element,count);
2463 QETH_DBF_TEXT_(trace,1,"%04X%04X", queue, status);
2464 qeth_schedule_recovery(card);
/* walk the indicated ring slice; skip buffers with inbound errors */
2468 for (i = first_element; i < (first_element + count); ++i) {
2469 index = i % QDIO_MAX_BUFFERS_PER_Q;
2470 buffer = &card->qdio.in_q->bufs[index];
2471 if (!((status == QDIO_STATUS_LOOK_FOR_ERROR) &&
2472 qeth_check_for_inbound_error(buffer, qdio_err, siga_err)))
2473 qeth_process_inbound_buffer(card, buffer, index);
2474 /* clear buffer and give back to hardware */
2475 qeth_put_buffer_pool_entry(card, buffer->pool_entry);
2476 qeth_queue_input_buffer(card, index);
2478 #ifdef CONFIG_QETH_PERF_STATS
2479 card->perf_stats.inbound_time += qeth_get_micros() -
2480 card->perf_stats.inbound_start_time;
/*
 * Classify an outbound completion error from the SIGA condition code
 * and SBAL flag byte 15 into a QETH_SEND_ERROR_* action (none, retry,
 * link failure, or kick/recover).
 * NOTE(review): the switch(cc) skeleton and some case labels are among
 * the lines missing from this excerpt.
 */
2485 qeth_handle_send_error(struct qeth_card *card,
2486 struct qeth_qdio_out_buffer *buffer,
2487 int qdio_err, int siga_err)
2489 int sbalf15 = buffer->buffer->element[15].flags & 0xff;
2490 int cc = siga_err & 3;
2492 QETH_DBF_TEXT(trace, 6, "hdsnderr");
2496 QETH_DBF_TEXT(trace, 1,"lnkfail");
2497 QETH_DBF_TEXT_(trace,1,"%s",CARD_BUS_ID(card));
2498 QETH_DBF_TEXT_(trace,1,"%04x %02x",
2499 (u16)qdio_err, (u8)sbalf15);
2500 return QETH_SEND_ERROR_LINK_FAILURE;
2502 return QETH_SEND_ERROR_NONE;
2504 if (siga_err & QDIO_SIGA_ERROR_B_BIT_SET) {
2505 QETH_DBF_TEXT(trace, 1, "SIGAcc2B");
2506 QETH_DBF_TEXT_(trace,1,"%s",CARD_BUS_ID(card));
2507 return QETH_SEND_ERROR_KICK_IT;
/* sbalf15 values 15..31 indicate a retryable condition */
2509 if ((sbalf15 >= 15) && (sbalf15 <= 31))
2510 return QETH_SEND_ERROR_RETRY;
2511 return QETH_SEND_ERROR_LINK_FAILURE;
2512 /* look at qdio_error and sbalf 15 */
2514 QETH_DBF_TEXT(trace, 1, "SIGAcc1");
2515 QETH_DBF_TEXT_(trace,1,"%s",CARD_BUS_ID(card));
2516 return QETH_SEND_ERROR_LINK_FAILURE;
2518 QETH_DBF_TEXT(trace, 1, "SIGAcc3");
2519 QETH_DBF_TEXT_(trace,1,"%s",CARD_BUS_ID(card));
2520 return QETH_SEND_ERROR_KICK_IT;
2522 return QETH_SEND_ERROR_LINK_FAILURE;
/*
 * Hand a range of prepared outbound buffers to the hardware via
 * do_QDIO().  Before flushing, the last filled SBAL element gets its
 * LAST_ENTRY flag and, where needed, a PCI request flag (0x40) is set
 * on element 0 so a completion interrupt will be raised.  On failure
 * recovery is scheduled.
 */
2526 qeth_flush_buffers(struct qeth_qdio_out_q *queue, int under_int,
2527 int index, int count)
2529 struct qeth_qdio_out_buffer *buf;
2533 QETH_DBF_TEXT(trace, 6, "flushbuf");
2535 for (i = index; i < index + count; ++i) {
2536 buf = &queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q];
2537 buf->buffer->element[buf->next_element_to_fill - 1].flags |=
2538 SBAL_FLAGS_LAST_ENTRY;
/* HiperSockets (IQD) devices do not use the PCI flag mechanism */
2540 if (queue->card->info.type == QETH_CARD_TYPE_IQD)
2543 if (!queue->do_pack){
2544 if ((atomic_read(&queue->used_buffers) >=
2545 (QETH_HIGH_WATERMARK_PACK -
2546 QETH_WATERMARK_PACK_FUZZ)) &&
2547 !atomic_read(&queue->set_pci_flags_count)){
2548 /* it's likely that we'll go to packing
2550 atomic_inc(&queue->set_pci_flags_count);
2551 buf->buffer->element[0].flags |= 0x40;
2554 if (!atomic_read(&queue->set_pci_flags_count)){
2556 * there's no outstanding PCI any more, so we
2557 * have to request a PCI to be sure that the PCI
2558 * will wake at some time in the future then we
2559 * can flush packed buffers that might still be
2560 * hanging around, which can happen if no
2561 * further send was requested by the stack
2563 atomic_inc(&queue->set_pci_flags_count);
2564 buf->buffer->element[0].flags |= 0x40;
2566 #ifdef CONFIG_QETH_PERF_STATS
2567 queue->card->perf_stats.bufs_sent_pack++;
2572 queue->card->dev->trans_start = jiffies;
2573 #ifdef CONFIG_QETH_PERF_STATS
2574 queue->card->perf_stats.outbound_do_qdio_cnt++;
2575 queue->card->perf_stats.outbound_do_qdio_start_time = qeth_get_micros();
/* under interrupt context an additional QDIO flag must be passed */
2578 rc = do_QDIO(CARD_DDEV(queue->card),
2579 QDIO_FLAG_SYNC_OUTPUT | QDIO_FLAG_UNDER_INTERRUPT,
2580 queue->queue_no, index, count, NULL);
2582 rc = do_QDIO(CARD_DDEV(queue->card), QDIO_FLAG_SYNC_OUTPUT,
2583 queue->queue_no, index, count, NULL);
2584 #ifdef CONFIG_QETH_PERF_STATS
2585 queue->card->perf_stats.outbound_do_qdio_time += qeth_get_micros() -
2586 queue->card->perf_stats.outbound_do_qdio_start_time;
2589 QETH_DBF_SPRINTF(trace, 0, "qeth_flush_buffers: do_QDIO "
2590 "returned error (%i) on device %s.",
2591 rc, CARD_DDEV_ID(queue->card));
2592 QETH_DBF_TEXT(trace, 2, "flushbuf");
2593 QETH_DBF_TEXT_(trace, 2, " err%d", rc);
2594 queue->card->stats.tx_errors += count;
2595 /* this must not happen under normal circumstances. if it
2596 * happens something is really wrong -> recover */
2597 qeth_schedule_recovery(queue->card);
2600 atomic_add(count, &queue->used_buffers);
2601 #ifdef CONFIG_QETH_PERF_STATS
2602 queue->card->perf_stats.bufs_sent += count;
/*
 * Switches to packing state if the number of used buffers on a queue
 * reaches a certain limit (QETH_HIGH_WATERMARK_PACK).
 */
2611 qeth_switch_to_packing_if_needed(struct qeth_qdio_out_q *queue)
2613 if (!queue->do_pack) {
2614 if (atomic_read(&queue->used_buffers)
2615 >= QETH_HIGH_WATERMARK_PACK){
/* switch non-PACKING -> PACKING */
2617 QETH_DBF_TEXT(trace, 6, "np->pack");
2618 #ifdef CONFIG_QETH_PERF_STATS
2619 queue->card->perf_stats.sc_dp_p++;
/*
 * Switches from packing to non-packing mode. If there is a packing
 * buffer on the queue this buffer will be prepared to be flushed.
 * In that case 1 is returned to inform the caller. If no buffer
 * has to be flushed, zero is returned.
 */
2633 qeth_switch_to_nonpacking_if_needed(struct qeth_qdio_out_q *queue)
2635 struct qeth_qdio_out_buffer *buffer;
2636 int flush_count = 0;
2638 if (queue->do_pack) {
2639 if (atomic_read(&queue->used_buffers)
2640 <= QETH_LOW_WATERMARK_PACK) {
/* switch PACKING -> non-PACKING */
2642 QETH_DBF_TEXT(trace, 6, "pack->np");
2643 #ifdef CONFIG_QETH_PERF_STATS
2644 queue->card->perf_stats.sc_p_dp++;
/* flush the partially-filled packing buffer, if any */
2648 buffer = &queue->bufs[queue->next_buf_to_fill];
2649 if ((atomic_read(&buffer->state) ==
2650 QETH_QDIO_BUF_EMPTY) &&
2651 (buffer->next_element_to_fill > 0)) {
2652 atomic_set(&buffer->state,QETH_QDIO_BUF_PRIMED);
2654 queue->next_buf_to_fill =
2655 (queue->next_buf_to_fill + 1) %
2656 QDIO_MAX_BUFFERS_PER_Q;
/*
 * Called to flush a packing buffer if no more pci flags are on the queue.
 * Checks if there is a packing buffer and prepares it to be flushed.
 * In that case returns 1, otherwise zero.
 */
2669 qeth_flush_buffers_on_no_pci(struct qeth_qdio_out_q *queue)
2671 struct qeth_qdio_out_buffer *buffer;
2673 buffer = &queue->bufs[queue->next_buf_to_fill];
2674 if((atomic_read(&buffer->state) == QETH_QDIO_BUF_EMPTY) &&
2675 (buffer->next_element_to_fill > 0)){
/* it's a partially-filled packing buffer: prime it for flushing */
2677 atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
2678 queue->next_buf_to_fill =
2679 (queue->next_buf_to_fill + 1) % QDIO_MAX_BUFFERS_PER_Q;
/*
 * Post-completion maintenance on an outbound queue: possibly leave
 * packing mode and/or flush a leftover packing buffer so a PCI flag
 * gets out on the wire.
 */
2686 qeth_check_outbound_queue(struct qeth_qdio_out_q *queue)
/*
 * check if we have to switch to non-packing mode or if
 * we have to get a pci flag out on the queue
 */
2695 if ((atomic_read(&queue->used_buffers) <= QETH_LOW_WATERMARK_PACK) ||
2696 !atomic_read(&queue->set_pci_flags_count)){
2697 if (atomic_swap(&queue->state, QETH_OUT_Q_LOCKED_FLUSH) ==
2698 QETH_OUT_Q_UNLOCKED) {
/*
 * If we get in here, there was no action in
 * do_send_packet. So, we check if there is a
 * packing buffer to be flushed here.
 */
/* TODO: try if we get a performance improvement
 * by calling netif_stop_queue here */
/* save start index for flushing */
2707 index = queue->next_buf_to_fill;
2708 flush_cnt += qeth_switch_to_nonpacking_if_needed(queue);
2710 !atomic_read(&queue->set_pci_flags_count))
2712 qeth_flush_buffers_on_no_pci(queue);
/* we're done with updating critical queue members */
2714 atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
/* flushing can be done outside the lock */
2717 qeth_flush_buffers(queue, 1, index, flush_cnt);
/*
 * QDIO output-queue completion handler: on activate-check conditions
 * it stops traffic and triggers recovery; otherwise it handles send
 * errors per buffer, recycles completed buffers, runs the outbound
 * queue maintenance and wakes the netdev queue.
 */
2723 qeth_qdio_output_handler(struct ccw_device * ccwdev, unsigned int status,
2724 unsigned int qdio_error, unsigned int siga_error,
2725 unsigned int __queue, int first_element, int count,
2726 unsigned long card_ptr)
2728 struct qeth_card *card = (struct qeth_card *) card_ptr;
2729 struct qeth_qdio_out_q *queue = card->qdio.out_qs[__queue];
2730 struct qeth_qdio_out_buffer *buffer;
2733 QETH_DBF_TEXT(trace, 6, "qdouhdl");
2734 if (status & QDIO_STATUS_LOOK_FOR_ERROR) {
2735 if (status & QDIO_STATUS_ACTIVATE_CHECK_CONDITION){
2736 QETH_DBF_SPRINTF(trace, 2, "On device %s: "
2737 "received active check "
2738 "condition (0x%08x).",
2739 CARD_BUS_ID(card), status);
2740 QETH_DBF_TEXT(trace, 2, "chkcond");
2741 QETH_DBF_TEXT_(trace, 2, "%08x", status);
/* stop traffic and trigger a full card recovery */
2742 netif_stop_queue(card->dev);
2743 qeth_schedule_recovery(card);
2748 #ifdef CONFIG_QETH_PERF_STATS
2749 card->perf_stats.outbound_handler_cnt++;
2750 card->perf_stats.outbound_handler_start_time = qeth_get_micros();
2752 for(i = first_element; i < (first_element + count); ++i){
2753 buffer = &queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q];
/* we only handle the KICK_IT error by doing a recovery */
2755 if (qeth_handle_send_error(card, buffer, qdio_error, siga_error)
2756 == QETH_SEND_ERROR_KICK_IT){
2757 netif_stop_queue(card->dev);
2758 qeth_schedule_recovery(card);
2761 qeth_clear_output_buffer(queue, buffer);
2763 atomic_sub(count, &queue->used_buffers);
/* check if we need to do something on this outbound queue */
2765 qeth_check_outbound_queue(queue);
2767 netif_wake_queue(card->dev);
2768 #ifdef CONFIG_QETH_PERF_STATS
2769 card->perf_stats.outbound_handler_time += qeth_get_micros() -
2770 card->perf_stats.outbound_handler_start_time;
/*
 * Builds the QIB parameter field announced to the QDIO layer: the
 * EBCDIC eye catcher "PCIT" followed by the card's PCI threshold A/B
 * and PCI timer values.
 * Fix: the listing was mojibake-corrupted — in "&param_field[...]"
 * the "&para" sequence had been rendered as the pilcrow character,
 * producing the invalid token "¶m_field".  The address-of
 * expressions are restored below.
 */
2775 qeth_create_qib_param_field(struct qeth_card *card)
2779 param_field = kmalloc(QDIO_MAX_BUFFERS_PER_Q * sizeof(char),
2784 memset(param_field, 0, QDIO_MAX_BUFFERS_PER_Q * sizeof(char));
/* eye catcher "PCIT", converted to EBCDIC */
2786 param_field[0] = _ascebc['P'];
2787 param_field[1] = _ascebc['C'];
2788 param_field[2] = _ascebc['I'];
2789 param_field[3] = _ascebc['T'];
2790 *((unsigned int *) (&param_field[4])) = QETH_PCI_THRESHOLD_A(card);
2791 *((unsigned int *) (&param_field[8])) = QETH_PCI_THRESHOLD_B(card);
2792 *((unsigned int *) (&param_field[12])) = QETH_PCI_TIMER_VALUE(card);
/*
 * Moves every entry of the init pool onto the working (free) buffer
 * list via qeth_put_buffer_pool_entry().
 */
2798 qeth_initialize_working_pool_list(struct qeth_card *card)
2800 struct qeth_buffer_pool_entry *entry;
2802 QETH_DBF_TEXT(trace,5,"inwrklst");
2804 list_for_each_entry(entry,
2805 &card->qdio.init_pool.entry_list, init_list) {
2806 qeth_put_buffer_pool_entry(card,entry);
/*
 * Unlinks every entry from the working (in_buf_pool) list.  The
 * entries themselves are not freed here — they stay reachable via
 * the init pool's init_list.
 */
2811 qeth_clear_working_pool_list(struct qeth_card *card)
2813 struct qeth_buffer_pool_entry *pool_entry, *tmp;
2815 QETH_DBF_TEXT(trace,5,"clwrklst");
2816 list_for_each_entry_safe(pool_entry, tmp,
2817 &card->qdio.in_buf_pool.entry_list, list){
2818 list_del(&pool_entry->list);
/*
 * Releases the whole buffer pool: frees every entry's element pages
 * and removes each entry from the init pool list.
 */
2823 qeth_free_buffer_pool(struct qeth_card *card)
2825 struct qeth_buffer_pool_entry *pool_entry, *tmp;
2827 QETH_DBF_TEXT(trace,5,"freepool");
2828 list_for_each_entry_safe(pool_entry, tmp,
2829 &card->qdio.init_pool.entry_list, init_list){
2830 for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i)
2831 free_page((unsigned long)pool_entry->elements[i]);
2832 list_del(&pool_entry->init_list);
/*
 * Allocates init_pool.buf_count pool entries, each backed by
 * QETH_MAX_BUFFER_ELEMENTS(card) pages, and links them onto the init
 * pool list.  On any failure everything allocated so far is torn
 * down again via qeth_free_buffer_pool().
 */
2838 qeth_alloc_buffer_pool(struct qeth_card *card)
2840 struct qeth_buffer_pool_entry *pool_entry;
2844 QETH_DBF_TEXT(trace,5,"clwkpool");
2845 for (i = 0; i < card->qdio.init_pool.buf_count; ++i){
2846 pool_entry = kmalloc(sizeof(*pool_entry), GFP_KERNEL);
2848 qeth_free_buffer_pool(card);
2851 for(j = 0; j < QETH_MAX_BUFFER_ELEMENTS(card); ++j){
2852 ptr = (void *) __get_free_page(GFP_KERNEL);
/* page allocation failed: free this entry's pages so far ... */
2855 free_page((unsigned long)
2856 pool_entry->elements[--j]);
/* ... then tear down everything allocated previously */
2858 qeth_free_buffer_pool(card);
2861 pool_entry->elements[j] = ptr;
2863 list_add(&pool_entry->init_list,
2864 &card->qdio.init_pool.entry_list);
/*
 * Resizes the inbound buffer pool to 'bufcnt'; only permitted while
 * the card is in DOWN or RECOVER state.
 */
2870 qeth_realloc_buffer_pool(struct qeth_card *card, int bufcnt)
2872 QETH_DBF_TEXT(trace, 2, "realcbp");
2874 if ((card->state != CARD_STATE_DOWN) &&
2875 (card->state != CARD_STATE_RECOVER))
/* TODO: steal/add buffers from/to a running card's buffer pool (?) */
2879 qeth_clear_working_pool_list(card);
2880 qeth_free_buffer_pool(card);
2881 card->qdio.in_buf_pool.buf_count = bufcnt;
2882 card->qdio.init_pool.buf_count = bufcnt;
2883 return qeth_alloc_buffer_pool(card);
/*
 * Allocates the inbound queue, the inbound buffer pool and all
 * outbound queues; unwinds every earlier allocation on failure and
 * finally marks the qdio state QETH_QDIO_ALLOCATED.
 */
2887 qeth_alloc_qdio_buffers(struct qeth_card *card)
2891 QETH_DBF_TEXT(setup, 2, "allcqdbf");
2893 if (card->qdio.state == QETH_QDIO_ALLOCATED)
2896 card->qdio.in_q = kmalloc(sizeof(struct qeth_qdio_q), GFP_KERNEL);
2897 if (!card->qdio.in_q)
2899 QETH_DBF_TEXT(setup, 2, "inq");
2900 QETH_DBF_HEX(setup, 2, &card->qdio.in_q, sizeof(void *));
2901 memset(card->qdio.in_q, 0, sizeof(struct qeth_qdio_q));
/* give inbound qeth_qdio_buffers their qdio_buffers */
2903 for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i)
2904 card->qdio.in_q->bufs[i].buffer =
2905 &card->qdio.in_q->qdio_bufs[i];
/* inbound buffer pool */
2907 if (qeth_alloc_buffer_pool(card)){
2908 kfree(card->qdio.in_q);
2913 kmalloc(card->qdio.no_out_queues *
2914 sizeof(struct qeth_qdio_out_q *), GFP_KERNEL);
2915 if (!card->qdio.out_qs){
2916 qeth_free_buffer_pool(card);
2919 for (i = 0; i < card->qdio.no_out_queues; ++i){
2920 card->qdio.out_qs[i] = kmalloc(sizeof(struct qeth_qdio_out_q),
2922 if (!card->qdio.out_qs[i]){
2924 kfree(card->qdio.out_qs[--i]);
2925 kfree(card->qdio.out_qs);
2928 QETH_DBF_TEXT_(setup, 2, "outq %i", i);
2929 QETH_DBF_HEX(setup, 2, &card->qdio.out_qs[i], sizeof(void *));
2930 memset(card->qdio.out_qs[i], 0, sizeof(struct qeth_qdio_out_q));
2931 card->qdio.out_qs[i]->queue_no = i;
/* give outbound qeth_qdio_buffers their qdio_buffers */
2933 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j){
2934 card->qdio.out_qs[i]->bufs[j].buffer =
2935 &card->qdio.out_qs[i]->qdio_bufs[j];
2936 skb_queue_head_init(&card->qdio.out_qs[i]->bufs[j].
2940 card->qdio.state = QETH_QDIO_ALLOCATED;
/*
 * Releases the inbound queue, the buffer pool and all outbound
 * queues (clearing skbs still attached to outbound buffers first);
 * resets the qdio state to UNINITIALIZED.
 */
2945 qeth_free_qdio_buffers(struct qeth_card *card)
2949 QETH_DBF_TEXT(trace, 2, "freeqdbf");
2950 if (card->qdio.state == QETH_QDIO_UNINITIALIZED)
2952 kfree(card->qdio.in_q);
/* inbound buffer pool */
2954 qeth_free_buffer_pool(card);
/* free outbound qdio_qs */
2956 for (i = 0; i < card->qdio.no_out_queues; ++i){
2957 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j)
2958 qeth_clear_output_buffer(card->qdio.out_qs[i],
2959 &card->qdio.out_qs[i]->bufs[j]);
2960 kfree(card->qdio.out_qs[i]);
2962 kfree(card->qdio.out_qs);
2963 card->qdio.state = QETH_QDIO_UNINITIALIZED;
/*
 * Drops the skbs attached to every outbound buffer without freeing
 * the queue structures themselves.
 */
2967 qeth_clear_qdio_buffers(struct qeth_card *card)
2971 QETH_DBF_TEXT(trace, 2, "clearqdbf");
/* clear outbound buffers to free skbs */
2973 for (i = 0; i < card->qdio.no_out_queues; ++i)
2974 if (card->qdio.out_qs[i]){
2975 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j)
2976 qeth_clear_output_buffer(card->qdio.out_qs[i],
2977 &card->qdio.out_qs[i]->bufs[j]);
/*
 * Resets the card's qdio bookkeeping to compile-time defaults:
 * inbound buffer size/count and list heads, plus priority queueing
 * and default outbound queue settings.
 */
2982 qeth_init_qdio_info(struct qeth_card *card)
2984 QETH_DBF_TEXT(setup, 4, "intqdinf");
2985 card->qdio.state = QETH_QDIO_UNINITIALIZED;
/* inbound */
2987 card->qdio.in_buf_size = QETH_IN_BUF_SIZE_DEFAULT;
2988 card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_DEFAULT;
2989 card->qdio.in_buf_pool.buf_count = card->qdio.init_pool.buf_count;
2990 INIT_LIST_HEAD(&card->qdio.in_buf_pool.entry_list);
2991 INIT_LIST_HEAD(&card->qdio.init_pool.entry_list);
/* outbound */
2993 card->qdio.do_prio_queueing = QETH_PRIOQ_DEFAULT;
2994 card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;
/*
 * Primes the inbound queue with buffers (one fewer than the pool
 * holds) and hands them to the hardware via do_QDIO; then resets
 * every outbound queue to an empty, unlocked, non-packing state.
 */
2998 qeth_init_qdio_queues(struct qeth_card *card)
3003 QETH_DBF_TEXT(setup, 2, "initqdqs");
/* inbound queue */
3006 memset(card->qdio.in_q->qdio_bufs, 0,
3007 QDIO_MAX_BUFFERS_PER_Q * sizeof(struct qdio_buffer));
3008 qeth_initialize_working_pool_list(card);
/* give only as many buffers to hardware as we have buffer pool entries */
3010 for (i = 0; i < card->qdio.in_buf_pool.buf_count - 1; ++i)
3011 qeth_init_input_buffer(card, &card->qdio.in_q->bufs[i]);
3012 card->qdio.in_q->next_buf_to_init = card->qdio.in_buf_pool.buf_count - 1;
3013 rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0, 0,
3014 card->qdio.in_buf_pool.buf_count - 1, NULL);
3016 QETH_DBF_TEXT_(setup, 2, "1err%d", rc);
3019 rc = qdio_synchronize(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0);
3021 QETH_DBF_TEXT_(setup, 2, "2err%d", rc);
/* outbound queue */
3025 for (i = 0; i < card->qdio.no_out_queues; ++i){
3026 memset(card->qdio.out_qs[i]->qdio_bufs, 0,
3027 QDIO_MAX_BUFFERS_PER_Q * sizeof(struct qdio_buffer));
3028 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j){
3029 qeth_clear_output_buffer(card->qdio.out_qs[i],
3030 &card->qdio.out_qs[i]->bufs[j]);
3032 card->qdio.out_qs[i]->card = card;
3033 card->qdio.out_qs[i]->next_buf_to_fill = 0;
3034 card->qdio.out_qs[i]->do_pack = 0;
3035 atomic_set(&card->qdio.out_qs[i]->used_buffers,0);
3036 atomic_set(&card->qdio.out_qs[i]->set_pci_flags_count, 0);
3037 atomic_set(&card->qdio.out_qs[i]->state,
3038 QETH_OUT_Q_UNLOCKED);
/*
 * Assembles the qdio_initialize descriptor (qib parameter field,
 * in/out SBAL pointer arrays, thresholds, handlers, flags) and calls
 * qdio_initialize(); the temporary arrays are freed in every case,
 * and on success the qdio state becomes ESTABLISHED.
 */
3044 qeth_qdio_establish(struct qeth_card *card)
3046 struct qdio_initialize init_data;
3047 char *qib_param_field;
3048 struct qdio_buffer **in_sbal_ptrs;
3049 struct qdio_buffer **out_sbal_ptrs;
3053 QETH_DBF_TEXT(setup, 2, "qdioest");
3054 qib_param_field = qeth_create_qib_param_field(card);
3055 if (!qib_param_field)
3058 in_sbal_ptrs = kmalloc(QDIO_MAX_BUFFERS_PER_Q * sizeof(void *),
3060 if (!in_sbal_ptrs) {
3061 kfree(qib_param_field);
3064 for(i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i)
3065 in_sbal_ptrs[i] = (struct qdio_buffer *)
3066 virt_to_phys(card->qdio.in_q->bufs[i].buffer);
3069 kmalloc(card->qdio.no_out_queues * QDIO_MAX_BUFFERS_PER_Q *
3070 sizeof(void *), GFP_KERNEL);
3071 if (!out_sbal_ptrs) {
3072 kfree(in_sbal_ptrs);
3073 kfree(qib_param_field);
3076 for(i = 0, k = 0; i < card->qdio.no_out_queues; ++i)
3077 for(j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j, ++k){
3078 out_sbal_ptrs[k] = (struct qdio_buffer *)
3079 virt_to_phys(card->qdio.out_qs[i]->
3083 memset(&init_data, 0, sizeof(struct qdio_initialize));
3084 init_data.cdev = CARD_DDEV(card);
3085 init_data.q_format = qeth_get_qdio_q_format(card);
3086 init_data.qib_param_field_format = 0;
3087 init_data.qib_param_field = qib_param_field;
3088 init_data.min_input_threshold = QETH_MIN_INPUT_THRESHOLD;
3089 init_data.max_input_threshold = QETH_MAX_INPUT_THRESHOLD;
3090 init_data.min_output_threshold = QETH_MIN_OUTPUT_THRESHOLD;
3091 init_data.max_output_threshold = QETH_MAX_OUTPUT_THRESHOLD;
3092 init_data.no_input_qs = 1;
3093 init_data.no_output_qs = card->qdio.no_out_queues;
3094 init_data.input_handler = (qdio_handler_t *)
3095 qeth_qdio_input_handler;
3096 init_data.output_handler = (qdio_handler_t *)
3097 qeth_qdio_output_handler;
3098 init_data.int_parm = (unsigned long) card;
3099 init_data.flags = QDIO_INBOUND_0COPY_SBALS |
3100 QDIO_OUTBOUND_0COPY_SBALS |
3101 QDIO_USE_OUTBOUND_PCIS;
3102 init_data.input_sbal_addr_array = (void **) in_sbal_ptrs;
3103 init_data.output_sbal_addr_array = (void **) out_sbal_ptrs;
3105 if (!(rc = qdio_initialize(&init_data)))
3106 card->qdio.state = QETH_QDIO_ESTABLISHED;
3108 kfree(out_sbal_ptrs);
3109 kfree(in_sbal_ptrs);
3110 kfree(qib_param_field);
/* Activates the previously established QDIO queues on the data device. */
3115 qeth_qdio_activate(struct qeth_card *card)
3117 QETH_DBF_TEXT(setup,3,"qdioact");
3118 return qdio_activate(CARD_DDEV(card), 0);
/*
 * Issues a CCW clear on the channel (under the ccwdev lock) and waits
 * interruptibly, with timeout, until the channel reports
 * CH_STATE_STOPPED; then marks it CH_STATE_DOWN.
 */
3122 qeth_clear_channel(struct qeth_channel *channel)
3124 unsigned long flags;
3125 struct qeth_card *card;
3128 QETH_DBF_TEXT(trace,3,"clearch");
3129 card = CARD_FROM_CDEV(channel->ccwdev);
3130 spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
3131 rc = ccw_device_clear(channel->ccwdev, QETH_CLEAR_CHANNEL_PARM);
3132 spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
3136 rc = wait_event_interruptible_timeout(card->wait_q,
3137 channel->state==CH_STATE_STOPPED, QETH_TIMEOUT);
3138 if (rc == -ERESTARTSYS)
3140 if (channel->state != CH_STATE_STOPPED)
3142 channel->state = CH_STATE_DOWN;
/*
 * Issues a CCW halt on the channel (under the ccwdev lock) and waits
 * interruptibly, with timeout, until the channel reports
 * CH_STATE_HALTED.
 */
3147 qeth_halt_channel(struct qeth_channel *channel)
3149 unsigned long flags;
3150 struct qeth_card *card;
3153 QETH_DBF_TEXT(trace,3,"haltch");
3154 card = CARD_FROM_CDEV(channel->ccwdev);
3155 spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
3156 rc = ccw_device_halt(channel->ccwdev, QETH_HALT_CHANNEL_PARM);
3157 spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
3161 rc = wait_event_interruptible_timeout(card->wait_q,
3162 channel->state==CH_STATE_HALTED, QETH_TIMEOUT);
3163 if (rc == -ERESTARTSYS)
3165 if (channel->state != CH_STATE_HALTED)
/* Halts the read, write and data channels; stops at the first failure. */
3171 qeth_halt_channels(struct qeth_card *card)
3175 QETH_DBF_TEXT(trace,3,"haltchs");
3176 if ((rc = qeth_halt_channel(&card->read)))
3178 if ((rc = qeth_halt_channel(&card->write)))
3180 return qeth_halt_channel(&card->data);
/* Clears the read, write and data channels; stops at the first failure. */
3183 qeth_clear_channels(struct qeth_card *card)
3187 QETH_DBF_TEXT(trace,3,"clearchs");
3188 if ((rc = qeth_clear_channel(&card->read)))
3190 if ((rc = qeth_clear_channel(&card->write)))
3192 return qeth_clear_channel(&card->data);
/*
 * Optionally halts (when 'halt' is set) and then clears all three
 * channels of the card.
 */
3196 qeth_clear_halt_card(struct qeth_card *card, int halt)
3200 QETH_DBF_TEXT(trace,3,"clhacrd");
3201 QETH_DBF_HEX(trace, 3, &card, sizeof(void *));
3204 rc = qeth_halt_channels(card);
3207 return qeth_clear_channels(card);
/*
 * Shuts QDIO down on the card (cleanup via halt for HiperSockets/IQD,
 * via clear otherwise), resets the channels and puts the card into
 * CARD_STATE_DOWN.
 */
3211 qeth_qdio_clear_card(struct qeth_card *card, int use_halt)
3215 QETH_DBF_TEXT(trace,3,"qdioclr");
3216 if (card->qdio.state == QETH_QDIO_ESTABLISHED){
3217 qdio_cleanup(CARD_DDEV(card),
3218 (card->info.type == QETH_CARD_TYPE_IQD) ?
3219 QDIO_FLAG_CLEANUP_USING_HALT :
3220 QDIO_FLAG_CLEANUP_USING_CLEAR);
3221 card->qdio.state = QETH_QDIO_ALLOCATED;
3223 rc = qeth_clear_halt_card(card, use_halt);
3224 card->state = CARD_STATE_DOWN;
/*
 * Sends the DM_ACT control command, filled in with the CM and ULP
 * connection tokens negotiated earlier.
 */
3229 qeth_dm_act(struct qeth_card *card)
3232 struct qeth_cmd_buffer *iob;
3234 QETH_DBF_TEXT(setup,2,"dmact");
3236 iob = qeth_wait_for_buffer(&card->write);
3237 memcpy(iob->data, DM_ACT, DM_ACT_SIZE);
3239 memcpy(QETH_DM_ACT_DEST_ADDR(iob->data),
3240 &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
3241 memcpy(QETH_DM_ACT_CONNECTION_TOKEN(iob->data),
3242 &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
3243 rc = qeth_send_control_data(card, DM_ACT_SIZE, iob, NULL, NULL);
/*
 * Runs the MPC bringup sequence: issue read, CM enable/setup, ULP
 * enable/setup, allocate/establish/activate QDIO, then DM_ACT.  On
 * failure the card's QDIO setup is cleared again.
 */
3248 qeth_mpc_initialize(struct qeth_card *card)
3252 QETH_DBF_TEXT(setup,2,"mpcinit");
3254 if ((rc = qeth_issue_next_read(card))){
3255 QETH_DBF_TEXT_(setup, 2, "1err%d", rc);
3258 if ((rc = qeth_cm_enable(card))){
3259 QETH_DBF_TEXT_(setup, 2, "2err%d", rc);
3262 if ((rc = qeth_cm_setup(card))){
3263 QETH_DBF_TEXT_(setup, 2, "3err%d", rc);
3266 if ((rc = qeth_ulp_enable(card))){
3267 QETH_DBF_TEXT_(setup, 2, "4err%d", rc);
3270 if ((rc = qeth_ulp_setup(card))){
3271 QETH_DBF_TEXT_(setup, 2, "5err%d", rc);
3274 if ((rc = qeth_alloc_qdio_buffers(card))){
/* NOTE(review): debug tag "5err%d" duplicates the one above */
3275 QETH_DBF_TEXT_(setup, 2, "5err%d", rc);
3278 if ((rc = qeth_qdio_establish(card))){
3279 QETH_DBF_TEXT_(setup, 2, "6err%d", rc);
3280 qeth_free_qdio_buffers(card);
3283 if ((rc = qeth_qdio_activate(card))){
3284 QETH_DBF_TEXT_(setup, 2, "7err%d", rc);
3287 if ((rc = qeth_dm_act(card))){
3288 QETH_DBF_TEXT_(setup, 2, "8err%d", rc);
3294 qeth_qdio_clear_card(card, card->info.type==QETH_CARD_TYPE_OSAE);
/*
 * Allocates a net_device matching the card/link type: a token-ring
 * device (when CONFIG_TR) for LANE_TR/HSTR links, an "hsi%d" device
 * for HiperSockets (IQD), plain ethernet otherwise.
 */
3298 static struct net_device *
3299 qeth_get_netdevice(enum qeth_card_types type, enum qeth_link_types linktype)
3301 struct net_device *dev = NULL;
3304 case QETH_CARD_TYPE_OSAE:
3306 case QETH_LINK_TYPE_LANE_TR:
3307 case QETH_LINK_TYPE_HSTR:
3309 dev = alloc_trdev(0);
3310 #endif /* CONFIG_TR */
3313 dev = alloc_etherdev(0);
3316 case QETH_CARD_TYPE_IQD:
3317 dev = alloc_netdev(0, "hsi%d", ether_setup);
3320 dev = alloc_etherdev(0);
3326 qeth_send_packet(struct qeth_card *, struct sk_buff *);
/*
 * .hard_start_xmit handler: drops the packet (counting tx errors)
 * when the card is not UP or carrier is gone; otherwise hands it to
 * qeth_send_packet() and stops the netdev queue only on error.
 */
3329 qeth_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
3332 struct qeth_card *card;
3334 QETH_DBF_TEXT(trace, 6, "hrdstxmi");
3335 card = (struct qeth_card *)dev->priv;
3337 card->stats.tx_dropped++;
3338 card->stats.tx_errors++;
3341 if ((card->state != CARD_STATE_UP) || !netif_carrier_ok(dev)) {
3342 card->stats.tx_dropped++;
3343 card->stats.tx_errors++;
3344 card->stats.tx_carrier_errors++;
3347 #ifdef CONFIG_QETH_PERF_STATS
3348 card->perf_stats.outbound_cnt++;
3349 card->perf_stats.outbound_start_time = qeth_get_micros();
/*
 * We only call netif_stop_queue in case of errors. Since we've
 * got our own synchronization on queues we can keep the stack's
 * queue running.
 */
3356 if ((rc = qeth_send_packet(card, skb)))
3357 netif_stop_queue(dev);
3359 #ifdef CONFIG_QETH_PERF_STATS
3360 card->perf_stats.outbound_time += qeth_get_micros() -
3361 card->perf_stats.outbound_start_time;
/*
 * Returns QETH_VLAN_CARD when 'dev' is one of the card's registered
 * VLAN devices (CONFIG_QETH_VLAN builds only).
 */
3367 qeth_verify_vlan_dev(struct net_device *dev, struct qeth_card *card)
3370 #ifdef CONFIG_QETH_VLAN
3371 struct vlan_group *vg;
3374 if (!(vg = card->vlangrp))
3377 for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++){
3378 if (vg->vlan_devices[i] == dev){
3379 rc = QETH_VLAN_CARD;
/*
 * Scans the global card list (under its read lock) and classifies
 * 'dev' as QETH_REAL_CARD, as a qeth VLAN device, or neither.
 */
3388 qeth_verify_dev(struct net_device *dev)
3390 struct qeth_card *card;
3391 unsigned long flags;
3394 read_lock_irqsave(&qeth_card_list.rwlock, flags);
3395 list_for_each_entry(card, &qeth_card_list.list, list){
3396 if (card->dev == dev){
3397 rc = QETH_REAL_CARD;
3400 rc = qeth_verify_vlan_dev(dev, card);
3404 read_unlock_irqrestore(&qeth_card_list.rwlock, flags);
/*
 * Maps a net_device (real or VLAN) back to its owning qeth_card;
 * NULL when the device does not belong to qeth.
 */
3409 static struct qeth_card *
3410 qeth_get_card_from_dev(struct net_device *dev)
3412 struct qeth_card *card = NULL;
3415 rc = qeth_verify_dev(dev);
3416 if (rc == QETH_REAL_CARD)
3417 card = (struct qeth_card *)dev->priv;
3418 else if (rc == QETH_VLAN_CARD)
3419 card = (struct qeth_card *)
3420 VLAN_DEV_INFO(dev)->real_dev->priv;
3422 QETH_DBF_TEXT_(trace, 4, "%d", rc);
/* .tx_timeout handler: count the error and kick off a card recovery. */
3427 qeth_tx_timeout(struct net_device *dev)
3429 struct qeth_card *card;
3431 card = (struct qeth_card *) dev->priv;
3432 card->stats.tx_errors++;
3433 qeth_schedule_recovery(card);
/*
 * .open handler: requires a prior softsetup; brings interface and
 * card to UP state, but drops carrier and stops the queue again
 * while the LAN is offline.
 */
3437 qeth_open(struct net_device *dev)
3439 struct qeth_card *card;
3441 QETH_DBF_TEXT(trace, 4, "qethopen");
3443 card = (struct qeth_card *) dev->priv;
3445 if (card->state != CARD_STATE_SOFTSETUP)
3448 card->dev->flags |= IFF_UP;
3449 netif_start_queue(dev);
3450 card->data.state = CH_STATE_UP;
3451 card->state = CARD_STATE_UP;
3453 if (!card->lan_online){
3454 if (netif_carrier_ok(dev))
3455 netif_carrier_off(dev);
3456 netif_stop_queue(dev);
/* .stop handler: stop the queue and fall back to SOFTSETUP state. */
3462 qeth_stop(struct net_device *dev)
3464 struct qeth_card *card;
3466 QETH_DBF_TEXT(trace, 4, "qethstop");
3468 card = (struct qeth_card *) dev->priv;
3470 netif_stop_queue(dev);
3471 card->dev->flags &= ~IFF_UP;
3472 if (card->state == CARD_STATE_UP)
3473 card->state = CARD_STATE_SOFTSETUP;
/*
 * Classifies an outgoing skb (RTN_BROADCAST/MULTICAST/ANYCAST/...):
 * prefers the routing neighbour's type, then falls back to
 * inspecting the IP/IPv6 header bytes and finally the MAC address.
 */
3478 qeth_get_cast_type(struct qeth_card *card, struct sk_buff *skb)
3480 int cast_type = RTN_UNSPEC;
3482 if (skb->dst && skb->dst->neighbour){
3483 cast_type = skb->dst->neighbour->type;
3484 if ((cast_type == RTN_BROADCAST) ||
3485 (cast_type == RTN_MULTICAST) ||
3486 (cast_type == RTN_ANYCAST))
/* try something else */
3492 if (skb->protocol == ETH_P_IPV6)
3493 return (skb->nh.raw[24] == 0xff) ? RTN_MULTICAST : 0;
3494 else if (skb->protocol == ETH_P_IP)
3495 return ((skb->nh.raw[16] & 0xf0) == 0xe0) ? RTN_MULTICAST : 0;
3497 if (!memcmp(skb->nh.raw, skb->dev->broadcast, 6))
3498 return RTN_BROADCAST;
3502 hdr_mac = *((u16 *)skb->nh.raw);
3504 switch (card->info.link_type) {
/* token ring multicast MAC prefixes */
3505 case QETH_LINK_TYPE_HSTR:
3506 case QETH_LINK_TYPE_LANE_TR:
3507 if ((hdr_mac == QETH_TR_MAC_NC) ||
3508 (hdr_mac == QETH_TR_MAC_C))
3509 return RTN_MULTICAST;
/* eth or so multicast? */
3512 if ((hdr_mac == QETH_ETH_MAC_V4) ||
3513 (hdr_mac == QETH_ETH_MAC_V6))
3514 return RTN_MULTICAST;
/*
 * Picks the outbound queue for an skb: the default queue for non-IP
 * traffic on OSA-E, a multicast-specific queue when configured, or a
 * queue derived from the IPv4 TOS/precedence bits when priority
 * queueing is enabled.
 */
3521 qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb,
3522 int ipv, int cast_type)
3524 if (!ipv && (card->info.type == QETH_CARD_TYPE_OSAE))
3525 return card->qdio.default_out_queue;
3526 switch (card->qdio.no_out_queues) {
3528 if (cast_type && card->info.is_multicast_different)
3529 return card->info.is_multicast_different &
3530 (card->qdio.no_out_queues - 1);
3531 if (card->qdio.do_prio_queueing && (ipv == 4)) {
3532 if (card->qdio.do_prio_queueing==QETH_PRIO_Q_ING_TOS){
3533 if (skb->nh.iph->tos & IP_TOS_NOTIMPORTANT)
3535 if (skb->nh.iph->tos & IP_TOS_HIGHRELIABILITY)
3537 if (skb->nh.iph->tos & IP_TOS_HIGHTHROUGHPUT)
3539 if (skb->nh.iph->tos & IP_TOS_LOWDELAY)
/* precedence queueing: map the 2-bit precedence (tos >> 6) to 3..0 */
3542 if (card->qdio.do_prio_queueing==QETH_PRIO_Q_ING_PREC)
3543 return 3 - (skb->nh.iph->tos >> 6);
3544 } else if (card->qdio.do_prio_queueing && (ipv == 6)) {
3547 return card->qdio.default_out_queue;
/*
 * Switches on skb->protocol — presumably returning the IP version
 * (4/6/0); the case labels were lost in this extraction, confirm
 * against the unmangled source.
 */
3554 qeth_get_ip_version(struct sk_buff *skb)
3556 switch (skb->protocol) {
/*
 * Prepares an skb for transmission: ensures headroom for the
 * qeth_hdr (reallocating if necessary), inlines the VLAN tag for
 * IPv6 VLAN frames, then pushes the qeth_hdr and sanity-checks that
 * header plus IP header do not cross a page boundary.
 */
3567 qeth_prepare_skb(struct qeth_card *card, struct sk_buff **skb,
3568 struct qeth_hdr **hdr, int ipv)
3570 struct sk_buff *new_skb;
3571 #ifdef CONFIG_QETH_VLAN
3575 QETH_DBF_TEXT(trace, 6, "prepskb");
3576 if (skb_headroom(*skb) < sizeof(struct qeth_hdr)){
3577 new_skb = skb_realloc_headroom(*skb, sizeof(struct qeth_hdr));
3579 PRINT_ERR("qeth_prepare_skb: could "
3580 "not realloc headroom for qeth_hdr "
3581 "on interface %s", card->info.if_name);
3586 #ifdef CONFIG_QETH_VLAN
3587 if (card->vlangrp && vlan_tx_tag_present(*skb) && (ipv == 6)){
/*
 * Move the mac addresses (6 bytes src, 6 bytes dest)
 * to the beginning of the new header. We are using three
 * memcpys instead of one memmove to save cycles.
 */
3593 skb_push(*skb, VLAN_HLEN);
3594 memcpy((*skb)->data, (*skb)->data + 4, 4);
3595 memcpy((*skb)->data + 4, (*skb)->data + 8, 4);
3596 memcpy((*skb)->data + 8, (*skb)->data + 12, 4);
/* NOTE(review): precedence — "(u16 *) (*skb)->data + 12" advances by
 * 12 u16s (24 bytes); (u16 *)((*skb)->data + 12) looks intended.
 * Confirm against the unmangled source. */
3597 tag = (u16 *) (*skb)->data + 12;
/*
 * first two bytes = ETH_P_8021Q (0x8100)
 * second two bytes = VLANID
 */
3602 *tag = __constant_htons(ETH_P_8021Q);
3603 *(tag + 1) = vlan_tx_tag_get(*skb);
3604 *(tag + 1) = htons(*(tag + 1));
3607 *hdr = (struct qeth_hdr *) skb_push(*skb, sizeof(struct qeth_hdr));
/*
 * sanity check, the Linux memory allocation scheme should
 * never present us cases like this one (the 32bytes header plus
 * the first 40 bytes of the packet cross a 4k boundary)
 */
3613 if ((((unsigned long) *hdr) & (~(PAGE_SIZE - 1))) !=
3614 (((unsigned long) *hdr + sizeof(struct qeth_hdr) +
3615 QETH_IP_HEADER_SIZE) & (~(PAGE_SIZE - 1)))) {
3616 PRINT_ERR("qeth_prepare_skb: misaligned "
3617 "packet on interface %s. Discarded.",
3618 card->info.if_name);
/* Maps a cast type to the qeth header cast flags for IPv4. */
3625 qeth_get_qeth_hdr_flags4(int cast_type)
3627 if (cast_type == RTN_MULTICAST)
3628 return QETH_CAST_MULTICAST;
3629 if (cast_type == RTN_BROADCAST)
3630 return QETH_CAST_BROADCAST;
3631 return QETH_CAST_UNICAST;
/*
 * Maps a cast type to the qeth header flags for IPv6; always sets
 * the PASSTHRU and IPV6 bits.
 */
3635 qeth_get_qeth_hdr_flags6(int cast_type)
3637 u8 ct = QETH_HDR_PASSTHRU | QETH_HDR_IPV6;
3638 if (cast_type == RTN_MULTICAST)
3639 return ct | QETH_CAST_MULTICAST;
3640 if (cast_type == RTN_ANYCAST)
3641 return ct | QETH_CAST_ANYCAST;
3642 if (cast_type == RTN_BROADCAST)
3643 return ct | QETH_CAST_BROADCAST;
3644 return ct | QETH_CAST_UNICAST;
/*
 * Fills the qeth_hdr for an outgoing skb: VLAN tag info (if any),
 * payload length, cast flags, and the destination address taken from
 * the neighbour entry or from the IP/IPv6 header; non-IP frames go
 * out as passthrough.
 */
3648 qeth_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
3649 struct sk_buff *skb, int ipv, int cast_type)
3654 QETH_DBF_TEXT(trace, 6, "fillhdr");
3655 #ifdef CONFIG_QETH_VLAN
/*
 * ... before we're going to overwrite this location with next hop ip.
 * v6 uses passthrough, v4 sets the tag in the QDIO header.
 */
3660 if (card->vlangrp && vlan_tx_tag_present(skb)) {
3661 hdr->ext_flags = (ipv == 4)? QETH_EXT_HDR_VLAN_FRAME :
3662 QETH_EXT_HDR_INCLUDE_VLAN_TAG;
3663 hdr->vlan_id = vlan_tx_tag_get(skb);
3665 #endif /* CONFIG_QETH_VLAN */
3666 hdr->length = skb->len - sizeof(struct qeth_hdr);
3667 if (ipv == 4) { /* IPv4 */
3668 hdr->flags = qeth_get_qeth_hdr_flags4(cast_type);
3669 memset(hdr->dest_addr, 0, 12);
3670 if ((skb->dst) && (skb->dst->neighbour)) {
3671 *((u32 *) (&hdr->dest_addr[12])) =
3672 *((u32 *) skb->dst->neighbour->primary_key);
/* fill in destination address used in ip header */
3675 *((u32 *) (&hdr->dest_addr[12])) = skb->nh.iph->daddr;
3677 } else if (ipv == 6) { /* IPv6 or passthru */
3678 hdr->flags = qeth_get_qeth_hdr_flags6(cast_type);
3679 if ((skb->dst) && (skb->dst->neighbour)) {
3680 memcpy(hdr->dest_addr,
3681 skb->dst->neighbour->primary_key, 16);
/* fill in destination address used in ip header */
3684 memcpy(hdr->dest_addr, &skb->nh.ipv6h->daddr, 16);
3686 } else { /* passthrough */
3687 if (!memcmp(skb->data + sizeof(struct qeth_hdr),
3688 skb->dev->broadcast, 6)) { /* broadcast? */
3689 hdr->flags = QETH_CAST_BROADCAST | QETH_HDR_PASSTHRU;
3691 hdr->flags = (cast_type == RTN_MULTICAST) ?
3692 QETH_CAST_MULTICAST | QETH_HDR_PASSTHRU :
3693 QETH_CAST_UNICAST | QETH_HDR_PASSTHRU;
/*
 * Copies the skb (starting at 'data', which points at the qeth_hdr)
 * into the SBAL elements of 'buf', chunked at page boundaries and
 * flagged FIRST/MIDDLE/LAST frag; primes the buffer immediately in
 * non-packing mode, or once it is full in packing mode.
 */
3699 qeth_fill_buffer(struct qeth_qdio_out_q *queue, struct qeth_qdio_out_buffer *buf,
3700 char *data, struct sk_buff *skb)
3702 struct qdio_buffer *buffer;
3703 int length = skb->len;
3708 QETH_DBF_TEXT(trace, 6, "qdfillbf");
3709 buffer = buf->buffer;
/* keep a reference; the skb is released when the buffer completes */
3710 atomic_inc(&skb->users);
3711 skb_queue_tail(&buf->skb_list, skb);
3712 element = buf->next_element_to_fill;
3713 while (length > 0) {
/* length_here is the remaining amount of data in this page */
3715 length_here = PAGE_SIZE - ((unsigned long) data % PAGE_SIZE);
3716 if (length < length_here)
3717 length_here = length;
3718 buffer->element[element].addr = data;
3719 buffer->element[element].length = length_here;
3720 length -= length_here;
3723 buffer->element[element].flags = 0;
3725 buffer->element[element].flags =
3726 SBAL_FLAGS_LAST_FRAG;
3729 buffer->element[element].flags =
3730 SBAL_FLAGS_FIRST_FRAG;
3732 buffer->element[element].flags =
3733 SBAL_FLAGS_MIDDLE_FRAG;
3735 data += length_here;
3739 buf->next_element_to_fill = element;
3740 if (!queue->do_pack) {
3741 QETH_DBF_TEXT(trace, 6, "fillbfnp");
/* set state to PRIMED -> will be flushed */
3743 atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
3745 QETH_DBF_TEXT(trace, 6, "fillbfpa");
3746 #ifdef CONFIG_QETH_PERF_STATS
3747 queue->card->perf_stats.skbs_sent_pack++;
3749 if (buf->next_element_to_fill >=
3750 QETH_MAX_BUFFER_ELEMENTS(queue->card)) {
/*
 * packed buffer is full -> set state PRIMED
 * -> will be flushed
 */
3755 atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
/*
 * Fast path for non-packing queues: spin for the queue lock, claim
 * the next buffer (dropping the packet if it is not empty), release
 * the lock, then fill and flush the single buffer.
 */
3762 qeth_do_send_packet_fast(struct qeth_card *card, struct qeth_qdio_out_q *queue,
3763 struct sk_buff *skb, struct qeth_hdr *hdr,
3764 int elements_needed)
3766 struct qeth_qdio_out_buffer *buffer;
3769 QETH_DBF_TEXT(trace, 6, "dosndpfa");
/* spin until we get the queue ... */
3772 while (atomic_compare_and_swap(QETH_OUT_Q_UNLOCKED,
/* ... now we've got the queue */
3776 index = queue->next_buf_to_fill;
3777 buffer = &queue->bufs[queue->next_buf_to_fill];
/*
 * check if buffer is empty to make sure that we do not 'overtake'
 * ourselves and try to fill a buffer that is already primed
 */
3782 if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY) {
3783 card->stats.tx_dropped++;
3784 atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
3787 queue->next_buf_to_fill = (queue->next_buf_to_fill + 1) %
3788 QDIO_MAX_BUFFERS_PER_Q;
3789 atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
3790 qeth_fill_buffer(queue, buffer, (char *)hdr, skb);
3791 qeth_flush_buffers(queue, 0, index, 1);
/*
 * Packing-aware send path: spins for the queue lock, switches
 * packing state if needed, primes/advances past a full packing
 * buffer, fills the current buffer, and — while unwinding the lock
 * count — flushes any buffers primed in the meantime.
 */
3796 qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
3797 struct sk_buff *skb, struct qeth_hdr *hdr,
3798 int elements_needed)
3800 struct qeth_qdio_out_buffer *buffer;
3802 int flush_count = 0;
3805 QETH_DBF_TEXT(trace, 6, "dosndpkt");
/* spin until we get the queue ... */
3808 while (atomic_compare_and_swap(QETH_OUT_Q_UNLOCKED,
3811 start_index = queue->next_buf_to_fill;
3812 buffer = &queue->bufs[queue->next_buf_to_fill];
/*
 * check if buffer is empty to make sure that we do not 'overtake'
 * ourselves and try to fill a buffer that is already primed
 */
3817 if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY){
3818 card->stats.tx_dropped++;
3819 atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
/* check if we need to switch packing state of this queue */
3823 qeth_switch_to_packing_if_needed(queue);
3824 if (queue->do_pack){
/* does packet fit in current buffer? */
3826 if((QETH_MAX_BUFFER_ELEMENTS(card) -
3827 buffer->next_element_to_fill) < elements_needed){
/* ... no -> set state PRIMED */
3829 atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
3831 queue->next_buf_to_fill =
3832 (queue->next_buf_to_fill + 1) %
3833 QDIO_MAX_BUFFERS_PER_Q;
3834 buffer = &queue->bufs[queue->next_buf_to_fill];
/* we did a step forward, so check buffer state again */
3836 if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY){
3837 card->stats.tx_dropped++;
/* return EBUSY because we sent old packet, not
 * the current one */
3841 atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
3846 qeth_fill_buffer(queue, buffer, (char *)hdr, skb);
3847 if (atomic_read(&buffer->state) == QETH_QDIO_BUF_PRIMED){
/* next time fill the next buffer */
3850 queue->next_buf_to_fill = (queue->next_buf_to_fill + 1) %
3851 QDIO_MAX_BUFFERS_PER_Q;
/*
 * queue->state will go from LOCKED -> UNLOCKED or from
 * LOCKED_FLUSH -> LOCKED if output_handler wanted to 'notify' us
 * (switch packing state or flush buffer to get another pci flag out).
 * In that case we will enter this loop
 */
3859 while (atomic_dec_return(&queue->state)){
/* check if we can go back to non-packing state */
3861 flush_count += qeth_switch_to_nonpacking_if_needed(queue);
/*
 * check if we need to flush a packing buffer to get a pci
 * flag out on the queue
 */
3866 if (!flush_count && !atomic_read(&queue->set_pci_flags_count))
3867 flush_count += qeth_flush_buffers_on_no_pci(queue);
/* at this point the queue is UNLOCKED again */
3872 qeth_flush_buffers(queue, 0, start_index, flush_count);
/*
 * qeth_send_packet - top-level transmit path for one skb.
 *
 * Determines IP version and cast type, drops broadcasts on cards that are
 * not broadcast-capable, selects the priority output queue, builds the qeth
 * header, computes the number of buffer elements needed, and dispatches to
 * qeth_do_send_packet() (packing-capable path) or
 * qeth_do_send_packet_fast() for IQD (HiperSockets) cards.
 * On success the tx statistics are updated.
 */
3878 qeth_send_packet(struct qeth_card *card, struct sk_buff *skb)
3882 struct qeth_qdio_out_q *queue;
3883 struct qeth_hdr *hdr;
3884 int elements_needed;
3887 QETH_DBF_TEXT(trace, 6, "sendpkt");
3889 ipv = qeth_get_ip_version(skb);
3890 cast_type = qeth_get_cast_type(card, skb);
3891 if ((cast_type == RTN_BROADCAST) && (card->info.broadcast_capable == 0)){
3892 card->stats.tx_dropped++;
3893 card->stats.tx_errors++;
3894 dev_kfree_skb_any(skb);
3897 queue = card->qdio.out_qs
3898 [qeth_get_priority_queue(card, skb, ipv, cast_type)];
3900 if ((rc = qeth_prepare_skb(card, &skb, &hdr, ipv))){
3901 QETH_DBF_TEXT_(trace, 4, "1err%d", rc);
3904 qeth_fill_header(card, hdr, skb, ipv, cast_type);
/* element count: 1 for the header element plus the data span, accounting
 * for the header's offset within its page */
3905 elements_needed = 1 + (((((unsigned long) hdr) % PAGE_SIZE) + skb->len)
3907 if (elements_needed > QETH_MAX_BUFFER_ELEMENTS(card)){
3908 PRINT_ERR("qeth_do_send_packet: invalid size of "
3909 "IP packet. Discarded.");
3913 if (card->info.type != QETH_CARD_TYPE_IQD)
3914 rc = qeth_do_send_packet(card, queue, skb, hdr,
3917 rc = qeth_do_send_packet_fast(card, queue, skb, hdr,
3921 card->stats.tx_packets++;
3922 card->stats.tx_bytes += skb->len;
/*
 * qeth_mdio_read - emulate MII register reads for the SIOCGMIIREG ioctl.
 *
 * The OSA hardware has no real MDIO bus; plausible values are synthesized
 * from card/link state.  BMCR advertises 100 Mbit unless the link type is
 * gigabit/10-gigabit; the PHY ID registers are derived from the device MAC
 * address; most other registers read as fixed or zero values.
 */
3928 qeth_mdio_read(struct net_device *dev, int phy_id, int regnum)
3930 struct qeth_card *card = (struct qeth_card *) dev->priv;
3934 case MII_BMCR: /* Basic mode control register */
3936 if ((card->info.link_type != QETH_LINK_TYPE_GBIT_ETH)&&
3937 (card->info.link_type != QETH_LINK_TYPE_10GBIT_ETH))
3938 rc |= BMCR_SPEED100;
3940 case MII_BMSR: /* Basic mode status register */
3941 rc = BMSR_ERCAP | BMSR_ANEGCOMPLETE | BMSR_LSTATUS |
3942 BMSR_10HALF | BMSR_10FULL | BMSR_100HALF | BMSR_100FULL |
3945 case MII_PHYSID1: /* PHYS ID 1 */
3946 rc = (dev->dev_addr[0] << 16) | (dev->dev_addr[1] << 8) |
3948 rc = (rc >> 5) & 0xFFFF;
3950 case MII_PHYSID2: /* PHYS ID 2 */
3951 rc = (dev->dev_addr[2] << 10) & 0xFFFF;
3953 case MII_ADVERTISE: /* Advertisement control reg */
3956 case MII_LPA: /* Link partner ability reg */
3957 rc = LPA_10HALF | LPA_10FULL | LPA_100HALF | LPA_100FULL |
3958 LPA_100BASE4 | LPA_LPACK;
3960 case MII_EXPANSION: /* Expansion register */
3962 case MII_DCOUNTER: /* disconnect counter */
3964 case MII_FCSCOUNTER: /* false carrier counter */
3966 case MII_NWAYTEST: /* N-way auto-neg test register */
3968 case MII_RERRCOUNTER: /* rx error counter */
3969 rc = card->stats.rx_errors;
3971 case MII_SREVISION: /* silicon revision */
3973 case MII_RESV1: /* reserved 1 */
3975 case MII_LBRERROR: /* loopback, rx, bypass error */
3977 case MII_PHYADDR: /* physical address */
3979 case MII_RESV2: /* reserved 2 */
3981 case MII_TPISTATUS: /* TPI status for 10mbps */
3983 case MII_NCONFIG: /* network interface config */
/*
 * qeth_mdio_write - stub for the SIOCSMIIREG ioctl.
 *
 * There is no writable MII register set on this hardware; every known
 * register number falls through without effect (see the TODO at the
 * SIOCSMIIREG caller in qeth_do_ioctl).
 */
3993 qeth_mdio_write(struct net_device *dev, int phy_id, int regnum, int value)
3996 case MII_BMCR: /* Basic mode control register */
3997 case MII_BMSR: /* Basic mode status register */
3998 case MII_PHYSID1: /* PHYS ID 1 */
3999 case MII_PHYSID2: /* PHYS ID 2 */
4000 case MII_ADVERTISE: /* Advertisement control reg */
4001 case MII_LPA: /* Link partner ability reg */
4002 case MII_EXPANSION: /* Expansion register */
4003 case MII_DCOUNTER: /* disconnect counter */
4004 case MII_FCSCOUNTER: /* false carrier counter */
4005 case MII_NWAYTEST: /* N-way auto-neg test register */
4006 case MII_RERRCOUNTER: /* rx error counter */
4007 case MII_SREVISION: /* silicon revision */
4008 case MII_RESV1: /* reserved 1 */
4009 case MII_LBRERROR: /* loopback, rx, bypass error */
4010 case MII_PHYADDR: /* physical address */
4011 case MII_RESV2: /* reserved 2 */
4012 case MII_TPISTATUS: /* TPI status for 10mbps */
4013 case MII_NCONFIG: /* network interface config */
/*
 * qeth_arp_get_error_cause - map an IPA ARP return code to a human-readable
 * string for warning messages.  @rc is passed by pointer; presumably it is
 * also rewritten to an errno-style code for the visible cases (assignments
 * elided in this excerpt) -- TODO confirm against the full source.
 */
4019 static inline const char *
4020 qeth_arp_get_error_cause(int *rc)
4023 case QETH_IPA_ARP_RC_FAILED:
4025 return "operation failed";
4026 case QETH_IPA_ARP_RC_NOTSUPP:
4028 return "operation not supported";
4029 case QETH_IPA_ARP_RC_OUT_OF_RANGE:
4031 return "argument out of range";
4032 case QETH_IPA_ARP_RC_Q_NOTSUPP:
4034 return "query operation not supported";
4035 case QETH_IPA_ARP_RC_Q_NO_DATA:
4037 return "no query data available";
4039 return "unknown error";
/* forward declaration; defined later in the file */
4044 qeth_send_simple_setassparms(struct qeth_card *, enum qeth_ipa_funcs,
/*
 * qeth_arp_set_no_entries - set the size of the on-card ARP cache via the
 * IPA_CMD_ASS_ARP_SET_NO_ENTRIES assist command.
 * Rejected for GuestLAN cards and cards without IPA_ARP_PROCESSING support;
 * failures are reported with a translated cause string.
 */
4048 qeth_arp_set_no_entries(struct qeth_card *card, int no_entries)
4053 QETH_DBF_TEXT(trace,3,"arpstnoe");
4055 /* TODO: really not supported by GuestLAN? */
4056 if (card->info.guestlan)
4058 if (!qeth_is_supported(card,IPA_ARP_PROCESSING)) {
4059 PRINT_WARN("ARP processing not supported "
4060 "on %s!\n", card->info.if_name);
4063 rc = qeth_send_simple_setassparms(card, IPA_ARP_PROCESSING,
4064 IPA_CMD_ASS_ARP_SET_NO_ENTRIES,
4068 PRINT_WARN("Could not set number of ARP entries on %s: "
4070 card->info.if_name, qeth_arp_get_error_cause(&rc),
/*
 * qeth_copy_arp_entries_stripped - copy ARP query entries from the command
 * reply into the user-space staging buffer, dropping the leading 32 bytes
 * of "media specific information" from each entry.
 * @entry_size: size of one entry in @qdata; @uentry_size: stripped size.
 */
4077 qeth_copy_arp_entries_stripped(struct qeth_arp_query_info *qinfo,
4078 struct qeth_arp_query_data *qdata,
4079 int entry_size, int uentry_size)
4085 entry_ptr = (char *)&qdata->data;
4086 uentry_ptr = (char *)(qinfo->udata + qinfo->udata_offset);
4087 for (i = 0; i < qdata->no_entries; ++i){
4088 /* strip off 32 bytes "media specific information" */
4089 memcpy(uentry_ptr, (entry_ptr + 32), entry_size - 32);
4090 entry_ptr += entry_size;
4091 uentry_ptr += uentry_size;
/*
 * qeth_arp_query_cb - IPA reply callback for the ARP cache query.
 *
 * Accumulates entries from (possibly multiple) reply segments into
 * qinfo->udata.  Entry layout depends on qdata->reply_bits: entry5 for one
 * format, entry7 for ethernet/token-ring; entries are stripped when the
 * caller requested QETH_QARP_STRIP_ENTRIES.  When the last segment arrives
 * (seq_no >= number_of_replies) the total count and the reply-bits mask are
 * written to the fixed offsets at the head of the user buffer.
 * Errors are propagated through cmd->hdr.return_code.
 */
4096 qeth_arp_query_cb(struct qeth_card *card, struct qeth_reply *reply,
4099 struct qeth_ipa_cmd *cmd;
4100 struct qeth_arp_query_data *qdata;
4101 struct qeth_arp_query_info *qinfo;
4106 QETH_DBF_TEXT(trace,4,"arpquecb");
4108 qinfo = (struct qeth_arp_query_info *) reply->param;
4109 cmd = (struct qeth_ipa_cmd *) data;
4110 if (cmd->hdr.return_code) {
4111 QETH_DBF_TEXT_(trace,4,"qaer1%i", cmd->hdr.return_code);
4114 if (cmd->data.setassparms.hdr.return_code) {
4115 cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
4116 QETH_DBF_TEXT_(trace,4,"qaer2%i", cmd->hdr.return_code);
4119 qdata = &cmd->data.setassparms.data.query_arp;
4120 switch(qdata->reply_bits){
4122 uentry_size = entry_size = sizeof(struct qeth_arp_qi_entry5);
4123 if (qinfo->mask_bits & QETH_QARP_STRIP_ENTRIES)
4124 uentry_size = sizeof(struct qeth_arp_qi_entry5_short);
4127 /* fall through to default */
4129 /* tr is the same as eth -> entry7 */
4130 uentry_size = entry_size = sizeof(struct qeth_arp_qi_entry7);
4131 if (qinfo->mask_bits & QETH_QARP_STRIP_ENTRIES)
4132 uentry_size = sizeof(struct qeth_arp_qi_entry7_short);
4135 /* check if there is enough room in userspace */
4136 if ((qinfo->udata_len - qinfo->udata_offset) <
4137 qdata->no_entries * uentry_size){
4138 QETH_DBF_TEXT_(trace, 4, "qaer3%i", -ENOMEM);
4139 cmd->hdr.return_code = -ENOMEM;
4140 PRINT_WARN("query ARP user space buffer is too small for "
4141 "the returned number of ARP entries. "
4142 "Aborting query!\n");
4145 QETH_DBF_TEXT_(trace, 4, "anore%i",
4146 cmd->data.setassparms.hdr.number_of_replies);
4147 QETH_DBF_TEXT_(trace, 4, "aseqn%i", cmd->data.setassparms.hdr.seq_no);
4148 QETH_DBF_TEXT_(trace, 4, "anoen%i", qdata->no_entries);
4150 if (qinfo->mask_bits & QETH_QARP_STRIP_ENTRIES) {
4151 /* strip off "media specific information" */
4152 qeth_copy_arp_entries_stripped(qinfo, qdata, entry_size,
4155 /*copy entries to user buffer*/
4156 memcpy(qinfo->udata + qinfo->udata_offset,
4157 (char *)&qdata->data, qdata->no_entries*uentry_size);
4159 qinfo->no_entries += qdata->no_entries;
4160 qinfo->udata_offset += (qdata->no_entries*uentry_size);
4161 /* check if all replies received ... */
4162 if (cmd->data.setassparms.hdr.seq_no <
4163 cmd->data.setassparms.hdr.number_of_replies)
4165 memcpy(qinfo->udata, &qinfo->no_entries, 4);
4166 /* keep STRIP_ENTRIES flag so the user program can distinguish
4167 * stripped entries from normal ones */
4168 if (qinfo->mask_bits & QETH_QARP_STRIP_ENTRIES)
4169 qdata->reply_bits |= QETH_QARP_STRIP_ENTRIES;
4170 memcpy(qinfo->udata + QETH_QARP_MASK_OFFSET,&qdata->reply_bits,2);
/* error path: write the (zero) entry count so userspace sees a result */
4174 memcpy(qinfo->udata, &i, 4);
/*
 * qeth_send_ipa_arp_cmd - prepend the IPA PDU header (with the ULP
 * connection token) to @iob and send it as control data, dispatching
 * replies to @reply_cb with @reply_param.
 */
4179 qeth_send_ipa_arp_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
4180 int len, int (*reply_cb)(struct qeth_card *,
4181 struct qeth_reply *,
4185 QETH_DBF_TEXT(trace,4,"sendarp");
4187 memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE);
4188 memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data),
4189 &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
4190 return qeth_send_control_data(card, IPA_PDU_HEADER_SIZE + len, iob,
4191 reply_cb, reply_param);
/*
 * qeth_send_ipa_snmp_cmd - like qeth_send_ipa_arp_cmd, but additionally
 * patches the four 16-bit length fields of the IPA PDU header so the
 * (variable-length) SNMP payload size is announced correctly.
 */
4195 qeth_send_ipa_snmp_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
4196 int len, int (*reply_cb)(struct qeth_card *,
4197 struct qeth_reply *,
4203 QETH_DBF_TEXT(trace,4,"sendsnmp");
4205 memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE);
4206 memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data),
4207 &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
4208 /* adjust PDU length fields in IPA_PDU_HEADER */
4209 s1 = (u32) IPA_PDU_HEADER_SIZE + len;
4211 memcpy(QETH_IPA_PDU_LEN_TOTAL(iob->data), &s1, 2);
4212 memcpy(QETH_IPA_PDU_LEN_PDU1(iob->data), &s2, 2);
4213 memcpy(QETH_IPA_PDU_LEN_PDU2(iob->data), &s2, 2);
4214 memcpy(QETH_IPA_PDU_LEN_PDU3(iob->data), &s2, 2);
4215 return qeth_send_control_data(card, IPA_PDU_HEADER_SIZE + len, iob,
4216 reply_cb, reply_param);
/* forward declaration; defined later in the file */
4219 static struct qeth_cmd_buffer *
4220 qeth_get_setassparms_cmd(struct qeth_card *, enum qeth_ipa_funcs,
4221 __u16, __u16, enum qeth_prot_versions);
/*
 * qeth_arp_query - SIOC_QETH_ARP_QUERY_INFO ioctl backend.
 *
 * Reads the user buffer size and mask bits from @udata, allocates a
 * zeroed kernel staging buffer, issues IPA_CMD_ASS_ARP_QUERY_INFO with
 * qeth_arp_query_cb collecting entries, and copies the result (or, on
 * error, just the 4-byte count) back to user space.
 * NOTE(review): copy_to_user return values are ignored here.
 */
4223 qeth_arp_query(struct qeth_card *card, char *udata)
4225 struct qeth_cmd_buffer *iob;
4226 struct qeth_arp_query_info qinfo = {0, };
4230 QETH_DBF_TEXT(trace,3,"arpquery");
4233 * currently GuestLAN does only deliver all zeros on query arp,
4234 * even though arp processing is supported (according to IPA supp.
4235 * funcs flags); since all zeros is no valueable information,
4236 * we say EOPNOTSUPP for all ARP functions
4238 if (card->info.guestlan)
4240 if (!qeth_is_supported(card,/*IPA_QUERY_ARP_ADDR_INFO*/
4241 IPA_ARP_PROCESSING)) {
4242 PRINT_WARN("ARP processing not supported "
4243 "on %s!\n", card->info.if_name);
4246 /* get size of userspace buffer and mask_bits -> 6 bytes */
4247 if (copy_from_user(&qinfo, udata, 6))
4249 if (!(qinfo.udata = kmalloc(qinfo.udata_len, GFP_KERNEL)))
4251 memset(qinfo.udata, 0, qinfo.udata_len);
4252 qinfo.udata_offset = QETH_QARP_ENTRIES_OFFSET;
4253 iob = qeth_get_setassparms_cmd(card, IPA_ARP_PROCESSING,
4254 IPA_CMD_ASS_ARP_QUERY_INFO,
4255 sizeof(int),QETH_PROT_IPV4);
4257 rc = qeth_send_ipa_arp_cmd(card, iob,
4258 QETH_SETASS_BASE_LEN+QETH_ARP_CMD_LEN,
4259 qeth_arp_query_cb, (void *)&qinfo);
4262 PRINT_WARN("Error while querying ARP cache on %s: %s "
4264 card->info.if_name, qeth_arp_get_error_cause(&rc),
4266 copy_to_user(udata, qinfo.udata, 4);
4268 copy_to_user(udata, qinfo.udata, qinfo.udata_len);
4275 * SNMP command callback
/*
 * qeth_snmp_command_cb - IPA reply callback for SNMP control commands.
 *
 * Extracts the SNMP payload length from the PDU header, checks that the
 * user staging buffer (reply->param, reused qeth_arp_query_info) has room,
 * and appends the payload.  The first segment (seq_no == 1) also carries
 * the qeth_snmp_cmd header; later segments copy only the request data.
 * Returns once seq_no reaches used_total.
 */
4278 qeth_snmp_command_cb(struct qeth_card *card, struct qeth_reply *reply,
4279 unsigned long sdata)
4281 struct qeth_ipa_cmd *cmd;
4282 struct qeth_arp_query_info *qinfo;
4283 struct qeth_snmp_cmd *snmp;
4284 unsigned char *data;
4287 QETH_DBF_TEXT(trace,3,"snpcmdcb");
4289 cmd = (struct qeth_ipa_cmd *) sdata;
/* reply->offset points back to the start of the raw PDU buffer */
4290 data = (unsigned char *)((char *)cmd - reply->offset);
4291 qinfo = (struct qeth_arp_query_info *) reply->param;
4292 snmp = &cmd->data.setadapterparms.data.snmp;
4294 if (cmd->hdr.return_code) {
4295 QETH_DBF_TEXT_(trace,4,"scer1%i", cmd->hdr.return_code);
4298 if (cmd->data.setadapterparms.hdr.return_code) {
4299 cmd->hdr.return_code = cmd->data.setadapterparms.hdr.return_code;
4300 QETH_DBF_TEXT_(trace,4,"scer2%i", cmd->hdr.return_code);
4303 data_len = *((__u16*)QETH_IPA_PDU_LEN_PDU1(data));
4304 if (cmd->data.setadapterparms.hdr.seq_no == 1)
4305 data_len -= (__u16)((char *)&snmp->data - (char *)cmd);
4307 data_len -= (__u16)((char*)&snmp->request - (char *)cmd);
4309 /* check if there is enough room in userspace */
4310 if ((qinfo->udata_len - qinfo->udata_offset) < data_len) {
4311 QETH_DBF_TEXT_(trace, 4, "scer3%i", -ENOMEM);
4312 cmd->hdr.return_code = -ENOMEM;
4315 QETH_DBF_TEXT_(trace, 4, "snore%i",
4316 cmd->data.setadapterparms.hdr.used_total);
4317 QETH_DBF_TEXT_(trace, 4, "sseqn%i", cmd->data.setadapterparms.hdr.seq_no);
4318 /*copy entries to user buffer*/
4319 if (cmd->data.setadapterparms.hdr.seq_no == 1) {
4320 memcpy(qinfo->udata + qinfo->udata_offset,
4322 data_len + offsetof(struct qeth_snmp_cmd,data));
4323 qinfo->udata_offset += offsetof(struct qeth_snmp_cmd, data);
4325 memcpy(qinfo->udata + qinfo->udata_offset,
4326 (char *)&snmp->request, data_len);
4328 qinfo->udata_offset += data_len;
4329 /* check if all replies received ... */
4330 QETH_DBF_TEXT_(trace, 4, "srtot%i",
4331 cmd->data.setadapterparms.hdr.used_total);
4332 QETH_DBF_TEXT_(trace, 4, "srseq%i",
4333 cmd->data.setadapterparms.hdr.seq_no);
4334 if (cmd->data.setadapterparms.hdr.seq_no <
4335 cmd->data.setadapterparms.hdr.used_total)
/* forward declaration; defined later in the file */
4340 static struct qeth_cmd_buffer *
4341 qeth_get_ipacmd_buffer(struct qeth_card *, enum qeth_ipa_cmds,
4342 enum qeth_prot_versions );
/*
 * qeth_get_adapter_cmd - allocate a command buffer pre-initialized as an
 * IPA_CMD_SETADAPTERPARMS command with the given sub-command code and
 * length; used_total/seq_no start at 1 (single-segment request).
 */
4344 static struct qeth_cmd_buffer *
4345 qeth_get_adapter_cmd(struct qeth_card *card, __u32 command, __u32 cmdlen)
4347 struct qeth_cmd_buffer *iob;
4348 struct qeth_ipa_cmd *cmd;
4350 iob = qeth_get_ipacmd_buffer(card,IPA_CMD_SETADAPTERPARMS,
4352 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
4353 cmd->data.setadapterparms.hdr.cmdlength = cmdlen;
4354 cmd->data.setadapterparms.hdr.command_code = command;
4355 cmd->data.setadapterparms.hdr.used_total = 1;
4356 cmd->data.setadapterparms.hdr.seq_no = 1;
4362 * function to send SNMP commands to OSA-E card
/*
 * qeth_snmp_command - SIOC_QETH_ADP_SET_SNMP_CONTROL ioctl backend.
 *
 * Copies the user request (qeth_snmp_ureq) into kernel memory, allocates a
 * staging buffer of ureq->hdr.data_len bytes for the reply, sends an
 * IPA_SETADP_SET_SNMP_CONTROL adapter command with qeth_snmp_command_cb
 * collecting the segments, and copies the result back to @udata.
 * Rejected for GuestLAN cards and cards without SNMP-control support.
 */
4365 qeth_snmp_command(struct qeth_card *card, char *udata)
4367 struct qeth_cmd_buffer *iob;
4368 struct qeth_ipa_cmd *cmd;
4369 struct qeth_snmp_ureq *ureq;
4371 struct qeth_arp_query_info qinfo = {0, };
4374 QETH_DBF_TEXT(trace,3,"snmpcmd");
4376 if (card->info.guestlan)
4378 if (!qeth_adp_supported(card,IPA_SETADP_SET_SNMP_CONTROL)) {
4379 PRINT_WARN("SNMP Query MIBS not supported "
4380 "on %s!\n", card->info.if_name);
4383 /* skip 4 bytes (data_len struct member) to get req_len */
4384 if (copy_from_user(&req_len, udata + sizeof(int), sizeof(int)))
4386 ureq = kmalloc(req_len+sizeof(struct qeth_snmp_ureq_hdr), GFP_KERNEL);
4388 QETH_DBF_TEXT(trace, 2, "snmpnome");
4391 if (copy_from_user(ureq, udata,
4392 req_len+sizeof(struct qeth_snmp_ureq_hdr))){
4396 qinfo.udata_len = ureq->hdr.data_len;
4397 if (!(qinfo.udata = kmalloc(qinfo.udata_len, GFP_KERNEL))){
4401 memset(qinfo.udata, 0, qinfo.udata_len);
4402 qinfo.udata_offset = sizeof(struct qeth_snmp_ureq_hdr);
4404 iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_SNMP_CONTROL,
4405 QETH_SNMP_SETADP_CMDLENGTH + req_len);
4406 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
4407 memcpy(&cmd->data.setadapterparms.data.snmp, &ureq->cmd, req_len);
4408 rc = qeth_send_ipa_snmp_cmd(card, iob, QETH_SETADP_BASE_LEN + req_len,
4409 qeth_snmp_command_cb, (void *)&qinfo);
4411 PRINT_WARN("SNMP command failed on %s: (0x%x)\n",
4412 card->info.if_name, rc);
4414 copy_to_user(udata, qinfo.udata, qinfo.udata_len);
/* forward declarations; defined later in the file */
4422 qeth_default_setassparms_cb(struct qeth_card *, struct qeth_reply *,
4426 qeth_send_setassparms(struct qeth_card *, struct qeth_cmd_buffer *,
4429 (struct qeth_card *, struct qeth_reply *, unsigned long),
/*
 * qeth_arp_add_entry - add one static entry to the on-card ARP cache via
 * IPA_CMD_ASS_ARP_ADD_ENTRY.  Rejected for GuestLAN cards and cards
 * without IPA_ARP_PROCESSING; failures log the dotted-quad address and a
 * translated cause string.
 */
4433 qeth_arp_add_entry(struct qeth_card *card, struct qeth_arp_cache_entry *entry)
4435 struct qeth_cmd_buffer *iob;
4440 QETH_DBF_TEXT(trace,3,"arpadent");
4443 * currently GuestLAN does only deliver all zeros on query arp,
4444 * even though arp processing is supported (according to IPA supp.
4445 * funcs flags); since all zeros is no valueable information,
4446 * we say EOPNOTSUPP for all ARP functions
4448 if (card->info.guestlan)
4450 if (!qeth_is_supported(card,IPA_ARP_PROCESSING)) {
4451 PRINT_WARN("ARP processing not supported "
4452 "on %s!\n", card->info.if_name);
4456 iob = qeth_get_setassparms_cmd(card, IPA_ARP_PROCESSING,
4457 IPA_CMD_ASS_ARP_ADD_ENTRY,
4458 sizeof(struct qeth_arp_cache_entry),
4460 rc = qeth_send_setassparms(card, iob,
4461 sizeof(struct qeth_arp_cache_entry),
4462 (unsigned long) entry,
4463 qeth_default_setassparms_cb, NULL);
4466 qeth_ipaddr4_to_string((u8 *)entry->ipaddr, buf);
4467 PRINT_WARN("Could not add ARP entry for address %s on %s: "
4469 buf, card->info.if_name,
4470 qeth_arp_get_error_cause(&rc), tmp, tmp);
/*
 * qeth_arp_remove_entry - remove one entry from the on-card ARP cache via
 * IPA_CMD_ASS_ARP_REMOVE_ENTRY.  Only the first 12 bytes of the entry
 * (the lookup key) are copied into a local buffer and sent to the card.
 * Same GuestLAN / IPA-support guards as qeth_arp_add_entry.
 */
4476 qeth_arp_remove_entry(struct qeth_card *card, struct qeth_arp_cache_entry *entry)
4478 struct qeth_cmd_buffer *iob;
4479 char buf[16] = {0, };
4483 QETH_DBF_TEXT(trace,3,"arprment");
4486 * currently GuestLAN does only deliver all zeros on query arp,
4487 * even though arp processing is supported (according to IPA supp.
4488 * funcs flags); since all zeros is no valueable information,
4489 * we say EOPNOTSUPP for all ARP functions
4491 if (card->info.guestlan)
4493 if (!qeth_is_supported(card,IPA_ARP_PROCESSING)) {
4494 PRINT_WARN("ARP processing not supported "
4495 "on %s!\n", card->info.if_name);
4498 memcpy(buf, entry, 12);
4499 iob = qeth_get_setassparms_cmd(card, IPA_ARP_PROCESSING,
4500 IPA_CMD_ASS_ARP_REMOVE_ENTRY,
4503 rc = qeth_send_setassparms(card, iob,
4504 12, (unsigned long)buf,
4505 qeth_default_setassparms_cb, NULL);
/* note: buf is reused below to format the address for the warning */
4509 qeth_ipaddr4_to_string((u8 *)entry->ipaddr, buf);
4510 PRINT_WARN("Could not delete ARP entry for address %s on %s: "
4512 buf, card->info.if_name,
4513 qeth_arp_get_error_cause(&rc), tmp, tmp);
/*
 * qeth_arp_flush_cache - clear the entire on-card ARP cache via
 * IPA_CMD_ASS_ARP_FLUSH_CACHE.  Additionally rejected on IQD
 * (HiperSockets) cards, which have no ARP cache to flush.
 */
4519 qeth_arp_flush_cache(struct qeth_card *card)
4524 QETH_DBF_TEXT(trace,3,"arpflush");
4527 * currently GuestLAN does only deliver all zeros on query arp,
4528 * even though arp processing is supported (according to IPA supp.
4529 * funcs flags); since all zeros is no valueable information,
4530 * we say EOPNOTSUPP for all ARP functions
4532 if (card->info.guestlan || (card->info.type == QETH_CARD_TYPE_IQD))
4534 if (!qeth_is_supported(card,IPA_ARP_PROCESSING)) {
4535 PRINT_WARN("ARP processing not supported "
4536 "on %s!\n", card->info.if_name);
4539 rc = qeth_send_simple_setassparms(card, IPA_ARP_PROCESSING,
4540 IPA_CMD_ASS_ARP_FLUSH_CACHE, 0);
4543 PRINT_WARN("Could not flush ARP cache on %s: %s (0x%x/%d)\n",
4544 card->info.if_name, qeth_arp_get_error_cause(&rc),
/*
 * qeth_do_ioctl - net_device ioctl handler.
 *
 * Dispatches the private SIOC_QETH_* ARP/SNMP/card-type ioctls and the
 * generic MII ioctls.  All ARP-modifying commands require CAP_NET_ADMIN;
 * the card must be UP or SOFTSETUP.  MII reads are emulated via
 * qeth_mdio_read(); MII writes go to the qeth_mdio_write() stub.
 */
4551 qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
4553 struct qeth_card *card = (struct qeth_card *)dev->priv;
4554 struct qeth_arp_cache_entry arp_entry;
4555 struct mii_ioctl_data *mii_data;
4561 if ((card->state != CARD_STATE_UP) &&
4562 (card->state != CARD_STATE_SOFTSETUP))
4566 case SIOC_QETH_ARP_SET_NO_ENTRIES:
4567 if (!capable(CAP_NET_ADMIN)){
4571 rc = qeth_arp_set_no_entries(card, rq->ifr_ifru.ifru_ivalue);
4573 case SIOC_QETH_ARP_QUERY_INFO:
4574 if (!capable(CAP_NET_ADMIN)){
4578 rc = qeth_arp_query(card, rq->ifr_ifru.ifru_data);
4580 case SIOC_QETH_ARP_ADD_ENTRY:
4581 if (!capable(CAP_NET_ADMIN)){
4585 if (copy_from_user(&arp_entry, rq->ifr_ifru.ifru_data,
4586 sizeof(struct qeth_arp_cache_entry)))
4589 rc = qeth_arp_add_entry(card, &arp_entry);
4591 case SIOC_QETH_ARP_REMOVE_ENTRY:
4592 if (!capable(CAP_NET_ADMIN)){
4596 if (copy_from_user(&arp_entry, rq->ifr_ifru.ifru_data,
4597 sizeof(struct qeth_arp_cache_entry)))
4600 rc = qeth_arp_remove_entry(card, &arp_entry);
4602 case SIOC_QETH_ARP_FLUSH_CACHE:
4603 if (!capable(CAP_NET_ADMIN)){
4607 rc = qeth_arp_flush_cache(card);
4609 case SIOC_QETH_ADP_SET_SNMP_CONTROL:
4610 rc = qeth_snmp_command(card, rq->ifr_ifru.ifru_data);
4612 case SIOC_QETH_GET_CARD_TYPE:
4613 if ((card->info.type == QETH_CARD_TYPE_OSAE) &&
4614 !card->info.guestlan)
/* SIOCGMIIPHY: the emulated PHY always has id 0 */
4619 mii_data = if_mii(rq);
4620 mii_data->phy_id = 0;
4623 mii_data = if_mii(rq);
4624 if (mii_data->phy_id != 0)
4627 mii_data->val_out = qeth_mdio_read(dev,mii_data->phy_id,
4633 /* TODO: remove return if qeth_mdio_write does something */
4634 if (!capable(CAP_NET_ADMIN)){
4638 mii_data = if_mii(rq);
4639 if (mii_data->phy_id != 0)
4642 qeth_mdio_write(dev, mii_data->phy_id, mii_data->reg_num,
4649 QETH_DBF_TEXT_(trace, 2, "ioce%d", rc);
/*
 * qeth_get_stats - net_device get_stats hook: return the per-card
 * statistics block embedded in the qeth_card.
 */
4653 static struct net_device_stats *
4654 qeth_get_stats(struct net_device *dev)
4656 struct qeth_card *card;
4658 card = (struct qeth_card *) (dev->priv);
4660 QETH_DBF_TEXT(trace,5,"getstat");
4662 return &card->stats;
/*
 * qeth_change_mtu - net_device change_mtu hook.
 * Rejects MTUs above 65535 and, when the card lacks IP-fragmentation
 * support, MTUs that qeth_mtu_is_valid() rejects for this card.
 */
4666 qeth_change_mtu(struct net_device *dev, int new_mtu)
4668 struct qeth_card *card;
4671 card = (struct qeth_card *) (dev->priv);
4673 QETH_DBF_TEXT(trace,4,"chgmtu");
4674 sprintf(dbf_text, "%8x", new_mtu);
4675 QETH_DBF_TEXT(trace,4,dbf_text);
4679 if (new_mtu > 65535)
4681 if ((!qeth_is_supported(card,IPA_IP_FRAGMENTATION)) &&
4682 (!qeth_mtu_is_valid(card, new_mtu)))
4688 #ifdef CONFIG_QETH_VLAN
/*
 * qeth_vlan_rx_register - VLAN rx-register hook: record the vlan_group
 * under the card's vlanlock.
 */
4690 qeth_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
4692 struct qeth_card *card;
4693 unsigned long flags;
4695 QETH_DBF_TEXT(trace,4,"vlanreg");
4697 card = (struct qeth_card *) dev->priv;
4698 spin_lock_irqsave(&card->vlanlock, flags);
4699 card->vlangrp = grp;
4700 spin_unlock_irqrestore(&card->vlanlock, flags);
/*
 * qeth_free_vlan_buffer - drop queued skbs tagged with VLAN id @vid from
 * one output buffer's skb list; skbs for other VLANs are requeued in
 * their original order via a temporary list.
 */
4704 qeth_free_vlan_buffer(struct qeth_card *card, struct qeth_qdio_out_buffer *buf,
4708 struct sk_buff *skb;
4709 struct sk_buff_head tmp_list;
4711 skb_queue_head_init(&tmp_list);
4712 for(i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i){
4713 while ((skb = skb_dequeue(&buf->skb_list))){
4714 if (vlan_tx_tag_present(skb) &&
4715 (vlan_tx_tag_get(skb) == vid)) {
4716 atomic_dec(&skb->users);
4719 skb_queue_tail(&tmp_list, skb);
4722 while ((skb = skb_dequeue(&tmp_list)))
4723 skb_queue_tail(&buf->skb_list, skb);
/*
 * qeth_free_vlan_skbs - apply qeth_free_vlan_buffer() to every buffer of
 * every output queue, purging all queued skbs for VLAN id @vid.
 */
4727 qeth_free_vlan_skbs(struct qeth_card *card, unsigned short vid)
4731 QETH_DBF_TEXT(trace, 4, "frvlskbs");
4732 for (i = 0; i < card->qdio.no_out_queues; ++i){
4733 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j)
4734 qeth_free_vlan_buffer(card, &card->qdio.
4735 out_qs[i]->bufs[j], vid);
/*
 * qeth_free_vlan_addresses4 - deregister every IPv4 address configured on
 * the VLAN device for @vid by queueing delete-IP jobs via qeth_delete_ip().
 */
4740 qeth_free_vlan_addresses4(struct qeth_card *card, unsigned short vid)
4742 struct in_device *in_dev;
4743 struct in_ifaddr *ifa;
4744 struct qeth_ipaddr *addr;
4746 QETH_DBF_TEXT(trace, 4, "frvaddr4");
4750 in_dev = __in_dev_get(card->vlangrp->vlan_devices[vid]);
4753 for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next){
4754 addr = qeth_get_addr_buffer(QETH_PROT_IPV4);
4756 addr->u.a4.addr = ifa->ifa_address;
4757 addr->u.a4.mask = ifa->ifa_mask;
4758 addr->type = QETH_IP_TYPE_NORMAL;
4759 if (!qeth_delete_ip(card, addr))
4767 #ifdef CONFIG_QETH_IPV6
/*
 * qeth_free_vlan_addresses6 - IPv6 counterpart of
 * qeth_free_vlan_addresses4: deregister every IPv6 address of the VLAN
 * device for @vid.  Compiled out (empty macro below) without
 * CONFIG_QETH_IPV6.
 */
4769 qeth_free_vlan_addresses6(struct qeth_card *card, unsigned short vid)
4771 struct inet6_dev *in6_dev;
4772 struct inet6_ifaddr *ifa;
4773 struct qeth_ipaddr *addr;
4775 QETH_DBF_TEXT(trace, 4, "frvaddr6");
4778 in6_dev = in6_dev_get(card->vlangrp->vlan_devices[vid]);
4781 for (ifa = in6_dev->addr_list; ifa; ifa = ifa->lst_next){
4782 addr = qeth_get_addr_buffer(QETH_PROT_IPV6);
4784 memcpy(&addr->u.a6.addr, &ifa->addr,
4785 sizeof(struct in6_addr));
4786 addr->u.a6.pfxlen = ifa->prefix_len;
4787 addr->type = QETH_IP_TYPE_NORMAL;
4788 if (!qeth_delete_ip(card, addr))
4792 in6_dev_put(in6_dev);
/* no-op stand-in when IPv6 support is not compiled in */
4795 #define qeth_free_vlan_addresses6(card, vid) do{ ; }while(0)
/*
 * qeth_vlan_rx_kill_vid - VLAN teardown hook for id @vid: purge queued
 * skbs, deregister the VLAN device's IPv4/IPv6 addresses, detach the
 * vlan_devices slot under vlanlock, then refresh multicast registration.
 */
4799 qeth_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
4801 struct qeth_card *card;
4802 unsigned long flags;
4804 QETH_DBF_TEXT(trace,4,"vlkilvid");
4806 card = (struct qeth_card *) dev->priv;
4807 /* free all skbs for the vlan device */
4808 qeth_free_vlan_skbs(card, vid);
4809 spin_lock_irqsave(&card->vlanlock, flags);
4810 /* unregister IP addresses of vlan device */
4811 qeth_free_vlan_addresses4(card, vid);
4812 qeth_free_vlan_addresses6(card, vid);
4814 card->vlangrp->vlan_devices[vid] = NULL;
4815 spin_unlock_irqrestore(&card->vlanlock, flags);
4816 qeth_set_multicast_list(card->dev);
4821 * set multicast address on card
/*
 * qeth_set_multicast_list - net_device set_multicast_list hook: rebuild
 * the multicast registration (delete all, re-add IPv4 and, when built in,
 * IPv6 groups) and kick the SET_IP worker thread to push the changes.
 */
4824 qeth_set_multicast_list(struct net_device *dev)
4826 struct qeth_card *card = (struct qeth_card *) dev->priv;
4828 QETH_DBF_TEXT(trace,3,"setmulti");
4829 qeth_delete_mc_addresses(card);
4830 qeth_add_multicast_ipv4(card);
4831 #ifdef CONFIG_QETH_IPV6
4832 qeth_add_multicast_ipv6(card);
4834 if (qeth_set_thread_start_bit(card, QETH_SET_IP_THREAD) == 0)
4835 schedule_work(&card->kernel_thread_starter);
/* neigh_setup hook; body not visible in this excerpt */
4839 qeth_neigh_setup(struct net_device *dev, struct neigh_parms *np)
4844 #ifdef CONFIG_QETH_IPV6
/*
 * qeth_ipv6_generate_eui64 - build a modified EUI-64 interface identifier
 * from the 6-byte device MAC, inserting the two dev_id bytes in the
 * middle (positions 3 and 4) for ethernet/token-ring device types.
 */
4846 qeth_ipv6_generate_eui64(u8 * eui, struct net_device *dev)
4848 switch (dev->type) {
4851 case ARPHRD_IEEE802_TR:
4852 if (dev->addr_len != ETH_ALEN)
4854 memcpy(eui, dev->dev_addr, 3);
4855 memcpy(eui + 5, dev->dev_addr + 3, 3);
4856 eui[3] = (dev->dev_id >> 8) & 0xff;
4857 eui[4] = dev->dev_id & 0xff;
/*
 * qeth_get_mac_for_ipm - map an IPv4 multicast address to the matching
 * hardware multicast MAC, using the token-ring mapping for TR devices
 * and the ethernet mapping otherwise.
 */
4866 qeth_get_mac_for_ipm(__u32 ipm, char *mac, struct net_device *dev)
4868 if (dev->type == ARPHRD_IEEE802_TR)
4869 ip_tr_mc_map(ipm, mac);
4871 ip_eth_mc_map(ipm, mac);
/*
 * qeth_get_addr_buffer - allocate (GFP_ATOMIC) and zero a qeth_ipaddr,
 * defaulting its type to QETH_IP_TYPE_NORMAL; returns NULL (with a
 * warning) on allocation failure.
 */
4874 static struct qeth_ipaddr *
4875 qeth_get_addr_buffer(enum qeth_prot_versions prot)
4877 struct qeth_ipaddr *addr;
4879 addr = kmalloc(sizeof(struct qeth_ipaddr), GFP_ATOMIC);
4881 PRINT_WARN("Not enough memory to add address\n");
4884 memset(addr,0,sizeof(struct qeth_ipaddr));
4885 addr->type = QETH_IP_TYPE_NORMAL;
/*
 * qeth_delete_mc_addresses - queue a DEL_ALL_MC todo entry so the IP
 * worker removes every registered multicast address; the entry is
 * inserted under the card's ip_lock.
 */
4891 qeth_delete_mc_addresses(struct qeth_card *card)
4893 struct qeth_ipaddr *iptodo;
4894 unsigned long flags;
4896 QETH_DBF_TEXT(trace,4,"delmc");
4897 iptodo = qeth_get_addr_buffer(QETH_PROT_IPV4);
4899 QETH_DBF_TEXT(trace, 2, "dmcnomem");
4902 iptodo->type = QETH_IP_TYPE_DEL_ALL_MC;
4903 spin_lock_irqsave(&card->ip_lock, flags);
4904 if (!__qeth_insert_ip_todo(card, iptodo, 0))
4906 spin_unlock_irqrestore(&card->ip_lock, flags);
/*
 * qeth_add_mc - walk the in_device's IPv4 multicast list and queue one
 * qeth_ipaddr (address + mapped MAC, is_multicast set) per group via
 * qeth_add_ip().
 */
4910 qeth_add_mc(struct qeth_card *card, struct in_device *in4_dev)
4912 struct qeth_ipaddr *ipm;
4913 struct ip_mc_list *im4;
4914 char buf[MAX_ADDR_LEN];
4916 QETH_DBF_TEXT(trace,4,"addmc");
4917 for (im4 = in4_dev->mc_list; im4; im4 = im4->next) {
4918 qeth_get_mac_for_ipm(im4->multiaddr, buf, in4_dev->dev);
4919 ipm = qeth_get_addr_buffer(QETH_PROT_IPV4);
4922 ipm->u.a4.addr = im4->multiaddr;
4923 memcpy(ipm->mac,buf,OSA_ADDR_LEN);
4924 ipm->is_multicast = 1;
4925 if (!qeth_add_ip(card,ipm))
/*
 * qeth_add_vlan_mc - register IPv4 multicast groups of every UP VLAN
 * device on this card (VLAN builds only); requires IPA_FULL_VLAN support
 * and a registered vlan_group.  Each device's mc list is walked under its
 * mc_list_lock.
 */
4931 qeth_add_vlan_mc(struct qeth_card *card)
4933 #ifdef CONFIG_QETH_VLAN
4934 struct in_device *in_dev;
4935 struct vlan_group *vg;
4938 QETH_DBF_TEXT(trace,4,"addmcvl");
4939 if (!qeth_is_supported(card,IPA_FULL_VLAN) ||
4940 (card->vlangrp == NULL))
4944 for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
4945 if (vg->vlan_devices[i] == NULL ||
4946 !(vg->vlan_devices[i]->flags & IFF_UP))
4948 in_dev = in_dev_get(vg->vlan_devices[i]);
4951 read_lock(&in_dev->mc_list_lock);
4952 qeth_add_mc(card,in_dev);
4953 read_unlock(&in_dev->mc_list_lock);
/*
 * qeth_add_multicast_ipv4 - register the card device's own IPv4 multicast
 * groups, then those of its VLAN devices.
 */
4960 qeth_add_multicast_ipv4(struct qeth_card *card)
4962 struct in_device *in4_dev;
4964 QETH_DBF_TEXT(trace,4,"chkmcv4");
4965 in4_dev = in_dev_get(card->dev);
4966 if (in4_dev == NULL)
4968 read_lock(&in4_dev->mc_list_lock);
4969 qeth_add_mc(card, in4_dev);
4970 qeth_add_vlan_mc(card);
4971 read_unlock(&in4_dev->mc_list_lock);
4972 in_dev_put(in4_dev);
4975 #ifdef CONFIG_QETH_IPV6
/*
 * qeth_add_mc6 - IPv6 counterpart of qeth_add_mc: walk the inet6_dev's
 * multicast list and queue one qeth_ipaddr (address + ndisc-mapped MAC,
 * is_multicast set) per group via qeth_add_ip().
 */
4977 qeth_add_mc6(struct qeth_card *card, struct inet6_dev *in6_dev)
4979 struct qeth_ipaddr *ipm;
4980 struct ifmcaddr6 *im6;
4981 char buf[MAX_ADDR_LEN];
4983 QETH_DBF_TEXT(trace,4,"addmc6");
4984 for (im6 = in6_dev->mc_list; im6 != NULL; im6 = im6->next) {
4985 ndisc_mc_map(&im6->mca_addr, buf, in6_dev->dev, 0);
4986 ipm = qeth_get_addr_buffer(QETH_PROT_IPV6);
4989 ipm->is_multicast = 1;
4990 memcpy(ipm->mac,buf,OSA_ADDR_LEN);
4991 memcpy(&ipm->u.a6.addr,&im6->mca_addr.s6_addr,
4992 sizeof(struct in6_addr));
4993 if (!qeth_add_ip(card,ipm))
/*
 * qeth_add_vlan_mc6 - IPv6 counterpart of qeth_add_vlan_mc: register the
 * IPv6 multicast groups of every UP VLAN device (VLAN builds only);
 * requires IPA_FULL_VLAN support and a registered vlan_group.
 */
4999 qeth_add_vlan_mc6(struct qeth_card *card)
5001 #ifdef CONFIG_QETH_VLAN
5002 struct inet6_dev *in_dev;
5003 struct vlan_group *vg;
5006 QETH_DBF_TEXT(trace,4,"admc6vl");
5007 if (!qeth_is_supported(card,IPA_FULL_VLAN) ||
5008 (card->vlangrp == NULL))
5012 for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
5013 if (vg->vlan_devices[i] == NULL ||
5014 !(vg->vlan_devices[i]->flags & IFF_UP))
5016 in_dev = in6_dev_get(vg->vlan_devices[i]);
5019 read_lock(&in_dev->lock);
5020 qeth_add_mc6(card,in_dev);
5021 read_unlock(&in_dev->lock);
5022 in6_dev_put(in_dev);
5024 #endif /* CONFIG_QETH_VLAN */
/*
 * qeth_add_multicast_ipv6 - register the card device's own IPv6 multicast
 * groups, then those of its VLAN devices; no-op when the card lacks
 * IPA_IPV6 support.
 */
5028 qeth_add_multicast_ipv6(struct qeth_card *card)
5030 struct inet6_dev *in6_dev;
5032 QETH_DBF_TEXT(trace,4,"chkmcv6");
5033 if (!qeth_is_supported(card, IPA_IPV6))
5036 in6_dev = in6_dev_get(card->dev);
5037 if (in6_dev == NULL)
5039 read_lock(&in6_dev->lock);
5040 qeth_add_mc6(card, in6_dev);
5041 qeth_add_vlan_mc6(card);
5042 read_unlock(&in6_dev->lock);
5043 in6_dev_put(in6_dev);
5045 #endif /* CONFIG_QETH_IPV6 */
/*
 * qeth_fill_ipacmd_header - zero @cmd and fill in the common IPA command
 * header fields (command, initiator, sequence number, adapter type/number,
 * protocol version); ipa_supported/ipa_enabled start at 0.
 */
5048 qeth_fill_ipacmd_header(struct qeth_card *card, struct qeth_ipa_cmd *cmd,
5049 __u8 command, enum qeth_prot_versions prot)
5051 memset(cmd, 0, sizeof (struct qeth_ipa_cmd));
5052 cmd->hdr.command = command;
5053 cmd->hdr.initiator = IPA_CMD_INITIATOR_HOST;
5054 cmd->hdr.seqno = card->seqno.ipa;
5055 cmd->hdr.adapter_type = qeth_get_ipa_adp_type(card->info.link_type);
5056 cmd->hdr.rel_adapter_no = (__u8) card->info.portno;
5057 cmd->hdr.prim_version_no = 1;
5058 cmd->hdr.param_count = 1;
5059 cmd->hdr.prot_version = prot;
5060 cmd->hdr.ipa_supported = 0;
5061 cmd->hdr.ipa_enabled = 0;
/*
 * qeth_get_ipacmd_buffer - take a command buffer from the write channel
 * (may block in qeth_wait_for_buffer) and initialize the IPA command
 * header at the payload offset.
 */
5064 static struct qeth_cmd_buffer *
5065 qeth_get_ipacmd_buffer(struct qeth_card *card, enum qeth_ipa_cmds ipacmd,
5066 enum qeth_prot_versions prot)
5068 struct qeth_cmd_buffer *iob;
5069 struct qeth_ipa_cmd *cmd;
5071 iob = qeth_wait_for_buffer(&card->write);
5072 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
5073 qeth_fill_ipacmd_header(card, cmd, ipacmd, prot);
/*
 * qeth_send_setdelmc - issue an IPA set/delete-multicast command
 * (@ipacmd is IPA_CMD_SETIPM or IPA_CMD_DELIPM) carrying the address's
 * MAC and its IPv4 or IPv6 address, depending on addr->proto.
 */
5079 qeth_send_setdelmc(struct qeth_card *card, struct qeth_ipaddr *addr, int ipacmd)
5082 struct qeth_cmd_buffer *iob;
5083 struct qeth_ipa_cmd *cmd;
5085 QETH_DBF_TEXT(trace,4,"setdelmc");
5087 iob = qeth_get_ipacmd_buffer(card, ipacmd, addr->proto);
5088 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
5089 memcpy(&cmd->data.setdelipm.mac,addr->mac, OSA_ADDR_LEN);
5090 if (addr->proto == QETH_PROT_IPV6)
5091 memcpy(cmd->data.setdelipm.ip6, &addr->u.a6.addr,
5092 sizeof(struct in6_addr));
5094 memcpy(&cmd->data.setdelipm.ip4, &addr->u.a4.addr,4);
5096 rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
/*
 * qeth_fill_netmask - expand an IPv6 prefix length @len into a 16-byte
 * netmask (full 0xFF bytes followed by one partial byte).
 */
5101 qeth_fill_netmask(u8 *netmask, unsigned int len)
5104 for (i=0;i<16;i++) {
5109 netmask[i] = (u8)(0xFF00>>j);
/*
 * qeth_send_setdelip - issue an IPA set/delete-IP command (@ipacmd is
 * IPA_CMD_SETIP or IPA_CMD_DELIP) with the address, netmask (expanded
 * from the prefix length for IPv6) and the given command flags.
 */
5116 qeth_send_setdelip(struct qeth_card *card, struct qeth_ipaddr *addr,
5117 int ipacmd, unsigned int flags)
5120 struct qeth_cmd_buffer *iob;
5121 struct qeth_ipa_cmd *cmd;
5124 QETH_DBF_TEXT(trace,4,"setdelip");
5125 QETH_DBF_TEXT_(trace,4,"flags%02X", flags);
5127 iob = qeth_get_ipacmd_buffer(card, ipacmd, addr->proto);
5128 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
5129 if (addr->proto == QETH_PROT_IPV6) {
5130 memcpy(cmd->data.setdelip6.ip_addr, &addr->u.a6.addr,
5131 sizeof(struct in6_addr));
5132 qeth_fill_netmask(netmask,addr->u.a6.pfxlen);
5133 memcpy(cmd->data.setdelip6.mask, netmask,
5134 sizeof(struct in6_addr));
5135 cmd->data.setdelip6.flags = flags;
5137 memcpy(cmd->data.setdelip4.ip_addr, &addr->u.a4.addr, 4);
5138 memcpy(cmd->data.setdelip4.mask, &addr->u.a4.mask, 4);
5139 cmd->data.setdelip4.flags = flags;
5142 rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
/*
 * Register one IP address with the card, retrying on failure (retry
 * counter is elided in this dump). Multicast addresses go via SETIPM,
 * unicast via SETIP. Debug-traces the address first.
 */
5148 qeth_register_addr_entry(struct qeth_card *card, struct qeth_ipaddr *addr)
5154 if (addr->proto == QETH_PROT_IPV4) {
5155 QETH_DBF_TEXT(trace, 2,"setaddr4");
5156 QETH_DBF_HEX(trace, 3, &addr->u.a4.addr, sizeof(int));
5157 } else if (addr->proto == QETH_PROT_IPV6) {
5158 QETH_DBF_TEXT(trace, 2, "setaddr6");
/* Dump the 16-byte IPv6 address in two 8-byte chunks. */
5159 QETH_DBF_HEX(trace,3,&addr->u.a6.addr,8);
5160 QETH_DBF_HEX(trace,3,((char *)&addr->u.a6.addr)+8,8);
5162 QETH_DBF_TEXT(trace, 2, "setaddr?");
5163 QETH_DBF_HEX(trace, 3, addr, sizeof(struct qeth_ipaddr));
5166 if (addr->is_multicast)
5167 rc = qeth_send_setdelmc(card, addr, IPA_CMD_SETIPM);
5169 rc = qeth_send_setdelip(card, addr, IPA_CMD_SETIP,
5172 QETH_DBF_TEXT(trace, 2, "failed");
/* Retry loop: repeat while attempts remain and the command failed. */
5173 } while ((--cnt > 0) && rc);
5175 QETH_DBF_TEXT(trace, 2, "FAILED");
5176 /* TODO: re-activate this warning as soon as we have a
5178 qeth_ipaddr_to_string(addr->proto, (u8 *)&addr->u, buf);
5179 PRINT_WARN("Could not register IP address %s (rc=%x)\n",
/*
 * Deregister one IP address from the card: DELIPM for multicast,
 * DELIP for unicast. Mirrors qeth_register_addr_entry.
 */
5187 qeth_deregister_addr_entry(struct qeth_card *card, struct qeth_ipaddr *addr)
5192 if (addr->proto == QETH_PROT_IPV4) {
5193 QETH_DBF_TEXT(trace, 2,"deladdr4");
5194 QETH_DBF_HEX(trace, 3, &addr->u.a4.addr, sizeof(int));
5195 } else if (addr->proto == QETH_PROT_IPV6) {
5196 QETH_DBF_TEXT(trace, 2, "deladdr6");
/* Dump the 16-byte IPv6 address in two 8-byte chunks. */
5197 QETH_DBF_HEX(trace,3,&addr->u.a6.addr,8);
5198 QETH_DBF_HEX(trace,3,((char *)&addr->u.a6.addr)+8,8);
5200 QETH_DBF_TEXT(trace, 2, "deladdr?");
5201 QETH_DBF_HEX(trace, 3, addr, sizeof(struct qeth_ipaddr));
5203 if (addr->is_multicast)
5204 rc = qeth_send_setdelmc(card, addr, IPA_CMD_DELIPM);
5206 rc = qeth_send_setdelip(card, addr, IPA_CMD_DELIP,
5209 QETH_DBF_TEXT(trace, 2, "failed");
5210 /* TODO: re-activate this warning as soon as we have a
5212 qeth_ipaddr_to_string(addr->proto, (u8 *)&addr->u, buf);
5213 PRINT_WARN("Could not deregister IP address %s (rc=%x)\n",
/*
 * net_device init callback: wire the qeth entry points (open/stop/
 * xmit/ioctl/stats/mtu/multicast/...) into @dev, strip link-layer
 * header operations for NOARP card types, set up IPv6 EUI-64
 * generation from the card's unique id, and initialize flags,
 * header length, address length and MTU from card info.
 */
5221 qeth_netdev_init(struct net_device *dev)
5223 struct qeth_card *card;
5225 card = (struct qeth_card *) dev->priv;
5227 QETH_DBF_TEXT(trace,3,"initdev");
5229 dev->tx_timeout = &qeth_tx_timeout;
5230 dev->watchdog_timeo = QETH_TX_TIMEOUT;
5231 dev->open = qeth_open;
5232 dev->stop = qeth_stop;
5233 dev->hard_start_xmit = qeth_hard_start_xmit;
5234 dev->do_ioctl = qeth_do_ioctl;
5235 dev->get_stats = qeth_get_stats;
5236 dev->change_mtu = qeth_change_mtu;
5237 dev->neigh_setup = qeth_neigh_setup;
5238 dev->set_multicast_list = qeth_set_multicast_list;
5239 #ifdef CONFIG_QETH_VLAN
5240 dev->vlan_rx_register = qeth_vlan_rx_register;
5241 dev->vlan_rx_kill_vid = qeth_vlan_rx_kill_vid;
/* NOARP devices carry no link-layer header: disable all header ops. */
5243 if (qeth_get_netdev_flags(card->info.type) & IFF_NOARP) {
5244 dev->rebuild_header = NULL;
5245 dev->hard_header = NULL;
5246 dev->header_cache_update = NULL;
5247 dev->hard_header_cache = NULL;
5249 #ifdef CONFIG_QETH_IPV6
5250 /*IPv6 address autoconfiguration stuff*/
5251 card->dev->dev_id = card->info.unique_id & 0xffff;
/* Only generate an EUI-64 ourselves if the card supplied the id. */
5252 if (!(card->info.unique_id & UNIQUE_ID_NOT_BY_CARD))
5253 card->dev->generate_eui64 = qeth_ipv6_generate_eui64;
5257 dev->hard_header_parse = NULL;
5258 dev->set_mac_address = NULL;
5259 dev->flags |= qeth_get_netdev_flags(card->info.type);
/* Broadcast flag if the card supports it or fake_broadcast is set. */
5260 if ((card->options.fake_broadcast) ||
5261 (card->info.broadcast_capable))
5262 dev->flags |= IFF_BROADCAST;
5264 dev->hard_header_len =
5265 qeth_get_hlen(card->info.link_type) + card->options.add_hhlen;
5266 dev->addr_len = OSA_ADDR_LEN;
5267 dev->mtu = card->info.initial_mtu;
5269 SET_MODULE_OWNER(dev);
/*
 * Pick the IDX function level from card type (IQD vs OSAE) and
 * whether IP address takeover (IPAT) is enabled.
 */
5274 qeth_init_func_level(struct qeth_card *card)
5276 if (card->ipato.enabled) {
5277 if (card->info.type == QETH_CARD_TYPE_IQD)
5278 card->info.func_level =
5279 QETH_IDX_FUNC_LEVEL_IQD_ENA_IPAT;
5281 card->info.func_level =
5282 QETH_IDX_FUNC_LEVEL_OSAE_ENA_IPAT;
/* IPAT disabled (else-path). */
5284 if (card->info.type == QETH_CARD_TYPE_IQD)
5285 card->info.func_level =
5286 QETH_IDX_FUNC_LEVEL_IQD_DIS_IPAT;
5288 card->info.func_level =
5289 QETH_IDX_FUNC_LEVEL_OSAE_DIS_IPAT;
5294 * hardsetup card, initialize MPC and QDIO stuff
/*
 * Bring the card to hardsetup state: bounce the three CCW devices
 * offline/online on retry, clear QDIO, read the unit address, set up
 * tokens and function level, IDX-activate read and write channels,
 * initialize MPC, and on first setup allocate and prepare the netdev.
 * -ERESTARTSYS from any step aborts (user interrupt).
 */
5297 qeth_hardsetup_card(struct qeth_card *card)
5302 QETH_DBF_TEXT(setup, 2, "hrdsetup");
/* Retry path: force all three subchannels offline, then online again. */
5306 PRINT_WARN("Retrying to do IDX activates.\n");
5307 ccw_device_set_offline(CARD_DDEV(card));
5308 ccw_device_set_offline(CARD_WDEV(card));
5309 ccw_device_set_offline(CARD_RDEV(card));
5310 ccw_device_set_online(CARD_RDEV(card));
5311 ccw_device_set_online(CARD_WDEV(card));
5312 ccw_device_set_online(CARD_DDEV(card));
5314 rc = qeth_qdio_clear_card(card,card->info.type==QETH_CARD_TYPE_OSAE);
5315 if (rc == -ERESTARTSYS) {
5316 QETH_DBF_TEXT(setup, 2, "break1");
5319 QETH_DBF_TEXT_(setup, 2, "1err%d", rc);
5325 if ((rc = qeth_get_unitaddr(card))){
5326 QETH_DBF_TEXT_(setup, 2, "2err%d", rc);
5329 qeth_init_tokens(card);
5330 qeth_init_func_level(card);
5331 rc = qeth_idx_activate_channel(&card->read, qeth_idx_read_cb);
5332 if (rc == -ERESTARTSYS) {
5333 QETH_DBF_TEXT(setup, 2, "break2");
5336 QETH_DBF_TEXT_(setup, 2, "3err%d", rc);
5342 rc = qeth_idx_activate_channel(&card->write, qeth_idx_write_cb);
5343 if (rc == -ERESTARTSYS) {
5344 QETH_DBF_TEXT(setup, 2, "break3");
5347 QETH_DBF_TEXT_(setup, 2, "4err%d", rc);
5353 if ((rc = qeth_mpc_initialize(card))){
5354 QETH_DBF_TEXT_(setup, 2, "5err%d", rc);
5357 /* at first set_online allocate netdev */
5359 card->dev = qeth_get_netdevice(card->info.type,
5360 card->info.link_type);
/* Netdev allocation failed: clear QDIO again before bailing out. */
5362 qeth_qdio_clear_card(card, card->info.type ==
5363 QETH_CARD_TYPE_OSAE);
5365 QETH_DBF_TEXT_(setup, 2, "6err%d", rc);
5368 card->dev->priv = card;
5369 card->dev->type = qeth_get_arphdr_type(card->info.type,
5370 card->info.link_type);
5371 card->dev->init = qeth_netdev_init;
5375 PRINT_ERR("Initialization in hardsetup failed! rc=%d\n", rc);
/*
 * Default reply callback for SETASSPARMS commands: propagate the
 * assist-specific return code into the IPA header, record the enabled
 * IPA functions per protocol, and capture the checksum mask when an
 * inbound-checksum assist was started.
 */
5380 qeth_default_setassparms_cb(struct qeth_card *card, struct qeth_reply *reply,
5383 struct qeth_ipa_cmd *cmd;
5385 QETH_DBF_TEXT(trace,4,"defadpcb");
5387 cmd = (struct qeth_ipa_cmd *) data;
5388 if (cmd->hdr.return_code == 0){
/* Surface the nested setassparms return code at the top level. */
5389 cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
5390 if (cmd->hdr.prot_version == QETH_PROT_IPV4)
5391 card->options.ipa4.enabled_funcs = cmd->hdr.ipa_enabled;
5392 #ifdef CONFIG_QETH_IPV6
5393 if (cmd->hdr.prot_version == QETH_PROT_IPV6)
5394 card->options.ipa6.enabled_funcs = cmd->hdr.ipa_enabled;
5397 if (cmd->data.setassparms.hdr.assist_no == IPA_INBOUND_CHECKSUM &&
5398 cmd->data.setassparms.hdr.command_code == IPA_CMD_ASS_START) {
5399 card->info.csum_mask = cmd->data.setassparms.data.flags_32bit;
5400 QETH_DBF_TEXT_(trace, 3, "csum:%d", card->info.csum_mask);
/*
 * Default reply callback for SETADAPTERPARMS commands: surface the
 * nested adapter-parms return code at the IPA header level.
 */
5406 qeth_default_setadapterparms_cb(struct qeth_card *card,
5407 struct qeth_reply *reply,
5410 struct qeth_ipa_cmd *cmd;
5412 QETH_DBF_TEXT(trace,4,"defadpcb");
5414 cmd = (struct qeth_ipa_cmd *) data;
5415 if (cmd->hdr.return_code == 0)
5416 cmd->hdr.return_code = cmd->data.setadapterparms.hdr.return_code;
/*
 * Reply callback for QUERY_COMMANDS_SUPPORTED: record the reported
 * LAN type (low 7 bits, if non-zero) and the supported adapter-parms
 * command mask, then fall through to the default callback.
 */
5421 qeth_query_setadapterparms_cb(struct qeth_card *card, struct qeth_reply *reply,
5424 struct qeth_ipa_cmd *cmd;
5426 QETH_DBF_TEXT(trace,3,"quyadpcb");
5428 cmd = (struct qeth_ipa_cmd *) data;
5429 if (cmd->data.setadapterparms.data.query_cmds_supp.lan_type & 0x7f)
5430 card->info.link_type =
5431 cmd->data.setadapterparms.data.query_cmds_supp.lan_type;
5432 card->options.adp.supported_funcs =
5433 cmd->data.setadapterparms.data.query_cmds_supp.supported_cmds;
5434 return qeth_default_setadapterparms_cb(card, reply, (unsigned long)cmd);
/*
 * Ask the adapter which SETADAPTERPARMS commands it supports; results
 * are stored by qeth_query_setadapterparms_cb.
 */
5438 qeth_query_setadapterparms(struct qeth_card *card)
5441 struct qeth_cmd_buffer *iob;
5443 QETH_DBF_TEXT(trace,3,"queryadp");
5444 iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_COMMANDS_SUPPORTED,
5445 sizeof(struct qeth_ipacmd_setadpparms));
5446 rc = qeth_send_ipa_cmd(card, iob, qeth_query_setadapterparms_cb, NULL);
/*
 * Reply callback for ALTER_MAC_ADDRESS (read): copy the MAC address
 * returned by the adapter into the net_device, then run the default
 * adapter-parms callback.
 */
5451 qeth_setadpparms_change_macaddr_cb(struct qeth_card *card,
5452 struct qeth_reply *reply,
5455 struct qeth_ipa_cmd *cmd;
5457 QETH_DBF_TEXT(trace,4,"chgmaccb");
5459 cmd = (struct qeth_ipa_cmd *) data;
5460 memcpy(card->dev->dev_addr,
5461 &cmd->data.setadapterparms.data.change_addr.addr,OSA_ADDR_LEN);
5462 qeth_default_setadapterparms_cb(card, reply, (unsigned long) cmd);
/*
 * Read the adapter's MAC address via ALTER_MAC_ADDRESS with
 * CHANGE_ADDR_READ_MAC; the callback stores it in dev->dev_addr.
 */
5467 qeth_setadpparms_change_macaddr(struct qeth_card *card)
5470 struct qeth_cmd_buffer *iob;
5471 struct qeth_ipa_cmd *cmd;
5473 QETH_DBF_TEXT(trace,4,"chgmac");
5475 iob = qeth_get_adapter_cmd(card,IPA_SETADP_ALTER_MAC_ADDRESS,
5476 sizeof(struct qeth_ipacmd_setadpparms));
5477 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
5478 cmd->data.setadapterparms.data.change_addr.cmd = CHANGE_ADDR_READ_MAC;
5479 cmd->data.setadapterparms.data.change_addr.addr_size = OSA_ADDR_LEN;
5480 memcpy(&cmd->data.setadapterparms.data.change_addr.addr,
5481 card->dev->dev_addr, OSA_ADDR_LEN);
5482 rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_change_macaddr_cb,
/*
 * Generic helper: send a SETADAPTERPARMS @command whose payload is a
 * single @mode value, using the default adapter-parms callback.
 */
5488 qeth_send_setadp_mode(struct qeth_card *card, __u32 command, __u32 mode)
5491 struct qeth_cmd_buffer *iob;
5492 struct qeth_ipa_cmd *cmd;
5494 QETH_DBF_TEXT(trace,4,"adpmode");
5496 iob = qeth_get_adapter_cmd(card, command,
5497 sizeof(struct qeth_ipacmd_setadpparms));
5498 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
5499 cmd->data.setadapterparms.data.mode = mode;
5500 rc = qeth_send_ipa_cmd(card, iob, qeth_default_setadapterparms_cb,
/*
 * Token-ring (HSTR) adapter setup: if SET_BROADCAST_MODE is supported,
 * push the configured broadcast and macaddr modes to the adapter;
 * otherwise warn that the configured non-default modes cannot be set.
 */
5506 qeth_setadapter_hstr(struct qeth_card *card)
5510 QETH_DBF_TEXT(trace,4,"adphstr");
5512 if (qeth_adp_supported(card,IPA_SETADP_SET_BROADCAST_MODE)) {
5513 rc = qeth_send_setadp_mode(card, IPA_SETADP_SET_BROADCAST_MODE,
5514 card->options.broadcast_mode);
5516 PRINT_WARN("couldn't set broadcast mode on "
5518 CARD_BUS_ID(card), rc);
5519 rc = qeth_send_setadp_mode(card, IPA_SETADP_ALTER_MAC_ADDRESS,
5520 card->options.macaddr_mode);
5522 PRINT_WARN("couldn't set macaddr mode on "
5523 "device %s: x%x\n", CARD_BUS_ID(card), rc);
/* Assist unavailable: warn only if non-default modes were requested. */
5526 if (card->options.broadcast_mode == QETH_TR_BROADCAST_LOCAL)
5527 PRINT_WARN("set adapter parameters not available "
5528 "to set broadcast mode, using ALLRINGS "
5529 "on device %s:\n", CARD_BUS_ID(card));
5530 if (card->options.macaddr_mode == QETH_TR_MACADDR_CANONICAL)
5531 PRINT_WARN("set adapter parameters not available "
5532 "to set macaddr mode, using NONCANONICAL "
5533 "on device %s:\n", CARD_BUS_ID(card));
5538 qeth_setadapter_parms(struct qeth_card *card)
5542 QETH_DBF_TEXT(setup, 2, "setadprm");
5544 if (!qeth_is_supported(card, IPA_SETADAPTERPARMS)){
5545 PRINT_WARN("set adapter parameters not supported "
5548 QETH_DBF_TEXT(setup, 2, " notsupp");
5551 rc = qeth_query_setadapterparms(card);
5553 PRINT_WARN("couldn't set adapter parameters on device %s: "
5554 "x%x\n", CARD_BUS_ID(card), rc);
5557 if (qeth_adp_supported(card,IPA_SETADP_ALTER_MAC_ADDRESS)) {
5558 rc = qeth_setadpparms_change_macaddr(card);
5560 PRINT_WARN("couldn't get MAC address on "
5562 CARD_BUS_ID(card), rc);
5565 if ((card->info.link_type == QETH_LINK_TYPE_HSTR) ||
5566 (card->info.link_type == QETH_LINK_TYPE_LANE_TR))
5567 rc = qeth_setadapter_hstr(card);
/*
 * Send a STARTLAN or STOPLAN IPA command for the given protocol.
 */
5574 qeth_send_startstoplan(struct qeth_card *card, enum qeth_ipa_cmds ipacmd,
5575 enum qeth_prot_versions prot)
5578 struct qeth_cmd_buffer *iob;
5580 iob = qeth_get_ipacmd_buffer(card,ipacmd,prot);
5581 rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
/*
 * Issue STARTLAN for @prot (IPv4 or IPv6).
 */
5587 qeth_send_startlan(struct qeth_card *card, enum qeth_prot_versions prot)
5591 QETH_DBF_TEXT_(setup, 2, "strtlan%i", prot);
5593 rc = qeth_send_startstoplan(card, IPA_CMD_STARTLAN, prot);
/*
 * Issue STOPLAN (IPv4). Kept despite the TODO below; called from the
 * soft-stop path in qeth_stop_card.
 */
5598 qeth_send_stoplan(struct qeth_card *card)
5603 * TODO: according to the IPA format document page 14,
5604 * TCP/IP (we!) never issue a STOPLAN
5607 QETH_DBF_TEXT(trace, 2, "stoplan");
5609 rc = qeth_send_startstoplan(card, IPA_CMD_STOPLAN, QETH_PROT_IPV4);
/*
 * Reply callback for QIPASSIST: record supported/enabled IPA function
 * masks per protocol version (IPv6 branch only with CONFIG_QETH_IPV6).
 */
5614 qeth_query_ipassists_cb(struct qeth_card *card, struct qeth_reply *reply,
5617 struct qeth_ipa_cmd *cmd;
5619 QETH_DBF_TEXT(setup, 2, "qipasscb");
5621 cmd = (struct qeth_ipa_cmd *) data;
5622 if (cmd->hdr.prot_version == QETH_PROT_IPV4) {
5623 card->options.ipa4.supported_funcs = cmd->hdr.ipa_supported;
5624 card->options.ipa4.enabled_funcs = cmd->hdr.ipa_enabled;
5626 #ifdef CONFIG_QETH_IPV6
5627 card->options.ipa6.supported_funcs = cmd->hdr.ipa_supported;
5628 card->options.ipa6.enabled_funcs = cmd->hdr.ipa_enabled;
/*
 * Query which IPA assists the card supports for @prot; results are
 * stored by qeth_query_ipassists_cb.
 */
5635 qeth_query_ipassists(struct qeth_card *card, enum qeth_prot_versions prot)
5638 struct qeth_cmd_buffer *iob;
5640 QETH_DBF_TEXT_(setup, 2, "qipassi%i", prot);
5642 iob = qeth_get_ipacmd_buffer(card,IPA_CMD_QIPASSIST,prot);
5643 rc = qeth_send_ipa_cmd(card, iob, qeth_query_ipassists_cb, NULL);
/*
 * Build a SETASSPARMS command buffer for assist @ipa_func with the
 * given sub-command code; the setassparms header length is the 8-byte
 * header itself plus @len payload bytes.
 */
5647 static struct qeth_cmd_buffer *
5648 qeth_get_setassparms_cmd(struct qeth_card *card, enum qeth_ipa_funcs ipa_func,
5649 __u16 cmd_code, __u16 len,
5650 enum qeth_prot_versions prot)
5652 struct qeth_cmd_buffer *iob;
5653 struct qeth_ipa_cmd *cmd;
5655 QETH_DBF_TEXT(trace,4,"getasscm");
5656 iob = qeth_get_ipacmd_buffer(card,IPA_CMD_SETASSPARMS,prot);
5658 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
5659 cmd->data.setassparms.hdr.assist_no = ipa_func;
5660 cmd->data.setassparms.hdr.length = 8 + len;
5661 cmd->data.setassparms.hdr.command_code = cmd_code;
5662 cmd->data.setassparms.hdr.return_code = 0;
5663 cmd->data.setassparms.hdr.seq_no = 0;
/*
 * Fill the SETASSPARMS payload and send it. @data is interpreted by
 * size: up to 32 bits it is stored as a flags word, otherwise it is
 * treated as a pointer to @len bytes to copy into the command.
 */
5669 qeth_send_setassparms(struct qeth_card *card, struct qeth_cmd_buffer *iob,
5670 __u16 len, long data,
5672 (struct qeth_card *,struct qeth_reply *,unsigned long),
5676 struct qeth_ipa_cmd *cmd;
5678 QETH_DBF_TEXT(trace,4,"sendassp");
5680 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
5681 if (len <= sizeof(__u32))
5682 cmd->data.setassparms.data.flags_32bit = (__u32) data;
5683 else if (len > sizeof(__u32))
5684 memcpy(&cmd->data.setassparms.data, (void *) data, len);
5686 rc = qeth_send_ipa_cmd(card, iob, reply_cb, reply_param);
#ifdef CONFIG_QETH_IPV6
/*
 * Convenience wrapper: send a payload-less SETASSPARMS sub-command for
 * @ipa_func using the IPv6 protocol version.
 */
5692 qeth_send_simple_setassparms_ipv6(struct qeth_card *card,
5693 enum qeth_ipa_funcs ipa_func, __u16 cmd_code)
5697 struct qeth_cmd_buffer *iob;
5699 QETH_DBF_TEXT(trace,4,"simassp6");
5700 iob = qeth_get_setassparms_cmd(card, ipa_func, cmd_code,
5702 rc = qeth_send_setassparms(card, iob, 0, 0,
5703 qeth_default_setassparms_cb, NULL);
/*
 * Convenience wrapper: send a SETASSPARMS sub-command for @ipa_func
 * (IPv4) with a single 32-bit @data payload.
 */
5709 qeth_send_simple_setassparms(struct qeth_card *card,
5710 enum qeth_ipa_funcs ipa_func,
5711 __u16 cmd_code, long data)
5715 struct qeth_cmd_buffer *iob;
5717 QETH_DBF_TEXT(trace,4,"simassp4");
5719 length = sizeof(__u32);
5720 iob = qeth_get_setassparms_cmd(card, ipa_func, cmd_code,
5721 length, QETH_PROT_IPV4);
5722 rc = qeth_send_setassparms(card, iob, length, data,
5723 qeth_default_setassparms_cb, NULL);
/*
 * Start the ARP-processing IPA assist if the card supports it; warns
 * (but does not fail hard) when unsupported or when start fails.
 */
5728 qeth_start_ipa_arp_processing(struct qeth_card *card)
5732 QETH_DBF_TEXT(trace,3,"ipaarp");
5734 if (!qeth_is_supported(card,IPA_ARP_PROCESSING)) {
5735 PRINT_WARN("ARP processing not supported "
5736 "on %s!\n", card->info.if_name);
5739 rc = qeth_send_simple_setassparms(card,IPA_ARP_PROCESSING,
5740 IPA_CMD_ASS_START, 0);
5742 PRINT_WARN("Could not start ARP processing "
5743 "assist on %s: 0x%x\n",
5744 card->info.if_name, rc);
/*
 * Start the hardware IP-fragmentation assist if supported; informs on
 * success, warns on failure.
 */
5750 qeth_start_ipa_ip_fragmentation(struct qeth_card *card)
5754 QETH_DBF_TEXT(trace,3,"ipaipfrg");
5756 if (!qeth_is_supported(card, IPA_IP_FRAGMENTATION)) {
5757 PRINT_INFO("Hardware IP fragmentation not supported on %s\n",
5758 card->info.if_name);
5762 rc = qeth_send_simple_setassparms(card, IPA_IP_FRAGMENTATION,
5763 IPA_CMD_ASS_START, 0);
5765 PRINT_WARN("Could not start Hardware IP fragmentation "
5766 "assist on %s: 0x%x\n",
5767 card->info.if_name, rc);
5769 PRINT_INFO("Hardware IP fragmentation enabled \n");
/*
 * Start the inbound source-MAC assist, but only when the fake_ll
 * option is enabled; skipped silently otherwise.
 */
5774 qeth_start_ipa_source_mac(struct qeth_card *card)
5778 QETH_DBF_TEXT(trace,3,"stsrcmac");
/* Source-MAC passing is only needed for the fake link-layer option. */
5780 if (!card->options.fake_ll)
5783 if (!qeth_is_supported(card, IPA_SOURCE_MAC)) {
5784 PRINT_INFO("Inbound source address not "
5785 "supported on %s\n", card->info.if_name);
5789 rc = qeth_send_simple_setassparms(card, IPA_SOURCE_MAC,
5790 IPA_CMD_ASS_START, 0);
5792 PRINT_WARN("Could not start inbound source "
5793 "assist on %s: 0x%x\n",
5794 card->info.if_name, rc);
/*
 * Start VLAN support (compiled only with CONFIG_QETH_VLAN): requires
 * the full-VLAN assist, starts the VLAN-priority assist, and on
 * success advertises HW VLAN tx/rx features on the netdev.
 */
5799 qeth_start_ipa_vlan(struct qeth_card *card)
5803 QETH_DBF_TEXT(trace,3,"strtvlan");
5805 #ifdef CONFIG_QETH_VLAN
5806 if (!qeth_is_supported(card, IPA_FULL_VLAN)) {
5807 PRINT_WARN("VLAN not supported on %s\n", card->info.if_name);
5811 rc = qeth_send_simple_setassparms(card, IPA_VLAN_PRIO,
5812 IPA_CMD_ASS_START,0);
5814 PRINT_WARN("Could not start vlan "
5815 "assist on %s: 0x%x\n",
5816 card->info.if_name, rc);
5818 PRINT_INFO("VLAN enabled \n");
5819 card->dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
5821 #endif /* QETH_VLAN */
/*
 * Start the multicasting assist and, on success, set IFF_MULTICAST on
 * the netdev.
 */
5826 qeth_start_ipa_multicast(struct qeth_card *card)
5830 QETH_DBF_TEXT(trace,3,"stmcast");
5832 if (!qeth_is_supported(card, IPA_MULTICASTING)) {
5833 PRINT_WARN("Multicast not supported on %s\n",
5834 card->info.if_name);
5838 rc = qeth_send_simple_setassparms(card, IPA_MULTICASTING,
5839 IPA_CMD_ASS_START,0);
5841 PRINT_WARN("Could not start multicast "
5842 "assist on %s: rc=%i\n",
5843 card->info.if_name, rc);
5845 PRINT_INFO("Multicast enabled\n");
5846 card->dev->flags |= IFF_MULTICAST;
#ifdef CONFIG_QETH_IPV6
/*
 * IPv6 soft setup: queue is stopped around the IPv6 STARTLAN, then
 * the IPv6 assists are queried and started (v4-side start with
 * parameter 3, v6-side start, and passthrough enable).
 */
5853 qeth_softsetup_ipv6(struct qeth_card *card)
5857 QETH_DBF_TEXT(trace,3,"softipv6");
5859 netif_stop_queue(card->dev);
5860 rc = qeth_send_startlan(card, QETH_PROT_IPV6);
5862 PRINT_ERR("IPv6 startlan failed on %s\n",
5863 card->info.if_name);
5866 netif_wake_queue(card->dev);
5867 rc = qeth_query_ipassists(card,QETH_PROT_IPV6);
5869 PRINT_ERR("IPv6 query ipassist failed on %s\n",
5870 card->info.if_name);
/* Start the IPv6 assist from the IPv4 side; payload 3 per IPA spec. */
5873 rc = qeth_send_simple_setassparms(card, IPA_IPV6,
5874 IPA_CMD_ASS_START, 3);
5876 PRINT_WARN("IPv6 start assist (version 4) failed "
5878 card->info.if_name, rc);
5881 rc = qeth_send_simple_setassparms_ipv6(card, IPA_IPV6,
5884 PRINT_WARN("IPV6 start assist (version 6) failed "
5886 card->info.if_name, rc);
5889 rc = qeth_send_simple_setassparms_ipv6(card, IPA_PASSTHRU,
5892 PRINT_WARN("Could not enable passthrough "
5894 card->info.if_name, rc);
5897 PRINT_INFO("IPV6 enabled \n");
/*
 * Start IPv6 support: no-op unless CONFIG_QETH_IPV6 and the card
 * supports the IPv6 assist; delegates to qeth_softsetup_ipv6.
 */
5904 qeth_start_ipa_ipv6(struct qeth_card *card)
5907 #ifdef CONFIG_QETH_IPV6
5908 QETH_DBF_TEXT(trace,3,"strtipv6");
5910 if (!qeth_is_supported(card, IPA_IPV6)) {
5911 PRINT_WARN("IPv6 not supported on %s\n",
5912 card->info.if_name);
5915 rc = qeth_softsetup_ipv6(card);
/*
 * Enable broadcast filtering: start the filtering assist, configure
 * it (mode 1), then enable echo filtering. broadcast_capable is
 * upgraded step by step (WITH_ECHO, then WITHOUT_ECHO on full
 * success) and IFF_BROADCAST is set/cleared accordingly.
 */
5921 qeth_start_ipa_broadcast(struct qeth_card *card)
5925 QETH_DBF_TEXT(trace,3,"stbrdcst");
5926 card->info.broadcast_capable = 0;
5927 if (!qeth_is_supported(card, IPA_FILTERING)) {
5928 PRINT_WARN("Broadcast not supported on %s\n",
5929 card->info.if_name);
5933 rc = qeth_send_simple_setassparms(card, IPA_FILTERING,
5934 IPA_CMD_ASS_START, 0);
5936 PRINT_WARN("Could not enable broadcasting filtering "
5938 card->info.if_name, rc);
5942 rc = qeth_send_simple_setassparms(card, IPA_FILTERING,
5943 IPA_CMD_ASS_CONFIGURE, 1);
5945 PRINT_WARN("Could not set up broadcast filtering on %s: 0x%x\n",
5946 card->info.if_name, rc);
/* Filtering configured: at least echo-style broadcast works. */
5949 card->info.broadcast_capable = QETH_BROADCAST_WITH_ECHO;
5950 PRINT_INFO("Broadcast enabled \n");
5951 rc = qeth_send_simple_setassparms(card, IPA_FILTERING,
5952 IPA_CMD_ASS_ENABLE, 1);
5954 PRINT_WARN("Could not set up broadcast echo filtering on "
5955 "%s: 0x%x\n", card->info.if_name, rc);
/* Echo filtering active: broadcast without echo is possible. */
5958 card->info.broadcast_capable = QETH_BROADCAST_WITHOUT_ECHO;
5960 if (card->info.broadcast_capable)
5961 card->dev->flags |= IFF_BROADCAST;
5963 card->dev->flags &= ~IFF_BROADCAST;
/*
 * Two-step inbound HW checksum activation: START the assist, then
 * ENABLE it with the csum_mask captured by the setassparms callback.
 * On any failure the driver falls back to SW checksumming (warned).
 */
5968 qeth_send_checksum_command(struct qeth_card *card)
5972 rc = qeth_send_simple_setassparms(card, IPA_INBOUND_CHECKSUM,
5973 IPA_CMD_ASS_START, 0);
5975 PRINT_WARN("Starting Inbound HW Checksumming failed on %s: "
5976 "0x%x,\ncontinuing using Inbound SW Checksumming\n",
5977 card->info.if_name, rc);
5980 rc = qeth_send_simple_setassparms(card, IPA_INBOUND_CHECKSUM,
5982 card->info.csum_mask);
5984 PRINT_WARN("Enabling Inbound HW Checksumming failed on %s: "
5985 "0x%x,\ncontinuing using Inbound SW Checksumming\n",
5986 card->info.if_name, rc);
/*
 * Apply the configured checksum_type: NO/SW checksumming just logs;
 * HW checksumming requires the inbound-checksum assist (falls back to
 * SW if unsupported), otherwise the assist is started and enabled.
 */
5993 qeth_start_ipa_checksum(struct qeth_card *card)
5997 QETH_DBF_TEXT(trace,3,"strtcsum");
5999 if (card->options.checksum_type == NO_CHECKSUMMING) {
6000 PRINT_WARN("Using no checksumming on %s.\n",
6001 card->info.if_name);
6004 if (card->options.checksum_type == SW_CHECKSUMMING) {
6005 PRINT_WARN("Using SW checksumming on %s.\n",
6006 card->info.if_name);
6009 if (!qeth_is_supported(card, IPA_INBOUND_CHECKSUM)) {
6010 PRINT_WARN("Inbound HW Checksumming not "
6011 "supported on %s,\ncontinuing "
6012 "using Inbound SW Checksumming\n",
6013 card->info.if_name);
6014 card->options.checksum_type = SW_CHECKSUMMING;
6017 rc = qeth_send_checksum_command(card);
6019 PRINT_INFO("HW Checksumming (inbound) enabled \n");
/*
 * Compose a one-line summary of the enabled IPA assist options.
 * NOTE(review): only "ARP " carries a trailing separator; "IP_FRAG",
 * "SRC_MAC", "VLAN" and "VLAN_PRIO" are appended without spaces, so
 * multiple enabled options run together in the output.
 */
6026 qeth_print_ipassist_status(struct qeth_card *card)
6031 offset += sprintf(buf, "IPAssist options of %s: ", card->info.if_name);
6032 if (qeth_is_enabled(card, IPA_ARP_PROCESSING))
6033 offset += sprintf(buf+offset, "ARP ");
6034 if (qeth_is_enabled(card, IPA_IP_FRAGMENTATION))
6035 offset += sprintf(buf+offset, "IP_FRAG");
6036 if (qeth_is_enabled(card, IPA_SOURCE_MAC))
6037 offset += sprintf(buf+offset, "SRC_MAC");
6038 if (qeth_is_enabled(card, IPA_FULL_VLAN))
6039 offset += sprintf(buf+offset, "VLAN");
6040 if (qeth_is_enabled(card, IPA_VLAN_PRIO))
6041 offset += sprintf(buf+offset, "VLAN_PRIO");
/*
 * Start all IPA assists in sequence; each step is best-effort
 * ("go on" — failures are logged by the individual starters and do
 * not abort the sequence).
 */
6046 qeth_start_ipassists(struct qeth_card *card)
6048 QETH_DBF_TEXT(trace,3,"strtipas");
6049 qeth_start_ipa_arp_processing(card); /* go on*/
6050 qeth_start_ipa_ip_fragmentation(card); /* go on*/
6051 qeth_start_ipa_source_mac(card); /* go on*/
6052 qeth_start_ipa_vlan(card); /* go on*/
6053 qeth_start_ipa_multicast(card); /* go on*/
6054 qeth_start_ipa_ipv6(card); /* go on*/
6055 qeth_start_ipa_broadcast(card); /* go on*/
6056 qeth_start_ipa_checksum(card); /* go on*/
/*
 * Send an IPA SETRTG command setting the routing @type for @prot.
 */
6061 qeth_send_setrouting(struct qeth_card *card, enum qeth_routing_types type,
6062 enum qeth_prot_versions prot)
6065 struct qeth_ipa_cmd *cmd;
6066 struct qeth_cmd_buffer *iob;
6068 QETH_DBF_TEXT(trace,4,"setroutg");
6069 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETRTG, prot);
6070 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
6071 cmd->data.setrtg.type = (type);
6072 rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
/*
 * Validate the requested routing type against card type and supported
 * IPA functions; unsupported combinations fall through to the warning
 * below and (in elided code) the type is downgraded to 'no router'.
 */
6079 qeth_correct_routing_type(struct qeth_card *card, enum qeth_routing_types *type,
6080 enum qeth_prot_versions prot)
6082 if (card->info.type == QETH_CARD_TYPE_IQD) {
/* HiperSockets (IQD): connector/multicast-router types listed here. */
6085 case PRIMARY_CONNECTOR:
6086 case SECONDARY_CONNECTOR:
6087 case MULTICAST_ROUTER:
/* OSA path: router types, multicast router needs IPA support check. */
6095 case PRIMARY_ROUTER:
6096 case SECONDARY_ROUTER:
6098 case MULTICAST_ROUTER:
6099 if (qeth_is_ipafunc_supported(card, prot,
6107 PRINT_WARN("Routing type '%s' not supported for interface %s.\n"
6108 "Router status set to 'no router'.\n",
6109 ((*type == PRIMARY_ROUTER)? "primary router" :
6110 (*type == SECONDARY_ROUTER)? "secondary router" :
6111 (*type == PRIMARY_CONNECTOR)? "primary connector" :
6112 (*type == SECONDARY_CONNECTOR)? "secondary connector" :
6113 (*type == MULTICAST_ROUTER)? "multicast router" :
/*
 * Apply the configured IPv4 routing type: validate it first, then send
 * SETRTG; on failure reset to NO_ROUTER and warn.
 */
6120 qeth_setrouting_v4(struct qeth_card *card)
6124 QETH_DBF_TEXT(trace,3,"setrtg4");
6126 qeth_correct_routing_type(card, &card->options.route4.type,
6129 rc = qeth_send_setrouting(card, card->options.route4.type,
6132 card->options.route4.type = NO_ROUTER;
6133 PRINT_WARN("Error (0x%04x) while setting routing type on %s. "
6134 "Type set to 'no router'.\n",
6135 rc, card->info.if_name);
/*
 * Apply the configured IPv6 routing type (CONFIG_QETH_IPV6 only).
 * Skips sending SETRTG when the type is NO_ROUTER or when an OSAE
 * card requests multicast routing without IPA_OSA_MC_ROUTER support;
 * on send failure resets to NO_ROUTER and warns.
 */
6141 qeth_setrouting_v6(struct qeth_card *card)
6145 QETH_DBF_TEXT(trace,3,"setrtg6");
6146 #ifdef CONFIG_QETH_IPV6
6148 qeth_correct_routing_type(card, &card->options.route6.type,
6151 if ((card->options.route6.type == NO_ROUTER) ||
6152 ((card->info.type == QETH_CARD_TYPE_OSAE) &&
6153 (card->options.route6.type == MULTICAST_ROUTER) &&
6154 !qeth_is_supported6(card,IPA_OSA_MC_ROUTER)))
6156 rc = qeth_send_setrouting(card, card->options.route6.type,
6159 card->options.route6.type = NO_ROUTER;
6160 PRINT_WARN("Error (0x%04x) while setting routing type on %s. "
6161 "Type set to 'no router'.\n",
6162 rc, card->info.if_name);
6169 * softsetup card: init IPA stuff
/*
 * Soft setup: STARTLAN (IPv4), then adapter parms, IPA assists, and
 * v4/v6 routing; each step logs its error code to the setup debug
 * area but setup continues. lan_online tracks STARTLAN success.
 * NOTE(review): "if offline" in the warning text looks like a typo
 * for "is offline" — runtime string left unchanged here.
 */
6172 qeth_softsetup_card(struct qeth_card *card)
6176 QETH_DBF_TEXT(setup, 2, "softsetp");
6178 if ((rc = qeth_send_startlan(card, QETH_PROT_IPV4))){
6179 QETH_DBF_TEXT_(setup, 2, "1err%d", rc);
6181 PRINT_WARN("LAN on card %s if offline! "
6182 "Continuing softsetup.\n",
6184 card->lan_online = 0;
6188 card->lan_online = 1;
6189 if ((rc = qeth_setadapter_parms(card)))
6190 QETH_DBF_TEXT_(setup, 2, "2err%d", rc);
6191 if ((rc = qeth_start_ipassists(card)))
6192 QETH_DBF_TEXT_(setup, 2, "3err%d", rc);
6193 if ((rc = qeth_setrouting_v4(card)))
6194 QETH_DBF_TEXT_(setup, 2, "4err%d", rc);
6195 if ((rc = qeth_setrouting_v6(card)))
6196 QETH_DBF_TEXT_(setup, 2, "5err%d", rc);
6197 netif_stop_queue(card->dev);
#ifdef CONFIG_QETH_IPV6
/*
 * Reply callback for CREATE_ADDR: on success store the 16-bit unique
 * id returned at offset 6 of the id buffer; on failure fall back to a
 * default id flagged NOT_BY_CARD and warn about possible IPv6
 * autoconfig address collisions across LPARs.
 */
6203 qeth_get_unique_id_cb(struct qeth_card *card, struct qeth_reply *reply,
6206 struct qeth_ipa_cmd *cmd;
6208 cmd = (struct qeth_ipa_cmd *) data;
6209 if (cmd->hdr.return_code == 0)
6210 card->info.unique_id = *((__u16 *)
6211 &cmd->data.create_destroy_addr.unique_id[6]);
6213 card->info.unique_id = UNIQUE_ID_IF_CREATE_ADDR_FAILED |
6214 UNIQUE_ID_NOT_BY_CARD;
6215 PRINT_WARN("couldn't get a unique id from the card on device "
6216 "%s (result=x%x), using default id. ipv6 "
6217 "autoconfig on other lpars may lead to duplicate "
6218 "ip addresses. please use manually "
6219 "configured ones.\n",
6220 CARD_BUS_ID(card), cmd->hdr.return_code);
/*
 * Return a card-assigned unique id via DESTROY_ADDR (no-op when the
 * id was not assigned by the card). The id and the MAC address are
 * packed into the create_destroy_addr buffer; afterwards the cached
 * id is reset to the default NOT_BY_CARD value.
 */
6227 qeth_put_unique_id(struct qeth_card *card)
6231 #ifdef CONFIG_QETH_IPV6
6232 struct qeth_cmd_buffer *iob;
6233 struct qeth_ipa_cmd *cmd;
6235 QETH_DBF_TEXT(trace,2,"puniqeid");
/* Nothing to give back if the card never assigned us an id. */
6237 if ((card->info.unique_id & UNIQUE_ID_NOT_BY_CARD) ==
6238 UNIQUE_ID_NOT_BY_CARD)
6240 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_DESTROY_ADDR,
6242 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
6243 *((__u16 *) &cmd->data.create_destroy_addr.unique_id[6]) =
6244 card->info.unique_id;
6245 memcpy(&cmd->data.create_destroy_addr.unique_id[0],
6246 card->dev->dev_addr, OSA_ADDR_LEN);
6247 rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
6249 card->info.unique_id = UNIQUE_ID_IF_CREATE_ADDR_FAILED |
6250 UNIQUE_ID_NOT_BY_CARD;
/*
 * Empty the card's IP address lists under ip_lock. The todo list is
 * simply drained; each registered address is deregistered with the
 * lock dropped around the hardware call, then either freed or (in
 * recover mode, for non-multicast entries) re-queued on the todo list.
 */
6259 qeth_clear_ip_list(struct qeth_card *card, int clean, int recover)
6261 struct qeth_ipaddr *addr, *tmp;
6262 unsigned long flags;
6264 QETH_DBF_TEXT(trace,4,"clearip");
6265 spin_lock_irqsave(&card->ip_lock, flags);
6266 /* clear todo list */
6267 list_for_each_entry_safe(addr, tmp, card->ip_tbd_list, entry){
6268 list_del(&addr->entry);
6272 while (!list_empty(&card->ip_list)) {
6273 addr = list_entry(card->ip_list.next,
6274 struct qeth_ipaddr, entry);
6275 list_del_init(&addr->entry);
/* Drop the lock for the (sleeping) hardware deregistration. */
6277 spin_unlock_irqrestore(&card->ip_lock, flags);
6278 qeth_deregister_addr_entry(card, addr);
6279 spin_lock_irqsave(&card->ip_lock, flags);
6281 if (!recover || addr->is_multicast) {
/* Recover mode: keep unicast addresses for re-registration. */
6285 list_add_tail(&addr->entry, card->ip_tbd_list);
6287 spin_unlock_irqrestore(&card->ip_lock, flags);
/*
 * Replace the mask of threads allowed to run; optionally also strip
 * disallowed bits from the start mask. Wakes waiters on wait_q so
 * they can re-evaluate the thread masks.
 */
6291 qeth_set_allowed_threads(struct qeth_card *card, unsigned long threads,
6292 int clear_start_mask)
6294 unsigned long flags;
6296 spin_lock_irqsave(&card->thread_mask_lock, flags);
6297 card->thread_allowed_mask = threads;
6298 if (clear_start_mask)
6299 card->thread_start_mask &= threads;
6300 spin_unlock_irqrestore(&card->thread_mask_lock, flags);
6301 wake_up(&card->wait_q);
/*
 * Return the subset of @threads currently running (non-zero means at
 * least one is running); reads the mask under thread_mask_lock.
 */
6305 qeth_threads_running(struct qeth_card *card, unsigned long threads)
6307 unsigned long flags;
6310 spin_lock_irqsave(&card->thread_mask_lock, flags);
6311 rc = (card->thread_running_mask & threads);
6312 spin_unlock_irqrestore(&card->thread_mask_lock, flags);
/*
 * Sleep (interruptibly) until none of @threads is running; returns
 * -ERESTARTSYS if interrupted by a signal.
 */
6317 qeth_wait_for_threads(struct qeth_card *card, unsigned long threads)
6319 return wait_event_interruptible(card->wait_q,
6320 qeth_threads_running(card, threads) == 0);
/*
 * Walk the card back down through its states: UP -> SOFTSETUP ->
 * HARDSETUP -> DOWN. Only the recover thread stays allowed; waits for
 * all other threads first (-ERESTARTSYS on signal). In soft-stop mode
 * (use_hard_stop == 0) STOPLAN is sent and the unique id is returned;
 * hard stop skips both. Clears IP/command lists, QDIO buffers and the
 * working pool on the way down, and resets use_hard_stop at the end.
 */
6324 qeth_stop_card(struct qeth_card *card)
6326 int recover_flag = 0;
6329 QETH_DBF_TEXT(setup ,2,"stopcard");
6330 QETH_DBF_HEX(setup, 2, &card, sizeof(void *));
/* Block all thread starts except recovery. */
6332 qeth_set_allowed_threads(card, 0, 1);
6333 if (qeth_wait_for_threads(card, ~QETH_RECOVER_THREAD))
6334 return -ERESTARTSYS;
6335 if (card->read.state == CH_STATE_UP &&
6336 card->write.state == CH_STATE_UP &&
6337 (card->state == CARD_STATE_UP)) {
6340 dev_close(card->dev);
6342 if (!card->use_hard_stop)
6343 if ((rc = qeth_send_stoplan(card)))
6344 QETH_DBF_TEXT_(setup, 2, "1err%d", rc);
6345 card->state = CARD_STATE_SOFTSETUP;
6347 if (card->state == CARD_STATE_SOFTSETUP) {
6348 qeth_clear_ip_list(card, !card->use_hard_stop, recover_flag);
6349 qeth_clear_ipacmd_list(card);
6350 card->state = CARD_STATE_HARDSETUP;
6352 if (card->state == CARD_STATE_HARDSETUP) {
6353 if (!card->use_hard_stop)
6354 if ((rc = qeth_put_unique_id(card)))
6355 QETH_DBF_TEXT_(setup, 2, "2err%d", rc);
6356 qeth_qdio_clear_card(card, 0);
6357 qeth_clear_qdio_buffers(card);
6358 qeth_clear_working_pool_list(card);
6359 card->state = CARD_STATE_DOWN;
6361 if (card->state == CARD_STATE_DOWN) {
6362 qeth_clear_cmd_buffers(&card->read);
6363 qeth_clear_cmd_buffers(&card->write);
6365 card->use_hard_stop = 0;
/*
 * Request a unique id from the card via CREATE_ADDR (CONFIG_QETH_IPV6
 * only, and only when the IPv6 assist is supported — otherwise a
 * default NOT_BY_CARD id is used). The current id seeds the request;
 * the reply callback stores the card-assigned value. On send failure
 * the default id is restored.
 */
6371 qeth_get_unique_id(struct qeth_card *card)
6374 #ifdef CONFIG_QETH_IPV6
6375 struct qeth_cmd_buffer *iob;
6376 struct qeth_ipa_cmd *cmd;
6378 QETH_DBF_TEXT(setup, 2, "guniqeid");
6380 if (!qeth_is_supported(card,IPA_IPV6)) {
6381 card->info.unique_id = UNIQUE_ID_IF_CREATE_ADDR_FAILED |
6382 UNIQUE_ID_NOT_BY_CARD;
6386 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_CREATE_ADDR,
6388 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
6389 *((__u16 *) &cmd->data.create_destroy_addr.unique_id[6]) =
6390 card->info.unique_id;
6392 rc = qeth_send_ipa_cmd(card, iob, qeth_get_unique_id_cb, NULL);
6394 card->info.unique_id = UNIQUE_ID_IF_CREATE_ADDR_FAILED |
6395 UNIQUE_ID_NOT_BY_CARD;
/*
 * Print the device banner including the portname; the portname bytes
 * (stored in EBCDIC, skipping the length byte at index 0) are
 * converted to ASCII via the _ebcasc table first.
 */
6400 qeth_print_status_with_portname(struct qeth_card *card)
6405 sprintf(dbf_text, "%s", card->info.portname + 1);
6406 for (i = 0; i < 8; i++)
6408 (char) _ebcasc[(__u8) dbf_text[i]];
6410 printk("qeth: Device %s/%s/%s is a%s card%s%s%s\n"
6411 "with link type %s (portname: %s)\n",
6415 qeth_get_cardname(card),
6416 (card->info.mcl_level[0]) ? " (level: " : "",
6417 (card->info.mcl_level[0]) ? card->info.mcl_level : "",
6418 (card->info.mcl_level[0]) ? ")" : "",
6419 qeth_get_cardname_short(card),
/*
 * Print the device banner for cards without a required portname; the
 * message differs only in whether a portname was configured but not
 * needed by the interface.
 */
6425 qeth_print_status_no_portname(struct qeth_card *card)
6427 if (card->info.portname[0])
6428 printk("qeth: Device %s/%s/%s is a%s "
6429 "card%s%s%s\nwith link type %s "
6430 "(no portname needed by interface).\n",
6434 qeth_get_cardname(card),
6435 (card->info.mcl_level[0]) ? " (level: " : "",
6436 (card->info.mcl_level[0]) ? card->info.mcl_level : "",
6437 (card->info.mcl_level[0]) ? ")" : "",
6438 qeth_get_cardname_short(card));
6440 printk("qeth: Device %s/%s/%s is a%s "
6441 "card%s%s%s\nwith link type %s.\n",
6445 qeth_get_cardname(card),
6446 (card->info.mcl_level[0]) ? " (level: " : "",
6447 (card->info.mcl_level[0]) ? card->info.mcl_level : "",
6448 (card->info.mcl_level[0]) ? ")" : "",
6449 qeth_get_cardname_short(card));
/*
 * Normalize the microcode level string per card type, then print the
 * status banner with or without portname. OSAE: a zero first byte
 * means OSA-style level reporting — reformat bytes 2/3 as hex; IQD:
 * convert the four EBCDIC level bytes to ASCII; other types: blank
 * the level string.
 */
6453 qeth_print_status_message(struct qeth_card *card)
6455 switch (card->info.type) {
6456 case QETH_CARD_TYPE_OSAE:
6457 /* VM will use a non-zero first character
6458 * to indicate a HiperSockets like reporting
6459 * of the level OSA sets the first character to zero
6461 if (!card->info.mcl_level[0]) {
6462 sprintf(card->info.mcl_level,"%02x%02x",
6463 card->info.mcl_level[2],
6464 card->info.mcl_level[3]);
6466 card->info.mcl_level[QETH_MCL_LENGTH] = 0;
6470 case QETH_CARD_TYPE_IQD:
6471 card->info.mcl_level[0] = (char) _ebcasc[(__u8)
6472 card->info.mcl_level[0]];
6473 card->info.mcl_level[1] = (char) _ebcasc[(__u8)
6474 card->info.mcl_level[1]];
6475 card->info.mcl_level[2] = (char) _ebcasc[(__u8)
6476 card->info.mcl_level[2]];
6477 card->info.mcl_level[3] = (char) _ebcasc[(__u8)
6478 card->info.mcl_level[3]];
6479 card->info.mcl_level[QETH_MCL_LENGTH] = 0;
6482 memset(&card->info.mcl_level[0], 0, QETH_MCL_LENGTH + 1);
6484 if (card->info.portname_required)
6485 qeth_print_status_with_portname(card);
6487 qeth_print_status_no_portname(card);
/*
 * Register the card's net_device with the kernel network stack (once:
 * a device already past NETREG_UNINITIALIZED is skipped) and cache the
 * kernel-assigned interface name in card->info.if_name.
 * NOTE(review): the early-return body and the rc-check around
 * register_netdev() are missing from this extraction.
 */
6491 qeth_register_netdev(struct qeth_card *card)
6495 	QETH_DBF_TEXT(setup, 3, "regnetd");
6496 	if (card->dev->reg_state != NETREG_UNINITIALIZED)
/* Tie the netdev to the ccwgroup device for sysfs parenting. */
6499 	SET_NETDEV_DEV(card->dev, &card->gdev->dev);
6500 	rc = register_netdev(card->dev);
6502 		strcpy(card->info.if_name, card->dev->name);
/*
 * Re-open the interface after a recovery: bring the netdev up again and
 * re-program its multicast list, as if the admin had run "ifconfig up".
 */
6508 qeth_start_again(struct qeth_card *card)
6510 	QETH_DBF_TEXT(setup ,2, "startag");
6513 	dev_open(card->dev);
6515 	qeth_set_multicast_list(card->dev);
/*
 * ccwgroup set_online callback: bring a card fully online.
 * Sequence: block concurrent worker threads, set the three CCW
 * subchannels (read/write/data) online, hard-set up the card, query IP
 * assists, register the netdev, soft-set up, init QDIO queues, and --
 * when coming out of recovery -- reopen the interface.  On any failure
 * the error path (label lost to extraction) tears everything back down.
 * NOTE(review): decimated -- goto labels, "return rc" lines and several
 * closing braces are missing from view.
 */
6519 qeth_set_online(struct ccwgroup_device *gdev)
6521 	struct qeth_card *card = gdev->dev.driver_data;
6523 	enum qeth_card_states recover_flag;
6526 	QETH_DBF_TEXT(setup ,2, "setonlin");
6527 	QETH_DBF_HEX(setup, 2, &card, sizeof(void *));
/* Only the recovery thread may run while we reconfigure; wait for the
 * rest to drain, bailing out if the user interrupts the wait. */
6529 	qeth_set_allowed_threads(card, QETH_RECOVER_THREAD, 1);
6530 	if (qeth_wait_for_threads(card, ~QETH_RECOVER_THREAD)){
6531 		PRINT_WARN("set_online of card %s interrupted by user!\n",
6533 		return -ERESTARTSYS;
/* Remember whether this is a recovery so we can restore that state
 * (and auto-reopen the interface) afterwards. */
6536 	recover_flag = card->state;
6537 	if (ccw_device_set_online(CARD_RDEV(card)) ||
6538 	    ccw_device_set_online(CARD_WDEV(card)) ||
6539 	    ccw_device_set_online(CARD_DDEV(card))){
/* NOTE(review): rc does not appear to be assigned before this trace --
 * looks like it logs an uninitialized value; confirm in full source. */
6540 		QETH_DBF_TEXT_(setup, 2, "1err%d", rc);
6544 	if ((rc = qeth_hardsetup_card(card))){
6545 		QETH_DBF_TEXT_(setup, 2, "2err%d", rc);
6548 	card->state = CARD_STATE_HARDSETUP;
6550 	if ((rc = qeth_query_ipassists(card,QETH_PROT_IPV4))){
6551 		QETH_DBF_TEXT_(setup, 2, "3err%d", rc);
6554 	rc = qeth_get_unique_id(card);
6557 		QETH_DBF_TEXT_(setup, 2, "4err%d", rc);
6560 	qeth_print_status_message(card);
6561 	if ((rc = qeth_register_netdev(card))){
6562 		QETH_DBF_TEXT_(setup, 2, "5err%d", rc);
6565 	if ((rc = qeth_softsetup_card(card))){
6566 		QETH_DBF_TEXT_(setup, 2, "6err%d", rc);
6569 	card->state = CARD_STATE_SOFTSETUP;
6571 	if ((rc = qeth_init_qdio_queues(card))){
6572 		QETH_DBF_TEXT_(setup, 2, "7err%d", rc);
6575 	/*maybe it was set offline without ifconfig down
6576 	 * we can also use this state for recovery purposes*/
/* Success: re-enable all worker threads and notify waiters. */
6577 	qeth_set_allowed_threads(card, 0xffffffff, 0);
6578 	if (recover_flag == CARD_STATE_RECOVER)
6579 		qeth_start_again(card);
6580 	qeth_notify_processes();
/* Error path (label line missing): hard-stop the card and take the
 * subchannels offline again in reverse order. */
6583 	card->use_hard_stop = 1;
6584 	qeth_stop_card(card);
6585 	ccw_device_set_offline(CARD_DDEV(card));
6586 	ccw_device_set_offline(CARD_WDEV(card));
6587 	ccw_device_set_offline(CARD_RDEV(card));
6588 	if (recover_flag == CARD_STATE_RECOVER)
6589 		card->state = CARD_STATE_RECOVER;
6591 		card->state = CARD_STATE_DOWN;
/*
 * Device-ID table: CU type 0x1731 model 0x01 = OSA-Express (OSAE),
 * model 0x05 = HiperSockets (IQD).  The GCC-extension "field:" form of
 * designated initializer predates the standard ".field =" syntax.
 */
6595 static struct ccw_device_id qeth_ids[] = {
6596 	{CCW_DEVICE(0x1731, 0x01), driver_info:QETH_CARD_TYPE_OSAE},
6597 	{CCW_DEVICE(0x1731, 0x05), driver_info:QETH_CARD_TYPE_IQD},
6600 MODULE_DEVICE_TABLE(ccw, qeth_ids);
/* Root sysfs device under which the qeth bus hierarchy is registered. */
6602 struct device *qeth_root_dev = NULL;
/* ccwgroup driver: bundles the three subchannels of one card; the
 * driver_id is "QETH" in EBCDIC. */
6604 struct ccwgroup_driver qeth_ccwgroup_driver = {
6605 	.owner = THIS_MODULE,
6607 	.driver_id = 0xD8C5E3C8,
6608 	.probe = qeth_probe_device,
6609 	.remove = qeth_remove_device,
6610 	.set_online = qeth_set_online,
6611 	.set_offline = qeth_set_offline,
/* Plain ccw driver delegating probe/remove to the ccwgroup layer. */
6614 struct ccw_driver qeth_ccw_driver = {
6617 	.probe = ccwgroup_probe_ccwdev,
6618 	.remove = ccwgroup_remove_ccwdev,
/*
 * Tear down all s390 debug-facility areas.  Each unregister is (in the
 * full source) guarded by a NULL check -- only the qeth_dbf_control
 * guard is visible here -- so this is safe to call from the partial
 * error path of qeth_register_dbf_views().
 */
6623 qeth_unregister_dbf_views(void)
6626 	debug_unregister(qeth_dbf_setup);
6628 	debug_unregister(qeth_dbf_qerr);
6630 	debug_unregister(qeth_dbf_sense);
6632 	debug_unregister(qeth_dbf_misc);
6634 	debug_unregister(qeth_dbf_data);
6635 	if (qeth_dbf_control)
6636 		debug_unregister(qeth_dbf_control);
6638 	debug_unregister(qeth_dbf_trace);
/*
 * Allocate the seven s390 debug-facility areas (setup, misc, data,
 * control, sense, qerr, trace).  If any allocation fails, everything is
 * unwound via qeth_unregister_dbf_views(); otherwise a hex/ascii view
 * and an initial debug level are attached to each area.
 * Returns 0 on success (return lines missing from this extraction).
 */
6641 qeth_register_dbf_views(void)
6643 	qeth_dbf_setup = debug_register(QETH_DBF_SETUP_NAME,
6644 					QETH_DBF_SETUP_INDEX,
6645 					QETH_DBF_SETUP_NR_AREAS,
6646 					QETH_DBF_SETUP_LEN);
6647 	qeth_dbf_misc = debug_register(QETH_DBF_MISC_NAME,
6648 				       QETH_DBF_MISC_INDEX,
6649 				       QETH_DBF_MISC_NR_AREAS,
6651 	qeth_dbf_data = debug_register(QETH_DBF_DATA_NAME,
6652 				       QETH_DBF_DATA_INDEX,
6653 				       QETH_DBF_DATA_NR_AREAS,
6655 	qeth_dbf_control = debug_register(QETH_DBF_CONTROL_NAME,
6656 					  QETH_DBF_CONTROL_INDEX,
6657 					  QETH_DBF_CONTROL_NR_AREAS,
6658 					  QETH_DBF_CONTROL_LEN);
6659 	qeth_dbf_sense = debug_register(QETH_DBF_SENSE_NAME,
6660 					QETH_DBF_SENSE_INDEX,
6661 					QETH_DBF_SENSE_NR_AREAS,
6662 					QETH_DBF_SENSE_LEN);
6663 	qeth_dbf_qerr = debug_register(QETH_DBF_QERR_NAME,
6664 				       QETH_DBF_QERR_INDEX,
6665 				       QETH_DBF_QERR_NR_AREAS,
6667 	qeth_dbf_trace = debug_register(QETH_DBF_TRACE_NAME,
6668 					QETH_DBF_TRACE_INDEX,
6669 					QETH_DBF_TRACE_NR_AREAS,
6670 					QETH_DBF_TRACE_LEN);
/* All-or-nothing: a single failed debug_register aborts registration. */
6672 	if ((qeth_dbf_setup == NULL) || (qeth_dbf_misc == NULL) ||
6673 	    (qeth_dbf_data == NULL) || (qeth_dbf_control == NULL) ||
6674 	    (qeth_dbf_sense == NULL) || (qeth_dbf_qerr == NULL) ||
6675 	    (qeth_dbf_trace == NULL)) {
6676 		qeth_unregister_dbf_views();
/* Attach a hex/ascii view and default level to each area. */
6679 	debug_register_view(qeth_dbf_setup, &debug_hex_ascii_view);
6680 	debug_set_level(qeth_dbf_setup, QETH_DBF_SETUP_LEVEL);
6682 	debug_register_view(qeth_dbf_misc, &debug_hex_ascii_view);
6683 	debug_set_level(qeth_dbf_misc, QETH_DBF_MISC_LEVEL);
6685 	debug_register_view(qeth_dbf_data, &debug_hex_ascii_view);
6686 	debug_set_level(qeth_dbf_data, QETH_DBF_DATA_LEVEL);
6688 	debug_register_view(qeth_dbf_control, &debug_hex_ascii_view);
6689 	debug_set_level(qeth_dbf_control, QETH_DBF_CONTROL_LEVEL);
6691 	debug_register_view(qeth_dbf_sense, &debug_hex_ascii_view);
6692 	debug_set_level(qeth_dbf_sense, QETH_DBF_SENSE_LEVEL);
6694 	debug_register_view(qeth_dbf_qerr, &debug_hex_ascii_view);
6695 	debug_set_level(qeth_dbf_qerr, QETH_DBF_QERR_LEVEL);
6697 	debug_register_view(qeth_dbf_trace, &debug_hex_ascii_view);
6698 	debug_set_level(qeth_dbf_trace, QETH_DBF_TRACE_LEVEL);
6703 #ifdef CONFIG_QETH_IPV6
/*
 * ARP bypass for qeth devices: qeth resolves addresses in firmware, so
 * neighbours on our devices skip ARP entirely and transmit directly.
 * We hook arp_tbl.constructor, keeping the old constructor for devices
 * that are not ours.
 */
6704 extern struct neigh_table arp_tbl;
6705 static struct neigh_ops *arp_direct_ops;
6706 static int (*qeth_old_arp_constructor) (struct neighbour *);
/* Every output path just queues the skb -- no ARP resolution step. */
6708 static struct neigh_ops arp_direct_ops_template = {
6712 	.error_report = NULL,
6713 	.output = dev_queue_xmit,
6714 	.connected_output = dev_queue_xmit,
6715 	.hh_output = dev_queue_xmit,
6716 	.queue_xmit = dev_queue_xmit
/*
 * Replacement neighbour constructor.  Non-qeth devices fall back to the
 * saved original constructor; qeth neighbours are marked NUD_NOARP and
 * wired to the direct-transmit ops above.
 */
6720 qeth_arp_constructor(struct neighbour *neigh)
6722 	struct net_device *dev = neigh->dev;
6723 	struct in_device *in_dev;
6724 	struct neigh_parms *parms;
6726 	if (!qeth_verify_dev(dev)) {
6727 		return qeth_old_arp_constructor(neigh);
/* NOTE(review): rcu_read_lock()/unlock() lines appear to be missing
 * around this RCU dereference in the extraction. */
6731 	in_dev = rcu_dereference(__in_dev_get(dev));
6732 	if (in_dev == NULL) {
/* Replace the default neigh_parms with the device's ARP parms. */
6737 	parms = in_dev->arp_parms;
6738 	__neigh_parms_put(neigh->parms);
6739 	neigh->parms = neigh_parms_clone(parms);
6742 	neigh->type = inet_addr_type(*(u32 *) neigh->primary_key);
6743 	neigh->nud_state = NUD_NOARP;
6744 	neigh->ops = arp_direct_ops;
6745 	neigh->output = neigh->ops->queue_xmit;
6748 #endif /*CONFIG_QETH_IPV6*/
6751 * IP address takeover related functions
/*
 * Remove (and, in the full source, kfree) every IP-address-takeover
 * entry of the card, under the card's ip_lock.
 */
6754 qeth_clear_ipato_list(struct qeth_card *card)
6756 	struct qeth_ipato_entry *ipatoe, *tmp;
6757 	unsigned long flags;
6759 	spin_lock_irqsave(&card->ip_lock, flags);
6760 	list_for_each_entry_safe(ipatoe, tmp, &card->ipato.entries, entry) {
6761 		list_del(&ipatoe->entry);
6764 	spin_unlock_irqrestore(&card->ip_lock, flags);
/*
 * Append a new IPATO (IP address takeover) entry to the card's list,
 * rejecting exact duplicates (same protocol, address and mask length).
 * Comparison width is 4 bytes for IPv4, 16 for IPv6.
 */
6768 qeth_add_ipato_entry(struct qeth_card *card, struct qeth_ipato_entry *new)
6770 	struct qeth_ipato_entry *ipatoe;
6771 	unsigned long flags;
6774 	QETH_DBF_TEXT(trace, 2, "addipato");
6775 	spin_lock_irqsave(&card->ip_lock, flags);
6776 	list_for_each_entry(ipatoe, &card->ipato.entries, entry){
6777 		if (ipatoe->proto != new->proto)
6779 		if (!memcmp(ipatoe->addr, new->addr,
6780 			    (ipatoe->proto == QETH_PROT_IPV4)? 4:16) &&
6781 		    (ipatoe->mask_bits == new->mask_bits)){
6782 			PRINT_WARN("ipato entry already exists!\n");
6788 		list_add_tail(&new->entry, &card->ipato.entries);
6790 	spin_unlock_irqrestore(&card->ip_lock, flags);
/*
 * Delete the IPATO entry matching (proto, addr, mask_bits) from the
 * card's takeover list, if present.  Mirror image of
 * qeth_add_ipato_entry(); same 4/16-byte comparison width by protocol.
 */
6795 qeth_del_ipato_entry(struct qeth_card *card, enum qeth_prot_versions proto,
6796 		     u8 *addr, int mask_bits)
6798 	struct qeth_ipato_entry *ipatoe, *tmp;
6799 	unsigned long flags;
6801 	QETH_DBF_TEXT(trace, 2, "delipato");
6802 	spin_lock_irqsave(&card->ip_lock, flags);
6803 	list_for_each_entry_safe(ipatoe, tmp, &card->ipato.entries, entry){
6804 		if (ipatoe->proto != proto)
6806 		if (!memcmp(ipatoe->addr, addr,
6807 			    (proto == QETH_PROT_IPV4)? 4:16) &&
6808 		    (ipatoe->mask_bits == mask_bits)){
6809 			list_del(&ipatoe->entry);
6813 	spin_unlock_irqrestore(&card->ip_lock, flags);
/*
 * Expand a len-byte address into len*8 individual bits (one byte per
 * bit in "bits"), so that prefix masks can be compared with memcmp.
 * Within each octet the loop fills bits[] LSB-last (j counts 7..0),
 * shifting the octet right between iterations (shift line not visible).
 */
6817 qeth_convert_addr_to_bits(u8 *addr, u8 *bits, int len)
6822 	for (i = 0; i < len; ++i){
6824 		for (j = 7; j >= 0; --j){
6825 			bits[i*8 + j] = octet & 1;
/*
 * Decide whether "addr" falls under any configured takeover entry:
 * convert both the address and each candidate entry to bit arrays and
 * compare the first mask_bits bits (clamped to 32 for IPv4, 128 for
 * IPv6).  The per-protocol "invert" flags flip the final verdict.
 * Returns nonzero when the address is covered; 0 when takeover is
 * disabled or nothing matches.
 */
6832 qeth_is_addr_covered_by_ipato(struct qeth_card *card, struct qeth_ipaddr *addr)
6834 	struct qeth_ipato_entry *ipatoe;
6835 	u8 addr_bits[128] = {0, };
6836 	u8 ipatoe_bits[128] = {0, };
6839 	if (!card->ipato.enabled)
6842 	qeth_convert_addr_to_bits((u8 *) &addr->u, addr_bits,
6843 				  (addr->proto == QETH_PROT_IPV4)? 4:16);
6844 	list_for_each_entry(ipatoe, &card->ipato.entries, entry){
6845 		if (addr->proto != ipatoe->proto)
6847 		qeth_convert_addr_to_bits(ipatoe->addr, ipatoe_bits,
6848 					  (ipatoe->proto==QETH_PROT_IPV4) ?
/* Prefix match: equal leading mask_bits bits means "covered". */
6850 		if (addr->proto == QETH_PROT_IPV4)
6851 			rc = !memcmp(addr_bits, ipatoe_bits,
6852 				     min(32, ipatoe->mask_bits));
6854 			rc = !memcmp(addr_bits, ipatoe_bits,
6855 				     min(128, ipatoe->mask_bits));
/* invert4/invert6 turn the list into an exclusion list. */
6860 	if ((addr->proto == QETH_PROT_IPV4) && card->ipato.invert4)
6862 	else if ((addr->proto == QETH_PROT_IPV6) && card->ipato.invert6)
6869 * VIPA related functions
/*
 * Add a virtual IP address (VIPA) to the card.  Builds a qeth_ipaddr of
 * type VIPA with the protocol-specific VIPA set/del flags, refuses the
 * add when the address is already present in either the active list or
 * the to-be-done list, then queues it and kicks the SET_IP worker.
 */
6872 qeth_add_vipa(struct qeth_card *card, enum qeth_prot_versions proto,
6875 	struct qeth_ipaddr *ipaddr;
6876 	unsigned long flags;
6879 	ipaddr = qeth_get_addr_buffer(proto);
6881 	if (proto == QETH_PROT_IPV4){
6882 		QETH_DBF_TEXT(trace, 2, "addvipa4");
6883 		memcpy(&ipaddr->u.a4.addr, addr, 4);
/* VIPAs carry no netmask/prefix: mask 0 / pfxlen 0. */
6884 		ipaddr->u.a4.mask = 0;
6885 #ifdef CONFIG_QETH_IPV6
6886 	} else if (proto == QETH_PROT_IPV6){
6887 		QETH_DBF_TEXT(trace, 2, "addvipa6");
6888 		memcpy(&ipaddr->u.a6.addr, addr, 16);
6889 		ipaddr->u.a6.pfxlen = 0;
6892 	ipaddr->type = QETH_IP_TYPE_VIPA;
6893 	ipaddr->set_flags = QETH_IPA_SETIP_VIPA_FLAG;
6894 	ipaddr->del_flags = QETH_IPA_DELIP_VIPA_FLAG;
/* Duplicate check must cover both the committed and pending lists. */
6897 	spin_lock_irqsave(&card->ip_lock, flags);
6898 	if (__qeth_address_exists_in_list(&card->ip_list, ipaddr, 0) ||
6899 	    __qeth_address_exists_in_list(card->ip_tbd_list, ipaddr, 0))
6901 	spin_unlock_irqrestore(&card->ip_lock, flags);
6903 		PRINT_WARN("Cannot add VIPA. Address already exists!\n");
6906 	if (!qeth_add_ip(card, ipaddr))
/* Start the SET_IP thread only if it is not already scheduled. */
6908 	if (qeth_set_thread_start_bit(card, QETH_SET_IP_THREAD) == 0)
6909 		schedule_work(&card->kernel_thread_starter);
/*
 * Remove a virtual IP address from the card: build the matching VIPA
 * descriptor, queue the deletion via qeth_delete_ip() and kick the
 * SET_IP worker thread.
 */
6914 qeth_del_vipa(struct qeth_card *card, enum qeth_prot_versions proto,
6917 	struct qeth_ipaddr *ipaddr;
6919 	ipaddr = qeth_get_addr_buffer(proto);
6921 	if (proto == QETH_PROT_IPV4){
6922 		QETH_DBF_TEXT(trace, 2, "delvipa4");
6923 		memcpy(&ipaddr->u.a4.addr, addr, 4);
6924 		ipaddr->u.a4.mask = 0;
6925 #ifdef CONFIG_QETH_IPV6
6926 	} else if (proto == QETH_PROT_IPV6){
6927 		QETH_DBF_TEXT(trace, 2, "delvipa6");
6928 		memcpy(&ipaddr->u.a6.addr, addr, 16);
6929 		ipaddr->u.a6.pfxlen = 0;
6932 	ipaddr->type = QETH_IP_TYPE_VIPA;
6935 	if (!qeth_delete_ip(card, ipaddr))
6937 	if (qeth_set_thread_start_bit(card, QETH_SET_IP_THREAD) == 0)
6938 		schedule_work(&card->kernel_thread_starter);
6942 * proxy ARP related functions
/*
 * Add an RXIP ("receive IP" / proxy-ARP takeover) address to the card.
 * Same structure as qeth_add_vipa() but with type RXIP and the
 * SETIP_TAKEOVER set-flag; duplicates in the active or pending list
 * are rejected before the SET_IP worker is kicked.
 */
6945 qeth_add_rxip(struct qeth_card *card, enum qeth_prot_versions proto,
6948 	struct qeth_ipaddr *ipaddr;
6949 	unsigned long flags;
6952 	ipaddr = qeth_get_addr_buffer(proto);
6954 	if (proto == QETH_PROT_IPV4){
6955 		QETH_DBF_TEXT(trace, 2, "addrxip4");
6956 		memcpy(&ipaddr->u.a4.addr, addr, 4);
6957 		ipaddr->u.a4.mask = 0;
6958 #ifdef CONFIG_QETH_IPV6
6959 	} else if (proto == QETH_PROT_IPV6){
6960 		QETH_DBF_TEXT(trace, 2, "addrxip6");
6961 		memcpy(&ipaddr->u.a6.addr, addr, 16);
6962 		ipaddr->u.a6.pfxlen = 0;
6965 	ipaddr->type = QETH_IP_TYPE_RXIP;
6966 	ipaddr->set_flags = QETH_IPA_SETIP_TAKEOVER_FLAG;
6967 	ipaddr->del_flags = 0;
6970 	spin_lock_irqsave(&card->ip_lock, flags);
6971 	if (__qeth_address_exists_in_list(&card->ip_list, ipaddr, 0) ||
6972 	    __qeth_address_exists_in_list(card->ip_tbd_list, ipaddr, 0))
6974 	spin_unlock_irqrestore(&card->ip_lock, flags);
6976 		PRINT_WARN("Cannot add RXIP. Address already exists!\n");
6979 	if (!qeth_add_ip(card, ipaddr))
6981 	if (qeth_set_thread_start_bit(card, QETH_SET_IP_THREAD) == 0)
6982 		schedule_work(&card->kernel_thread_starter);
/*
 * Remove an RXIP (proxy-ARP takeover) address from the card: build the
 * matching RXIP descriptor, queue the deletion via qeth_delete_ip()
 * and kick the SET_IP worker thread.
 *
 * Fix: the debug-trace labels were copy-pasted from qeth_add_rxip()
 * and logged "addrxip4"/"addrxip6" on the *delete* path, making the
 * s390dbf trace indistinguishable from an add.  They now read
 * "delrxip4"/"delrxip6" (same 8-char width as the other trace tags).
 */
6987 qeth_del_rxip(struct qeth_card *card, enum qeth_prot_versions proto,
6990 	struct qeth_ipaddr *ipaddr;
6992 	ipaddr = qeth_get_addr_buffer(proto);
6994 	if (proto == QETH_PROT_IPV4){
6995 		QETH_DBF_TEXT(trace, 2, "delrxip4");
6996 		memcpy(&ipaddr->u.a4.addr, addr, 4);
6997 		ipaddr->u.a4.mask = 0;
6998 #ifdef CONFIG_QETH_IPV6
6999 	} else if (proto == QETH_PROT_IPV6){
7000 		QETH_DBF_TEXT(trace, 2, "delrxip6");
7001 		memcpy(&ipaddr->u.a6.addr, addr, 16);
7002 		ipaddr->u.a6.pfxlen = 0;
7005 	ipaddr->type = QETH_IP_TYPE_RXIP;
7008 	if (!qeth_delete_ip(card, ipaddr))
7010 	if (qeth_set_thread_start_bit(card, QETH_SET_IP_THREAD) == 0)
7011 		schedule_work(&card->kernel_thread_starter);
/*
 * inetaddr notifier: mirror IPv4 address add/remove events on qeth
 * devices into the card's IP list, then schedule the SET_IP worker.
 * Devices not belonging to qeth are filtered by
 * qeth_get_card_from_dev() (the NULL-check line is not visible here).
 * NOTE(review): the switch on "event" (NETDEV_UP/NETDEV_DOWN) is
 * missing from this extraction; the add/delete calls below are
 * presumably its case bodies.
 */
7018 qeth_ip_event(struct notifier_block *this,
7019 	      unsigned long event,void *ptr)
7021 	struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
7022 	struct net_device *dev =(struct net_device *) ifa->ifa_dev->dev;
7023 	struct qeth_ipaddr *addr;
7024 	struct qeth_card *card;
7026 	QETH_DBF_TEXT(trace,3,"ipevent");
7027 	card = qeth_get_card_from_dev(dev);
7031 	addr = qeth_get_addr_buffer(QETH_PROT_IPV4);
7033 	addr->u.a4.addr = ifa->ifa_address;
7034 	addr->u.a4.mask = ifa->ifa_mask;
7035 	addr->type = QETH_IP_TYPE_NORMAL;
7041 	if (!qeth_add_ip(card, addr))
7045 	if (!qeth_delete_ip(card, addr))
7051 	if (qeth_set_thread_start_bit(card, QETH_SET_IP_THREAD) == 0)
7052 		schedule_work(&card->kernel_thread_starter);
/* Notifier registration glue (callback field line not visible). */
7057 static struct notifier_block qeth_ip_notifier = {
7062 #ifdef CONFIG_QETH_IPV6
7064  * IPv6 event handler
/*
 * inet6addr notifier: IPv6 counterpart of qeth_ip_event().  Skips cards
 * without the IPA_IPV6 assist, then mirrors address add/remove into the
 * card's IP list and schedules the SET_IP worker.  As above, the
 * NETDEV_UP/NETDEV_DOWN switch is missing from this extraction.
 */
7067 qeth_ip6_event(struct notifier_block *this,
7068 	       unsigned long event,void *ptr)
7071 	struct inet6_ifaddr *ifa = (struct inet6_ifaddr *)ptr;
7072 	struct net_device *dev = (struct net_device *)ifa->idev->dev;
7073 	struct qeth_ipaddr *addr;
7074 	struct qeth_card *card;
7076 	QETH_DBF_TEXT(trace,3,"ip6event");
7078 	card = qeth_get_card_from_dev(dev);
7081 	if (!qeth_is_supported(card, IPA_IPV6))
7084 	addr = qeth_get_addr_buffer(QETH_PROT_IPV6);
7086 	memcpy(&addr->u.a6.addr, &ifa->addr, sizeof(struct in6_addr));
7087 	addr->u.a6.pfxlen = ifa->prefix_len;
7088 	addr->type = QETH_IP_TYPE_NORMAL;
7094 	if (!qeth_add_ip(card, addr))
7098 	if (!qeth_delete_ip(card, addr))
7104 	if (qeth_set_thread_start_bit(card, QETH_SET_IP_THREAD) == 0)
7105 		schedule_work(&card->kernel_thread_starter);
/* Notifier registration glue (callback field line not visible). */
7110 static struct notifier_block qeth_ip6_notifier = {
/*
 * Reboot notifier: on shutdown/reboot, walk every device bound to the
 * qeth ccwgroup driver (old 2.6 driver-model list, guarded by the bus
 * subsystem rwsem) and quiesce it -- flush its IP list and clear the
 * QDIO queues -- so the adapter is left clean for the next IPL.
 */
7117 qeth_reboot_event(struct notifier_block *this, unsigned long event, void *ptr)
7120 	struct device *entry;
7121 	struct qeth_card *card;
7123 	down_read(&qeth_ccwgroup_driver.driver.bus->subsys.rwsem);
7124 	list_for_each_entry(entry, &qeth_ccwgroup_driver.driver.devices,
7126 		card = (struct qeth_card *) entry->driver_data;
7127 		qeth_clear_ip_list(card, 0, 0);
7128 		qeth_qdio_clear_card(card, 0);
7130 	up_read(&qeth_ccwgroup_driver.driver.bus->subsys.rwsem);
7135 static struct notifier_block qeth_reboot_notifier = {
/*
 * Register the reboot, IPv4-address and (if configured) IPv6-address
 * notifiers.  On failure, previously-registered notifiers are unwound
 * in reverse order (the goto labels are missing from this extraction
 * but the unregister calls below are that unwind path).
 */
7141 qeth_register_notifiers(void)
7145 	QETH_DBF_TEXT(trace,5,"regnotif");
7146 	if ((r = register_reboot_notifier(&qeth_reboot_notifier)))
7148 	if ((r = register_inetaddr_notifier(&qeth_ip_notifier)))
7150 #ifdef CONFIG_QETH_IPV6
7151 	if ((r = register_inet6addr_notifier(&qeth_ip6_notifier)))
/* Error unwind: undo in reverse registration order. */
7156 #ifdef CONFIG_QETH_IPV6
7158 	unregister_inetaddr_notifier(&qeth_ip_notifier);
7161 	unregister_reboot_notifier(&qeth_reboot_notifier);
7166 * unregister all event notifiers
/*
 * Unregister all event notifiers at module teardown.  BUG_ON asserts
 * that each notifier was actually registered -- a nonzero return here
 * would mean the module's register/unregister calls are unbalanced.
 */
7169 qeth_unregister_notifiers(void)
7172 	QETH_DBF_TEXT(trace,5,"unregnot");
7173 	BUG_ON(unregister_reboot_notifier(&qeth_reboot_notifier));
7174 	BUG_ON(unregister_inetaddr_notifier(&qeth_ip_notifier));
7175 #ifdef CONFIG_QETH_IPV6
7176 	BUG_ON(unregister_inet6addr_notifier(&qeth_ip6_notifier));
7177 #endif /* QETH_IPV6 */
7181 #ifdef CONFIG_QETH_IPV6
/*
 * Install the qeth ARP-bypass hook: save the original arp_tbl
 * constructor, swap in qeth_arp_constructor under the table's write
 * lock, and allocate a mutable copy of the direct-transmit neigh_ops
 * template.  Returns -ENOMEM-style failure if the copy cannot be
 * allocated (the return lines are missing from this extraction).
 */
7183 qeth_ipv6_init(void)
7185 	qeth_old_arp_constructor = arp_tbl.constructor;
7186 	write_lock(&arp_tbl.lock);
7187 	arp_tbl.constructor = qeth_arp_constructor;
7188 	write_unlock(&arp_tbl.lock);
7190 	arp_direct_ops = (struct neigh_ops*)
7191 		kmalloc(sizeof(struct neigh_ops), GFP_KERNEL);
7192 	if (!arp_direct_ops)
7195 	memcpy(arp_direct_ops, &arp_direct_ops_template,
7196 	       sizeof(struct neigh_ops));
/*
 * Undo qeth_ipv6_init(): restore the saved ARP constructor and free
 * the neigh_ops copy.
 */
7202 qeth_ipv6_uninit(void)
7204 	write_lock(&arp_tbl.lock);
7205 	arp_tbl.constructor = qeth_old_arp_constructor;
7206 	write_unlock(&arp_tbl.lock);
7207 	kfree(arp_direct_ops);
7209 #endif /* CONFIG_QETH_IPV6 */
/*
 * Tear down the sysfs presence: driver attributes, the plain ccw
 * driver, the ccwgroup driver, and the s390 root device -- the reverse
 * of qeth_sysfs_register().
 */
7212 qeth_sysfs_unregister(void)
7214 	qeth_remove_driver_attributes();
7215 	ccw_driver_unregister(&qeth_ccw_driver);
7216 	ccwgroup_driver_unregister(&qeth_ccwgroup_driver);
7217 	s390_root_dev_unregister(qeth_root_dev);
7220 * register qeth at sysfs
7223 qeth_sysfs_register(void)
7227 rc = ccwgroup_driver_register(&qeth_ccwgroup_driver);
7230 rc = ccw_driver_register(&qeth_ccw_driver);
7233 rc = qeth_create_driver_attributes();
7236 qeth_root_dev = s390_root_dev_register("qeth");
7237 if (IS_ERR(qeth_root_dev)) {
7238 rc = PTR_ERR(qeth_root_dev);
/*
 * Module init (the qeth_init() header line itself was lost to
 * extraction).  Order: banner, global list/lock init, dbf views, sysfs,
 * optional IPv6 ARP hook, notifiers, procfs.  Later failures unwind
 * earlier steps via the cascading error path at the bottom (goto
 * labels not visible).
 */
7253 	PRINT_INFO("loading %s (%s/%s/%s/%s/%s/%s/%s %s %s)\n",
7254 		   version, VERSION_QETH_C, VERSION_QETH_H,
7255 		   VERSION_QETH_MPC_H, VERSION_QETH_MPC_C,
7256 		   VERSION_QETH_FS_H, VERSION_QETH_PROC_C,
7257 		   VERSION_QETH_SYS_C, QETH_VERSION_IPV6,
7260 	INIT_LIST_HEAD(&qeth_card_list.list);
7261 	INIT_LIST_HEAD(&qeth_notify_list);
7262 	spin_lock_init(&qeth_notify_lock);
7263 	rwlock_init(&qeth_card_list.rwlock);
7265 	if (qeth_register_dbf_views())
7267 	if (qeth_sysfs_register())
7270 #ifdef CONFIG_QETH_IPV6
7271 	if (qeth_ipv6_init()) {
7272 		PRINT_ERR("Out of memory during ipv6 init.\n");
7275 #endif /* QETH_IPV6 */
7276 	if (qeth_register_notifiers())
7278 	if (qeth_create_procfs_entries())
/* Error unwind: undo completed steps in reverse order. */
7284 	qeth_unregister_notifiers();
7286 #ifdef CONFIG_QETH_IPV6
7288 #endif /* QETH_IPV6 */
7290 	qeth_sysfs_unregister();
7291 	qeth_unregister_dbf_views();
7293 	PRINT_ERR("Initialization failed");
/*
 * Module exit: soft-stop every card before the driver core would
 * hard-stop them (see the original comment below), then unhook IPv6,
 * notifiers, procfs, sysfs and the debug views.
 * NOTE(review): the rwlock is released inside the iteration while
 * list entries are being removed (qeth_remove_device) and then
 * re-taken -- looks racy if cards can appear/disappear concurrently;
 * presumably tolerated because this runs at module unload.  Confirm
 * against the full source (loop-relock lines are missing here).
 */
7298 __exit qeth_exit(void)
7300 	struct qeth_card *card, *tmp;
7301 	unsigned long flags;
7303 	QETH_DBF_TEXT(trace,1, "cleanup.");
7306 	 * Weed would not need to clean up our devices here, because the
7307 	 * common device layer calls qeth_remove_device for each device
7308 	 * as soon as we unregister our driver (done in qeth_sysfs_unregister).
7309 	 * But we do cleanup here so we can do a "soft" shutdown of our cards.
7310 	 * qeth_remove_device called by the common device layer would otherwise
7311 	 * do a "hard" shutdown (card->use_hard_stop is set to one in
7312 	 * qeth_remove_device).
7315 	read_lock_irqsave(&qeth_card_list.rwlock, flags);
7316 	list_for_each_entry_safe(card, tmp, &qeth_card_list.list, list){
7317 		read_unlock_irqrestore(&qeth_card_list.rwlock, flags);
7318 		qeth_set_offline(card->gdev);
7319 		qeth_remove_device(card->gdev);
7322 	read_unlock_irqrestore(&qeth_card_list.rwlock, flags);
7323 #ifdef CONFIG_QETH_IPV6
7326 	qeth_unregister_notifiers();
7327 	qeth_remove_procfs_entries();
7328 	qeth_sysfs_unregister();
7329 	qeth_unregister_dbf_views();
7330 	printk("qeth: removed\n");
/* Module entry points and metadata. */
7333 EXPORT_SYMBOL(qeth_eyecatcher);
7334 module_init(qeth_init);
7335 module_exit(qeth_exit);
7336 MODULE_AUTHOR("Frank Pavlic <pavlic@de.ibm.com>");
7337 MODULE_DESCRIPTION("Linux on zSeries OSA Express and HiperSockets support\n" \
7338 		   "Copyright 2000,2003 IBM Corporation\n");
7340 MODULE_LICENSE("GPL");