3 * linux/drivers/s390/net/qeth_main.c ($Revision: 1.145 $)
5 * Linux on zSeries OSA Express and HiperSockets support
7 * Copyright 2000,2003 IBM Corporation
9 * Author(s): Original Code written by
10 * Utz Bacher (utz.bacher@de.ibm.com)
12 * Frank Pavlic (pavlic@de.ibm.com) and
13 * Thomas Spatzier <tspat@de.ibm.com>
15 * $Revision: 1.145 $ $Date: 2004/10/08 15:08:40 $
17 * This program is free software; you can redistribute it and/or modify
18 * it under the terms of the GNU General Public License as published by
19 * the Free Software Foundation; either version 2, or (at your option)
22 * This program is distributed in the hope that it will be useful,
23 * but WITHOUT ANY WARRANTY; without even the implied warranty of
24 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
25 * GNU General Public License for more details.
27 * You should have received a copy of the GNU General Public License
28 * along with this program; if not, write to the Free Software
29 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
34 * eye catcher; just for debugging purposes
42 #include <linux/config.h>
43 #include <linux/module.h>
44 #include <linux/moduleparam.h>
46 #include <linux/string.h>
47 #include <linux/errno.h>
51 #include <asm/ebcdic.h>
52 #include <linux/ctype.h>
53 #include <asm/semaphore.h>
54 #include <asm/timex.h>
56 #include <linux/inetdevice.h>
57 #include <linux/netdevice.h>
58 #include <linux/sched.h>
59 #include <linux/workqueue.h>
60 #include <linux/kernel.h>
61 #include <linux/slab.h>
62 #include <linux/interrupt.h>
63 #include <linux/tcp.h>
64 #include <linux/icmp.h>
65 #include <linux/skbuff.h>
66 #include <net/route.h>
69 #include <linux/igmp.h>
71 #include <asm/uaccess.h>
72 #include <linux/init.h>
73 #include <linux/reboot.h>
75 #include <linux/mii.h>
76 #include <linux/rcupdate.h>
82 #define VERSION_QETH_C "$Revision: 1.145 $"
83 static const char *version = "qeth S/390 OSA-Express driver";
86 * Debug Facility Stuff
88 static debug_info_t *qeth_dbf_setup = NULL;
89 static debug_info_t *qeth_dbf_data = NULL;
90 static debug_info_t *qeth_dbf_misc = NULL;
91 static debug_info_t *qeth_dbf_control = NULL;
92 static debug_info_t *qeth_dbf_trace = NULL;
93 static debug_info_t *qeth_dbf_sense = NULL;
94 static debug_info_t *qeth_dbf_qerr = NULL;
96 DEFINE_PER_CPU(char[256], qeth_dbf_txt_buf);
99 * some more definitions and declarations
101 static unsigned int known_devices[][10] = QETH_MODELLIST_ARRAY;
103 /* list of our cards */
104 struct qeth_card_list_struct qeth_card_list;
105 /*process list want to be notified*/
106 spinlock_t qeth_notify_lock;
107 struct list_head qeth_notify_list;
109 static void qeth_send_control_data_cb(struct qeth_channel *,
110 struct qeth_cmd_buffer *);
113 * here we go with function implementation
116 qeth_init_qdio_info(struct qeth_card *card);
119 qeth_init_qdio_queues(struct qeth_card *card);
122 qeth_alloc_qdio_buffers(struct qeth_card *card);
125 qeth_free_qdio_buffers(struct qeth_card *);
128 qeth_clear_qdio_buffers(struct qeth_card *);
131 qeth_clear_ip_list(struct qeth_card *, int, int);
134 qeth_clear_ipacmd_list(struct qeth_card *);
137 qeth_qdio_clear_card(struct qeth_card *, int);
140 qeth_clear_working_pool_list(struct qeth_card *);
143 qeth_clear_cmd_buffers(struct qeth_channel *);
146 qeth_stop(struct net_device *);
149 qeth_clear_ipato_list(struct qeth_card *);
152 qeth_is_addr_covered_by_ipato(struct qeth_card *, struct qeth_ipaddr *);
155 qeth_irq_tasklet(unsigned long);
158 qeth_set_online(struct ccwgroup_device *);
160 static struct qeth_ipaddr *
161 qeth_get_addr_buffer(enum qeth_prot_versions);
/*
 * Send each process registered on qeth_notify_list its requested signal.
 * The list is walked under qeth_notify_lock.
 * (NOTE: several original lines are elided in this excerpt.)
 */
164 qeth_notify_processes(void)
166 	/*notify all registered processes */
167 	struct qeth_notify_list_struct *n_entry;
169 	QETH_DBF_TEXT(trace,3,"procnoti");
170 	spin_lock(&qeth_notify_lock);
171 	list_for_each_entry(n_entry, &qeth_notify_list, list) {
172 		send_sig(n_entry->signum, n_entry->task, 1);
174 	spin_unlock(&qeth_notify_lock);
/*
 * Remove task @p from the notification list, if present.
 * Uses the _safe list iterator because the matching entry is deleted
 * (and presumably freed on an elided line) while iterating.
 */
178 qeth_notifier_unregister(struct task_struct *p)
180 	struct qeth_notify_list_struct *n_entry, *tmp;
182 	QETH_DBF_TEXT(trace, 2, "notunreg");
183 	spin_lock(&qeth_notify_lock);
184 	list_for_each_entry_safe(n_entry, tmp, &qeth_notify_list, list) {
185 		if (n_entry->task == p) {
186 			list_del(&n_entry->list);
192 	spin_unlock(&qeth_notify_lock);
/*
 * Register task @p to be signalled with @signum on qeth events.
 * If an entry for @p already exists only its signal number is updated;
 * otherwise a new entry is allocated and linked onto qeth_notify_list.
 * NOTE(review): the kmalloc NULL check and the n_entry->task assignment
 * are on lines elided from this excerpt — verify against the full file.
 */
196 qeth_notifier_register(struct task_struct *p, int signum)
198 	struct qeth_notify_list_struct *n_entry;
200 	QETH_DBF_TEXT(trace, 2, "notreg");
201 	/*check first if entry already exists*/
202 	spin_lock(&qeth_notify_lock);
203 	list_for_each_entry(n_entry, &qeth_notify_list, list) {
204 		if (n_entry->task == p) {
205 			n_entry->signum = signum;
206 			spin_unlock(&qeth_notify_lock);
210 	spin_unlock(&qeth_notify_lock);
212 	n_entry = (struct qeth_notify_list_struct *)
213 		kmalloc(sizeof(struct qeth_notify_list_struct),GFP_KERNEL);
217 	n_entry->signum = signum;
218 	spin_lock(&qeth_notify_lock);
219 	list_add(&n_entry->list,&qeth_notify_list);
220 	spin_unlock(&qeth_notify_lock);
226  * free channel command buffers
229 qeth_clean_channel(struct qeth_channel *channel)
233 	QETH_DBF_TEXT(setup, 2, "freech");
	/* free the data area of every command buffer on this channel */
234 	for (cnt = 0; cnt < QETH_CMD_BUFFER_NO; cnt++)
235 		kfree(channel->iob[cnt].data);
/*
 * Release all resources owned by @card: command buffers of the read and
 * write channels, the net_device, IP/IPATO lists and QDIO buffers.
 * The card structure itself is presumably freed on an elided line.
 */
242 qeth_free_card(struct qeth_card *card)
245 	QETH_DBF_TEXT(setup, 2, "freecrd");
246 	QETH_DBF_HEX(setup, 2, &card, sizeof(void *));
247 	qeth_clean_channel(&card->read);
248 	qeth_clean_channel(&card->write);
250 		free_netdev(card->dev);
251 	qeth_clear_ip_list(card, 0, 0);
252 	qeth_clear_ipato_list(card);
253 	qeth_free_qdio_buffers(card);
258  * alloc memory for command buffer per channel
261 qeth_setup_channel(struct qeth_channel *channel)
265 	QETH_DBF_TEXT(setup, 2, "setupch");
	/* allocate QETH_CMD_BUFFER_NO DMA-capable command buffers */
266 	for (cnt=0; cnt < QETH_CMD_BUFFER_NO; cnt++) {
267 		channel->iob[cnt].data = (char *)
268 			kmalloc(QETH_BUFSIZE, GFP_DMA|GFP_KERNEL);
269 		if (channel->iob[cnt].data == NULL)
271 		channel->iob[cnt].state = BUF_STATE_FREE;
272 		channel->iob[cnt].channel = channel;
273 		channel->iob[cnt].callback = qeth_send_control_data_cb;
274 		channel->iob[cnt].rc = 0;
	/* partial-allocation unwind: free what was allocated so far */
276 	if (cnt < QETH_CMD_BUFFER_NO) {
278 			kfree(channel->iob[cnt].data);
282 	channel->io_buf_no = 0;
283 	atomic_set(&channel->irq_pending, 0);
284 	spin_lock_init(&channel->iob_lock);
286 	init_waitqueue_head(&channel->wait_q);
287 	channel->irq_tasklet.data = (unsigned long) channel;
288 	channel->irq_tasklet.func = qeth_irq_tasklet;
293  * alloc memory for card structure
295 static struct qeth_card *
296 qeth_alloc_card(void)
298 	struct qeth_card *card;
300 	QETH_DBF_TEXT(setup, 2, "alloccrd");
301 	card = (struct qeth_card *) kmalloc(sizeof(struct qeth_card),
305 	QETH_DBF_HEX(setup, 2, &card, sizeof(void *));
	/* zero the whole card so all state/lists start in a known state */
306 	memset(card, 0, sizeof(struct qeth_card));
307 	if (qeth_setup_channel(&card->read)) {
	/* on write-channel setup failure, undo the read-channel setup */
311 	if (qeth_setup_channel(&card->write)) {
312 		qeth_clean_channel(&card->read);
/*
 * Check whether @irb is an ERR_PTR-encoded error rather than a real irb.
 * The switch on PTR_ERR(irb) distinguishes -EIO, -ETIMEDOUT and unknown
 * errors, logging each; case labels are on lines elided from this excerpt.
 */
320 __qeth_check_irb_error(struct ccw_device *cdev, struct irb *irb)
325 	switch (PTR_ERR(irb)) {
327 		PRINT_WARN("i/o-error on device %s\n", cdev->dev.bus_id);
328 		QETH_DBF_TEXT(trace, 2, "ckirberr");
329 		QETH_DBF_TEXT_(trace, 2, "  rc%d", -EIO);
332 		PRINT_WARN("timeout on device %s\n", cdev->dev.bus_id);
333 		QETH_DBF_TEXT(trace, 2, "ckirberr");
334 		QETH_DBF_TEXT_(trace, 2, "  rc%d", -ETIMEDOUT);
337 		PRINT_WARN("unknown error %ld on device %s\n", PTR_ERR(irb),
339 		QETH_DBF_TEXT(trace, 2, "ckirberr");
340 		QETH_DBF_TEXT(trace, 2, "  rc???");
/*
 * Classify a problem reported in @irb: channel checks (cstat) and, for
 * unit checks, the sense data (resetting event, command reject, the
 * 0xaf/0xfe "AFFE" pattern, all-zero sense). Return values are on
 * elided lines — presumably error codes per category; verify in full file.
 */
346 qeth_get_problem(struct ccw_device *cdev, struct irb *irb)
351 	sense = (char *) irb->ecw;
352 	cstat = irb->scsw.cstat;
353 	dstat = irb->scsw.dstat;
	/* any channel/interface/data/chain/protection/program check? */
355 	if (cstat & (SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK |
356 		     SCHN_STAT_CHN_DATA_CHK | SCHN_STAT_CHAIN_CHECK |
357 		     SCHN_STAT_PROT_CHECK | SCHN_STAT_PROG_CHECK)) {
358 		QETH_DBF_TEXT(trace,2, "CGENCHK");
359 		PRINT_WARN("check on device %s, dstat=x%x, cstat=x%x ",
360 			   cdev->dev.bus_id, dstat, cstat);
361 		HEXDUMP16(WARN, "irb: ", irb);
362 		HEXDUMP16(WARN, "irb: ", ((char *) irb) + 32);
366 	if (dstat & DEV_STAT_UNIT_CHECK) {
367 		if (sense[SENSE_RESETTING_EVENT_BYTE] &
368 		    SENSE_RESETTING_EVENT_FLAG) {
369 			QETH_DBF_TEXT(trace,2,"REVIND");
372 		if (sense[SENSE_COMMAND_REJECT_BYTE] &
373 		    SENSE_COMMAND_REJECT_FLAG) {
374 			QETH_DBF_TEXT(trace,2,"CMDREJi");
377 		if ((sense[2] == 0xaf) && (sense[3] == 0xfe)) {
378 			QETH_DBF_TEXT(trace,2,"AFFE");
381 		if ((!sense[0]) && (!sense[1]) && (!sense[2]) && (!sense[3])) {
382 			QETH_DBF_TEXT(trace,2,"ZEROSEN");
385 		QETH_DBF_TEXT(trace,2,"DGENCHK");
390 static int qeth_issue_next_read(struct qeth_card *);
/*
 * Main CCW interrupt handler for all three qeth channels.
 * Maps @cdev to its channel (read/write/data), clears irq_pending,
 * tracks clear/halt completion in channel->state, triggers recovery on
 * unit checks, marks the interrupting command buffer PROCESSED and
 * schedules the channel tasklet. Several early-out paths and the
 * buffer-validity check around intparm are on elided lines.
 */
396 qeth_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
400 	struct qeth_cmd_buffer *buffer;
401 	struct qeth_channel *channel;
402 	struct qeth_card *card;
404 	QETH_DBF_TEXT(trace,5,"irq");
	/* irb may be an ERR_PTR; bail out on encoded errors */
406 	if (__qeth_check_irb_error(cdev, irb))
408 	cstat = irb->scsw.cstat;
409 	dstat = irb->scsw.dstat;
411 	card = CARD_FROM_CDEV(cdev);
	/* identify which of the card's channels this interrupt belongs to */
415 	if (card->read.ccwdev == cdev){
416 		channel = &card->read;
417 		QETH_DBF_TEXT(trace,5,"read");
418 	} else if (card->write.ccwdev == cdev) {
419 		channel = &card->write;
420 		QETH_DBF_TEXT(trace,5,"write");
422 		channel = &card->data;
423 		QETH_DBF_TEXT(trace,5,"data");
425 	atomic_set(&channel->irq_pending, 0);
	/* completed clear/halt functions change the channel state */
427 	if (irb->scsw.fctl & (SCSW_FCTL_CLEAR_FUNC))
428 		channel->state = CH_STATE_STOPPED;
430 	if (irb->scsw.fctl & (SCSW_FCTL_HALT_FUNC))
431 		channel->state = CH_STATE_HALTED;
433 	/*let's wake up immediately on data channel*/
434 	if ((channel == &card->data) && (intparm != 0))
437 	if (intparm == QETH_CLEAR_CHANNEL_PARM) {
438 		QETH_DBF_TEXT(trace, 6, "clrchpar");
439 		/* we don't have to handle this further */
442 	if (intparm == QETH_HALT_CHANNEL_PARM) {
443 		QETH_DBF_TEXT(trace, 6, "hltchpar");
444 		/* we don't have to handle this further */
447 	if ((dstat & DEV_STAT_UNIT_EXCEP) ||
448 	    (dstat & DEV_STAT_UNIT_CHECK) ||
450 		if (irb->esw.esw0.erw.cons) {
451 			/* TODO: we should make this s390dbf */
452 			PRINT_WARN("sense data available on channel %s.\n",
453 				CHANNEL_ID(channel));
454 			PRINT_WARN(" cstat 0x%X\n dstat 0x%X\n", cstat, dstat);
455 			HEXDUMP16(WARN,"irb: ",irb);
456 			HEXDUMP16(WARN,"sense data: ",irb->ecw);
458 		rc = qeth_get_problem(cdev,irb);
460 			qeth_schedule_recovery(card);
	/* intparm carries the (physical) address of the command buffer */
466 		buffer = (struct qeth_cmd_buffer *) __va((addr_t)intparm);
467 		buffer->state = BUF_STATE_PROCESSED;
469 	if (channel == &card->data)
	/* keep the read channel armed with the next read request */
472 	if (channel == &card->read &&
473 	    channel->state == CH_STATE_UP)
474 		qeth_issue_next_read(card);
476 	tasklet_schedule(&channel->irq_tasklet);
479 	wake_up(&card->wait_q);
483  * tasklet function scheduled from irq handler
486 qeth_irq_tasklet(unsigned long data)
488 	struct qeth_card *card;
489 	struct qeth_channel *channel;
490 	struct qeth_cmd_buffer *iob;
493 	QETH_DBF_TEXT(trace,5,"irqtlet");
494 	channel = (struct qeth_channel *) data;
496 	index = channel->buf_no;
497 	card = CARD_FROM_CDEV(channel->ccwdev);
	/*
	 * NOTE(review): iob is read below but its assignment (presumably
	 * iob = channel->iob) is on a line elided from this excerpt — verify.
	 * Walks PROCESSED buffers in ring order, invoking each callback.
	 */
498 	while (iob[index].state == BUF_STATE_PROCESSED) {
499 		if (iob[index].callback !=NULL) {
500 			iob[index].callback(channel,iob + index);
502 		index = (index + 1) % QETH_CMD_BUFFER_NO;
504 	channel->buf_no = index;
505 	wake_up(&card->wait_q);
508 static int qeth_stop_card(struct qeth_card *);
/*
 * Take the card offline: stop it, set all three ccw devices offline
 * (data, write, read — reverse of bring-up order), remember a previous
 * UP state as RECOVER, and notify registered processes.
 */
511 qeth_set_offline(struct ccwgroup_device *cgdev)
513 	struct qeth_card *card = (struct qeth_card *) cgdev->dev.driver_data;
514 	enum qeth_card_states recover_flag;
516 	QETH_DBF_TEXT(setup, 3, "setoffl");
517 	QETH_DBF_HEX(setup, 3, &card, sizeof(void *));
519 	recover_flag = card->state;
520 	if (qeth_stop_card(card) == -ERESTARTSYS){
521 		PRINT_WARN("Stopping card %s interrupted by user!\n",
525 	ccw_device_set_offline(CARD_DDEV(card));
526 	ccw_device_set_offline(CARD_WDEV(card));
527 	ccw_device_set_offline(CARD_RDEV(card));
528 	if (recover_flag == CARD_STATE_UP)
529 		card->state = CARD_STATE_RECOVER;
530 	qeth_notify_processes();
/*
 * Tear down a qeth device: force it offline if still online, unlink the
 * card from the global card list, unregister the net_device, remove
 * sysfs attributes, free the card and drop the device reference.
 */
535 qeth_remove_device(struct ccwgroup_device *cgdev)
537 	struct qeth_card *card = (struct qeth_card *) cgdev->dev.driver_data;
540 	QETH_DBF_TEXT(setup, 3, "rmdev");
541 	QETH_DBF_HEX(setup, 3, &card, sizeof(void *));
546 	if (cgdev->state == CCWGROUP_ONLINE){
547 		card->use_hard_stop = 1;
548 		qeth_set_offline(cgdev);
550 	/* remove from our internal list */
551 	write_lock_irqsave(&qeth_card_list.rwlock, flags);
552 	list_del(&card->list);
553 	write_unlock_irqrestore(&qeth_card_list.rwlock, flags);
555 		unregister_netdev(card->dev);
556 	qeth_remove_device_attributes(&cgdev->dev);
557 	qeth_free_card(card);
558 	cgdev->dev.driver_data = NULL;
559 	put_device(&cgdev->dev);
563 qeth_register_addr_entry(struct qeth_card *, struct qeth_ipaddr *);
565 qeth_deregister_addr_entry(struct qeth_card *, struct qeth_ipaddr *);
568 * Add/remove address to/from card's ip list, i.e. try to add or remove
569 * reference to/from an IP address that is already registered on the card.
571 * 0 address was on card and its reference count has been adjusted,
572 * but is still > 0, so nothing has to be done
573 * also returns 0 if card was not on card and the todo was to delete
574 * the address -> there is also nothing to be done
575 * 1 address was not on card and the todo is to add it to the card's ip
577 * -1 address was on card and its reference count has been decremented
578 * to <= 0 by the todo -> address must be removed from card
/*
 * Adjust the reference count of @todo against the card's on-card ip_list
 * (see the contract comment above: 0 = only refcount adjusted / nothing
 * to do, 1 = address must be added, -1 = address must be removed).
 * Matching is by protocol, type and address/mask (v4) or address/prefix
 * length (v6); break statements and returns are on elided lines.
 */
581 __qeth_ref_ip_on_card(struct qeth_card *card, struct qeth_ipaddr *todo,
582 		      struct qeth_ipaddr **__addr)
584 	struct qeth_ipaddr *addr;
587 	list_for_each_entry(addr, &card->ip_list, entry) {
588 		if ((addr->proto == QETH_PROT_IPV4) &&
589 		    (todo->proto == QETH_PROT_IPV4) &&
590 		    (addr->type == todo->type) &&
591 		    (addr->u.a4.addr == todo->u.a4.addr) &&
592 		    (addr->u.a4.mask == todo->u.a4.mask) ){
596 		if ((addr->proto == QETH_PROT_IPV6) &&
597 		    (todo->proto == QETH_PROT_IPV6) &&
598 		    (addr->type == todo->type) &&
599 		    (addr->u.a6.pfxlen == todo->u.a6.pfxlen) &&
600 		    (memcmp(&addr->u.a6.addr, &todo->u.a6.addr,
601 			    sizeof(struct in6_addr)) == 0)) {
607 		addr->users += todo->users;
608 		if (addr->users <= 0){
612 			/* for VIPA and RXIP limit refcount to 1 */
613 			if (addr->type != QETH_IP_TYPE_NORMAL)
618 	if (todo->users > 0){
619 		/* for VIPA and RXIP limit refcount to 1 */
620 		if (todo->type != QETH_IP_TYPE_NORMAL)
/*
 * Check whether @addr already occurs in @list. The same_type flag picks
 * whether a match requires equal or differing address types (v4 compares
 * the address only; v6 compares the full in6_addr).
 */
628 __qeth_address_exists_in_list(struct list_head *list, struct qeth_ipaddr *addr,
631 	struct qeth_ipaddr *tmp;
633 	list_for_each_entry(tmp, list, entry) {
634 		if ((tmp->proto == QETH_PROT_IPV4) &&
635 		    (addr->proto == QETH_PROT_IPV4) &&
636 		    ((same_type && (tmp->type == addr->type)) ||
637 		     (!same_type && (tmp->type != addr->type)) ) &&
638 		    (tmp->u.a4.addr == addr->u.a4.addr) ){
641 		if ((tmp->proto == QETH_PROT_IPV6) &&
642 		    (addr->proto == QETH_PROT_IPV6) &&
643 		    ((same_type && (tmp->type == addr->type)) ||
644 		     (!same_type && (tmp->type != addr->type)) ) &&
645 		    (memcmp(&tmp->u.a6.addr, &addr->u.a6.addr,
646 			    sizeof(struct in6_addr)) == 0) ) {
654  * Add IP to be added to todo list. If there is already an "add todo"
655  * in this list we just increment the reference count.
656  * Returns 0 if we just incremented reference count.
659 __qeth_insert_ip_todo(struct qeth_card *card, struct qeth_ipaddr *addr, int add)
661 	struct qeth_ipaddr *tmp, *t;
	/* look for an existing todo entry matching @addr exactly */
664 	list_for_each_entry_safe(tmp, t, &card->ip_tbd_list, entry) {
665 		if ((tmp->proto == QETH_PROT_IPV4) &&
666 		    (addr->proto == QETH_PROT_IPV4) &&
667 		    (tmp->type == addr->type) &&
668 		    (tmp->is_multicast == addr->is_multicast) &&
669 		    (tmp->u.a4.addr == addr->u.a4.addr) &&
670 		    (tmp->u.a4.mask == addr->u.a4.mask) ){
674 		if ((tmp->proto == QETH_PROT_IPV6) &&
675 		    (addr->proto == QETH_PROT_IPV6) &&
676 		    (tmp->type == addr->type) &&
677 		    (tmp->is_multicast == addr->is_multicast) &&
678 		    (tmp->u.a6.pfxlen == addr->u.a6.pfxlen) &&
679 		    (memcmp(&tmp->u.a6.addr, &addr->u.a6.addr,
680 			    sizeof(struct in6_addr)) == 0) ){
	/* merge refcounts into the existing todo; drop it if it cancels out */
686 		if (addr->users != 0)
687 			tmp->users += addr->users;
689 			tmp->users += add? 1:-1;
690 		if (tmp->users == 0){
691 			list_del(&tmp->entry);
	/* no match: queue @addr itself as a new +1/-1 todo */
696 		if (addr->users == 0)
697 			addr->users += add? 1:-1;
698 		if (add && (addr->type == QETH_IP_TYPE_NORMAL) &&
699 		    qeth_is_addr_covered_by_ipato(card, addr)){
700 			QETH_DBF_TEXT(trace, 2, "tkovaddr");
701 			addr->set_flags |= QETH_IPA_SETIP_TAKEOVER_FLAG;
703 		list_add_tail(&addr->entry, &card->ip_tbd_list);
709  * Remove IP address from list
712 qeth_delete_ip(struct qeth_card *card, struct qeth_ipaddr *addr)
717 	QETH_DBF_TEXT(trace,4,"delip");
718 	if (addr->proto == QETH_PROT_IPV4)
719 		QETH_DBF_HEX(trace,4,&addr->u.a4.addr,4);
721 		QETH_DBF_HEX(trace,4,&addr->u.a6.addr,4);
722 		QETH_DBF_HEX(trace,4,((char *)&addr->u.a6.addr)+4,4);
	/* queue a "delete" (add=0) todo under the ip_lock */
724 	spin_lock_irqsave(&card->ip_lock, flags);
725 	rc = __qeth_insert_ip_todo(card, addr, 0);
726 	spin_unlock_irqrestore(&card->ip_lock, flags);
/*
 * Queue an "add" todo for @addr on the card's ip_tbd_list — the
 * symmetric counterpart of qeth_delete_ip (add=1).
 */
731 qeth_add_ip(struct qeth_card *card, struct qeth_ipaddr *addr)
736 	QETH_DBF_TEXT(trace,4,"addip");
737 	if (addr->proto == QETH_PROT_IPV4)
738 		QETH_DBF_HEX(trace,4,&addr->u.a4.addr,4);
740 		QETH_DBF_HEX(trace,4,&addr->u.a6.addr,4);
741 		QETH_DBF_HEX(trace,4,((char *)&addr->u.a6.addr)+4,4);
743 	spin_lock_irqsave(&card->ip_lock, flags);
744 	rc = __qeth_insert_ip_todo(card, addr, 1);
745 	spin_unlock_irqrestore(&card->ip_lock, flags);
/*
 * Re-queue failed todos: negative users means it was a delete request,
 * otherwise an add; freeing of consumed entries is on elided lines.
 */
750 qeth_reinsert_todos(struct qeth_card *card, struct list_head *todos)
752 	struct qeth_ipaddr *todo, *tmp;
754 	list_for_each_entry_safe(todo, tmp, todos, entry){
755 		list_del_init(&todo->entry);
756 		if (todo->users < 0) {
757 			if (!qeth_delete_ip(card, todo))
760 			if (!qeth_add_ip(card, todo))
/*
 * Drain the card's ip_tbd_list and apply each todo to the hardware:
 * rc==0 only adjusted a refcount, rc==1 registers a new address,
 * rc==-1 deregisters one. The ip_lock is dropped around the (sleeping)
 * register/deregister calls and re-taken afterwards; failed todos are
 * collected on failed_todos and re-queued at the end.
 */
767 qeth_set_ip_addr_list(struct qeth_card *card)
769 	struct list_head failed_todos;
770 	struct qeth_ipaddr *todo, *addr;
774 	QETH_DBF_TEXT(trace, 2, "sdiplist");
775 	QETH_DBF_HEX(trace, 2, &card, sizeof(void *));
777 	INIT_LIST_HEAD(&failed_todos);
779 	spin_lock_irqsave(&card->ip_lock, flags);
780 	while (!list_empty(&card->ip_tbd_list)) {
781 		todo = list_entry(card->ip_tbd_list.next,
782 				  struct qeth_ipaddr, entry);
783 		list_del_init(&todo->entry);
784 		rc = __qeth_ref_ip_on_card(card, todo, &addr);
786 			/* nothing to be done; only adjusted refcount */
788 		} else if (rc == 1) {
789 			/* new entry to be added to on-card list */
790 			spin_unlock_irqrestore(&card->ip_lock, flags);
791 			rc = qeth_register_addr_entry(card, todo);
792 			spin_lock_irqsave(&card->ip_lock, flags);
794 				list_add_tail(&todo->entry, &card->ip_list);
796 				list_add_tail(&todo->entry, &failed_todos);
797 		} else if (rc == -1) {
798 			/* on-card entry to be removed */
799 			list_del_init(&addr->entry);
800 			spin_unlock_irqrestore(&card->ip_lock, flags);
801 			rc = qeth_deregister_addr_entry(card, addr);
802 			spin_lock_irqsave(&card->ip_lock, flags);
	/* deregister failure: keep the address on-card, retry the todo */
807 				list_add_tail(&addr->entry, &card->ip_list);
808 				list_add_tail(&todo->entry, &failed_todos);
812 	spin_unlock_irqrestore(&card->ip_lock, flags);
813 	qeth_reinsert_todos(card, &failed_todos);
816 static void qeth_delete_mc_addresses(struct qeth_card *);
817 static void qeth_add_multicast_ipv4(struct qeth_card *);
818 #ifdef CONFIG_QETH_IPV6
819 static void qeth_add_multicast_ipv6(struct qeth_card *);
/*
 * Mark @thread as "to be started". Fails (via the elided return in the
 * early-exit branch) when the thread is not allowed or already pending.
 */
823 qeth_set_thread_start_bit(struct qeth_card *card, unsigned long thread)
827 	spin_lock_irqsave(&card->thread_mask_lock, flags);
828 	if ( !(card->thread_allowed_mask & thread) ||
829 	      (card->thread_start_mask & thread) ) {
830 		spin_unlock_irqrestore(&card->thread_mask_lock, flags);
833 	card->thread_start_mask |= thread;
834 	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
/*
 * Clear @thread from the start mask and wake waiters on card->wait_q.
 */
839 qeth_clear_thread_start_bit(struct qeth_card *card, unsigned long thread)
843 	spin_lock_irqsave(&card->thread_mask_lock, flags);
844 	card->thread_start_mask &= ~thread;
845 	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
846 	wake_up(&card->wait_q);
/*
 * Clear @thread from the running mask and wake waiters on card->wait_q.
 */
850 qeth_clear_thread_running_bit(struct qeth_card *card, unsigned long thread)
854 	spin_lock_irqsave(&card->thread_mask_lock, flags);
855 	card->thread_running_mask &= ~thread;
856 	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
857 	wake_up(&card->wait_q);
/*
 * Atomically transition @thread from "start requested" to "running"
 * when it is allowed and not already running; returns (on elided lines)
 * a value >= 0 once a decision is reached, as required by the
 * wait_event() in qeth_do_run_thread below.
 */
861 __qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
866 	spin_lock_irqsave(&card->thread_mask_lock, flags);
867 	if (card->thread_start_mask & thread){
868 		if ((card->thread_allowed_mask & thread) &&
869 		    !(card->thread_running_mask & thread)){
871 			card->thread_start_mask &= ~thread;
872 			card->thread_running_mask |= thread;
876 	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
/*
 * Block on card->wait_q until __qeth_do_run_thread reaches a decision
 * (returns >= 0) for @thread.
 */
881 qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
885 	wait_event(card->wait_q,
886 		   (rc = __qeth_do_run_thread(card, thread)) >= 0);
/*
 * Kernel-thread entry point (started via kernel_thread): refresh the
 * card's multicast addresses — delete stale ones, re-add IPv4 (and IPv6
 * when configured) groups, push the resulting list to the card, then
 * clear the SET_MC running bit.
 */
891 qeth_register_mc_addresses(void *ptr)
893 	struct qeth_card *card;
895 	card = (struct qeth_card *) ptr;
896 	daemonize("qeth_reg_mcaddrs");
897 	QETH_DBF_TEXT(trace,4,"regmcth1");
898 	if (!qeth_do_run_thread(card, QETH_SET_MC_THREAD))
900 	QETH_DBF_TEXT(trace,4,"regmcth2");
901 	qeth_delete_mc_addresses(card);
902 	qeth_add_multicast_ipv4(card);
903 #ifdef CONFIG_QETH_IPV6
904 	qeth_add_multicast_ipv6(card);
906 	qeth_set_ip_addr_list(card);
907 	qeth_clear_thread_running_bit(card, QETH_SET_MC_THREAD);
/*
 * Kernel-thread entry point: apply pending IP address todos to the card
 * (qeth_set_ip_addr_list), then clear the SET_IP running bit.
 */
912 qeth_register_ip_address(void *ptr)
914 	struct qeth_card *card;
916 	card = (struct qeth_card *) ptr;
917 	daemonize("qeth_reg_ip");
918 	QETH_DBF_TEXT(trace,4,"regipth1");
919 	if (!qeth_do_run_thread(card, QETH_SET_IP_THREAD))
921 	QETH_DBF_TEXT(trace,4,"regipth2");
922 	qeth_set_ip_addr_list(card);
923 	qeth_clear_thread_running_bit(card, QETH_SET_IP_THREAD);
/*
 * Kernel-thread entry point for card recovery: hard-stop the card,
 * take it offline, then bring it back online and report the outcome.
 * Clears both the start and running bits for RECOVER afterwards so a
 * recovery scheduled while we ran is not re-executed.
 */
928 qeth_recover(void *ptr)
930 	struct qeth_card *card;
933 	card = (struct qeth_card *) ptr;
934 	daemonize("qeth_recover");
935 	QETH_DBF_TEXT(trace,2,"recover1");
936 	QETH_DBF_HEX(trace, 2, &card, sizeof(void *));
937 	if (!qeth_do_run_thread(card, QETH_RECOVER_THREAD))
939 	QETH_DBF_TEXT(trace,2,"recover2");
940 	PRINT_WARN("Recovery of device %s started ...\n",
942 	card->use_hard_stop = 1;
943 	qeth_set_offline(card->gdev);
944 	rc = qeth_set_online(card->gdev);
946 		PRINT_INFO("Device %s successfully recovered!\n",
949 		PRINT_INFO("Device %s could not be recovered!\n",
951 	/* don't run another scheduled recovery */
952 	qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
953 	qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD);
/*
 * Request a recovery thread for @card: set the RECOVER start bit and,
 * if it was newly set, schedule the kernel_thread_starter work item.
 */
958 qeth_schedule_recovery(struct qeth_card *card)
960 	QETH_DBF_TEXT(trace,2,"startrec");
962 	if (qeth_set_thread_start_bit(card, QETH_RECOVER_THREAD) == 0)
963 		schedule_work(&card->kernel_thread_starter);
/*
 * Return (via rc, on an elided return line) whether @thread has its
 * start bit set; also traces the three thread masks for debugging.
 */
967 qeth_do_start_thread(struct qeth_card *card, unsigned long thread)
972 	spin_lock_irqsave(&card->thread_mask_lock, flags);
973 	QETH_DBF_TEXT_(trace, 4, "  %02x%02x%02x",
974 			(u8) card->thread_start_mask,
975 			(u8) card->thread_allowed_mask,
976 			(u8) card->thread_running_mask);
977 	rc = (card->thread_start_mask & thread);
978 	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
/*
 * Work-queue handler (card->kernel_thread_starter): spawn a kernel
 * thread for each requested qeth thread type, but only while at least
 * one of the control channels is still up.
 */
983 qeth_start_kernel_thread(struct qeth_card *card)
985 	QETH_DBF_TEXT(trace , 2, "strthrd");
987 	if (card->read.state != CH_STATE_UP &&
988 	    card->write.state != CH_STATE_UP)
991 	if (qeth_do_start_thread(card, QETH_SET_IP_THREAD))
992 		kernel_thread(qeth_register_ip_address, (void *) card, SIGCHLD);
993 	if (qeth_do_start_thread(card, QETH_SET_MC_THREAD))
994 		kernel_thread(qeth_register_mc_addresses, (void *)card,SIGCHLD);
995 	if (qeth_do_start_thread(card, QETH_RECOVER_THREAD))
996 		kernel_thread(qeth_recover, (void *) card, SIGCHLD);
/*
 * Set the card's default option values (routing, checksumming,
 * token-ring broadcast/MAC-address modes, header length, fake_ll).
 * (Function name "intial" is a historical typo kept for compatibility.)
 */
1001 qeth_set_intial_options(struct qeth_card *card)
1003 	card->options.route4.type = NO_ROUTER;
1004 #ifdef CONFIG_QETH_IPV6
1005 	card->options.route6.type = NO_ROUTER;
1006 #endif /* QETH_IPV6 */
1007 	card->options.checksum_type = QETH_CHECKSUM_DEFAULT;
1008 	card->options.broadcast_mode = QETH_TR_BROADCAST_ALLRINGS;
1009 	card->options.macaddr_mode = QETH_TR_MACADDR_NONCANONICAL;
1010 	card->options.fake_broadcast = 0;
1011 	card->options.add_hhlen = DEFAULT_ADD_HHLEN;
1012 	card->options.fake_ll = 0;
1016  * initialize channels ,card and all state machines
1019 qeth_setup_card(struct qeth_card *card)
1022 	QETH_DBF_TEXT(setup, 2, "setupcrd");
1023 	QETH_DBF_HEX(setup, 2, &card, sizeof(void *));
	/* all channels and the card start in DOWN state */
1025 	card->read.state  = CH_STATE_DOWN;
1026 	card->write.state = CH_STATE_DOWN;
1027 	card->data.state  = CH_STATE_DOWN;
1028 	card->state = CARD_STATE_DOWN;
1029 	card->lan_online = 0;
1030 	card->use_hard_stop = 0;
1032 #ifdef CONFIG_QETH_VLAN
1033 	spin_lock_init(&card->vlanlock);
1034 	card->vlangrp = NULL;
1036 	spin_lock_init(&card->ip_lock);
1037 	spin_lock_init(&card->thread_mask_lock);
1038 	card->thread_start_mask = 0;
1039 	card->thread_allowed_mask = 0;
1040 	card->thread_running_mask = 0;
1041 	INIT_WORK(&card->kernel_thread_starter,
1042 		  (void *)qeth_start_kernel_thread,card);
1043 	INIT_LIST_HEAD(&card->ip_list);
1044 	INIT_LIST_HEAD(&card->ip_tbd_list);
1045 	INIT_LIST_HEAD(&card->cmd_waiter_list);
1046 	init_waitqueue_head(&card->wait_q);
1047 	/* initial options */
1048 	qeth_set_intial_options(card);
1049 	/* IP address takeover */
1050 	INIT_LIST_HEAD(&card->ipato.entries);
1051 	card->ipato.enabled = 0;
1052 	card->ipato.invert4 = 0;
1053 	card->ipato.invert6 = 0;
1054 	/* init QDIO stuff */
1055 	qeth_init_qdio_info(card);
/*
 * Match the read device's cu type/model against known_devices
 * (QETH_MODELLIST_ARRAY): columns 2/3 are type/model, 4 is card type,
 * 8 the number of output queues, 9 the multicast-different flag.
 * Falls through to QETH_CARD_TYPE_UNKNOWN on no match.
 */
1060 qeth_determine_card_type(struct qeth_card *card)
1064 	QETH_DBF_TEXT(setup, 2, "detcdtyp");
1066 	while (known_devices[i][4]) {
1067 		if ((CARD_RDEV(card)->id.dev_type == known_devices[i][2]) &&
1068 		    (CARD_RDEV(card)->id.dev_model == known_devices[i][3])) {
1069 			card->info.type = known_devices[i][4];
1070 			card->qdio.no_out_queues = known_devices[i][8];
1071 			card->info.is_multicast_different = known_devices[i][9];
1076 	card->info.type = QETH_CARD_TYPE_UNKNOWN;
1077 	PRINT_ERR("unknown card type on device %s\n", CARD_BUS_ID(card));
/*
 * ccwgroup probe: allocate and set up a qeth_card for @gdev, install
 * qeth_irq as handler on all three ccw devices, create sysfs
 * attributes, determine the card type and link the card into the
 * global card list. Error paths free the card and (on elided lines)
 * drop the device reference.
 */
1082 qeth_probe_device(struct ccwgroup_device *gdev)
1084 	struct qeth_card *card;
1086 	unsigned long flags;
1089 	QETH_DBF_TEXT(setup, 2, "probedev");
1092 	if (!get_device(dev))
1095 	card = qeth_alloc_card();
1098 		QETH_DBF_TEXT_(setup, 2, "1err%d", -ENOMEM);
1101 	if ((rc = qeth_setup_card(card))){
1102 		QETH_DBF_TEXT_(setup, 2, "2err%d", rc);
1104 		qeth_free_card(card);
1107 	gdev->dev.driver_data = card;
	/* all three subchannels share the same interrupt handler */
1109 	gdev->cdev[0]->handler = qeth_irq;
1110 	gdev->cdev[1]->handler = qeth_irq;
1111 	gdev->cdev[2]->handler = qeth_irq;
1113 	rc = qeth_create_device_attributes(dev);
1116 		qeth_free_card(card);
	/* cdev[0]=read, cdev[1]=write, cdev[2]=data */
1119 	card->read.ccwdev  = gdev->cdev[0];
1120 	card->write.ccwdev = gdev->cdev[1];
1121 	card->data.ccwdev  = gdev->cdev[2];
1122 	if ((rc = qeth_determine_card_type(card))){
1123 		PRINT_WARN("%s: not a valid card type\n", __func__);
1124 		QETH_DBF_TEXT_(setup, 2, "3err%d", rc);
1126 		qeth_free_card(card);
1129 	/* insert into our internal list */
1130 	write_lock_irqsave(&qeth_card_list.rwlock, flags);
1131 	list_add_tail(&card->list, &qeth_card_list.list);
1132 	write_unlock_irqrestore(&qeth_card_list.rwlock, flags);
/*
 * Read configuration data from the data device and extract chpid,
 * unit address, cula and the guest-LAN indicator ("VM" in EBCDIC at
 * offsets 0x10/0x11). Offsets 30/31/63 into prcd are taken from the
 * channel-path record layout — verify against the full file/docs.
 */
1138 qeth_get_unitaddr(struct qeth_card *card)
1144 	QETH_DBF_TEXT(setup, 2, "getunit");
1145 	rc = read_conf_data(CARD_DDEV(card), (void **) &prcd, &length);
1147 		PRINT_ERR("read_conf_data for device %s returned %i\n",
1148 			  CARD_DDEV_ID(card), rc);
1151 	card->info.chpid = prcd[30];
1152 	card->info.unit_addr2 = prcd[31];
1153 	card->info.cula = prcd[63];
1154 	card->info.guestlan = ((prcd[0x10] == _ascebc['V']) &&
1155 			       (prcd[0x11] == _ascebc['M']));
/*
 * Initialize the card's write-direction protocol tokens to their fixed
 * start values (used by the CM/ULP setup exchanges).
 */
1160 qeth_init_tokens(struct qeth_card *card)
1162 	card->token.issuer_rm_w = 0x00010103UL;
1163 	card->token.cm_filter_w = 0x00010108UL;
1164 	card->token.cm_connection_w = 0x0001010aUL;
1165 	card->token.ulp_filter_w = 0x0001010bUL;
1166 	card->token.ulp_connection_w = 0x0001010dUL;
/*
 * Parse the last four hex digits of a bus id (e.g. "0.0.1234") into a
 * raw device number.
 */
1170 raw_devno_from_bus_id(char *id)
1172         id += (strlen(id) - 4);
1173         return (__u16) simple_strtoul(id, &id, 16);
/*
 * Prepare the channel's CCW for a transfer of @len bytes from/to @iob:
 * READ_CCW template for the read channel, WRITE_CCW otherwise; the data
 * address is the physical address of the buffer.
 */
1179 qeth_setup_ccw(struct qeth_channel *channel,unsigned char *iob, __u32 len)
1181 	struct qeth_card *card;
1183 	QETH_DBF_TEXT(trace, 4, "setupccw");
1184 	card = CARD_FROM_CDEV(channel->ccwdev);
1185 	if (channel == &card->read)
1186 		memcpy(&channel->ccw, READ_CCW, sizeof(struct ccw1));
1188 		memcpy(&channel->ccw, WRITE_CCW, sizeof(struct ccw1));
1189 	channel->ccw.count = len;
1190 	channel->ccw.cda = (__u32) __pa(iob);
1194  * get free buffer for ccws (IDX activation, lancmds,ipassists...)
1196 static struct qeth_cmd_buffer *
1197 __qeth_get_buffer(struct qeth_channel *channel)
1201 	QETH_DBF_TEXT(trace, 6, "getbuff");
	/* scan the ring starting at io_buf_no for a FREE buffer;
	 * caller holds iob_lock (see qeth_get_buffer) */
1202 	index = channel->io_buf_no;
1204 		if (channel->iob[index].state == BUF_STATE_FREE) {
1205 			channel->iob[index].state = BUF_STATE_LOCKED;
1206 			channel->io_buf_no = (channel->io_buf_no + 1) %
1208 			memset(channel->iob[index].data, 0, QETH_BUFSIZE);
1209 			return channel->iob + index;
1211 		index = (index + 1) % QETH_CMD_BUFFER_NO;
1212 	} while(index != channel->io_buf_no);
1218  * release command buffer
1221 qeth_release_buffer(struct qeth_channel *channel, struct qeth_cmd_buffer *iob)
1223 	unsigned long flags;
1225 	QETH_DBF_TEXT(trace, 6, "relbuff");
1226 	spin_lock_irqsave(&channel->iob_lock, flags);
	/* wipe the data and restore the default control-data callback */
1227 	memset(iob->data, 0, QETH_BUFSIZE);
1228 	iob->state = BUF_STATE_FREE;
1229 	iob->callback = qeth_send_control_data_cb;
1231 	spin_unlock_irqrestore(&channel->iob_lock, flags);
/*
 * Locked wrapper around __qeth_get_buffer; returns NULL when no command
 * buffer is currently free.
 */
1234 static struct qeth_cmd_buffer *
1235 qeth_get_buffer(struct qeth_channel *channel)
1237 	struct qeth_cmd_buffer *buffer = NULL;
1238 	unsigned long flags;
1240 	spin_lock_irqsave(&channel->iob_lock, flags);
1241 	buffer = __qeth_get_buffer(channel);
1242 	spin_unlock_irqrestore(&channel->iob_lock, flags);
/*
 * Sleep on the channel's wait queue until a command buffer becomes
 * available, then return it.
 */
1246 static struct qeth_cmd_buffer *
1247 qeth_wait_for_buffer(struct qeth_channel *channel)
1249 	struct qeth_cmd_buffer *buffer;
1250 	wait_event(channel->wait_q,
1251 		   ((buffer = qeth_get_buffer(channel)) != NULL));
/*
 * Release every command buffer on @channel and reset both ring indices.
 */
1256 qeth_clear_cmd_buffers(struct qeth_channel *channel)
1260 	for (cnt=0; cnt < QETH_CMD_BUFFER_NO; cnt++)
1261 		qeth_release_buffer(channel,&channel->iob[cnt]);
1262 	channel->buf_no = 0;
1263 	channel->io_buf_no = 0;
1267  * start IDX for read and write channel
1270 qeth_idx_activate_get_answer(struct qeth_channel *channel,
1271 			     void (*idx_reply_cb)(struct qeth_channel *,
1272 						  struct qeth_cmd_buffer *))
1274 	struct qeth_cmd_buffer *iob;
1275 	unsigned long flags;
1277 	struct qeth_card *card;
1279 	QETH_DBF_TEXT(setup, 2, "idxanswr");
1280 	card = CARD_FROM_CDEV(channel->ccwdev);
	/* post a read to receive the IDX activation reply; the reply is
	 * dispatched to @idx_reply_cb from the channel tasklet */
1281 	iob = qeth_get_buffer(channel);
1282 	iob->callback = idx_reply_cb;
1283 	memcpy(&channel->ccw, READ_CCW, sizeof(struct ccw1));
1284 	channel->ccw.count = QETH_BUFSIZE;
1285 	channel->ccw.cda = (__u32) __pa(iob->data);
	/* serialize: wait until no other I/O is pending on this channel */
1287 	wait_event(card->wait_q,
1288 		   atomic_compare_and_swap(0,1,&channel->irq_pending) == 0);
1289 	QETH_DBF_TEXT(setup, 6, "noirqpnd");
1290 	spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
1291 	rc = ccw_device_start(channel->ccwdev,
1292 			      &channel->ccw,(addr_t) iob, 0, 0);
1293 	spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
1296 		PRINT_ERR("qeth: Error2 in activating channel rc=%d\n",rc);
1297 		QETH_DBF_TEXT_(setup, 2, "2err%d", rc);
1298 		atomic_set(&channel->irq_pending, 0);
1299 		wake_up(&card->wait_q);
	/* wait (interruptibly, bounded by QETH_TIMEOUT) for CH_STATE_UP */
1302 	rc = wait_event_interruptible_timeout(card->wait_q,
1303 		 channel->state == CH_STATE_UP, QETH_TIMEOUT);
1304 	if (rc == -ERESTARTSYS)
1306 	if (channel->state != CH_STATE_UP){
1308 		QETH_DBF_TEXT_(setup, 2, "3err%d", rc);
/*
 * Send the IDX ACTIVATE command on @channel (write or read variant),
 * filling in the transport-header sequence number, issuer token,
 * function level and the QDIO device/unit addresses, then wait for the
 * channel to reach ACTIVATING and hand off to
 * qeth_idx_activate_get_answer to collect the reply via @idx_reply_cb.
 */
1315 qeth_idx_activate_channel(struct qeth_channel *channel,
1316 			   void (*idx_reply_cb)(struct qeth_channel *,
1317 						struct qeth_cmd_buffer *))
1319 	struct qeth_card *card;
1320 	struct qeth_cmd_buffer *iob;
1321 	unsigned long flags;
1325 	card = CARD_FROM_CDEV(channel->ccwdev);
1327 	QETH_DBF_TEXT(setup, 2, "idxactch");
1329 	iob = qeth_get_buffer(channel);
1330 	iob->callback = idx_reply_cb;
1331 	memcpy(&channel->ccw, WRITE_CCW, sizeof(struct ccw1));
1332 	channel->ccw.count = IDX_ACTIVATE_SIZE;
1333 	channel->ccw.cda = (__u32) __pa(iob->data);
	/* write channel uses (and advances) the transport-header seqno;
	 * the read channel sends the current value unchanged */
1334 	if (channel == &card->write) {
1335 		memcpy(iob->data, IDX_ACTIVATE_WRITE, IDX_ACTIVATE_SIZE);
1336 		memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data),
1337 		       &card->seqno.trans_hdr, QETH_SEQ_NO_LENGTH);
1338 		card->seqno.trans_hdr++;
1340 		memcpy(iob->data, IDX_ACTIVATE_READ, IDX_ACTIVATE_SIZE);
1341 		memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data),
1342 		       &card->seqno.trans_hdr, QETH_SEQ_NO_LENGTH);
1344 	memcpy(QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data),
1345 	       &card->token.issuer_rm_w,QETH_MPC_TOKEN_LENGTH);
1346 	memcpy(QETH_IDX_ACT_FUNC_LEVEL(iob->data),
1347 	       &card->info.func_level,sizeof(__u16));
1348 	temp = raw_devno_from_bus_id(CARD_DDEV_ID(card));
1349 	memcpy(QETH_IDX_ACT_QDIO_DEV_CUA(iob->data), &temp, 2);
1350 	temp = (card->info.cula << 8) + card->info.unit_addr2;
1351 	memcpy(QETH_IDX_ACT_QDIO_DEV_REALADDR(iob->data), &temp, 2);
1353 	wait_event(card->wait_q,
1354 		   atomic_compare_and_swap(0,1,&channel->irq_pending) == 0);
1355 	QETH_DBF_TEXT(setup, 6, "noirqpnd");
1356 	spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
1357 	rc = ccw_device_start(channel->ccwdev,
1358 			      &channel->ccw,(addr_t) iob, 0, 0);
1359 	spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
1362 		PRINT_ERR("qeth: Error1 in activating channel. rc=%d\n",rc);
1363 		QETH_DBF_TEXT_(setup, 2, "1err%d", rc);
1364 		atomic_set(&channel->irq_pending, 0);
1365 		wake_up(&card->wait_q);
1368 	rc = wait_event_interruptible_timeout(card->wait_q,
1369 			channel->state == CH_STATE_ACTIVATING, QETH_TIMEOUT);
1370 	if (rc == -ERESTARTSYS)
1372 	if (channel->state != CH_STATE_ACTIVATING) {
1373 		PRINT_WARN("qeth: IDX activate timed out!\n");
1374 		QETH_DBF_TEXT_(setup, 2, "2err%d", -ETIME);
1377 	return qeth_idx_activate_get_answer(channel,idx_reply_cb);
/*
 * qeth_peer_func_level - compute the function level the peer is expected
 * to report in its IDX ACTIVATE reply, given our own function level.
 *
 * Two classes of levels are adjusted (low byte == 8 -> +0x400, bits 8..9
 * == 1 -> +0x200); every other level is expected back unchanged.  The
 * extracted code was missing the fall-through return, which is undefined
 * behavior in a value-returning function; restored here.
 */
static int
qeth_peer_func_level(int level)
{
	if ((level & 0xff) == 8)
		return (level & 0xff) + 0x400;
	if (((level >> 8) & 3) == 1)
		return (level & 0xff) + 0x200;
	/* no adjustment needed for any other level */
	return level;
}
/*
 * qeth_idx_write_cb - IDX ACTIVATE reply callback for the write channel.
 * Validates the reply (positive indication, matching function level) and
 * moves the channel to CH_STATE_UP.  NOTE(review): gaps in the embedded
 * numbering indicate elided lines (e.g. the declaration of temp and
 * early-out braces).
 */
1391 qeth_idx_write_cb(struct qeth_channel *channel, struct qeth_cmd_buffer *iob)
1393 struct qeth_card *card;
1396 QETH_DBF_TEXT(setup ,2, "idxwrcb");
/* first interrupt on a DOWN channel only advances the state machine */
1398 if (channel->state == CH_STATE_DOWN) {
1399 channel->state = CH_STATE_ACTIVATING;
1402 card = CARD_FROM_CDEV(channel->ccwdev);
1404 if (!(QETH_IS_IDX_ACT_POS_REPLY(iob->data))) {
1405 PRINT_ERR("IDX_ACTIVATE on write channel device %s: negative "
1406 "reply\n", CARD_WDEV_ID(card));
/* peer function level must match ours (bit 0x0100 is masked out) */
1409 memcpy(&temp, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
1410 if ((temp & ~0x0100) != qeth_peer_func_level(card->info.func_level)) {
1411 PRINT_WARN("IDX_ACTIVATE on write channel device %s: "
1412 "function level mismatch "
1413 "(sent: 0x%x, received: 0x%x)\n",
1414 CARD_WDEV_ID(card), card->info.func_level, temp);
1417 channel->state = CH_STATE_UP;
1419 qeth_release_buffer(channel, iob);
/*
 * qeth_check_idx_response - inspect a received control buffer for an IDX
 * TERMINATE indication (both top bits of byte 2 set).  Logs the cause
 * code; cause 0x22 hints at a wrong portname.  Return value lines are
 * elided in this extract; presumably returns 0 on OK, -EIO on terminate
 * (matches the " rc%d" trace with -EIO) -- TODO confirm.
 */
1423 qeth_check_idx_response(unsigned char *buffer)
1428 QETH_DBF_HEX(control, 2, buffer, QETH_DBF_CONTROL_LEN);
1429 if ((buffer[2] & 0xc0) == 0xc0) {
1430 PRINT_WARN("received an IDX TERMINATE "
1431 "with cause code 0x%02x%s\n",
1433 ((buffer[4] == 0x22) ?
1434 " -- try another portname" : ""));
1435 QETH_DBF_TEXT(trace, 2, "ckidxres");
1436 QETH_DBF_TEXT(trace, 2, " idxterm");
1437 QETH_DBF_TEXT_(trace, 2, " rc%d", -EIO);
/*
 * qeth_idx_read_cb - IDX ACTIVATE reply callback for the read channel.
 * Validates the reply, records the portname requirement, the issuer
 * token and the microcode (MCL) level, then marks the channel UP.
 * NOTE(review): declarations (e.g. temp) and some braces are elided in
 * this extract.
 */
1444 qeth_idx_read_cb(struct qeth_channel *channel, struct qeth_cmd_buffer *iob)
1446 struct qeth_card *card;
1449 QETH_DBF_TEXT(setup , 2, "idxrdcb");
/* first interrupt on a DOWN channel only advances the state machine */
1450 if (channel->state == CH_STATE_DOWN) {
1451 channel->state = CH_STATE_ACTIVATING;
1455 card = CARD_FROM_CDEV(channel->ccwdev);
1456 if (qeth_check_idx_response(iob->data)) {
1459 if (!(QETH_IS_IDX_ACT_POS_REPLY(iob->data))) {
1460 PRINT_ERR("IDX_ACTIVATE on read channel device %s: negative "
1461 "reply\n", CARD_RDEV_ID(card));
1466 * temporary fix for microcode bug
1467 * to revert it,replace OR by AND
1469 if ( (!QETH_IDX_NO_PORTNAME_REQUIRED(iob->data)) ||
1470 (card->info.type == QETH_CARD_TYPE_OSAE) )
1471 card->info.portname_required = 1;
/* read channel requires an exact function-level match (no masking) */
1473 memcpy(&temp, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
1474 if (temp != qeth_peer_func_level(card->info.func_level)) {
1475 PRINT_WARN("IDX_ACTIVATE on read channel device %s: function "
1476 "level mismatch (sent: 0x%x, received: 0x%x)\n",
1477 CARD_RDEV_ID(card), card->info.func_level, temp);
/* remember the peer's issuer token and microcode level for later MPC
 * commands */
1480 memcpy(&card->token.issuer_rm_r,
1481 QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data),
1482 QETH_MPC_TOKEN_LENGTH);
1483 memcpy(&card->info.mcl_level[0],
1484 QETH_IDX_REPLY_LEVEL(iob->data), QETH_MCL_LENGTH);
1485 channel->state = CH_STATE_UP;
1487 qeth_release_buffer(channel,iob);
/*
 * qeth_issue_next_read - (re)arm the read channel with a fresh command
 * buffer so the next incoming control message can be received.  On
 * ccw_device_start failure the pending flag is cleared and recovery is
 * scheduled.
 */
1491 qeth_issue_next_read(struct qeth_card *card)
1494 struct qeth_cmd_buffer *iob;
1496 QETH_DBF_TEXT(trace,5,"issnxrd");
1497 if (card->read.state != CH_STATE_UP)
1499 iob = qeth_get_buffer(&card->read);
1501 PRINT_WARN("issue_next_read failed: no iob available!\n");
1504 qeth_setup_ccw(&card->read, iob->data, QETH_BUFSIZE);
/* claim the read channel before starting the CCW */
1505 wait_event(card->wait_q,
1506 atomic_compare_and_swap(0,1,&card->read.irq_pending) == 0);
1507 QETH_DBF_TEXT(trace, 6, "noirqpnd");
1508 rc = ccw_device_start(card->read.ccwdev, &card->read.ccw,
1509 (addr_t) iob, 0, 0);
1511 PRINT_ERR("Error in starting next read ccw! rc=%i\n", rc);
1512 atomic_set(&card->read.irq_pending, 0);
1513 qeth_schedule_recovery(card);
1514 wake_up(&card->wait_q);
/*
 * qeth_alloc_reply - allocate and zero a qeth_reply tracking structure
 * with an initial refcount of 1.  GFP_ATOMIC because callers may hold
 * locks / run in non-sleeping context -- TODO confirm against callers.
 */
1519 static struct qeth_reply *
1520 qeth_alloc_reply(struct qeth_card *card)
1522 struct qeth_reply *reply;
1524 reply = kmalloc(sizeof(struct qeth_reply), GFP_ATOMIC);
1526 memset(reply, 0, sizeof(struct qeth_reply));
1527 atomic_set(&reply->refcnt, 1);
/* qeth_get_reply - take an additional reference on a reply object. */
1534 qeth_get_reply(struct qeth_reply *reply)
1536 WARN_ON(atomic_read(&reply->refcnt) <= 0);
1537 atomic_inc(&reply->refcnt);
/* qeth_put_reply - drop a reference; frees the reply when it hits zero
 * (the kfree on the elided following line, presumably). */
1541 qeth_put_reply(struct qeth_reply *reply)
1543 WARN_ON(atomic_read(&reply->refcnt) <= 0);
1544 if (atomic_dec_and_test(&reply->refcnt))
/*
 * qeth_cmd_timeout - timer callback for a pending control command.  If
 * the reply is still on the card's waiter list, remove it, mark it
 * received and wake the sleeping issuer (which will then observe the
 * timeout).  data is the qeth_reply pointer cast to unsigned long.
 */
1549 qeth_cmd_timeout(unsigned long data)
1551 struct qeth_reply *reply, *list_reply, *r;
1552 unsigned long flags;
1554 reply = (struct qeth_reply *) data;
1555 spin_lock_irqsave(&reply->card->lock, flags);
1556 list_for_each_entry_safe(list_reply, r,
1557 &reply->card->cmd_waiter_list, list) {
1558 if (reply == list_reply){
/* hold a ref across the unlock so the wakeup can't race a free */
1559 qeth_get_reply(reply);
1560 list_del_init(&reply->list);
1561 spin_unlock_irqrestore(&reply->card->lock, flags);
1563 reply->received = 1;
1564 wake_up(&reply->wait_q);
1565 qeth_put_reply(reply);
1569 spin_unlock_irqrestore(&reply->card->lock, flags);
/*
 * qeth_reset_ip_addresses - clear the card's IP list and kick the
 * kernel-thread starter to re-register IP and multicast addresses.
 */
1573 qeth_reset_ip_addresses(struct qeth_card *card)
1575 QETH_DBF_TEXT(trace, 2, "rstipadd");
1577 qeth_clear_ip_list(card, 0, 1);
/* only schedule the worker if at least one start bit was newly set */
1578 if ( (qeth_set_thread_start_bit(card, QETH_SET_IP_THREAD) == 0) ||
1579 (qeth_set_thread_start_bit(card, QETH_SET_MC_THREAD) == 0) )
1580 schedule_work(&card->kernel_thread_starter);
/*
 * qeth_check_ipa_data - examine a received control buffer: if it is an
 * IPA message, return the embedded command for reply matching, and
 * handle unsolicited card-initiated commands (STOPLAN/STARTLAN link
 * events, local address (un)registration) inline.  Returns NULL for
 * non-IPA data; elided lines presumably return cmd for replies -- TODO
 * confirm.
 */
1583 static struct qeth_ipa_cmd *
1584 qeth_check_ipa_data(struct qeth_card *card, struct qeth_cmd_buffer *iob)
1586 struct qeth_ipa_cmd *cmd = NULL;
1588 QETH_DBF_TEXT(trace,5,"chkipad");
1589 if (IS_IPA(iob->data)){
1590 cmd = (struct qeth_ipa_cmd *) PDU_ENCAPSULATION(iob->data);
1591 if (IS_IPA_REPLY(cmd))
/* not a reply: this is a card-initiated command */
1594 switch (cmd->hdr.command) {
1595 case IPA_CMD_STOPLAN:
/* link went down: remember it and stop the queue */
1596 PRINT_WARN("Link failure on %s (CHPID 0x%X) - "
1597 "there is a network problem or "
1598 "someone pulled the cable or "
1599 "disabled the port.\n",
1602 card->lan_online = 0;
1603 if (netif_carrier_ok(card->dev)) {
1604 netif_carrier_off(card->dev);
1605 netif_stop_queue(card->dev);
1608 case IPA_CMD_STARTLAN:
/* link came back: restart the queue and re-register addresses */
1609 PRINT_INFO("Link reestablished on %s "
1610 "(CHPID 0x%X). Scheduling "
1611 "IP address reset.\n",
1614 card->lan_online = 1;
1615 if (!netif_carrier_ok(card->dev)) {
1616 netif_carrier_on(card->dev);
1617 netif_wake_queue(card->dev);
1619 qeth_reset_ip_addresses(card);
1621 case IPA_CMD_REGISTER_LOCAL_ADDR:
1622 QETH_DBF_TEXT(trace,3, "irla");
1624 case IPA_CMD_UNREGISTER_LOCAL_ADDR:
1625 PRINT_WARN("probably problem on %s: "
1626 "received IPA command 0x%X\n",
1631 PRINT_WARN("Received data is IPA "
1632 "but not a reply!\n");
1641 * wake all waiting ipa commands
/*
 * qeth_clear_ipacmd_list - abort every pending control command: mark
 * each waiting reply received (with rc presumably set on an elided
 * line) and wake its issuer.  Used on IDX terminate / recovery.
 */
1644 qeth_clear_ipacmd_list(struct qeth_card *card)
1646 struct qeth_reply *reply, *r;
1647 unsigned long flags;
1649 QETH_DBF_TEXT(trace, 4, "clipalst");
1651 spin_lock_irqsave(&card->lock, flags);
1652 list_for_each_entry_safe(reply, r, &card->cmd_waiter_list, list) {
/* hold a ref while manipulating/wrapping up this entry */
1653 qeth_get_reply(reply);
1655 reply->received = 1;
1656 list_del_init(&reply->list);
1657 wake_up(&reply->wait_q);
1658 qeth_put_reply(reply);
1660 spin_unlock_irqrestore(&card->lock, flags);
/*
 * qeth_send_control_data_cb - completion callback for received control
 * data.  Matches the incoming message (by IPA seqno, or the IDX seqno
 * for pre-UP commands) against the card's waiter list, invokes the
 * registered per-reply callback, and wakes the issuer unless the
 * callback asked to keep waiting.  NOTE(review): several lines (e.g.
 * the keep_reply declaration, some braces/else arms) are elided in this
 * extract.
 */
1664 qeth_send_control_data_cb(struct qeth_channel *channel,
1665 struct qeth_cmd_buffer *iob)
1667 struct qeth_card *card;
1668 struct qeth_reply *reply, *r;
1669 struct qeth_ipa_cmd *cmd;
1670 unsigned long flags;
1673 QETH_DBF_TEXT(trace,4,"sndctlcb");
1675 card = CARD_FROM_CDEV(channel->ccwdev);
/* IDX terminate: abort all waiters and trigger recovery */
1676 if (qeth_check_idx_response(iob->data)) {
1677 qeth_clear_ipacmd_list(card);
1678 qeth_schedule_recovery(card);
1682 cmd = qeth_check_ipa_data(card, iob);
1683 if ((cmd == NULL) && (card->state != CARD_STATE_DOWN))
1686 spin_lock_irqsave(&card->lock, flags);
1687 list_for_each_entry_safe(reply, r, &card->cmd_waiter_list, list) {
1688 if ((reply->seqno == QETH_IDX_COMMAND_SEQNO) ||
1689 ((cmd) && (reply->seqno == cmd->hdr.seqno))) {
1690 qeth_get_reply(reply);
1691 list_del_init(&reply->list);
1692 spin_unlock_irqrestore(&card->lock, flags);
/* callback gets the IPA cmd if present, the raw iob otherwise */
1694 if (reply->callback != NULL) {
1696 reply->offset = (__u16)((char*)cmd -
1698 keep_reply = reply->callback(card,
1700 (unsigned long)cmd);
1703 keep_reply = reply->callback(card,
1705 (unsigned long)iob);
1708 reply->rc = (s16) cmd->hdr.return_code;
1710 reply->rc = iob->rc;
/* callback wants more data: requeue the reply instead of waking */
1712 spin_lock_irqsave(&card->lock, flags);
1713 list_add_tail(&reply->list,
1714 &card->cmd_waiter_list);
1715 spin_unlock_irqrestore(&card->lock, flags);
1717 reply->received = 1;
1718 wake_up(&reply->wait_q);
1720 qeth_put_reply(reply);
1724 spin_unlock_irqrestore(&card->lock, flags);
/* record the peer's PDU-header seqno for acking in our next send */
1726 memcpy(&card->seqno.pdu_hdr_ack,
1727 QETH_PDU_HEADER_SEQ_NO(iob->data),
1728 QETH_SEQ_NO_LENGTH);
1729 qeth_release_buffer(channel,iob);
/*
 * qeth_send_control_data - synchronously issue a control command on the
 * write channel: stamp sequence numbers into the buffer, register a
 * reply with a timeout timer, start the CCW and sleep until the reply
 * callback (or the timer) marks the reply received.  reply_cb is
 * invoked from qeth_send_control_data_cb with reply_param.
 * NOTE(review): elided lines include the rc declaration, add_timer,
 * and the final return.
 */
1733 qeth_send_control_data(struct qeth_card *card, int len,
1734 struct qeth_cmd_buffer *iob,
1736 (struct qeth_card *, struct qeth_reply*, unsigned long),
1741 unsigned long flags;
1742 struct qeth_reply *reply;
1743 struct timer_list timer;
1745 QETH_DBF_TEXT(trace, 2, "sendctl");
1747 qeth_setup_ccw(&card->write,iob->data,len);
/* stamp transport and PDU sequence numbers, plus the ack of the last
 * PDU seqno we received from the card */
1749 memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data),
1750 &card->seqno.trans_hdr, QETH_SEQ_NO_LENGTH);
1751 card->seqno.trans_hdr++;
1753 memcpy(QETH_PDU_HEADER_SEQ_NO(iob->data),
1754 &card->seqno.pdu_hdr, QETH_SEQ_NO_LENGTH);
1755 card->seqno.pdu_hdr++;
1756 memcpy(QETH_PDU_HEADER_ACK_SEQ_NO(iob->data),
1757 &card->seqno.pdu_hdr_ack, QETH_SEQ_NO_LENGTH);
1758 iob->callback = qeth_release_buffer;
1760 reply = qeth_alloc_reply(card);
1762 PRINT_WARN("Could no alloc qeth_reply!\n");
1765 reply->callback = reply_cb;
1766 reply->param = reply_param;
/* before the card is up, replies are matched by the fixed IDX seqno */
1767 if (card->state == CARD_STATE_DOWN)
1768 reply->seqno = QETH_IDX_COMMAND_SEQNO;
1770 reply->seqno = card->seqno.ipa++;
/* arm a safety timer so a lost reply cannot block us forever */
1772 timer.function = qeth_cmd_timeout;
1773 timer.data = (unsigned long) reply;
1774 if (IS_IPA(iob->data))
1775 timer.expires = jiffies + QETH_IPA_TIMEOUT;
1777 timer.expires = jiffies + QETH_TIMEOUT;
1778 init_waitqueue_head(&reply->wait_q);
1779 spin_lock_irqsave(&card->lock, flags);
1780 list_add_tail(&reply->list, &card->cmd_waiter_list);
1781 spin_unlock_irqrestore(&card->lock, flags);
1782 QETH_DBF_HEX(control, 2, iob->data, QETH_DBF_CONTROL_LEN);
/* claim the write channel, then start the CCW under the ccwdev lock */
1783 wait_event(card->wait_q,
1784 atomic_compare_and_swap(0,1,&card->write.irq_pending) == 0);
1785 QETH_DBF_TEXT(trace, 6, "noirqpnd");
1786 spin_lock_irqsave(get_ccwdev_lock(card->write.ccwdev), flags);
1787 rc = ccw_device_start(card->write.ccwdev, &card->write.ccw,
1788 (addr_t) iob, 0, 0);
1789 spin_unlock_irqrestore(get_ccwdev_lock(card->write.ccwdev), flags);
/* start failed: unwind the reply registration and release the channel */
1791 PRINT_WARN("qeth_send_control_data: "
1792 "ccw_device_start rc = %i\n", rc);
1793 QETH_DBF_TEXT_(trace, 2, " err%d", rc);
1794 spin_lock_irqsave(&card->lock, flags);
1795 list_del_init(&reply->list);
1796 qeth_put_reply(reply);
1797 spin_unlock_irqrestore(&card->lock, flags);
1798 qeth_release_buffer(iob->channel, iob);
1799 atomic_set(&card->write.irq_pending, 0);
1800 wake_up(&card->wait_q);
/* sleep until the reply callback or the timeout marks us received */
1804 wait_event(reply->wait_q, reply->received);
1805 del_timer_sync(&timer);
1807 qeth_put_reply(reply);
/*
 * qeth_send_ipa_cmd - wrap an IPA command with the standard PDU header
 * and the ULP connection token, then send it via
 * qeth_send_control_data().
 */
1812 qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
1814 (struct qeth_card *,struct qeth_reply*, unsigned long),
1819 QETH_DBF_TEXT(trace,4,"sendipa");
1821 memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE);
1822 memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data),
1823 &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
1825 rc = qeth_send_control_data(card, IPA_CMD_LENGTH, iob,
1826 reply_cb, reply_param);
/*
 * qeth_cm_enable_cb - reply callback for CM ENABLE: store the filter
 * token returned by the card.  data is the reply qeth_cmd_buffer.
 */
1832 qeth_cm_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
1835 struct qeth_cmd_buffer *iob;
1837 QETH_DBF_TEXT(setup, 2, "cmenblcb");
1839 iob = (struct qeth_cmd_buffer *) data;
1840 memcpy(&card->token.cm_filter_r,
1841 QETH_CM_ENABLE_RESP_FILTER_TOKEN(iob->data),
1842 QETH_MPC_TOKEN_LENGTH);
1843 QETH_DBF_TEXT_(setup, 2, " rc%d", iob->rc);
/*
 * qeth_cm_enable - build and send the CM ENABLE command (issuer token
 * plus our filter token); the reply is handled by qeth_cm_enable_cb.
 */
1848 qeth_cm_enable(struct qeth_card *card)
1851 struct qeth_cmd_buffer *iob;
1853 QETH_DBF_TEXT(setup,2,"cmenable");
1855 iob = qeth_wait_for_buffer(&card->write);
1856 memcpy(iob->data, CM_ENABLE, CM_ENABLE_SIZE);
1857 memcpy(QETH_CM_ENABLE_ISSUER_RM_TOKEN(iob->data),
1858 &card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH);
1859 memcpy(QETH_CM_ENABLE_FILTER_TOKEN(iob->data),
1860 &card->token.cm_filter_w, QETH_MPC_TOKEN_LENGTH);
1862 rc = qeth_send_control_data(card, CM_ENABLE_SIZE, iob,
1863 qeth_cm_enable_cb, NULL);
/*
 * qeth_cm_setup_cb - reply callback for CM SETUP: store the connection
 * token returned by the card.
 */
1868 qeth_cm_setup_cb(struct qeth_card *card, struct qeth_reply *reply,
1872 struct qeth_cmd_buffer *iob;
1874 QETH_DBF_TEXT(setup, 2, "cmsetpcb");
1876 iob = (struct qeth_cmd_buffer *) data;
1877 memcpy(&card->token.cm_connection_r,
1878 QETH_CM_SETUP_RESP_DEST_ADDR(iob->data),
1879 QETH_MPC_TOKEN_LENGTH);
1880 QETH_DBF_TEXT_(setup, 2, " rc%d", iob->rc);
/*
 * qeth_cm_setup - build and send the CM SETUP command using the tokens
 * collected so far (issuer, connection, filter); reply handled by
 * qeth_cm_setup_cb.
 */
1885 qeth_cm_setup(struct qeth_card *card)
1888 struct qeth_cmd_buffer *iob;
1890 QETH_DBF_TEXT(setup,2,"cmsetup");
1892 iob = qeth_wait_for_buffer(&card->write);
1893 memcpy(iob->data, CM_SETUP, CM_SETUP_SIZE);
1894 memcpy(QETH_CM_SETUP_DEST_ADDR(iob->data),
1895 &card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH);
1896 memcpy(QETH_CM_SETUP_CONNECTION_TOKEN(iob->data),
1897 &card->token.cm_connection_w, QETH_MPC_TOKEN_LENGTH);
1898 memcpy(QETH_CM_SETUP_FILTER_TOKEN(iob->data),
1899 &card->token.cm_filter_r, QETH_MPC_TOKEN_LENGTH);
1900 rc = qeth_send_control_data(card, CM_SETUP_SIZE, iob,
1901 qeth_cm_setup_cb, NULL);
/*
 * qeth_ulp_enable_cb - reply callback for ULP ENABLE: store the ULP
 * filter token, derive initial/max MTU and inbound buffer size (either
 * from the framesize reported in the reply or from card-type defaults),
 * and record the link type if the reply carries one.  NOTE(review):
 * declarations for len/link_type and some else arms are elided.
 */
1907 qeth_ulp_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
1911 __u16 mtu, framesize;
1914 struct qeth_cmd_buffer *iob;
1916 QETH_DBF_TEXT(setup, 2, "ulpenacb");
1918 iob = (struct qeth_cmd_buffer *) data;
1919 memcpy(&card->token.ulp_filter_r,
1920 QETH_ULP_ENABLE_RESP_FILTER_TOKEN(iob->data),
1921 QETH_MPC_TOKEN_LENGTH);
/* MTU from the MPC reply when the card type supports it */
1922 if (qeth_get_mtu_out_of_mpc(card->info.type)) {
1923 memcpy(&framesize, QETH_ULP_ENABLE_RESP_MAX_MTU(iob->data), 2);
1924 mtu = qeth_get_mtu_outof_framesize(framesize);
1927 QETH_DBF_TEXT_(setup, 2, " rc%d", iob->rc);
1930 card->info.max_mtu = mtu;
1931 card->info.initial_mtu = mtu;
/* inbound buffers need the MTU plus two pages of headroom */
1932 card->qdio.in_buf_size = mtu + 2 * PAGE_SIZE;
1934 card->info.initial_mtu = qeth_get_initial_mtu_for_card(card);
1935 card->info.max_mtu = qeth_get_max_mtu_for_card(card->info.type);
1936 card->qdio.in_buf_size = QETH_IN_BUF_SIZE_DEFAULT;
/* a long-enough DIFINFO section carries the link type byte */
1939 memcpy(&len, QETH_ULP_ENABLE_RESP_DIFINFO_LEN(iob->data), 2);
1940 if (len >= QETH_MPC_DIFINFO_LEN_INDICATES_LINK_TYPE) {
1942 QETH_ULP_ENABLE_RESP_LINK_TYPE(iob->data), 1);
1943 card->info.link_type = link_type;
1945 card->info.link_type = 0;
1946 QETH_DBF_TEXT_(setup, 2, " rc%d", iob->rc);
/*
 * qeth_ulp_enable - build and send the ULP ENABLE command with port
 * number, tokens and portname; reply handled by qeth_ulp_enable_cb.
 */
1951 qeth_ulp_enable(struct qeth_card *card)
1954 struct qeth_cmd_buffer *iob;
1956 /*FIXME: trace view callbacks*/
1957 QETH_DBF_TEXT(setup,2,"ulpenabl");
1959 iob = qeth_wait_for_buffer(&card->write);
1960 memcpy(iob->data, ULP_ENABLE, ULP_ENABLE_SIZE);
1962 *(QETH_ULP_ENABLE_LINKNUM(iob->data)) =
1963 (__u8) card->info.portno;
1965 memcpy(QETH_ULP_ENABLE_DEST_ADDR(iob->data),
1966 &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
1967 memcpy(QETH_ULP_ENABLE_FILTER_TOKEN(iob->data),
1968 &card->token.ulp_filter_w, QETH_MPC_TOKEN_LENGTH);
/* 9 bytes: length/flag byte plus the 8-character portname */
1969 memcpy(QETH_ULP_ENABLE_PORTNAME_AND_LL(iob->data),
1970 card->info.portname, 9);
1971 rc = qeth_send_control_data(card, ULP_ENABLE_SIZE, iob,
1972 qeth_ulp_enable_cb, NULL);
/*
 * __raw_devno_from_bus_id - extract the device number from a ccw bus id
 * string (e.g. "0.0.4711") by parsing its last four characters as hex.
 * Assumes id is at least 4 characters long -- TODO confirm callers.
 */
1978 __raw_devno_from_bus_id(char *id)
1980 id += (strlen(id) - 4);
1981 return (__u16) simple_strtoul(id, &id, 16);
/*
 * qeth_ulp_setup_cb - reply callback for ULP SETUP: store the ULP
 * connection token returned by the card.
 */
1985 qeth_ulp_setup_cb(struct qeth_card *card, struct qeth_reply *reply,
1988 struct qeth_cmd_buffer *iob;
1990 QETH_DBF_TEXT(setup, 2, "ulpstpcb");
1992 iob = (struct qeth_cmd_buffer *) data;
1993 memcpy(&card->token.ulp_connection_r,
1994 QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(iob->data),
1995 QETH_MPC_TOKEN_LENGTH);
1996 QETH_DBF_TEXT_(setup, 2, " rc%d", iob->rc);
/*
 * qeth_ulp_setup - build and send the ULP SETUP command: tokens plus the
 * data device's device number (CUA) and real device address; the reply
 * is handled by qeth_ulp_setup_cb.  temp's declaration is on an elided
 * line.
 */
2001 qeth_ulp_setup(struct qeth_card *card)
2005 struct qeth_cmd_buffer *iob;
2007 QETH_DBF_TEXT(setup,2,"ulpsetup");
2009 iob = qeth_wait_for_buffer(&card->write);
2010 memcpy(iob->data, ULP_SETUP, ULP_SETUP_SIZE);
2012 memcpy(QETH_ULP_SETUP_DEST_ADDR(iob->data),
2013 &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
2014 memcpy(QETH_ULP_SETUP_CONNECTION_TOKEN(iob->data),
2015 &card->token.ulp_connection_w, QETH_MPC_TOKEN_LENGTH);
2016 memcpy(QETH_ULP_SETUP_FILTER_TOKEN(iob->data),
2017 &card->token.ulp_filter_r, QETH_MPC_TOKEN_LENGTH);
/* device number and (cula<<8 | unit_addr2) real address, 2 bytes each */
2019 temp = __raw_devno_from_bus_id(CARD_DDEV_ID(card));
2020 memcpy(QETH_ULP_SETUP_CUA(iob->data), &temp, 2);
2021 temp = (card->info.cula << 8) + card->info.unit_addr2;
2022 memcpy(QETH_ULP_SETUP_REAL_DEVADDR(iob->data), &temp, 2);
2023 rc = qeth_send_control_data(card, ULP_SETUP_SIZE, iob,
2024 qeth_ulp_setup_cb, NULL);
/*
 * qeth_check_for_inbound_error - log QDIO/SIGA error indications for an
 * inbound buffer, including SBAL flags 14 and 15.  The return statement
 * is on elided lines; presumably returns nonzero when an error was
 * detected (callers use it as a boolean) -- TODO confirm.
 */
2029 qeth_check_for_inbound_error(struct qeth_qdio_buffer *buf,
2030 unsigned int qdio_error,
2031 unsigned int siga_error)
2035 if (qdio_error || siga_error) {
2036 QETH_DBF_TEXT(trace, 2, "qdinerr");
2037 QETH_DBF_TEXT(qerr, 2, "qdinerr");
2038 QETH_DBF_TEXT_(qerr, 2, " F15=%02X",
2039 buf->buffer->element[15].flags & 0xff);
2040 QETH_DBF_TEXT_(qerr, 2, " F14=%02X",
2041 buf->buffer->element[14].flags & 0xff);
2042 QETH_DBF_TEXT_(qerr, 2, " qerr=%X", qdio_error);
2043 QETH_DBF_TEXT_(qerr, 2, " serr=%X", siga_error);
/*
 * qeth_get_skb - allocate an skb of the given length; with VLAN support
 * compiled in, reserve VLAN_HLEN headroom so a tag can be pushed later.
 */
2049 static inline struct sk_buff *
2050 qeth_get_skb(unsigned int length)
2052 struct sk_buff* skb;
2053 #ifdef CONFIG_QETH_VLAN
2054 if ((skb = dev_alloc_skb(length + VLAN_HLEN)))
2055 skb_reserve(skb, VLAN_HLEN);
2057 skb = dev_alloc_skb(length);
/*
 * qeth_get_next_skb - extract the next packet from a QDIO inbound
 * buffer.  Walks the buffer elements starting at *__element/*__offset,
 * reads the qeth_hdr (which must not straddle an element boundary),
 * allocates an skb and copies the packet data into it, advancing the
 * cursor for the next call.  Returns NULL when no further packet is
 * available or on error.  NOTE(review): several lines (loop heads,
 * element advance, returns, the no-mem label) are elided here.
 */
2062 static inline struct sk_buff *
2063 qeth_get_next_skb(struct qeth_card *card, struct qdio_buffer *buffer,
2064 struct qdio_buffer_element **__element, int *__offset,
2065 struct qeth_hdr **hdr)
2067 struct qdio_buffer_element *element = *__element;
2068 int offset = *__offset;
2069 struct sk_buff *skb = NULL;
2074 QETH_DBF_TEXT(trace,6,"nextskb");
2075 /* qeth_hdr must not cross element boundaries */
2076 if (element->length < offset + sizeof(struct qeth_hdr)){
2077 if (qeth_is_last_sbale(element))
2081 if (element->length < sizeof(struct qeth_hdr))
2084 *hdr = element->addr + offset;
2086 offset += sizeof(struct qeth_hdr);
2087 skb_len = (*hdr)->length;
/* fake_ll mode leaves room in front of the data for a fabricated
 * ethernet header (filled in later by qeth_rebuild_skb_fake_ll) */
2090 if (card->options.fake_ll){
2091 if (!(skb = qeth_get_skb(skb_len + QETH_FAKE_LL_LEN)))
2093 skb_pull(skb, QETH_FAKE_LL_LEN);
2094 } else if (!(skb = qeth_get_skb(skb_len)))
2096 data_ptr = element->addr + offset;
/* copy data, possibly spanning several buffer elements */
2098 data_len = min(skb_len, (int)(element->length - offset));
2100 memcpy(skb_put(skb, data_len), data_ptr, data_len);
2101 skb_len -= data_len;
/* ran off the end of the buffer while data was still expected */
2103 if (qeth_is_last_sbale(element)){
2104 QETH_DBF_TEXT(trace,4,"unexeob");
2105 QETH_DBF_TEXT_(trace,4,"%s",CARD_BUS_ID(card));
2106 QETH_DBF_TEXT(qerr,2,"unexeob");
2107 QETH_DBF_TEXT_(qerr,2,"%s",CARD_BUS_ID(card));
2108 QETH_DBF_HEX(misc,4,buffer,sizeof(*buffer));
2109 dev_kfree_skb_any(skb);
2110 card->stats.rx_errors++;
2115 data_ptr = element->addr;
/* hand the advanced cursor back to the caller */
2120 *__element = element;
/* allocation-failure path (rate-limited warning) */
2124 if (net_ratelimit()){
2125 PRINT_WARN("No memory for packet received on %s.\n",
2126 card->info.if_name);
2127 QETH_DBF_TEXT(trace,2,"noskbmem");
2128 QETH_DBF_TEXT_(trace,2,"%s",CARD_BUS_ID(card));
2130 card->stats.rx_dropped++;
/*
 * qeth_type_trans - determine skb->protocol and pkt_type for a received
 * frame, like eth_type_trans() but aware of token-ring link types (the
 * opening #ifdef CONFIG_TR is on an elided line).
 */
2134 static inline unsigned short
2135 qeth_type_trans(struct sk_buff *skb, struct net_device *dev)
2138 struct qeth_card *card;
2140 QETH_DBF_TEXT(trace,5,"typtrans");
2142 card = (struct qeth_card *)dev->priv;
2144 if ((card->info.link_type == QETH_LINK_TYPE_HSTR) ||
2145 (card->info.link_type == QETH_LINK_TYPE_LANE_TR))
2146 return tr_type_trans(skb,dev);
2147 #endif /* CONFIG_TR */
/* strip the ethernet header (2 MACs + type) and classify the frame */
2149 skb->mac.raw = skb->data;
2150 skb_pull(skb, ETH_ALEN * 2 + sizeof (short));
/* multicast bit set in the destination MAC */
2153 if (*eth->h_dest & 1) {
2154 if (memcmp(eth->h_dest, dev->broadcast, ETH_ALEN) == 0)
2155 skb->pkt_type = PACKET_BROADCAST;
2157 skb->pkt_type = PACKET_MULTICAST;
2159 skb->pkt_type = PACKET_OTHERHOST;
/* Ethernet II vs 802.3/802.2: values >= 1536 are ethertypes */
2161 if (ntohs(eth->h_proto) >= 1536)
2162 return eth->h_proto;
2163 if (*(unsigned short *) (skb->data) == 0xFFFF)
2164 return htons(ETH_P_802_3);
2165 return htons(ETH_P_802_2);
/*
 * qeth_rebuild_skb_fake_ll - fabricate an ethernet header in the
 * reserved headroom of a layer-3 frame so link-layer-expecting
 * consumers (e.g. packet sniffers) see a plausible header.
 */
2169 qeth_rebuild_skb_fake_ll(struct qeth_card *card, struct sk_buff *skb,
2170 struct qeth_hdr *hdr)
2172 struct ethhdr *fake_hdr;
2173 struct iphdr *ip_hdr;
2175 QETH_DBF_TEXT(trace,5,"skbfake");
2176 skb->mac.raw = skb->data - QETH_FAKE_LL_LEN;
2177 /* this is a fake ethernet header */
2178 fake_hdr = (struct ethhdr *) skb->mac.raw;
2180 /* the destination MAC address */
2181 switch (skb->pkt_type){
2182 case PACKET_MULTICAST:
/* map the multicast IP (v4 or v6) to its multicast MAC */
2183 switch (skb->protocol){
2184 #ifdef CONFIG_QETH_IPV6
2185 case __constant_htons(ETH_P_IPV6):
2186 ndisc_mc_map((struct in6_addr *)
2187 skb->data + QETH_FAKE_LL_V6_ADDR_POS,
2188 fake_hdr->h_dest, card->dev, 0);
2190 #endif /* CONFIG_QETH_IPV6 */
2191 case __constant_htons(ETH_P_IP):
2192 ip_hdr = (struct iphdr *)skb->data;
2193 if (card->dev->type == ARPHRD_IEEE802_TR)
2194 ip_tr_mc_map(ip_hdr->daddr, fake_hdr->h_dest);
2196 ip_eth_mc_map(ip_hdr->daddr, fake_hdr->h_dest);
/* non-IP multicast: fall back to our own address */
2199 memcpy(fake_hdr->h_dest, card->dev->dev_addr, ETH_ALEN);
2202 case PACKET_BROADCAST:
2203 memset(fake_hdr->h_dest, 0xff, ETH_ALEN);
2206 memcpy(fake_hdr->h_dest, card->dev->dev_addr, ETH_ALEN);
2208 /* the source MAC address */
2209 if (hdr->ext_flags & QETH_HDR_EXT_SRC_MAC_ADDR)
2210 memcpy(fake_hdr->h_source, &hdr->dest_addr[2], ETH_ALEN);
2212 memset(fake_hdr->h_source, 0, ETH_ALEN);
2214 fake_hdr->h_proto = skb->protocol;
/*
 * qeth_rebuild_skb_vlan - if the qeth header flags a VLAN frame, push
 * the 802.1Q tag (vlan id + original protocol) back in front of the
 * payload and mark the skb as ETH_P_8021Q.  No-op without VLAN support.
 */
2218 qeth_rebuild_skb_vlan(struct qeth_card *card, struct sk_buff *skb,
2219 struct qeth_hdr *hdr)
2221 #ifdef CONFIG_QETH_VLAN
2224 if (hdr->ext_flags & QETH_HDR_EXT_VLAN_FRAME) {
2225 vlan_tag = (u16 *) skb_push(skb, VLAN_HLEN);
2226 *vlan_tag = hdr->vlan_id;
2227 *(vlan_tag + 1) = skb->protocol;
2228 skb->protocol = __constant_htons(ETH_P_8021Q);
2230 #endif /* CONFIG_QETH_VLAN */
/*
 * qeth_rebuild_skb - finish an inbound skb: set protocol and pkt_type
 * from the qeth header, fabricate a fake link-layer header if
 * configured, apply the checksum policy and VLAN handling.
 */
2234 qeth_rebuild_skb(struct qeth_card *card, struct sk_buff *skb,
2235 struct qeth_hdr *hdr)
2237 #ifdef CONFIG_QETH_IPV6
/* passthru frames carry their own link-level header */
2238 if (hdr->flags & QETH_HDR_PASSTHRU){
2239 skb->protocol = qeth_type_trans(skb, card->dev);
2242 #endif /* CONFIG_QETH_IPV6 */
2243 skb->protocol = htons((hdr->flags & QETH_HDR_IPV6)? ETH_P_IPV6 :
2245 switch (hdr->flags & QETH_HDR_CAST_MASK){
2246 case QETH_CAST_UNICAST:
2247 skb->pkt_type = PACKET_HOST;
2249 case QETH_CAST_MULTICAST:
2250 skb->pkt_type = PACKET_MULTICAST;
2251 card->stats.multicast++;
2253 case QETH_CAST_BROADCAST:
2254 skb->pkt_type = PACKET_BROADCAST;
2255 card->stats.multicast++;
2257 case QETH_CAST_ANYCAST:
2258 case QETH_CAST_NOCAST:
2260 skb->pkt_type = PACKET_HOST;
2262 if (card->options.fake_ll)
2263 qeth_rebuild_skb_fake_ll(card, skb, hdr);
2265 skb->mac.raw = skb->data;
/* hw checksumming: trust the card only if it checked both the IP
 * header and the transport header */
2266 skb->ip_summed = card->options.checksum_type;
2267 if (card->options.checksum_type == HW_CHECKSUMMING){
2268 if ( (hdr->ext_flags &
2269 (QETH_HDR_EXT_CSUM_HDR_REQ |
2270 QETH_HDR_EXT_CSUM_TRANSP_REQ)) ==
2271 (QETH_HDR_EXT_CSUM_HDR_REQ |
2272 QETH_HDR_EXT_CSUM_TRANSP_REQ) )
2273 skb->ip_summed = CHECKSUM_UNNECESSARY;
2275 skb->ip_summed = SW_CHECKSUMMING;
2277 qeth_rebuild_skb_vlan(card, skb, hdr);
/*
 * qeth_process_inbound_buffer - deliver every packet contained in one
 * QDIO inbound buffer to the stack via netif_rx(), dropping packets
 * while the interface is down and updating rx statistics.
 */
2281 qeth_process_inbound_buffer(struct qeth_card *card,
2282 struct qeth_qdio_buffer *buf, int index)
2284 struct qdio_buffer_element *element;
2286 struct sk_buff *skb;
2287 struct qeth_hdr *hdr;
2290 /* get first element of current buffer */
2291 element = (struct qdio_buffer_element *)&buf->buffer->element[0];
2293 #ifdef CONFIG_QETH_PERF_STATS
2294 card->perf_stats.bufs_rec++;
/* iterate over all packets in this buffer */
2296 while((skb = qeth_get_next_skb(card, buf->buffer, &element,
2298 qeth_rebuild_skb(card, skb, hdr);
2299 /* is device UP ? */
2300 if (!(card->dev->flags & IFF_UP)){
2301 dev_kfree_skb_any(skb);
2304 skb->dev = card->dev;
2305 rxrc = netif_rx(skb);
2306 card->dev->last_rx = jiffies;
2307 card->stats.rx_packets++;
2308 card->stats.rx_bytes += skb->len;
/*
 * qeth_get_buffer_pool_entry - pop the first free entry from the card's
 * inbound buffer pool, or (on an elided line, presumably) NULL when the
 * pool is empty.
 */
2312 static inline struct qeth_buffer_pool_entry *
2313 qeth_get_buffer_pool_entry(struct qeth_card *card)
2315 struct qeth_buffer_pool_entry *entry;
2317 QETH_DBF_TEXT(trace, 6, "gtbfplen");
2318 if (!list_empty(&card->qdio.in_buf_pool.entry_list)) {
2319 entry = list_entry(card->qdio.in_buf_pool.entry_list.next,
2320 struct qeth_buffer_pool_entry, list);
2321 list_del_init(&entry->list);
/*
 * qeth_init_input_buffer - attach a pool entry to an inbound QDIO
 * buffer and point each SBAL element at one page of the entry, marking
 * the final element as the last.
 */
2328 qeth_init_input_buffer(struct qeth_card *card, struct qeth_qdio_buffer *buf)
2330 struct qeth_buffer_pool_entry *pool_entry;
2333 pool_entry = qeth_get_buffer_pool_entry(card);
2335 * since the buffer is accessed only from the input_tasklet
2336 * there shouldn't be a need to synchronize; also, since we use
2337 * the QETH_IN_BUF_REQUEUE_THRESHOLD we should never run out off
2340 BUG_ON(!pool_entry);
2342 buf->pool_entry = pool_entry;
2343 for(i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i){
2344 buf->buffer->element[i].length = PAGE_SIZE;
2345 buf->buffer->element[i].addr = pool_entry->elements[i];
2346 if (i == QETH_MAX_BUFFER_ELEMENTS(card) - 1)
2347 buf->buffer->element[i].flags = SBAL_FLAGS_LAST_ENTRY;
2349 buf->buffer->element[i].flags = 0;
2351 buf->state = QETH_QDIO_BUF_EMPTY;
/*
 * qeth_clear_output_buffer - return an outbound QDIO buffer to the
 * EMPTY state: account a consumed PCI request flag, clear every SBAL
 * element and free all skbs still queued on the buffer.
 */
2355 qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
2356 struct qeth_qdio_out_buffer *buf)
2359 struct sk_buff *skb;
2361 /* is PCI flag set on buffer? */
2362 if (buf->buffer->element[0].flags & 0x40)
2363 atomic_dec(&queue->set_pci_flags_count)
/*
 * qeth_queue_input_buffer - requeue consumed inbound buffers to the
 * hardware via do_QDIO(), but only once enough have accumulated
 * (QETH_IN_BUF_REQUEUE_THRESHOLD) to amortize the SIGA cost.
 */
2379 qeth_queue_input_buffer(struct qeth_card *card, int index)
2381 struct qeth_qdio_q *queue = card->qdio.in_q;
2386 QETH_DBF_TEXT(trace,6,"queinbuf");
/* number of processed-but-not-yet-requeued buffers, accounting for
 * ring wraparound */
2387 count = (index < queue->next_buf_to_init)?
2388 card->qdio.in_buf_pool.buf_count -
2389 (queue->next_buf_to_init - index) :
2390 card->qdio.in_buf_pool.buf_count -
2391 (queue->next_buf_to_init + QDIO_MAX_BUFFERS_PER_Q - index);
2392 /* only requeue at a certain threshold to avoid SIGAs */
2393 if (count >= QETH_IN_BUF_REQUEUE_THRESHOLD(card)){
2394 for (i = queue->next_buf_to_init;
2395 i < queue->next_buf_to_init + count; ++i)
2396 qeth_init_input_buffer(card,
2397 &queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q]);
2399 * according to old code it should be avoided to requeue all
2400 * 128 buffers in order to benefit from PCI avoidance.
2401 * this function keeps at least one buffer (the buffer at
2402 * 'index') un-requeued -> this buffer is the first buffer that
2403 * will be requeued the next time
2405 #ifdef CONFIG_QETH_PERF_STATS
2406 card->perf_stats.inbound_do_qdio_cnt++;
2407 card->perf_stats.inbound_do_qdio_start_time = qeth_get_micros();
2409 rc = do_QDIO(CARD_DDEV(card),
2410 QDIO_FLAG_SYNC_INPUT | QDIO_FLAG_UNDER_INTERRUPT,
2411 0, queue->next_buf_to_init, count, NULL);
2412 #ifdef CONFIG_QETH_PERF_STATS
2413 card->perf_stats.inbound_do_qdio_time += qeth_get_micros() -
2414 card->perf_stats.inbound_do_qdio_start_time;
2417 PRINT_WARN("qeth_queue_input_buffer's do_QDIO "
2418 "return %i (device %s).\n",
2419 rc, CARD_DDEV_ID(card));
2420 QETH_DBF_TEXT(trace,2,"qinberr");
2421 QETH_DBF_TEXT_(trace,2,"%s",CARD_BUS_ID(card));
2423 queue->next_buf_to_init = (queue->next_buf_to_init + count) %
2424 QDIO_MAX_BUFFERS_PER_Q;
/* qeth_put_buffer_pool_entry - return a pool entry to the free list. */
2429 qeth_put_buffer_pool_entry(struct qeth_card *card,
2430 struct qeth_buffer_pool_entry *entry)
2432 QETH_DBF_TEXT(trace, 6, "ptbfplen");
2433 list_add_tail(&entry->list, &card->qdio.in_buf_pool.entry_list);
/*
 * qeth_qdio_input_handler - QDIO inbound interrupt handler: on activate
 * check conditions schedule recovery; otherwise process each signalled
 * buffer (skipping those with inbound errors), return its pool entry
 * and requeue it to the hardware.
 */
2437 qeth_qdio_input_handler(struct ccw_device * ccwdev, unsigned int status,
2438 unsigned int qdio_err, unsigned int siga_err,
2439 unsigned int queue, int first_element, int count,
2440 unsigned long card_ptr)
2442 struct net_device *net_dev;
2443 struct qeth_card *card;
2444 struct qeth_qdio_buffer *buffer;
2448 QETH_DBF_TEXT(trace, 6, "qdinput");
2449 card = (struct qeth_card *) card_ptr;
2450 net_dev = card->dev;
2451 #ifdef CONFIG_QETH_PERF_STATS
2452 card->perf_stats.inbound_cnt++;
2453 card->perf_stats.inbound_start_time = qeth_get_micros();
2455 if (status & QDIO_STATUS_LOOK_FOR_ERROR) {
/* unrecoverable channel problem: hand over to the recovery thread */
2456 if (status & QDIO_STATUS_ACTIVATE_CHECK_CONDITION){
2457 QETH_DBF_TEXT(trace, 1,"qdinchk");
2458 QETH_DBF_TEXT_(trace,1,"%s",CARD_BUS_ID(card));
2459 QETH_DBF_TEXT_(trace,1,"%04X%04X",first_element,count);
2460 QETH_DBF_TEXT_(trace,1,"%04X%04X", queue, status);
2461 qeth_schedule_recovery(card);
2465 for (i = first_element; i < (first_element + count); ++i) {
2466 index = i % QDIO_MAX_BUFFERS_PER_Q;
2467 buffer = &card->qdio.in_q->bufs[index];
/* only deliver the buffer's packets if it carries no error */
2468 if (!((status == QDIO_STATUS_LOOK_FOR_ERROR) &&
2469 qeth_check_for_inbound_error(buffer, qdio_err, siga_err)))
2470 qeth_process_inbound_buffer(card, buffer, index);
2471 /* clear buffer and give back to hardware */
2472 qeth_put_buffer_pool_entry(card, buffer->pool_entry);
2473 qeth_queue_input_buffer(card, index);
2475 #ifdef CONFIG_QETH_PERF_STATS
2476 card->perf_stats.inbound_time += qeth_get_micros() -
2477 card->perf_stats.inbound_start_time;
/*
 * qeth_handle_send_error - classify a transmit completion error from
 * the SIGA condition code (low two bits of siga_err) and SBAL flag 15
 * into one of the QETH_SEND_ERROR_* outcomes (none / retry / link
 * failure / kick the card into recovery).  NOTE(review): the switch on
 * cc and several case labels sit on elided lines.
 */
2482 qeth_handle_send_error(struct qeth_card *card,
2483 struct qeth_qdio_out_buffer *buffer,
2484 int qdio_err, int siga_err)
2486 int sbalf15 = buffer->buffer->element[15].flags & 0xff;
2487 int cc = siga_err & 3;
2489 QETH_DBF_TEXT(trace, 6, "hdsnderr");
2493 QETH_DBF_TEXT(trace, 1,"lnkfail");
2494 QETH_DBF_TEXT_(trace,1,"%s",CARD_BUS_ID(card));
2495 QETH_DBF_TEXT_(trace,1,"%04x %02x",
2496 (u16)qdio_err, (u8)sbalf15);
2497 return QETH_SEND_ERROR_LINK_FAILURE;
2499 return QETH_SEND_ERROR_NONE;
/* busy condition with the B bit set requires a recovery kick */
2501 if (siga_err & QDIO_SIGA_ERROR_B_BIT_SET) {
2502 QETH_DBF_TEXT(trace, 1, "SIGAcc2B");
2503 QETH_DBF_TEXT_(trace,1,"%s",CARD_BUS_ID(card));
2504 return QETH_SEND_ERROR_KICK_IT;
/* sbalf15 in 15..31 denotes a retryable condition */
2506 if ((sbalf15 >= 15) && (sbalf15 <= 31))
2507 return QETH_SEND_ERROR_RETRY;
2508 return QETH_SEND_ERROR_LINK_FAILURE;
2509 /* look at qdio_error and sbalf 15 */
2511 QETH_DBF_TEXT(trace, 1, "SIGAcc1");
2512 QETH_DBF_TEXT_(trace,1,"%s",CARD_BUS_ID(card));
2513 return QETH_SEND_ERROR_LINK_FAILURE;
2515 QETH_DBF_TEXT(trace, 1, "SIGAcc3");
2516 QETH_DBF_TEXT_(trace,1,"%s",CARD_BUS_ID(card));
2517 return QETH_SEND_ERROR_KICK_IT;
2519 return QETH_SEND_ERROR_LINK_FAILURE;
/*
 * qeth_flush_buffers - hand 'count' outbound buffers starting at
 * 'index' to the hardware with do_QDIO().  For non-IQD cards it also
 * decides whether to request a PCI interrupt (flag 0x40 in element 0)
 * so packed buffers left behind will eventually be flushed.  A do_QDIO
 * failure is treated as fatal and triggers recovery.
 */
2523 qeth_flush_buffers(struct qeth_qdio_out_q *queue, int under_int,
2524 int index, int count)
2526 struct qeth_qdio_out_buffer *buf;
2530 QETH_DBF_TEXT(trace, 6, "flushbuf");
2532 for (i = index; i < index + count; ++i) {
2533 buf = &queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q];
/* terminate the SBAL chain of each buffer */
2534 buf->buffer->element[buf->next_element_to_fill - 1].flags |=
2535 SBAL_FLAGS_LAST_ENTRY;
/* IQD (HiperSockets) never uses PCI request flags */
2537 if (queue->card->info.type == QETH_CARD_TYPE_IQD)
2540 if (!queue->do_pack){
2541 if ((atomic_read(&queue->used_buffers) >=
2542 (QETH_HIGH_WATERMARK_PACK -
2543 QETH_WATERMARK_PACK_FUZZ)) &&
2544 !atomic_read(&queue->set_pci_flags_count)){
2545 /* it's likely that we'll go to packing
2547 atomic_inc(&queue->set_pci_flags_count);
2548 buf->buffer->element[0].flags |= 0x40;
2551 if (!atomic_read(&queue->set_pci_flags_count)){
2553 * there's no outstanding PCI any more, so we
2554 * have to request a PCI to be sure the the PCI
2555 * will wake at some time in the future then we
2556 * can flush packed buffers that might still be
2557 * hanging around, which can happen if no
2558 * further send was requested by the stack
2560 atomic_inc(&queue->set_pci_flags_count);
2561 buf->buffer->element[0].flags |= 0x40;
2563 #ifdef CONFIG_QETH_PERF_STATS
2564 queue->card->perf_stats.bufs_sent_pack++;
2569 queue->card->dev->trans_start = jiffies;
2570 #ifdef CONFIG_QETH_PERF_STATS
2571 queue->card->perf_stats.outbound_do_qdio_cnt++;
2572 queue->card->perf_stats.outbound_do_qdio_start_time = qeth_get_micros();
/* under_int selects whether we flush from interrupt context */
2575 rc = do_QDIO(CARD_DDEV(queue->card),
2576 QDIO_FLAG_SYNC_OUTPUT | QDIO_FLAG_UNDER_INTERRUPT,
2577 queue->queue_no, index, count, NULL);
2579 rc = do_QDIO(CARD_DDEV(queue->card), QDIO_FLAG_SYNC_OUTPUT,
2580 queue->queue_no, index, count, NULL);
2581 #ifdef CONFIG_QETH_PERF_STATS
2582 queue->card->perf_stats.outbound_do_qdio_time += qeth_get_micros() -
2583 queue->card->perf_stats.outbound_do_qdio_start_time;
2586 QETH_DBF_SPRINTF(trace, 0, "qeth_flush_buffers: do_QDIO "
2587 "returned error (%i) on device %s.",
2588 rc, CARD_DDEV_ID(queue->card));
2589 QETH_DBF_TEXT(trace, 2, "flushbuf");
2590 QETH_DBF_TEXT_(trace, 2, " err%d", rc);
2591 queue->card->stats.tx_errors += count;
2592 /* this must not happen under normal circumstances. if it
2593 * happens something is really wrong -> recover */
2594 qeth_schedule_recovery(queue->card);
2597 atomic_add(count, &queue->used_buffers);
2598 #ifdef CONFIG_QETH_PERF_STATS
2599 queue->card->perf_stats.bufs_sent += count;
2604 * Switches to packing state if the number of used buffers on a queue
2605 * reaches a certain limit (QETH_HIGH_WATERMARK_PACK).
2608 qeth_switch_to_packing_if_needed(struct qeth_qdio_out_q *queue)
2610 if (!queue->do_pack) {
2611 if (atomic_read(&queue->used_buffers)
2612 >= QETH_HIGH_WATERMARK_PACK){
2613 /* switch non-PACKING -> PACKING */
2614 QETH_DBF_TEXT(trace, 6, "np->pack");
2615 #ifdef CONFIG_QETH_PERF_STATS
2616 queue->card->perf_stats.sc_dp_p++;
2624 * Switches from packing to non-packing mode. If there is a packing
2625 * buffer on the queue this buffer will be prepared to be flushed.
2626 * In that case 1 is returned to inform the caller. If no buffer
2627 * has to be flushed, zero is returned.
2630 qeth_switch_to_nonpacking_if_needed(struct qeth_qdio_out_q *queue)
2632 struct qeth_qdio_out_buffer *buffer;
2633 int flush_count = 0;
2635 if (queue->do_pack) {
2636 if (atomic_read(&queue->used_buffers)
2637 <= QETH_LOW_WATERMARK_PACK) {
2638 /* switch PACKING -> non-PACKING */
2639 QETH_DBF_TEXT(trace, 6, "pack->np");
2640 #ifdef CONFIG_QETH_PERF_STATS
2641 queue->card->perf_stats.sc_p_dp++;
2644 /* flush packing buffers */
2645 buffer = &queue->bufs[queue->next_buf_to_fill];
/* an EMPTY buffer with elements already filled is a partially
 * packed buffer -> promote it to PRIMED so it gets flushed */
2646 if ((atomic_read(&buffer->state) ==
2647 QETH_QDIO_BUF_EMPTY) &&
2648 (buffer->next_element_to_fill > 0)) {
2649 atomic_set(&buffer->state,QETH_QDIO_BUF_PRIMED);
2651 queue->next_buf_to_fill =
2652 (queue->next_buf_to_fill + 1) %
2653 QDIO_MAX_BUFFERS_PER_Q;
2661 * Called to flush a packing buffer if no more pci flags are on the queue.
2662 * Checks if there is a packing buffer and prepares it to be flushed.
2663 * In that case returns 1, otherwise zero.
2666 qeth_flush_buffers_on_no_pci(struct qeth_qdio_out_q *queue)
2668 struct qeth_qdio_out_buffer *buffer;
2670 buffer = &queue->bufs[queue->next_buf_to_fill];
2671 if((atomic_read(&buffer->state) == QETH_QDIO_BUF_EMPTY) &&
2672 (buffer->next_element_to_fill > 0)){
2673 /* it's a packing buffer */
2674 atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
2675 queue->next_buf_to_fill =
2676 (queue->next_buf_to_fill + 1) % QDIO_MAX_BUFFERS_PER_Q;
/*
 * Post-completion housekeeping for an outbound queue: possibly switch
 * back to non-packing mode and/or flush a leftover packing buffer when
 * no PCI request is outstanding. Runs from the output handler; uses the
 * queue->state atomic as a lock against do_send_packet.
 */
2683 qeth_check_outbound_queue(struct qeth_qdio_out_q *queue)
2689 * check if we have to switch to non-packing mode or if
2690 * we have to get a pci flag out on the queue
2692 if ((atomic_read(&queue->used_buffers) <= QETH_LOW_WATERMARK_PACK) ||
2693 !atomic_read(&queue->set_pci_flags_count)){
2694 if (atomic_swap(&queue->state, QETH_OUT_Q_LOCKED_FLUSH) ==
2695 QETH_OUT_Q_UNLOCKED) {
2697 * If we get in here, there was no action in
2698 * do_send_packet. So, we check if there is a
2699 * packing buffer to be flushed here.
2701 /* TODO: try if we get a performance improvement
2702 * by calling netif_stop_queue here */
2703 /* save start index for flushing */
2704 index = queue->next_buf_to_fill;
2705 flush_cnt += qeth_switch_to_nonpacking_if_needed(queue);
2707 !atomic_read(&queue->set_pci_flags_count))
2709 qeth_flush_buffers_on_no_pci(queue);
2710 /* we're done with updating critical queue members */
2711 atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
2712 /* flushing can be done outside the lock */
2714 qeth_flush_buffers(queue, 1, index, flush_cnt);
/*
 * QDIO completion handler for outbound queues. Handles activate-check
 * conditions by scheduling recovery, per-buffer send errors via
 * qeth_handle_send_error() (only KICK_IT triggers recovery), recycles
 * completed buffers and wakes the netdev queue.
 */
2720 qeth_qdio_output_handler(struct ccw_device * ccwdev, unsigned int status,
2721 unsigned int qdio_error, unsigned int siga_error,
2722 unsigned int __queue, int first_element, int count,
2723 unsigned long card_ptr)
2725 struct qeth_card *card = (struct qeth_card *) card_ptr;
2726 struct qeth_qdio_out_q *queue = card->qdio.out_qs[__queue];
2727 struct qeth_qdio_out_buffer *buffer;
2730 QETH_DBF_TEXT(trace, 6, "qdouhdl");
2731 if (status & QDIO_STATUS_LOOK_FOR_ERROR) {
2732 if (status & QDIO_STATUS_ACTIVATE_CHECK_CONDITION){
2733 QETH_DBF_SPRINTF(trace, 2, "On device %s: "
2734 "received active check "
2735 "condition (0x%08x).",
2736 CARD_BUS_ID(card), status);
2737 QETH_DBF_TEXT(trace, 2, "chkcond");
2738 QETH_DBF_TEXT_(trace, 2, "%08x", status);
2739 netif_stop_queue(card->dev);
2740 qeth_schedule_recovery(card);
2745 #ifdef CONFIG_QETH_PERF_STATS
2746 card->perf_stats.outbound_handler_cnt++;
2747 card->perf_stats.outbound_handler_start_time = qeth_get_micros();
2749 for(i = first_element; i < (first_element + count); ++i){
2750 buffer = &queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q];
2751 /*we only handle the KICK_IT error by doing a recovery */
2752 if (qeth_handle_send_error(card, buffer, qdio_error, siga_error)
2753 == QETH_SEND_ERROR_KICK_IT){
2754 netif_stop_queue(card->dev);
2755 qeth_schedule_recovery(card);
2758 qeth_clear_output_buffer(queue, buffer);
2760 atomic_sub(count, &queue->used_buffers);
2761 /* check if we need to do something on this outbound queue */
2762 qeth_check_outbound_queue(queue);
2764 netif_wake_queue(card->dev);
2765 #ifdef CONFIG_QETH_PERF_STATS
2766 card->perf_stats.outbound_handler_time += qeth_get_micros() -
2767 card->perf_stats.outbound_handler_start_time;
/*
 * Build the QIB parameter field handed to the QDIO layer at establish
 * time: the EBCDIC eye catcher "PCIT" followed by the card's three PCI
 * threshold/timer values. Returns the kmalloc'ed buffer; the caller
 * owns it and must kfree() it (see qeth_qdio_establish()).
 */
2772 qeth_create_qib_param_field(struct qeth_card *card)
2776 param_field = kmalloc(QDIO_MAX_BUFFERS_PER_Q * sizeof(char),
2781 memset(param_field, 0, QDIO_MAX_BUFFERS_PER_Q * sizeof(char));
2783 param_field[0] = _ascebc['P'];
2784 param_field[1] = _ascebc['C'];
2785 param_field[2] = _ascebc['I'];
2786 param_field[3] = _ascebc['T'];
/* fixed: "&param_field" had been mangled into "¶m_field" (an HTML
 * "&para;" entity corruption) in the three stores below */
2787 *((unsigned int *) (&param_field[4])) = QETH_PCI_THRESHOLD_A(card);
2788 *((unsigned int *) (&param_field[8])) = QETH_PCI_THRESHOLD_B(card);
2789 *((unsigned int *) (&param_field[12])) = QETH_PCI_TIMER_VALUE(card);
/*
 * Move every entry of the init pool onto the working (in_buf) pool list
 * by handing each one to qeth_put_buffer_pool_entry().
 */
2795 qeth_initialize_working_pool_list(struct qeth_card *card)
2797 struct qeth_buffer_pool_entry *entry;
2799 QETH_DBF_TEXT(trace,5,"inwrklst");
2801 list_for_each_entry(entry,
2802 &card->qdio.init_pool.entry_list, init_list) {
2803 qeth_put_buffer_pool_entry(card,entry);
/*
 * Unlink all entries from the working (in_buf) pool list. The entries
 * themselves stay allocated; they remain reachable via the init pool's
 * init_list and are freed by qeth_free_buffer_pool().
 */
2808 qeth_clear_working_pool_list(struct qeth_card *card)
2810 struct qeth_buffer_pool_entry *pool_entry, *tmp;
2812 QETH_DBF_TEXT(trace,5,"clwrklst");
2813 list_for_each_entry_safe(pool_entry, tmp,
2814 &card->qdio.in_buf_pool.entry_list, list){
2815 list_del(&pool_entry->list);
/*
 * Free the entire inbound buffer pool: for every init-pool entry,
 * release its element pages and then the entry itself.
 */
2820 qeth_free_buffer_pool(struct qeth_card *card)
2822 struct qeth_buffer_pool_entry *pool_entry, *tmp;
2824 QETH_DBF_TEXT(trace,5,"freepool");
2825 list_for_each_entry_safe(pool_entry, tmp,
2826 &card->qdio.init_pool.entry_list, init_list){
2827 for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i)
2828 free_page((unsigned long)pool_entry->elements[i]);
2829 list_del(&pool_entry->init_list);
/*
 * Allocate init_pool.buf_count pool entries, each backed by
 * QETH_MAX_BUFFER_ELEMENTS(card) pages. On any allocation failure the
 * partially built pool is torn down again via qeth_free_buffer_pool()
 * (after unwinding the pages of the current entry).
 */
2835 qeth_alloc_buffer_pool(struct qeth_card *card)
2837 struct qeth_buffer_pool_entry *pool_entry;
2841 QETH_DBF_TEXT(trace,5,"clwkpool");
2842 for (i = 0; i < card->qdio.init_pool.buf_count; ++i){
2843 pool_entry = kmalloc(sizeof(*pool_entry), GFP_KERNEL);
2845 qeth_free_buffer_pool(card);
2848 for(j = 0; j < QETH_MAX_BUFFER_ELEMENTS(card); ++j){
2849 ptr = (void *) __get_free_page(GFP_KERNEL);
/* page allocation failed: free the pages gathered so far
 * for this entry, then dismantle the whole pool */
2852 free_page((unsigned long)
2853 pool_entry->elements[--j]);
2855 qeth_free_buffer_pool(card);
2858 pool_entry->elements[j] = ptr;
2860 list_add(&pool_entry->init_list,
2861 &card->qdio.init_pool.entry_list);
/*
 * Resize the inbound buffer pool to <bufcnt> entries. Only permitted
 * while the card is DOWN or in RECOVER state; the old pool is dropped
 * and a fresh one allocated at the new size.
 */
2867 qeth_realloc_buffer_pool(struct qeth_card *card, int bufcnt)
2869 QETH_DBF_TEXT(trace, 2, "realcbp");
2871 if ((card->state != CARD_STATE_DOWN) &&
2872 (card->state != CARD_STATE_RECOVER))
2875 /* TODO: steal/add buffers from/to a running card's buffer pool (?) */
2876 qeth_clear_working_pool_list(card);
2877 qeth_free_buffer_pool(card);
2878 card->qdio.in_buf_pool.buf_count = bufcnt;
2879 card->qdio.init_pool.buf_count = bufcnt;
2880 return qeth_alloc_buffer_pool(card);
/*
 * Allocate all QDIO queue structures: the single inbound queue, the
 * inbound buffer pool, and no_out_queues outbound queues. Each qeth
 * buffer descriptor is wired to its embedded qdio_buffer. Unwinds all
 * prior allocations on failure; sets qdio.state to ALLOCATED on
 * success. Idempotent if already ALLOCATED.
 */
2884 qeth_alloc_qdio_buffers(struct qeth_card *card)
2888 QETH_DBF_TEXT(setup, 2, "allcqdbf");
2890 if (card->qdio.state == QETH_QDIO_ALLOCATED)
2893 card->qdio.in_q = kmalloc(sizeof(struct qeth_qdio_q), GFP_KERNEL);
2894 if (!card->qdio.in_q)
2896 QETH_DBF_TEXT(setup, 2, "inq");
2897 QETH_DBF_HEX(setup, 2, &card->qdio.in_q, sizeof(void *));
2898 memset(card->qdio.in_q, 0, sizeof(struct qeth_qdio_q));
2899 /* give inbound qeth_qdio_buffers their qdio_buffers */
2900 for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i)
2901 card->qdio.in_q->bufs[i].buffer =
2902 &card->qdio.in_q->qdio_bufs[i];
2903 /* inbound buffer pool */
2904 if (qeth_alloc_buffer_pool(card)){
2905 kfree(card->qdio.in_q);
2910 kmalloc(card->qdio.no_out_queues *
2911 sizeof(struct qeth_qdio_out_q *), GFP_KERNEL);
2912 if (!card->qdio.out_qs){
2913 qeth_free_buffer_pool(card);
2916 for (i = 0; i < card->qdio.no_out_queues; ++i){
2917 card->qdio.out_qs[i] = kmalloc(sizeof(struct qeth_qdio_out_q),
/* free the queues already allocated before bailing out */
2919 if (!card->qdio.out_qs[i]){
2921 kfree(card->qdio.out_qs[--i]);
2922 kfree(card->qdio.out_qs);
2925 QETH_DBF_TEXT_(setup, 2, "outq %i", i);
2926 QETH_DBF_HEX(setup, 2, &card->qdio.out_qs[i], sizeof(void *));
2927 memset(card->qdio.out_qs[i], 0, sizeof(struct qeth_qdio_out_q));
2928 card->qdio.out_qs[i]->queue_no = i;
2929 /* give outbound qeth_qdio_buffers their qdio_buffers */
2930 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j){
2931 card->qdio.out_qs[i]->bufs[j].buffer =
2932 &card->qdio.out_qs[i]->qdio_bufs[j];
2933 skb_queue_head_init(&card->qdio.out_qs[i]->bufs[j].
2937 card->qdio.state = QETH_QDIO_ALLOCATED;
/*
 * Release everything qeth_alloc_qdio_buffers() set up: inbound queue,
 * buffer pool and all outbound queues (draining any skbs still attached
 * to outbound buffers first). Resets qdio.state to UNINITIALIZED.
 */
2942 qeth_free_qdio_buffers(struct qeth_card *card)
2946 QETH_DBF_TEXT(trace, 2, "freeqdbf");
2947 if (card->qdio.state == QETH_QDIO_UNINITIALIZED)
2949 kfree(card->qdio.in_q);
2950 /* inbound buffer pool */
2951 qeth_free_buffer_pool(card);
2952 /* free outbound qdio_qs */
2953 for (i = 0; i < card->qdio.no_out_queues; ++i){
2954 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j)
2955 qeth_clear_output_buffer(card->qdio.out_qs[i],
2956 &card->qdio.out_qs[i]->bufs[j]);
2957 kfree(card->qdio.out_qs[i]);
2959 kfree(card->qdio.out_qs);
2960 card->qdio.state = QETH_QDIO_UNINITIALIZED;
/*
 * Drop the skbs held by all outbound buffers without freeing the queue
 * structures themselves (used on shutdown/recovery paths).
 */
2964 qeth_clear_qdio_buffers(struct qeth_card *card)
2968 QETH_DBF_TEXT(trace, 2, "clearqdbf");
2969 /* clear outbound buffers to free skbs */
2970 for (i = 0; i < card->qdio.no_out_queues; ++i)
2971 if (card->qdio.out_qs[i]){
2972 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j)
2973 qeth_clear_output_buffer(card->qdio.out_qs[i],
2974 &card->qdio.out_qs[i]->bufs[j]);
/*
 * Set the card's qdio sub-structure to its compile-time defaults:
 * inbound buffer size/count, empty pool lists, and the default
 * priority-queueing configuration.
 */
2979 qeth_init_qdio_info(struct qeth_card *card)
2981 QETH_DBF_TEXT(setup, 4, "intqdinf");
2982 card->qdio.state = QETH_QDIO_UNINITIALIZED;
2984 card->qdio.in_buf_size = QETH_IN_BUF_SIZE_DEFAULT;
2985 card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_DEFAULT;
2986 card->qdio.in_buf_pool.buf_count = card->qdio.init_pool.buf_count;
2987 INIT_LIST_HEAD(&card->qdio.in_buf_pool.entry_list);
2988 INIT_LIST_HEAD(&card->qdio.init_pool.entry_list);
2990 card->qdio.do_prio_queueing = QETH_PRIOQ_DEFAULT;
2991 card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;
/*
 * (Re)initialize the already-allocated QDIO queues for operation:
 * zero the inbound SBALs, fill the working pool, give buf_count-1
 * inbound buffers to the hardware via do_QDIO(), then reset every
 * outbound queue to empty/unlocked/non-packing state.
 */
2995 qeth_init_qdio_queues(struct qeth_card *card)
3000 QETH_DBF_TEXT(setup, 2, "initqdqs");
3003 memset(card->qdio.in_q->qdio_bufs, 0,
3004 QDIO_MAX_BUFFERS_PER_Q * sizeof(struct qdio_buffer));
3005 qeth_initialize_working_pool_list(card);
3006 /*give only as many buffers to hardware as we have buffer pool entries*/
3007 for (i = 0; i < card->qdio.in_buf_pool.buf_count - 1; ++i)
3008 qeth_init_input_buffer(card, &card->qdio.in_q->bufs[i]);
3009 card->qdio.in_q->next_buf_to_init = card->qdio.in_buf_pool.buf_count - 1;
3010 rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0, 0,
3011 card->qdio.in_buf_pool.buf_count - 1, NULL);
3013 QETH_DBF_TEXT_(setup, 2, "1err%d", rc);
3016 rc = qdio_synchronize(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0);
3018 QETH_DBF_TEXT_(setup, 2, "2err%d", rc);
3021 /* outbound queue */
3022 for (i = 0; i < card->qdio.no_out_queues; ++i){
3023 memset(card->qdio.out_qs[i]->qdio_bufs, 0,
3024 QDIO_MAX_BUFFERS_PER_Q * sizeof(struct qdio_buffer));
3025 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j){
3026 qeth_clear_output_buffer(card->qdio.out_qs[i],
3027 &card->qdio.out_qs[i]->bufs[j]);
3029 card->qdio.out_qs[i]->card = card;
3030 card->qdio.out_qs[i]->next_buf_to_fill = 0;
3031 card->qdio.out_qs[i]->do_pack = 0;
3032 atomic_set(&card->qdio.out_qs[i]->used_buffers,0);
3033 atomic_set(&card->qdio.out_qs[i]->set_pci_flags_count, 0);
3034 atomic_set(&card->qdio.out_qs[i]->state,
3035 QETH_OUT_Q_UNLOCKED);
3041 qeth_qdio_establish(struct qeth_card *card)
3043 struct qdio_initialize init_data;
3044 char *qib_param_field;
3045 struct qdio_buffer **in_sbal_ptrs;
3046 struct qdio_buffer **out_sbal_ptrs;
3050 QETH_DBF_TEXT(setup, 2, "qdioest");
3051 qib_param_field = qeth_create_qib_param_field(card);
3052 if (!qib_param_field)
3055 in_sbal_ptrs = kmalloc(QDIO_MAX_BUFFERS_PER_Q * sizeof(void *),
3057 if (!in_sbal_ptrs) {
3058 kfree(qib_param_field);
3061 for(i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i)
3062 in_sbal_ptrs[i] = (struct qdio_buffer *)
3063 virt_to_phys(card->qdio.in_q->bufs[i].buffer);
3066 kmalloc(card->qdio.no_out_queues * QDIO_MAX_BUFFERS_PER_Q *
3067 sizeof(void *), GFP_KERNEL);
3068 if (!out_sbal_ptrs) {
3069 kfree(in_sbal_ptrs);
3070 kfree(qib_param_field);
3073 for(i = 0, k = 0; i < card->qdio.no_out_queues; ++i)
3074 for(j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j, ++k){
3075 out_sbal_ptrs[k] = (struct qdio_buffer *)
3076 virt_to_phys(card->qdio.out_qs[i]->
3080 memset(&init_data, 0, sizeof(struct qdio_initialize));
3081 init_data.cdev = CARD_DDEV(card);
3082 init_data.q_format = qeth_get_qdio_q_format(card);
3083 init_data.qib_param_field_format = 0;
3084 init_data.qib_param_field = qib_param_field;
3085 init_data.min_input_threshold = QETH_MIN_INPUT_THRESHOLD;
3086 init_data.max_input_threshold = QETH_MAX_INPUT_THRESHOLD;
3087 init_data.min_output_threshold = QETH_MIN_OUTPUT_THRESHOLD;
3088 init_data.max_output_threshold = QETH_MAX_OUTPUT_THRESHOLD;
3089 init_data.no_input_qs = 1;
3090 init_data.no_output_qs = card->qdio.no_out_queues;
3091 init_data.input_handler = (qdio_handler_t *)
3092 qeth_qdio_input_handler;
3093 init_data.output_handler = (qdio_handler_t *)
3094 qeth_qdio_output_handler;
3095 init_data.int_parm = (unsigned long) card;
3096 init_data.flags = QDIO_INBOUND_0COPY_SBALS |
3097 QDIO_OUTBOUND_0COPY_SBALS |
3098 QDIO_USE_OUTBOUND_PCIS;
3099 init_data.input_sbal_addr_array = (void **) in_sbal_ptrs;
3100 init_data.output_sbal_addr_array = (void **) out_sbal_ptrs;
3102 if (!(rc = qdio_initialize(&init_data)))
3103 card->qdio.state = QETH_QDIO_ESTABLISHED;
3105 kfree(out_sbal_ptrs);
3106 kfree(in_sbal_ptrs);
3107 kfree(qib_param_field);
/* Activate the previously established QDIO queues on the data device. */
3112 qeth_qdio_activate(struct qeth_card *card)
3114 QETH_DBF_TEXT(setup,3,"qdioact");
3115 return qdio_activate(CARD_DDEV(card), 0);
/*
 * Issue a clear (csch) on the channel's ccw device under the ccwdev
 * lock, then wait (interruptibly, with QETH_TIMEOUT) until the IRQ
 * handler reports CH_STATE_STOPPED; finally mark the channel DOWN.
 */
3119 qeth_clear_channel(struct qeth_channel *channel)
3121 unsigned long flags;
3122 struct qeth_card *card;
3125 QETH_DBF_TEXT(trace,3,"clearch");
3126 card = CARD_FROM_CDEV(channel->ccwdev);
3127 spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
3128 rc = ccw_device_clear(channel->ccwdev, QETH_CLEAR_CHANNEL_PARM);
3129 spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
3133 rc = wait_event_interruptible_timeout(card->wait_q,
3134 channel->state==CH_STATE_STOPPED, QETH_TIMEOUT);
3135 if (rc == -ERESTARTSYS)
3137 if (channel->state != CH_STATE_STOPPED)
3139 channel->state = CH_STATE_DOWN;
/*
 * Issue a halt (hsch) on the channel's ccw device under the ccwdev
 * lock, then wait (interruptibly, with QETH_TIMEOUT) for the channel
 * to reach CH_STATE_HALTED.
 */
3144 qeth_halt_channel(struct qeth_channel *channel)
3146 unsigned long flags;
3147 struct qeth_card *card;
3150 QETH_DBF_TEXT(trace,3,"haltch");
3151 card = CARD_FROM_CDEV(channel->ccwdev);
3152 spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
3153 rc = ccw_device_halt(channel->ccwdev, QETH_HALT_CHANNEL_PARM);
3154 spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
3158 rc = wait_event_interruptible_timeout(card->wait_q,
3159 channel->state==CH_STATE_HALTED, QETH_TIMEOUT);
3160 if (rc == -ERESTARTSYS)
3162 if (channel->state != CH_STATE_HALTED)
/* Halt read, write and data channels in order; stop at the first error. */
3168 qeth_halt_channels(struct qeth_card *card)
3172 QETH_DBF_TEXT(trace,3,"haltchs");
3173 if ((rc = qeth_halt_channel(&card->read)))
3175 if ((rc = qeth_halt_channel(&card->write)))
3177 return qeth_halt_channel(&card->data);
/* Clear read, write and data channels in order; stop at the first error. */
3180 qeth_clear_channels(struct qeth_card *card)
3184 QETH_DBF_TEXT(trace,3,"clearchs");
3185 if ((rc = qeth_clear_channel(&card->read)))
3187 if ((rc = qeth_clear_channel(&card->write)))
3189 return qeth_clear_channel(&card->data);
/*
 * Optionally halt all channels first (when <halt> is set), then clear
 * them. Returns the result of the clear operation.
 */
3193 qeth_clear_halt_card(struct qeth_card *card, int halt)
3197 QETH_DBF_TEXT(trace,3,"clhacrd");
3198 QETH_DBF_HEX(trace, 3, &card, sizeof(void *));
3201 rc = qeth_halt_channels(card);
3204 return qeth_clear_channels(card);
3208 qeth_qdio_clear_card(struct qeth_card *card, int use_halt)
3212 QETH_DBF_TEXT(trace,3,"qdioclr");
3213 if (card->qdio.state == QETH_QDIO_ESTABLISHED){
3214 qdio_cleanup(CARD_DDEV(card),
3215 (card->info.type == QETH_CARD_TYPE_IQD) ?
3216 QDIO_FLAG_CLEANUP_USING_HALT :
3217 QDIO_FLAG_CLEANUP_USING_CLEAR);
3218 card->qdio.state = QETH_QDIO_ALLOCATED;
3220 rc = qeth_clear_halt_card(card, use_halt);
3221 card->state = CARD_STATE_DOWN;
/*
 * Send the DM_ACT (activate) control command on the write channel,
 * patching in the CM and ULP connection tokens negotiated earlier.
 */
3226 qeth_dm_act(struct qeth_card *card)
3229 struct qeth_cmd_buffer *iob;
3231 QETH_DBF_TEXT(setup,2,"dmact");
3233 iob = qeth_wait_for_buffer(&card->write);
3234 memcpy(iob->data, DM_ACT, DM_ACT_SIZE);
3236 memcpy(QETH_DM_ACT_DEST_ADDR(iob->data),
3237 &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
3238 memcpy(QETH_DM_ACT_CONNECTION_TOKEN(iob->data),
3239 &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
3240 rc = qeth_send_control_data(card, DM_ACT_SIZE, iob, NULL, NULL);
3245 qeth_mpc_initialize(struct qeth_card *card)
3249 QETH_DBF_TEXT(setup,2,"mpcinit");
3251 if ((rc = qeth_issue_next_read(card))){
3252 QETH_DBF_TEXT_(setup, 2, "1err%d", rc);
3255 if ((rc = qeth_cm_enable(card))){
3256 QETH_DBF_TEXT_(setup, 2, "2err%d", rc);
3259 if ((rc = qeth_cm_setup(card))){
3260 QETH_DBF_TEXT_(setup, 2, "3err%d", rc);
3263 if ((rc = qeth_ulp_enable(card))){
3264 QETH_DBF_TEXT_(setup, 2, "4err%d", rc);
3267 if ((rc = qeth_ulp_setup(card))){
3268 QETH_DBF_TEXT_(setup, 2, "5err%d", rc);
3271 if ((rc = qeth_alloc_qdio_buffers(card))){
3272 QETH_DBF_TEXT_(setup, 2, "5err%d", rc);
3275 if ((rc = qeth_qdio_establish(card))){
3276 QETH_DBF_TEXT_(setup, 2, "6err%d", rc);
3277 qeth_free_qdio_buffers(card);
3280 if ((rc = qeth_qdio_activate(card))){
3281 QETH_DBF_TEXT_(setup, 2, "7err%d", rc);
3284 if ((rc = qeth_dm_act(card))){
3285 QETH_DBF_TEXT_(setup, 2, "8err%d", rc);
3291 qeth_qdio_clear_card(card, card->info.type==QETH_CARD_TYPE_OSAE);
/*
 * Allocate the appropriate net_device for the card/link type:
 * token-ring (if CONFIG_TR) for LANE_TR/HSTR links, "hsi%d" for
 * HiperSockets (IQD), plain ethernet otherwise.
 */
3295 static struct net_device *
3296 qeth_get_netdevice(enum qeth_card_types type, enum qeth_link_types linktype)
3298 struct net_device *dev = NULL;
3301 case QETH_CARD_TYPE_OSAE:
3303 case QETH_LINK_TYPE_LANE_TR:
3304 case QETH_LINK_TYPE_HSTR:
3306 dev = alloc_trdev(0);
3307 #endif /* CONFIG_TR */
3310 dev = alloc_etherdev(0);
3313 case QETH_CARD_TYPE_IQD:
3314 dev = alloc_netdev(0, "hsi%d", ether_setup);
3317 dev = alloc_etherdev(0);
3323 qeth_send_packet(struct qeth_card *, struct sk_buff *);
/*
 * net_device hard_start_xmit entry point. Drops the packet (counting
 * tx_dropped/tx_errors) when the card is not UP or the carrier is off;
 * otherwise hands it to qeth_send_packet() and stops the netdev queue
 * only on error (the driver does its own queue synchronization).
 */
3326 qeth_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
3329 struct qeth_card *card;
3331 QETH_DBF_TEXT(trace, 6, "hrdstxmi");
3332 card = (struct qeth_card *)dev->priv;
3334 card->stats.tx_dropped++;
3335 card->stats.tx_errors++;
3338 if ((card->state != CARD_STATE_UP) || !netif_carrier_ok(dev)) {
3339 card->stats.tx_dropped++;
3340 card->stats.tx_errors++;
3341 card->stats.tx_carrier_errors++;
3344 #ifdef CONFIG_QETH_PERF_STATS
3345 card->perf_stats.outbound_cnt++;
3346 card->perf_stats.outbound_start_time = qeth_get_micros();
3349 * We only call netif_stop_queue in case of errors. Since we've
3350 * got our own synchronization on queues we can keep the stack's
3353 if ((rc = qeth_send_packet(card, skb)))
3354 netif_stop_queue(dev);
3356 #ifdef CONFIG_QETH_PERF_STATS
3357 card->perf_stats.outbound_time += qeth_get_micros() -
3358 card->perf_stats.outbound_start_time;
/*
 * Check whether <dev> is one of the VLAN devices registered on this
 * card's vlan group; returns QETH_VLAN_CARD if so (0 otherwise, or
 * always 0 without CONFIG_QETH_VLAN).
 */
3364 qeth_verify_vlan_dev(struct net_device *dev, struct qeth_card *card)
3367 #ifdef CONFIG_QETH_VLAN
3368 struct vlan_group *vg;
3371 if (!(vg = card->vlangrp))
3374 for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++){
3375 if (vg->vlan_devices[i] == dev){
3376 rc = QETH_VLAN_CARD;
/*
 * Walk the global card list (under its rwlock) and classify <dev>:
 * QETH_REAL_CARD if it is a card's own netdevice, QETH_VLAN_CARD if it
 * is a VLAN device on top of one (via qeth_verify_vlan_dev).
 */
3385 qeth_verify_dev(struct net_device *dev)
3387 struct qeth_card *card;
3388 unsigned long flags;
3391 read_lock_irqsave(&qeth_card_list.rwlock, flags);
3392 list_for_each_entry(card, &qeth_card_list.list, list){
3393 if (card->dev == dev){
3394 rc = QETH_REAL_CARD;
3397 rc = qeth_verify_vlan_dev(dev, card);
3401 read_unlock_irqrestore(&qeth_card_list.rwlock, flags);
/*
 * Resolve a net_device to its qeth_card: directly via dev->priv for a
 * real card device, via the underlying real device for a VLAN device.
 * Returns NULL if the device is not known to this driver.
 */
3406 static struct qeth_card *
3407 qeth_get_card_from_dev(struct net_device *dev)
3409 struct qeth_card *card = NULL;
3412 rc = qeth_verify_dev(dev);
3413 if (rc == QETH_REAL_CARD)
3414 card = (struct qeth_card *)dev->priv;
3415 else if (rc == QETH_VLAN_CARD)
3416 card = (struct qeth_card *)
3417 VLAN_DEV_INFO(dev)->real_dev->priv;
3419 QETH_DBF_TEXT_(trace, 4, "%d", rc);
/* net_device tx_timeout hook: count the error and trigger recovery. */
3424 qeth_tx_timeout(struct net_device *dev)
3426 struct qeth_card *card;
3428 card = (struct qeth_card *) dev->priv;
3429 card->stats.tx_errors++;
3430 qeth_schedule_recovery(card);
/*
 * net_device open hook: requires the card to be in SOFTSETUP state,
 * then marks interface/data channel/card UP and starts the tx queue.
 * If the LAN is offline, carrier is turned off and the queue stopped
 * again.
 */
3434 qeth_open(struct net_device *dev)
3436 struct qeth_card *card;
3438 QETH_DBF_TEXT(trace, 4, "qethopen");
3440 card = (struct qeth_card *) dev->priv;
3442 if (card->state != CARD_STATE_SOFTSETUP)
3445 card->dev->flags |= IFF_UP;
3446 netif_start_queue(dev);
3447 card->data.state = CH_STATE_UP;
3448 card->state = CARD_STATE_UP;
3450 if (!card->lan_online){
3451 if (netif_carrier_ok(dev))
3452 netif_carrier_off(dev);
3453 netif_stop_queue(dev);
/*
 * net_device stop hook: stop the tx queue, clear IFF_UP and drop the
 * card back from UP to SOFTSETUP state.
 */
3459 qeth_stop(struct net_device *dev)
3461 struct qeth_card *card;
3463 QETH_DBF_TEXT(trace, 4, "qethstop");
3465 card = (struct qeth_card *) dev->priv;
3467 netif_stop_queue(dev);
3468 card->dev->flags &= ~IFF_UP;
3469 if (card->state == CARD_STATE_UP)
3470 card->state = CARD_STATE_SOFTSETUP;
/*
 * Determine the cast type (RTN_UNSPEC/BROADCAST/MULTICAST/ANYCAST) of
 * an outgoing skb: prefer the routing neighbour's type, then fall back
 * to inspecting the IPv4/IPv6 destination address, the destination MAC
 * against the device broadcast address, and finally link-type specific
 * multicast MAC prefixes (TR vs. ethernet).
 */
3475 qeth_get_cast_type(struct qeth_card *card, struct sk_buff *skb)
3477 int cast_type = RTN_UNSPEC;
3479 if (skb->dst && skb->dst->neighbour){
3480 cast_type = skb->dst->neighbour->type;
3481 if ((cast_type == RTN_BROADCAST) ||
3482 (cast_type == RTN_MULTICAST) ||
3483 (cast_type == RTN_ANYCAST))
3488 /* try something else */
3489 if (skb->protocol == ETH_P_IPV6)
3490 return (skb->nh.raw[24] == 0xff) ? RTN_MULTICAST : 0;
3491 else if (skb->protocol == ETH_P_IP)
3492 return ((skb->nh.raw[16] & 0xf0) == 0xe0) ? RTN_MULTICAST : 0;
3494 if (!memcmp(skb->nh.raw, skb->dev->broadcast, 6))
3495 return RTN_BROADCAST;
3499 hdr_mac = *((u16 *)skb->nh.raw);
3501 switch (card->info.link_type) {
3502 case QETH_LINK_TYPE_HSTR:
3503 case QETH_LINK_TYPE_LANE_TR:
3504 if ((hdr_mac == QETH_TR_MAC_NC) ||
3505 (hdr_mac == QETH_TR_MAC_C))
3506 return RTN_MULTICAST;
3507 /* eth or so multicast? */
3509 if ((hdr_mac == QETH_ETH_MAC_V4) ||
3510 (hdr_mac == QETH_ETH_MAC_V6))
3511 return RTN_MULTICAST;
/*
 * Pick the outbound queue number for a packet. Non-IP traffic on OSAE
 * cards goes to the default queue; otherwise multicast may be steered
 * separately, and for IPv4 the queue can be derived from the TOS bits
 * (QETH_PRIO_Q_ING_TOS) or the precedence field (QETH_PRIO_Q_ING_PREC).
 */
3518 qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb,
3519 int ipv, int cast_type)
3521 if (!ipv && (card->info.type == QETH_CARD_TYPE_OSAE))
3522 return card->qdio.default_out_queue;
3523 switch (card->qdio.no_out_queues) {
3525 if (cast_type && card->info.is_multicast_different)
3526 return card->info.is_multicast_different &
3527 (card->qdio.no_out_queues - 1);
3528 if (card->qdio.do_prio_queueing && (ipv == 4)) {
3529 if (card->qdio.do_prio_queueing==QETH_PRIO_Q_ING_TOS){
3530 if (skb->nh.iph->tos & IP_TOS_NOTIMPORTANT)
3532 if (skb->nh.iph->tos & IP_TOS_HIGHRELIABILITY)
3534 if (skb->nh.iph->tos & IP_TOS_HIGHTHROUGHPUT)
3536 if (skb->nh.iph->tos & IP_TOS_LOWDELAY)
3539 if (card->qdio.do_prio_queueing==QETH_PRIO_Q_ING_PREC)
3540 return 3 - (skb->nh.iph->tos >> 6);
3541 } else if (card->qdio.do_prio_queueing && (ipv == 6)) {
3544 return card->qdio.default_out_queue;
/*
 * Map skb->protocol to an IP version number (the case labels are not
 * visible in this extract; presumably 4 for ETH_P_IP, 6 for ETH_P_IPV6,
 * 0 otherwise — TODO confirm against the full source).
 */
3551 qeth_get_ip_version(struct sk_buff *skb)
3553 switch (skb->protocol) {
/*
 * Make room for (and later fill) the qeth_hdr in front of the packet:
 * reallocates headroom if needed, manually inserts an 802.1Q tag for
 * VLAN-tagged IPv6 frames (moving the 12 MAC-address bytes forward by
 * VLAN_HLEN), pushes the qeth_hdr, and sanity-checks that header plus
 * first IP-header bytes do not straddle a 4k page boundary.
 *
 * Fix: "tag = (u16 *) (*skb)->data + 12" advanced the pointer by
 * twelve u16 elements (24 bytes) because the cast binds tighter than
 * "+"; the tag belongs at byte offset 12, right after the two MAC
 * addresses, as the surrounding memcpys and comment show.
 */
3564 qeth_prepare_skb(struct qeth_card *card, struct sk_buff **skb,
3565 struct qeth_hdr **hdr, int ipv)
3567 struct sk_buff *new_skb;
3568 #ifdef CONFIG_QETH_VLAN
3572 QETH_DBF_TEXT(trace, 6, "prepskb");
3573 if (skb_headroom(*skb) < sizeof(struct qeth_hdr)){
3574 new_skb = skb_realloc_headroom(*skb, sizeof(struct qeth_hdr));
3576 PRINT_ERR("qeth_prepare_skb: could "
3577 "not realloc headroom for qeth_hdr "
3578 "on interface %s", card->info.if_name);
3583 #ifdef CONFIG_QETH_VLAN
3584 if (card->vlangrp && vlan_tx_tag_present(*skb) && (ipv == 6)){
3586 * Move the mac addresses (6 bytes src, 6 bytes dest)
3587 * to the beginning of the new header. We are using three
3588 * memcpys instead of one memmove to save cycles.
3590 skb_push(*skb, VLAN_HLEN);
3591 memcpy((*skb)->data, (*skb)->data + 4, 4);
3592 memcpy((*skb)->data + 4, (*skb)->data + 8, 4);
3593 memcpy((*skb)->data + 8, (*skb)->data + 12, 4);
3594 tag = (u16 *) ((*skb)->data + 12);
3596 * first two bytes = ETH_P_8021Q (0x8100)
3597 * second two bytes = VLANID
3599 *tag = __constant_htons(ETH_P_8021Q);
3600 *(tag + 1) = vlan_tx_tag_get(*skb);
3601 *(tag + 1) = htons(*(tag + 1));
3604 *hdr = (struct qeth_hdr *) skb_push(*skb, sizeof(struct qeth_hdr));
3606 * sanity check, the Linux memory allocation scheme should
3607 * never present us cases like this one (the 32bytes header plus
3608 * the first 40 bytes of the packet cross a 4k boundary)
3610 if ((((unsigned long) *hdr) & (~(PAGE_SIZE - 1))) !=
3611 (((unsigned long) *hdr + sizeof(struct qeth_hdr) +
3612 QETH_IP_HEADER_SIZE) & (~(PAGE_SIZE - 1)))) {
3613 PRINT_ERR("qeth_prepare_skb: misaligned "
3614 "packet on interface %s. Discarded.",
3615 card->info.if_name);
/* Map an IPv4 cast type to the qeth_hdr cast flag bits. */
3622 qeth_get_qeth_hdr_flags4(int cast_type)
3624 if (cast_type == RTN_MULTICAST)
3625 return QETH_CAST_MULTICAST;
3626 if (cast_type == RTN_BROADCAST)
3627 return QETH_CAST_BROADCAST;
3628 return QETH_CAST_UNICAST;
/*
 * Map an IPv6 cast type to the qeth_hdr flag bits; IPv6 frames always
 * carry PASSTHRU | IPV6 in addition to the cast flag.
 */
3632 qeth_get_qeth_hdr_flags6(int cast_type)
3634 u8 ct = QETH_HDR_PASSTHRU | QETH_HDR_IPV6;
3635 if (cast_type == RTN_MULTICAST)
3636 return ct | QETH_CAST_MULTICAST;
3637 if (cast_type == RTN_ANYCAST)
3638 return ct | QETH_CAST_ANYCAST;
3639 if (cast_type == RTN_BROADCAST)
3640 return ct | QETH_CAST_BROADCAST;
3641 return ct | QETH_CAST_UNICAST;
/*
 * Fill the qeth_hdr for an outgoing skb: VLAN tag info (if present),
 * payload length, cast flags, and the destination address — taken from
 * the routing neighbour's primary key when available, otherwise from
 * the IP header itself. Non-IP frames use passthrough mode with cast
 * flags derived from the destination MAC.
 */
3645 qeth_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
3646 struct sk_buff *skb, int ipv, int cast_type)
3651 QETH_DBF_TEXT(trace, 6, "fillhdr");
3652 #ifdef CONFIG_QETH_VLAN
3654 * before we're going to overwrite this location with next hop ip.
3655 * v6 uses passthrough, v4 sets the tag in the QDIO header.
3657 if (card->vlangrp && vlan_tx_tag_present(skb)) {
3658 hdr->ext_flags = (ipv == 4)? QETH_EXT_HDR_VLAN_FRAME :
3659 QETH_EXT_HDR_INCLUDE_VLAN_TAG;
3660 hdr->vlan_id = vlan_tx_tag_get(skb);
3662 #endif /* CONFIG_QETH_VLAN */
3663 hdr->length = skb->len - sizeof(struct qeth_hdr);
3664 if (ipv == 4) { /* IPv4 */
3665 hdr->flags = qeth_get_qeth_hdr_flags4(cast_type);
3666 memset(hdr->dest_addr, 0, 12);
3667 if ((skb->dst) && (skb->dst->neighbour)) {
3668 *((u32 *) (&hdr->dest_addr[12])) =
3669 *((u32 *) skb->dst->neighbour->primary_key);
3671 /* fill in destination address used in ip header */
3672 *((u32 *) (&hdr->dest_addr[12])) = skb->nh.iph->daddr;
3674 } else if (ipv == 6) { /* IPv6 or passthru */
3675 hdr->flags = qeth_get_qeth_hdr_flags6(cast_type);
3676 if ((skb->dst) && (skb->dst->neighbour)) {
3677 memcpy(hdr->dest_addr,
3678 skb->dst->neighbour->primary_key, 16);
3680 /* fill in destination address used in ip header */
3681 memcpy(hdr->dest_addr, &skb->nh.ipv6h->daddr, 16);
3683 } else { /* passthrough */
3684 if (!memcmp(skb->data + sizeof(struct qeth_hdr),
3685 skb->dev->broadcast, 6)) { /* broadcast? */
3686 hdr->flags = QETH_CAST_BROADCAST | QETH_HDR_PASSTHRU;
3688 hdr->flags = (cast_type == RTN_MULTICAST) ?
3689 QETH_CAST_MULTICAST | QETH_HDR_PASSTHRU :
3690 QETH_CAST_UNICAST | QETH_HDR_PASSTHRU;
/*
 * Scatter one skb (header at <data>) into the SBAL elements of an
 * output buffer, splitting at page boundaries and setting the
 * FIRST/MIDDLE/LAST_FRAG element flags. The skb is referenced and
 * queued on the buffer's skb_list for release on completion. In
 * non-packing mode the buffer is PRIMED immediately; in packing mode
 * only once it has no room for further elements.
 */
3696 qeth_fill_buffer(struct qeth_qdio_out_q *queue, struct qeth_qdio_out_buffer *buf,
3697 char *data, struct sk_buff *skb)
3699 struct qdio_buffer *buffer;
3700 int length = skb->len;
3705 QETH_DBF_TEXT(trace, 6, "qdfillbf");
3706 buffer = buf->buffer;
3707 atomic_inc(&skb->users);
3708 skb_queue_tail(&buf->skb_list, skb);
3709 element = buf->next_element_to_fill;
3710 while (length > 0) {
3711 /* length_here is the remaining amount of data in this page */
3712 length_here = PAGE_SIZE - ((unsigned long) data % PAGE_SIZE);
3713 if (length < length_here)
3714 length_here = length;
3715 buffer->element[element].addr = data;
3716 buffer->element[element].length = length_here;
3717 length -= length_here;
3720 buffer->element[element].flags = 0;
3722 buffer->element[element].flags =
3723 SBAL_FLAGS_LAST_FRAG;
3726 buffer->element[element].flags =
3727 SBAL_FLAGS_FIRST_FRAG;
3729 buffer->element[element].flags =
3730 SBAL_FLAGS_MIDDLE_FRAG;
3732 data += length_here;
3736 buf->next_element_to_fill = element;
3737 if (!queue->do_pack) {
3738 QETH_DBF_TEXT(trace, 6, "fillbfnp");
3739 /* set state to PRIMED -> will be flushed */
3740 atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
3742 QETH_DBF_TEXT(trace, 6, "fillbfpa");
3743 #ifdef CONFIG_QETH_PERF_STATS
3744 queue->card->perf_stats.skbs_sent_pack++;
3746 if (buf->next_element_to_fill >=
3747 QETH_MAX_BUFFER_ELEMENTS(queue->card)) {
3749 * packed buffer is full -> set state PRIMED
3750 * -> will be flushed
3752 atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
/*
 * Fast (non-packing) send path: spin-lock the queue via its state
 * atomic, claim the next buffer (dropping the packet if that buffer is
 * not EMPTY), advance next_buf_to_fill, unlock, then fill and flush
 * the single buffer outside the lock.
 */
3759 qeth_do_send_packet_fast(struct qeth_card *card, struct qeth_qdio_out_q *queue,
3760 struct sk_buff *skb, struct qeth_hdr *hdr,
3761 int elements_needed)
3763 struct qeth_qdio_out_buffer *buffer;
3766 QETH_DBF_TEXT(trace, 6, "dosndpfa");
3768 /* spin until we get the queue ... */
3769 while (atomic_compare_and_swap(QETH_OUT_Q_UNLOCKED,
3772 /* ... now we've got the queue */
3773 index = queue->next_buf_to_fill;
3774 buffer = &queue->bufs[queue->next_buf_to_fill];
3776 * check if buffer is empty to make sure that we do not 'overtake'
3777 * ourselves and try to fill a buffer that is already primed
3779 if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY) {
3780 card->stats.tx_dropped++;
3781 atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
3784 queue->next_buf_to_fill = (queue->next_buf_to_fill + 1) %
3785 QDIO_MAX_BUFFERS_PER_Q;
3786 atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
3787 qeth_fill_buffer(queue, buffer, (char *)hdr, skb);
3788 qeth_flush_buffers(queue, 0, index, 1);
/*
 * General (packing-capable) send path: lock the queue, possibly switch
 * to packing mode, prime the current buffer if the packet does not fit
 * and move to the next one, then fill the buffer. Unlocking is done by
 * decrementing queue->state; if the output handler raised the state in
 * the meantime (LOCKED_FLUSH), the loop also handles switching back to
 * non-packing and flushing a leftover packing buffer. Actual flushing
 * happens after the queue is unlocked.
 */
3793 qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
3794 struct sk_buff *skb, struct qeth_hdr *hdr,
3795 int elements_needed)
3797 struct qeth_qdio_out_buffer *buffer;
3799 int flush_count = 0;
3802 QETH_DBF_TEXT(trace, 6, "dosndpkt");
3804 /* spin until we get the queue ... */
3805 while (atomic_compare_and_swap(QETH_OUT_Q_UNLOCKED,
3808 start_index = queue->next_buf_to_fill;
3809 buffer = &queue->bufs[queue->next_buf_to_fill];
3811 * check if buffer is empty to make sure that we do not 'overtake'
3812 * ourselves and try to fill a buffer that is already primed
3814 if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY){
3815 card->stats.tx_dropped++;
3816 atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
3819 /* check if we need to switch packing state of this queue */
3820 qeth_switch_to_packing_if_needed(queue);
3821 if (queue->do_pack){
3822 /* does packet fit in current buffer? */
3823 if((QETH_MAX_BUFFER_ELEMENTS(card) -
3824 buffer->next_element_to_fill) < elements_needed){
3825 /* ... no -> set state PRIMED */
3826 atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
3828 queue->next_buf_to_fill =
3829 (queue->next_buf_to_fill + 1) %
3830 QDIO_MAX_BUFFERS_PER_Q;
3831 buffer = &queue->bufs[queue->next_buf_to_fill];
3832 /* we did a step forward, so check buffer state again */
3833 if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY){
3834 card->stats.tx_dropped++;
3835 /* return EBUSY because we sent old packet, not
3836 * the current one */
3838 atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
3843 qeth_fill_buffer(queue, buffer, (char *)hdr, skb);
3844 if (atomic_read(&buffer->state) == QETH_QDIO_BUF_PRIMED){
3845 /* next time fill the next buffer */
3847 queue->next_buf_to_fill = (queue->next_buf_to_fill + 1) %
3848 QDIO_MAX_BUFFERS_PER_Q;
3851 * queue->state will go from LOCKED -> UNLOCKED or from
3852 * LOCKED_FLUSH -> LOCKED if output_handler wanted to 'notify' us
3853 * (switch packing state or flush buffer to get another pci flag out).
3854 * In that case we will enter this loop
3856 while (atomic_dec_return(&queue->state)){
3857 /* check if we can go back to non-packing state */
3858 flush_count += qeth_switch_to_nonpacking_if_needed(queue);
3860 * check if we need to flush a packing buffer to get a pci
3861 * flag out on the queue
3863 if (!flush_count && !atomic_read(&queue->set_pci_flags_count))
3864 flush_count += qeth_flush_buffers_on_no_pci(queue);
3866 /* at this point the queue is UNLOCKED again */
3869 qeth_flush_buffers(queue, 0, start_index, flush_count);
/*
 * Top-level transmit path: pick the priority queue for the skb,
 * prepare the qeth header, compute the QDIO element count, then
 * dispatch via the packing path (qeth_do_send_packet) for non-IQD
 * cards or the fast path (qeth_do_send_packet_fast) otherwise.
 * Oversized packets are rejected with PRINT_ERR and discarded.
 * NOTE(review): excerpt elides locals/returns; comments cover only
 * the visible lines.
 */
3875 qeth_send_packet(struct qeth_card *card, struct sk_buff *skb)
3879 struct qeth_qdio_out_q *queue;
3880 struct qeth_hdr *hdr;
3881 int elements_needed;
3884 QETH_DBF_TEXT(trace, 6, "sendpkt");
3886 ipv = qeth_get_ip_version(skb);
3887 cast_type = qeth_get_cast_type(card, skb);
3888 queue = card->qdio.out_qs
3889 [qeth_get_priority_queue(card, skb, ipv, cast_type)];
3891 if ((rc = qeth_prepare_skb(card, &skb, &hdr, ipv))){
3892 QETH_DBF_TEXT_(trace, 4, "1err%d", rc);
3895 qeth_fill_header(card, hdr, skb, ipv, cast_type);
/* one element for the header page fragment plus however many the payload spans */
3896 elements_needed = 1 + (((((unsigned long) hdr) % PAGE_SIZE) + skb->len)
3898 if (elements_needed > QETH_MAX_BUFFER_ELEMENTS(card)){
3899 PRINT_ERR("qeth_do_send_packet: invalid size of "
3900 "IP packet. Discarded.");
3904 if (card->info.type != QETH_CARD_TYPE_IQD)
3905 rc = qeth_do_send_packet(card, queue, skb, hdr,
3908 rc = qeth_do_send_packet_fast(card, queue, skb, hdr,
/* on success, account the packet in the netdev statistics */
3912 card->stats.tx_packets++;
3913 card->stats.tx_bytes += skb->len;
/*
 * Emulated MII register read for the SIOCGMIIREG ioctl: the OSA card
 * has no real PHY, so plausible register values are synthesized.
 * PHYSID is derived from the device MAC address; most diagnostic
 * counters are left at their default (elided here) values.
 */
3919 qeth_mdio_read(struct net_device *dev, int phy_id, int regnum)
3921 struct qeth_card *card = (struct qeth_card *) dev->priv;
3925 case MII_BMCR: /* Basic mode control register */
/* report 100 Mbit unless the link is gigabit or 10-gigabit */
3927 if ((card->info.link_type != QETH_LINK_TYPE_GBIT_ETH)&&
3928 (card->info.link_type != QETH_LINK_TYPE_10GBIT_ETH))
3929 rc |= BMCR_SPEED100;
3931 case MII_BMSR: /* Basic mode status register */
/* always claim link up, autoneg complete, all 10/100 modes */
3932 rc = BMSR_ERCAP | BMSR_ANEGCOMPLETE | BMSR_LSTATUS |
3933 BMSR_10HALF | BMSR_10FULL | BMSR_100HALF | BMSR_100FULL |
3936 case MII_PHYSID1: /* PHYS ID 1 */
3937 rc = (dev->dev_addr[0] << 16) | (dev->dev_addr[1] << 8) |
3939 rc = (rc >> 5) & 0xFFFF;
3941 case MII_PHYSID2: /* PHYS ID 2 */
3942 rc = (dev->dev_addr[2] << 10) & 0xFFFF;
3944 case MII_ADVERTISE: /* Advertisement control reg */
3947 case MII_LPA: /* Link partner ability reg */
3948 rc = LPA_10HALF | LPA_10FULL | LPA_100HALF | LPA_100FULL |
3949 LPA_100BASE4 | LPA_LPACK;
3951 case MII_EXPANSION: /* Expansion register */
3953 case MII_DCOUNTER: /* disconnect counter */
3955 case MII_FCSCOUNTER: /* false carrier counter */
3957 case MII_NWAYTEST: /* N-way auto-neg test register */
3959 case MII_RERRCOUNTER: /* rx error counter */
/* the only real statistic: mirror the netdev rx error count */
3960 rc = card->stats.rx_errors;
3962 case MII_SREVISION: /* silicon revision */
3964 case MII_RESV1: /* reserved 1 */
3966 case MII_LBRERROR: /* loopback, rx, bypass error */
3968 case MII_PHYADDR: /* physical address */
3970 case MII_RESV2: /* reserved 2 */
3972 case MII_TPISTATUS: /* TPI status for 10mbps */
3974 case MII_NCONFIG: /* network interface config */
/*
 * Emulated MII register write for SIOCSMIIREG: all registers are
 * listed but no case stores anything - writes are deliberately
 * ignored since there is no real PHY behind the interface.
 */
3984 qeth_mdio_write(struct net_device *dev, int phy_id, int regnum, int value)
3987 case MII_BMCR: /* Basic mode control register */
3988 case MII_BMSR: /* Basic mode status register */
3989 case MII_PHYSID1: /* PHYS ID 1 */
3990 case MII_PHYSID2: /* PHYS ID 2 */
3991 case MII_ADVERTISE: /* Advertisement control reg */
3992 case MII_LPA: /* Link partner ability reg */
3993 case MII_EXPANSION: /* Expansion register */
3994 case MII_DCOUNTER: /* disconnect counter */
3995 case MII_FCSCOUNTER: /* false carrier counter */
3996 case MII_NWAYTEST: /* N-way auto-neg test register */
3997 case MII_RERRCOUNTER: /* rx error counter */
3998 case MII_SREVISION: /* silicon revision */
3999 case MII_RESV1: /* reserved 1 */
4000 case MII_LBRERROR: /* loopback, rx, bypass error */
4001 case MII_PHYADDR: /* physical address */
4002 case MII_RESV2: /* reserved 2 */
4003 case MII_TPISTATUS: /* TPI status for 10mbps */
4004 case MII_NCONFIG: /* network interface config */
/*
 * Map an IPA ARP return code (*rc) to a human-readable cause string
 * for the PRINT_WARN messages in the ARP ioctl helpers.  The elided
 * case bodies presumably also rewrite *rc to an errno value - the
 * callers pass &rc - TODO confirm against the full source.
 */
4010 static inline const char *
4011 qeth_arp_get_error_cause(int *rc)
4014 case QETH_IPA_ARP_RC_FAILED:
4016 return "operation failed";
4017 case QETH_IPA_ARP_RC_NOTSUPP:
4019 return "operation not supported";
4020 case QETH_IPA_ARP_RC_OUT_OF_RANGE:
4022 return "argument out of range";
4023 case QETH_IPA_ARP_RC_Q_NOTSUPP:
4025 return "query operation not supported";
4026 case QETH_IPA_ARP_RC_Q_NO_DATA:
4028 return "no query data available";
4030 return "unknown error";
4035 qeth_send_simple_setassparms(struct qeth_card *, enum qeth_ipa_funcs,
/*
 * SIOC_QETH_ARP_SET_NO_ENTRIES backend: set the size of the OSA ARP
 * cache via an IPA SETASSPARMS command.  Bails out for GuestLAN and
 * for cards without IPA_ARP_PROCESSING support.
 */
4039 qeth_arp_set_no_entries(struct qeth_card *card, int no_entries)
4044 QETH_DBF_TEXT(trace,3,"arpstnoe");
4046 /* TODO: really not supported by GuestLAN? */
4047 if (card->info.guestlan)
4049 if (!qeth_is_supported(card,IPA_ARP_PROCESSING)) {
4050 PRINT_WARN("ARP processing not supported "
4051 "on %s!\n", card->info.if_name);
4054 rc = qeth_send_simple_setassparms(card, IPA_ARP_PROCESSING,
4055 IPA_CMD_ASS_ARP_SET_NO_ENTRIES,
4059 PRINT_WARN("Could not set number of ARP entries on %s: "
4061 card->info.if_name, qeth_arp_get_error_cause(&rc),
/*
 * Copy ARP query entries from the IPA reply (qdata) into the
 * userspace staging buffer (qinfo->udata), dropping the leading
 * 32 bytes of "media specific information" from each entry so the
 * user sees the shorter uentry_size records.
 */
4068 qeth_copy_arp_entries_stripped(struct qeth_arp_query_info *qinfo,
4069 struct qeth_arp_query_data *qdata,
4070 int entry_size, int uentry_size)
4076 entry_ptr = (char *)&qdata->data;
4077 uentry_ptr = (char *)(qinfo->udata + qinfo->udata_offset);
4078 for (i = 0; i < qdata->no_entries; ++i){
4079 /* strip off 32 bytes "media specific information" */
4080 memcpy(uentry_ptr, (entry_ptr + 32), entry_size - 32);
4081 entry_ptr += entry_size;
4082 uentry_ptr += uentry_size;
/*
 * Reply callback for the ARP_QUERY_INFO IPA command.  Accumulates
 * (possibly stripped) ARP entries into qinfo->udata across multiple
 * replies; on the final reply (seq_no >= number_of_replies) writes
 * the entry count and the reply_bits mask into the buffer header.
 * Errors are propagated via cmd->hdr.return_code.
 * NOTE(review): excerpt elides returns/braces; comments describe
 * only the visible lines.
 */
4087 qeth_arp_query_cb(struct qeth_card *card, struct qeth_reply *reply,
4090 struct qeth_ipa_cmd *cmd;
4091 struct qeth_arp_query_data *qdata;
4092 struct qeth_arp_query_info *qinfo;
4097 QETH_DBF_TEXT(trace,4,"arpquecb");
4099 qinfo = (struct qeth_arp_query_info *) reply->param;
4100 cmd = (struct qeth_ipa_cmd *) data;
4101 if (cmd->hdr.return_code) {
4102 QETH_DBF_TEXT_(trace,4,"qaer1%i", cmd->hdr.return_code);
4105 if (cmd->data.setassparms.hdr.return_code) {
/* promote the assist-specific error into the generic header field */
4106 cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
4107 QETH_DBF_TEXT_(trace,4,"qaer2%i", cmd->hdr.return_code);
4110 qdata = &cmd->data.setassparms.data.query_arp;
/* pick the entry layout announced by the card */
4111 switch(qdata->reply_bits){
4113 uentry_size = entry_size = sizeof(struct qeth_arp_qi_entry5);
4114 if (qinfo->mask_bits & QETH_QARP_STRIP_ENTRIES)
4115 uentry_size = sizeof(struct qeth_arp_qi_entry5_short);
4118 /* fall through to default */
4120 /* tr is the same as eth -> entry7 */
4121 uentry_size = entry_size = sizeof(struct qeth_arp_qi_entry7);
4122 if (qinfo->mask_bits & QETH_QARP_STRIP_ENTRIES)
4123 uentry_size = sizeof(struct qeth_arp_qi_entry7_short);
4126 /* check if there is enough room in userspace */
4127 if ((qinfo->udata_len - qinfo->udata_offset) <
4128 qdata->no_entries * uentry_size){
4129 QETH_DBF_TEXT_(trace, 4, "qaer3%i", -ENOMEM);
4130 cmd->hdr.return_code = -ENOMEM;
4131 PRINT_WARN("query ARP user space buffer is too small for "
4132 "the returned number of ARP entries. "
4133 "Aborting query!\n");
4136 QETH_DBF_TEXT_(trace, 4, "anore%i",
4137 cmd->data.setassparms.hdr.number_of_replies);
4138 QETH_DBF_TEXT_(trace, 4, "aseqn%i", cmd->data.setassparms.hdr.seq_no);
4139 QETH_DBF_TEXT_(trace, 4, "anoen%i", qdata->no_entries);
4141 if (qinfo->mask_bits & QETH_QARP_STRIP_ENTRIES) {
4142 /* strip off "media specific information" */
4143 qeth_copy_arp_entries_stripped(qinfo, qdata, entry_size,
4146 /*copy entries to user buffer*/
4147 memcpy(qinfo->udata + qinfo->udata_offset,
4148 (char *)&qdata->data, qdata->no_entries*uentry_size);
4150 qinfo->no_entries += qdata->no_entries;
4151 qinfo->udata_offset += (qdata->no_entries*uentry_size);
4152 /* check if all replies received ... */
4153 if (cmd->data.setassparms.hdr.seq_no <
4154 cmd->data.setassparms.hdr.number_of_replies)
/* final reply: store total entry count at the buffer start */
4156 memcpy(qinfo->udata, &qinfo->no_entries, 4);
4157 /* keep STRIP_ENTRIES flag so the user program can distinguish
4158 * stripped entries from normal ones */
4159 if (qinfo->mask_bits & QETH_QARP_STRIP_ENTRIES)
4160 qdata->reply_bits |= QETH_QARP_STRIP_ENTRIES;
4161 memcpy(qinfo->udata + QETH_QARP_MASK_OFFSET,&qdata->reply_bits,2);
/* error path (elided label): record entry count gathered so far */
4165 memcpy(qinfo->udata, &i, 4);
/*
 * Wrap an ARP assist command in the IPA PDU header (with the ULP
 * connection token patched in) and hand it to
 * qeth_send_control_data with the supplied reply callback.
 */
4170 qeth_send_ipa_arp_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
4171 int len, int (*reply_cb)(struct qeth_card *,
4172 struct qeth_reply *,
4176 QETH_DBF_TEXT(trace,4,"sendarp");
4178 memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE);
4179 memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data),
4180 &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
4181 return qeth_send_control_data(card, IPA_PDU_HEADER_SIZE + len, iob,
4182 reply_cb, reply_param);
/*
 * Like qeth_send_ipa_arp_cmd, but for SNMP commands: additionally
 * patches the total and per-PDU length fields of the IPA PDU header
 * (SNMP payloads are variable-length) before sending.
 */
4186 qeth_send_ipa_snmp_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
4187 int len, int (*reply_cb)(struct qeth_card *,
4188 struct qeth_reply *,
4194 QETH_DBF_TEXT(trace,4,"sendsnmp");
4196 memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE);
4197 memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data),
4198 &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
4199 /* adjust PDU length fields in IPA_PDU_HEADER */
4200 s1 = (u32) IPA_PDU_HEADER_SIZE + len;
/* only the low 16 bits of each length are copied into the header */
4202 memcpy(QETH_IPA_PDU_LEN_TOTAL(iob->data), &s1, 2);
4203 memcpy(QETH_IPA_PDU_LEN_PDU1(iob->data), &s2, 2);
4204 memcpy(QETH_IPA_PDU_LEN_PDU2(iob->data), &s2, 2);
4205 memcpy(QETH_IPA_PDU_LEN_PDU3(iob->data), &s2, 2);
4206 return qeth_send_control_data(card, IPA_PDU_HEADER_SIZE + len, iob,
4207 reply_cb, reply_param);
4210 static struct qeth_cmd_buffer *
4211 qeth_get_setassparms_cmd(struct qeth_card *, enum qeth_ipa_funcs,
4212 __u16, __u16, enum qeth_prot_versions);
/*
 * SIOC_QETH_ARP_QUERY_INFO backend: allocate a kernel staging buffer
 * sized from userspace, issue ARP_QUERY_INFO, and copy the collected
 * entries (via qeth_arp_query_cb) back to udata.  On failure only
 * the 4-byte count header is copied back.
 * NOTE(review): the copy_to_user return values are visibly ignored
 * here - review against the full source before changing.
 */
4214 qeth_arp_query(struct qeth_card *card, char *udata)
4216 struct qeth_cmd_buffer *iob;
4217 struct qeth_arp_query_info qinfo = {0, };
4221 QETH_DBF_TEXT(trace,3,"arpquery");
4224 * currently GuestLAN does only deliver all zeros on query arp,
4225 * even though arp processing is supported (according to IPA supp.
4226 * funcs flags); since all zeros is no valueable information,
4227 * we say EOPNOTSUPP for all ARP functions
4229 if (card->info.guestlan)
4231 if (!qeth_is_supported(card,/*IPA_QUERY_ARP_ADDR_INFO*/
4232 IPA_ARP_PROCESSING)) {
4233 PRINT_WARN("ARP processing not supported "
4234 "on %s!\n", card->info.if_name);
4237 /* get size of userspace buffer and mask_bits -> 6 bytes */
4238 if (copy_from_user(&qinfo, udata, 6))
4240 if (!(qinfo.udata = kmalloc(qinfo.udata_len, GFP_KERNEL)))
4242 memset(qinfo.udata, 0, qinfo.udata_len);
4243 qinfo.udata_offset = QETH_QARP_ENTRIES_OFFSET;
4244 iob = qeth_get_setassparms_cmd(card, IPA_ARP_PROCESSING,
4245 IPA_CMD_ASS_ARP_QUERY_INFO,
4246 sizeof(int),QETH_PROT_IPV4);
4248 rc = qeth_send_ipa_arp_cmd(card, iob,
4249 QETH_SETASS_BASE_LEN+QETH_ARP_CMD_LEN,
4250 qeth_arp_query_cb, (void *)&qinfo);
4253 PRINT_WARN("Error while querying ARP cache on %s: %s "
4255 card->info.if_name, qeth_arp_get_error_cause(&rc),
/* failure: only the entry-count header goes back to userspace */
4257 copy_to_user(udata, qinfo.udata, 4);
4259 copy_to_user(udata, qinfo.udata, qinfo.udata_len);
4266 * SNMP command callback
/*
 * Reply callback for SET_SNMP_CONTROL.  Reuses qeth_arp_query_info
 * as an accumulator: the first reply copies the full qeth_snmp_cmd
 * header plus data, subsequent replies only the request payload,
 * until seq_no reaches used_total.  Reports -ENOMEM through
 * cmd->hdr.return_code when the user buffer is too small.
 * NOTE(review): excerpt elides returns/braces; comments describe
 * only the visible lines.
 */
4269 qeth_snmp_command_cb(struct qeth_card *card, struct qeth_reply *reply,
4270 unsigned long sdata)
4272 struct qeth_ipa_cmd *cmd;
4273 struct qeth_arp_query_info *qinfo;
4274 struct qeth_snmp_cmd *snmp;
4275 unsigned char *data;
4278 QETH_DBF_TEXT(trace,3,"snpcmdcb");
4280 cmd = (struct qeth_ipa_cmd *) sdata;
/* recover the raw PDU start to read the length fields from it */
4281 data = (unsigned char *)((char *)cmd - reply->offset);
4282 qinfo = (struct qeth_arp_query_info *) reply->param;
4283 snmp = &cmd->data.setadapterparms.data.snmp;
4285 if (cmd->hdr.return_code) {
4286 QETH_DBF_TEXT_(trace,4,"scer1%i", cmd->hdr.return_code);
4289 if (cmd->data.setadapterparms.hdr.return_code) {
4290 cmd->hdr.return_code = cmd->data.setadapterparms.hdr.return_code;
4291 QETH_DBF_TEXT_(trace,4,"scer2%i", cmd->hdr.return_code);
4294 data_len = *((__u16*)QETH_IPA_PDU_LEN_PDU1(data));
/* first reply carries the snmp command header, later ones only data */
4295 if (cmd->data.setadapterparms.hdr.seq_no == 1)
4296 data_len -= (__u16)((char *)&snmp->data - (char *)cmd);
4298 data_len -= (__u16)((char*)&snmp->request - (char *)cmd);
4300 /* check if there is enough room in userspace */
4301 if ((qinfo->udata_len - qinfo->udata_offset) < data_len) {
4302 QETH_DBF_TEXT_(trace, 4, "scer3%i", -ENOMEM);
4303 cmd->hdr.return_code = -ENOMEM;
4306 QETH_DBF_TEXT_(trace, 4, "snore%i",
4307 cmd->data.setadapterparms.hdr.used_total);
4308 QETH_DBF_TEXT_(trace, 4, "sseqn%i", cmd->data.setadapterparms.hdr.seq_no);
4309 /*copy entries to user buffer*/
4310 if (cmd->data.setadapterparms.hdr.seq_no == 1) {
4311 memcpy(qinfo->udata + qinfo->udata_offset,
4313 data_len + offsetof(struct qeth_snmp_cmd,data));
4314 qinfo->udata_offset += offsetof(struct qeth_snmp_cmd, data);
4316 memcpy(qinfo->udata + qinfo->udata_offset,
4317 (char *)&snmp->request, data_len);
4319 qinfo->udata_offset += data_len;
4320 /* check if all replies received ... */
4321 QETH_DBF_TEXT_(trace, 4, "srtot%i",
4322 cmd->data.setadapterparms.hdr.used_total);
4323 QETH_DBF_TEXT_(trace, 4, "srseq%i",
4324 cmd->data.setadapterparms.hdr.seq_no);
4325 if (cmd->data.setadapterparms.hdr.seq_no <
4326 cmd->data.setadapterparms.hdr.used_total)
4331 static struct qeth_cmd_buffer *
4332 qeth_get_ipacmd_buffer(struct qeth_card *, enum qeth_ipa_cmds,
4333 enum qeth_prot_versions );
/*
 * Build a SETADAPTERPARMS command buffer: get a generic IPA command
 * buffer and fill in the setadapterparms sub-header (command code,
 * length, and a single-frame sequence of 1/1).
 */
4335 static struct qeth_cmd_buffer *
4336 qeth_get_adapter_cmd(struct qeth_card *card, __u32 command, __u32 cmdlen)
4338 struct qeth_cmd_buffer *iob;
4339 struct qeth_ipa_cmd *cmd;
4341 iob = qeth_get_ipacmd_buffer(card,IPA_CMD_SETADAPTERPARMS,
4343 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
4344 cmd->data.setadapterparms.hdr.cmdlength = cmdlen;
4345 cmd->data.setadapterparms.hdr.command_code = command;
4346 cmd->data.setadapterparms.hdr.used_total = 1;
4347 cmd->data.setadapterparms.hdr.seq_no = 1;
4353 * function to send SNMP commands to OSA-E card
/*
 * SIOC_QETH_ADP_SET_SNMP_CONTROL backend: copy the user SNMP request
 * (qeth_snmp_ureq) into the kernel, allocate a reply staging buffer
 * of ureq->hdr.data_len bytes, send SET_SNMP_CONTROL, and copy the
 * accumulated reply back to udata.  Not available on GuestLAN or
 * cards lacking the SET_SNMP_CONTROL adapter parameter.
 * NOTE(review): req_len comes straight from userspace and is used to
 * size kmalloc/memcpy without a visible upper-bound check in this
 * excerpt - verify against the full source.
 */
4356 qeth_snmp_command(struct qeth_card *card, char *udata)
4358 struct qeth_cmd_buffer *iob;
4359 struct qeth_ipa_cmd *cmd;
4360 struct qeth_snmp_ureq *ureq;
4362 struct qeth_arp_query_info qinfo = {0, };
4365 QETH_DBF_TEXT(trace,3,"snmpcmd");
4367 if (card->info.guestlan)
4369 if (!qeth_adp_supported(card,IPA_SETADP_SET_SNMP_CONTROL)) {
4370 PRINT_WARN("SNMP Query MIBS not supported "
4371 "on %s!\n", card->info.if_name);
4374 /* skip 4 bytes (data_len struct member) to get req_len */
4375 if (copy_from_user(&req_len, udata + sizeof(int), sizeof(int)))
4377 ureq = kmalloc(req_len+sizeof(struct qeth_snmp_ureq_hdr), GFP_KERNEL);
4379 QETH_DBF_TEXT(trace, 2, "snmpnome");
4382 if (copy_from_user(ureq, udata,
4383 req_len+sizeof(struct qeth_snmp_ureq_hdr))){
4387 qinfo.udata_len = ureq->hdr.data_len;
4388 if (!(qinfo.udata = kmalloc(qinfo.udata_len, GFP_KERNEL))){
4392 memset(qinfo.udata, 0, qinfo.udata_len);
4393 qinfo.udata_offset = sizeof(struct qeth_snmp_ureq_hdr);
4395 iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_SNMP_CONTROL,
4396 QETH_SNMP_SETADP_CMDLENGTH + req_len);
4397 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
4398 memcpy(&cmd->data.setadapterparms.data.snmp, &ureq->cmd, req_len);
4399 rc = qeth_send_ipa_snmp_cmd(card, iob, QETH_SETADP_BASE_LEN + req_len,
4400 qeth_snmp_command_cb, (void *)&qinfo);
4402 PRINT_WARN("SNMP command failed on %s: (0x%x)\n",
4403 card->info.if_name, rc);
4405 copy_to_user(udata, qinfo.udata, qinfo.udata_len);
4413 qeth_default_setassparms_cb(struct qeth_card *, struct qeth_reply *,
4417 qeth_send_setassparms(struct qeth_card *, struct qeth_cmd_buffer *,
4420 (struct qeth_card *, struct qeth_reply *, unsigned long),
/*
 * SIOC_QETH_ARP_ADD_ENTRY backend: push a static ARP cache entry to
 * the card via ARP_ADD_ENTRY.  Disabled for GuestLAN and for cards
 * without IPA_ARP_PROCESSING.  On failure the entry's IPv4 address
 * is formatted into buf for the warning message.
 */
4424 qeth_arp_add_entry(struct qeth_card *card, struct qeth_arp_cache_entry *entry)
4426 struct qeth_cmd_buffer *iob;
4431 QETH_DBF_TEXT(trace,3,"arpadent");
4434 * currently GuestLAN does only deliver all zeros on query arp,
4435 * even though arp processing is supported (according to IPA supp.
4436 * funcs flags); since all zeros is no valueable information,
4437 * we say EOPNOTSUPP for all ARP functions
4439 if (card->info.guestlan)
4441 if (!qeth_is_supported(card,IPA_ARP_PROCESSING)) {
4442 PRINT_WARN("ARP processing not supported "
4443 "on %s!\n", card->info.if_name);
4447 iob = qeth_get_setassparms_cmd(card, IPA_ARP_PROCESSING,
4448 IPA_CMD_ASS_ARP_ADD_ENTRY,
4449 sizeof(struct qeth_arp_cache_entry),
4451 rc = qeth_send_setassparms(card, iob,
4452 sizeof(struct qeth_arp_cache_entry),
4453 (unsigned long) entry,
4454 qeth_default_setassparms_cb, NULL);
4457 qeth_ipaddr4_to_string((u8 *)entry->ipaddr, buf);
4458 PRINT_WARN("Could not add ARP entry for address %s on %s: "
4460 buf, card->info.if_name,
4461 qeth_arp_get_error_cause(&rc), tmp, tmp);
/*
 * SIOC_QETH_ARP_REMOVE_ENTRY backend: delete a static ARP cache
 * entry on the card.  Only the first 12 bytes of the entry (the
 * lookup key) are copied into buf and sent with ARP_REMOVE_ENTRY.
 * Same GuestLAN / IPA_ARP_PROCESSING gating as the add path.
 */
4467 qeth_arp_remove_entry(struct qeth_card *card, struct qeth_arp_cache_entry *entry)
4469 struct qeth_cmd_buffer *iob;
4470 char buf[16] = {0, };
4474 QETH_DBF_TEXT(trace,3,"arprment");
4477 * currently GuestLAN does only deliver all zeros on query arp,
4478 * even though arp processing is supported (according to IPA supp.
4479 * funcs flags); since all zeros is no valueable information,
4480 * we say EOPNOTSUPP for all ARP functions
4482 if (card->info.guestlan)
4484 if (!qeth_is_supported(card,IPA_ARP_PROCESSING)) {
4485 PRINT_WARN("ARP processing not supported "
4486 "on %s!\n", card->info.if_name);
4489 memcpy(buf, entry, 12);
4490 iob = qeth_get_setassparms_cmd(card, IPA_ARP_PROCESSING,
4491 IPA_CMD_ASS_ARP_REMOVE_ENTRY,
4494 rc = qeth_send_setassparms(card, iob,
4495 12, (unsigned long)buf,
4496 qeth_default_setassparms_cb, NULL);
/* buf is reused to hold the printable address for the warning */
4500 qeth_ipaddr4_to_string((u8 *)entry->ipaddr, buf);
4501 PRINT_WARN("Could not delete ARP entry for address %s on %s: "
4503 buf, card->info.if_name,
4504 qeth_arp_get_error_cause(&rc), tmp, tmp);
/*
 * SIOC_QETH_ARP_FLUSH_CACHE backend: clear the card's ARP cache via
 * ARP_FLUSH_CACHE.  Additionally excluded on HiperSockets (IQD)
 * cards, not just GuestLAN.
 */
4510 qeth_arp_flush_cache(struct qeth_card *card)
4515 QETH_DBF_TEXT(trace,3,"arpflush");
4518 * currently GuestLAN does only deliver all zeros on query arp,
4519 * even though arp processing is supported (according to IPA supp.
4520 * funcs flags); since all zeros is no valueable information,
4521 * we say EOPNOTSUPP for all ARP functions
4523 if (card->info.guestlan || (card->info.type == QETH_CARD_TYPE_IQD))
4525 if (!qeth_is_supported(card,IPA_ARP_PROCESSING)) {
4526 PRINT_WARN("ARP processing not supported "
4527 "on %s!\n", card->info.if_name);
4530 rc = qeth_send_simple_setassparms(card, IPA_ARP_PROCESSING,
4531 IPA_CMD_ASS_ARP_FLUSH_CACHE, 0);
4534 PRINT_WARN("Could not flush ARP cache on %s: %s (0x%x/%d)\n",
4535 card->info.if_name, qeth_arp_get_error_cause(&rc),
/*
 * net_device ioctl handler: dispatches the private SIOC_QETH_*
 * ARP/SNMP commands (most gated on CAP_NET_ADMIN) plus the standard
 * MII ioctls, which are served by the emulated qeth_mdio_read/write.
 * Only usable while the card is UP or in SOFTSETUP state.
 * NOTE(review): excerpt elides break/return lines between cases;
 * comments describe only the visible lines.
 */
4542 qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
4544 struct qeth_card *card = (struct qeth_card *)dev->priv;
4545 struct qeth_arp_cache_entry arp_entry;
4546 struct mii_ioctl_data *mii_data;
4552 if ((card->state != CARD_STATE_UP) &&
4553 (card->state != CARD_STATE_SOFTSETUP))
4557 case SIOC_QETH_ARP_SET_NO_ENTRIES:
4558 if (!capable(CAP_NET_ADMIN)){
4562 rc = qeth_arp_set_no_entries(card, rq->ifr_ifru.ifru_ivalue);
4564 case SIOC_QETH_ARP_QUERY_INFO:
4565 if (!capable(CAP_NET_ADMIN)){
4569 rc = qeth_arp_query(card, rq->ifr_ifru.ifru_data);
4571 case SIOC_QETH_ARP_ADD_ENTRY:
4572 if (!capable(CAP_NET_ADMIN)){
4576 if (copy_from_user(&arp_entry, rq->ifr_ifru.ifru_data,
4577 sizeof(struct qeth_arp_cache_entry)))
4580 rc = qeth_arp_add_entry(card, &arp_entry);
4582 case SIOC_QETH_ARP_REMOVE_ENTRY:
4583 if (!capable(CAP_NET_ADMIN)){
4587 if (copy_from_user(&arp_entry, rq->ifr_ifru.ifru_data,
4588 sizeof(struct qeth_arp_cache_entry)))
4591 rc = qeth_arp_remove_entry(card, &arp_entry);
4593 case SIOC_QETH_ARP_FLUSH_CACHE:
4594 if (!capable(CAP_NET_ADMIN)){
4598 rc = qeth_arp_flush_cache(card);
4600 case SIOC_QETH_ADP_SET_SNMP_CONTROL:
4601 rc = qeth_snmp_command(card, rq->ifr_ifru.ifru_data);
4603 case SIOC_QETH_GET_CARD_TYPE:
4604 if ((card->info.type == QETH_CARD_TYPE_OSAE) &&
4605 !card->info.guestlan)
/* MII cases: PHY id is always 0 on the emulated PHY */
4610 mii_data = if_mii(rq);
4611 mii_data->phy_id = 0;
4614 mii_data = if_mii(rq);
4615 if (mii_data->phy_id != 0)
4618 mii_data->val_out = qeth_mdio_read(dev,mii_data->phy_id,
4624 /* TODO: remove return if qeth_mdio_write does something */
4625 if (!capable(CAP_NET_ADMIN)){
4629 mii_data = if_mii(rq);
4630 if (mii_data->phy_id != 0)
4633 qeth_mdio_write(dev, mii_data->phy_id, mii_data->reg_num,
4640 QETH_DBF_TEXT_(trace, 2, "ioce%d", rc);
/* net_device get_stats hook: return the card's embedded statistics. */
4644 static struct net_device_stats *
4645 qeth_get_stats(struct net_device *dev)
4647 struct qeth_card *card;
4649 card = (struct qeth_card *) (dev->priv);
4651 QETH_DBF_TEXT(trace,5,"getstat");
4653 return &card->stats;
/*
 * net_device change_mtu hook: reject MTUs above 65535, and - when the
 * card cannot fragment (no IPA_IP_FRAGMENTATION) - also reject MTUs
 * the card itself deems invalid (qeth_mtu_is_valid).
 */
4657 qeth_change_mtu(struct net_device *dev, int new_mtu)
4659 struct qeth_card *card;
4662 card = (struct qeth_card *) (dev->priv);
4664 QETH_DBF_TEXT(trace,4,"chgmtu");
4665 sprintf(dbf_text, "%8x", new_mtu);
4666 QETH_DBF_TEXT(trace,4,dbf_text);
4670 if (new_mtu > 65535)
4672 if ((!qeth_is_supported(card,IPA_IP_FRAGMENTATION)) &&
4673 (!qeth_mtu_is_valid(card, new_mtu)))
4679 #ifdef CONFIG_QETH_VLAN
/*
 * VLAN rx_register hook: store the new vlan_group on the card under
 * vlanlock (irq-safe, since the lock is also taken in other paths).
 */
4681 qeth_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
4683 struct qeth_card *card;
4684 unsigned long flags;
4686 QETH_DBF_TEXT(trace,4,"vlanreg");
4688 card = (struct qeth_card *) dev->priv;
4689 spin_lock_irqsave(&card->vlanlock, flags);
4690 card->vlangrp = grp;
4691 spin_unlock_irqrestore(&card->vlanlock, flags);
/*
 * Drop all pending skbs tagged with VLAN id 'vid' from one outbound
 * buffer: dequeue everything, release matching skbs (drop a user
 * reference), park the rest on tmp_list, then requeue them in order.
 */
4695 qeth_free_vlan_buffer(struct qeth_card *card, struct qeth_qdio_out_buffer *buf,
4699 struct sk_buff *skb;
4700 struct sk_buff_head tmp_list;
4702 skb_queue_head_init(&tmp_list);
4703 for(i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i){
4704 while ((skb = skb_dequeue(&buf->skb_list))){
4705 if (vlan_tx_tag_present(skb) &&
4706 (vlan_tx_tag_get(skb) == vid)) {
4707 atomic_dec(&skb->users);
4710 skb_queue_tail(&tmp_list, skb);
/* put the surviving skbs back on the buffer's list */
4713 while ((skb = skb_dequeue(&tmp_list)))
4714 skb_queue_tail(&buf->skb_list, skb);
/*
 * Purge skbs for VLAN id 'vid' from every buffer of every outbound
 * queue (called when the VLAN device is removed).
 */
4718 qeth_free_vlan_skbs(struct qeth_card *card, unsigned short vid)
4722 QETH_DBF_TEXT(trace, 4, "frvlskbs");
4723 for (i = 0; i < card->qdio.no_out_queues; ++i){
4724 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j)
4725 qeth_free_vlan_buffer(card, &card->qdio.
4726 out_qs[i]->bufs[j], vid);
/*
 * Deregister all IPv4 addresses of the VLAN device with id 'vid':
 * walk the device's in_device ifa list and queue a delete for each
 * address via qeth_delete_ip.
 */
4731 qeth_free_vlan_addresses4(struct qeth_card *card, unsigned short vid)
4733 struct in_device *in_dev;
4734 struct in_ifaddr *ifa;
4735 struct qeth_ipaddr *addr;
4737 QETH_DBF_TEXT(trace, 4, "frvaddr4");
4741 in_dev = __in_dev_get(card->vlangrp->vlan_devices[vid]);
4744 for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next){
4745 addr = qeth_get_addr_buffer(QETH_PROT_IPV4);
4747 addr->u.a4.addr = ifa->ifa_address;
4748 addr->u.a4.mask = ifa->ifa_mask;
4749 addr->type = QETH_IP_TYPE_NORMAL;
4750 if (!qeth_delete_ip(card, addr))
/*
 * IPv6 counterpart of qeth_free_vlan_addresses4: walk the inet6_dev
 * address list of the VLAN device and queue a delete for each
 * address.  Balances in6_dev_get with in6_dev_put.
 */
4759 qeth_free_vlan_addresses6(struct qeth_card *card, unsigned short vid)
4761 struct inet6_dev *in6_dev;
4762 struct inet6_ifaddr *ifa;
4763 struct qeth_ipaddr *addr;
4765 QETH_DBF_TEXT(trace, 4, "frvaddr6");
4768 in6_dev = in6_dev_get(card->vlangrp->vlan_devices[vid]);
4771 for (ifa = in6_dev->addr_list; ifa; ifa = ifa->lst_next){
4772 addr = qeth_get_addr_buffer(QETH_PROT_IPV6);
4774 memcpy(&addr->u.a6.addr, &ifa->addr,
4775 sizeof(struct in6_addr));
4776 addr->u.a6.pfxlen = ifa->prefix_len;
4777 addr->type = QETH_IP_TYPE_NORMAL;
4778 if (!qeth_delete_ip(card, addr))
4782 in6_dev_put(in6_dev);
/*
 * VLAN rx_kill_vid hook: flush pending skbs for the VLAN, deregister
 * its IPv4/IPv6 addresses, clear the vlan_devices slot under
 * vlanlock, and kick the set-IP / set-MC worker threads to apply the
 * changes asynchronously.
 */
4786 qeth_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
4788 struct qeth_card *card;
4789 unsigned long flags;
4791 QETH_DBF_TEXT(trace,4,"vlkilvid");
4793 card = (struct qeth_card *) dev->priv;
4794 /* free all skbs for the vlan device */
4795 qeth_free_vlan_skbs(card, vid);
4796 spin_lock_irqsave(&card->vlanlock, flags);
4797 /* unregister IP addresses of vlan device */
4798 qeth_free_vlan_addresses4(card, vid);
4799 qeth_free_vlan_addresses6(card, vid);
4801 card->vlangrp->vlan_devices[vid] = NULL;
4802 spin_unlock_irqrestore(&card->vlanlock, flags);
4803 if ( (qeth_set_thread_start_bit(card, QETH_SET_IP_THREAD) == 0) ||
4804 (qeth_set_thread_start_bit(card, QETH_SET_MC_THREAD) == 0) )
4805 schedule_work(&card->kernel_thread_starter);
/* neigh_setup net_device hook; body not visible in this excerpt. */
4810 qeth_neigh_setup(struct net_device *dev, struct neigh_parms *np)
4815 #ifdef CONFIG_QETH_IPV6
/*
 * Build an EUI-64 interface identifier from the 6-byte MAC address
 * plus the 16-bit dev_id inserted in the middle (bytes 3-4), for
 * Ethernet / token-ring style devices.
 */
4817 qeth_ipv6_generate_eui64(u8 * eui, struct net_device *dev)
4819 switch (dev->type) {
4822 case ARPHRD_IEEE802_TR:
4823 if (dev->addr_len != ETH_ALEN)
4825 memcpy(eui, dev->dev_addr, 3);
4826 memcpy(eui + 5, dev->dev_addr + 3, 3);
4827 eui[3] = (dev->dev_id >> 8) & 0xff;
4828 eui[4] = dev->dev_id & 0xff;
/*
 * Map an IPv4 multicast address to the corresponding multicast MAC,
 * using the token-ring mapping for TR devices and the Ethernet
 * mapping otherwise.
 */
4837 qeth_get_mac_for_ipm(__u32 ipm, char *mac, struct net_device *dev)
4839 if (dev->type == ARPHRD_IEEE802_TR)
4840 ip_tr_mc_map(ipm, mac);
4842 ip_eth_mc_map(ipm, mac);
/*
 * Allocate (GFP_ATOMIC - callers may hold spinlocks) and zero a
 * qeth_ipaddr, defaulting its type to QETH_IP_TYPE_NORMAL.  Warns
 * and (per the elided path) presumably returns NULL on failure.
 */
4845 static struct qeth_ipaddr *
4846 qeth_get_addr_buffer(enum qeth_prot_versions prot)
4848 struct qeth_ipaddr *addr;
4850 addr = kmalloc(sizeof(struct qeth_ipaddr), GFP_ATOMIC);
4852 PRINT_WARN("Not enough memory to add address\n");
4855 memset(addr,0,sizeof(struct qeth_ipaddr));
4856 addr->type = QETH_IP_TYPE_NORMAL;
/*
 * Queue deletion of every multicast address on the card's ip_list:
 * for each multicast entry, clone it into a todo record with negated
 * user count (negative users == delete) and insert it into the todo
 * list, all under ip_lock.
 */
4862 qeth_delete_mc_addresses(struct qeth_card *card)
4864 struct qeth_ipaddr *ipm, *iptodo;
4865 unsigned long flags;
4867 QETH_DBF_TEXT(trace,4,"delmc");
4868 spin_lock_irqsave(&card->ip_lock, flags);
4869 list_for_each_entry(ipm, &card->ip_list, entry){
4870 if (!ipm->is_multicast)
4872 iptodo = qeth_get_addr_buffer(ipm->proto);
4874 QETH_DBF_TEXT(trace, 2, "dmcnomem");
4877 memcpy(iptodo, ipm, sizeof(struct qeth_ipaddr));
/* negative user count marks the todo entry as a removal */
4878 iptodo->users = iptodo->users * -1;
4879 if (!__qeth_insert_ip_todo(card, iptodo, 0))
4882 spin_unlock_irqrestore(&card->ip_lock, flags);
/*
 * Register every IPv4 multicast group of an in_device with the card:
 * map each group address to its multicast MAC and add it as a
 * multicast qeth_ipaddr via qeth_add_ip.
 */
4886 qeth_add_mc(struct qeth_card *card, struct in_device *in4_dev)
4888 struct qeth_ipaddr *ipm;
4889 struct ip_mc_list *im4;
4890 char buf[MAX_ADDR_LEN];
4892 QETH_DBF_TEXT(trace,4,"addmc");
4893 for (im4 = in4_dev->mc_list; im4; im4 = im4->next) {
4894 qeth_get_mac_for_ipm(im4->multiaddr, buf, in4_dev->dev);
4895 ipm = qeth_get_addr_buffer(QETH_PROT_IPV4);
4898 ipm->u.a4.addr = im4->multiaddr;
4899 memcpy(ipm->mac,buf,OSA_ADDR_LEN);
4900 ipm->is_multicast = 1;
4901 if (!qeth_add_ip(card,ipm))
/*
 * Register IPv4 multicast groups of all active VLAN devices on this
 * card (only when the card supports IPA_FULL_VLAN and a vlan_group
 * is registered).  Compiled out without CONFIG_QETH_VLAN.
 */
4907 qeth_add_vlan_mc(struct qeth_card *card)
4909 #ifdef CONFIG_QETH_VLAN
4910 struct in_device *in_dev;
4911 struct vlan_group *vg;
4914 QETH_DBF_TEXT(trace,4,"addmcvl");
4915 if (!qeth_is_supported(card,IPA_FULL_VLAN) ||
4916 (card->vlangrp == NULL))
4920 for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
/* only consider VLAN devices that exist and are up */
4921 if (vg->vlan_devices[i] == NULL ||
4922 !(vg->vlan_devices[i]->flags & IFF_UP))
4924 in_dev = in_dev_get(vg->vlan_devices[i]);
4927 read_lock(&in_dev->mc_list_lock);
4928 qeth_add_mc(card,in_dev);
4929 read_unlock(&in_dev->mc_list_lock);
/*
 * Register the IPv4 multicast groups of the card's own net_device
 * (under mc_list_lock) and then those of its VLAN children.
 */
4936 qeth_add_multicast_ipv4(struct qeth_card *card)
4938 struct in_device *in4_dev;
4940 QETH_DBF_TEXT(trace,4,"chkmcv4");
4941 in4_dev = in_dev_get(card->dev);
4942 if (in4_dev == NULL)
4944 read_lock(&in4_dev->mc_list_lock);
4945 qeth_add_mc(card, in4_dev);
4946 qeth_add_vlan_mc(card);
4947 read_unlock(&in4_dev->mc_list_lock);
4948 in_dev_put(in4_dev);
4951 #ifdef CONFIG_QETH_IPV6
/*
 * IPv6 counterpart of qeth_add_mc: for each multicast group on the
 * inet6_dev, map it to a MAC via ndisc_mc_map and add it as a
 * multicast qeth_ipaddr.
 */
4953 qeth_add_mc6(struct qeth_card *card, struct inet6_dev *in6_dev)
4955 struct qeth_ipaddr *ipm;
4956 struct ifmcaddr6 *im6;
4957 char buf[MAX_ADDR_LEN];
4959 QETH_DBF_TEXT(trace,4,"addmc6");
4960 for (im6 = in6_dev->mc_list; im6 != NULL; im6 = im6->next) {
4961 ndisc_mc_map(&im6->mca_addr, buf, in6_dev->dev, 0);
4962 ipm = qeth_get_addr_buffer(QETH_PROT_IPV6);
4965 ipm->is_multicast = 1;
4966 memcpy(ipm->mac,buf,OSA_ADDR_LEN);
4967 memcpy(&ipm->u.a6.addr,&im6->mca_addr.s6_addr,
4968 sizeof(struct in6_addr));
4969 if (!qeth_add_ip(card,ipm))
/*
 * IPv6 counterpart of qeth_add_vlan_mc: register IPv6 multicast
 * groups of all active VLAN devices, taking each inet6_dev's lock
 * and balancing in6_dev_get with in6_dev_put.
 */
4975 qeth_add_vlan_mc6(struct qeth_card *card)
4977 #ifdef CONFIG_QETH_VLAN
4978 struct inet6_dev *in_dev;
4979 struct vlan_group *vg;
4982 QETH_DBF_TEXT(trace,4,"admc6vl");
4983 if (!qeth_is_supported(card,IPA_FULL_VLAN) ||
4984 (card->vlangrp == NULL))
4988 for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
4989 if (vg->vlan_devices[i] == NULL ||
4990 !(vg->vlan_devices[i]->flags & IFF_UP))
4992 in_dev = in6_dev_get(vg->vlan_devices[i]);
4995 read_lock(&in_dev->lock);
4996 qeth_add_mc6(card,in_dev);
4997 read_unlock(&in_dev->lock);
4998 in6_dev_put(in_dev);
5000 #endif /* CONFIG_QETH_VLAN */
/*
 * Register IPv6 multicast groups of the card's net_device and its
 * VLAN children; skipped entirely when the card lacks IPA_IPV6.
 */
5004 qeth_add_multicast_ipv6(struct qeth_card *card)
5006 struct inet6_dev *in6_dev;
5008 QETH_DBF_TEXT(trace,4,"chkmcv6");
5009 if (!qeth_is_supported(card, IPA_IPV6))
5012 in6_dev = in6_dev_get(card->dev);
5013 if (in6_dev == NULL)
5015 read_lock(&in6_dev->lock);
5016 qeth_add_mc6(card, in6_dev);
5017 qeth_add_vlan_mc6(card);
5018 read_unlock(&in6_dev->lock);
5019 in6_dev_put(in6_dev);
5021 #endif /* CONFIG_QETH_IPV6 */
5024 * set multicast address on card
/*
 * net_device set_multicast_list hook: defer the actual work to the
 * set-MC kernel thread by setting its start bit and scheduling the
 * thread starter work item.
 */
5027 qeth_set_multicast_list(struct net_device *dev)
5029 struct qeth_card *card;
5031 QETH_DBF_TEXT(trace,3,"setmulti");
5032 card = (struct qeth_card *) dev->priv;
5034 if (qeth_set_thread_start_bit(card, QETH_SET_MC_THREAD) == 0)
5035 schedule_work(&card->kernel_thread_starter);
/*
 * Zero a qeth_ipa_cmd and fill in its common header: command code,
 * host initiator, the card's IPA sequence number, adapter type/port,
 * protocol version, and single-parameter defaults.
 */
5039 qeth_fill_ipacmd_header(struct qeth_card *card, struct qeth_ipa_cmd *cmd,
5040 __u8 command, enum qeth_prot_versions prot)
5042 memset(cmd, 0, sizeof (struct qeth_ipa_cmd));
5043 cmd->hdr.command = command;
5044 cmd->hdr.initiator = IPA_CMD_INITIATOR_HOST;
5045 cmd->hdr.seqno = card->seqno.ipa;
5046 cmd->hdr.adapter_type = qeth_get_ipa_adp_type(card->info.link_type);
5047 cmd->hdr.rel_adapter_no = (__u8) card->info.portno;
5048 cmd->hdr.prim_version_no = 1;
5049 cmd->hdr.param_count = 1;
5050 cmd->hdr.prot_version = prot;
5051 cmd->hdr.ipa_supported = 0;
5052 cmd->hdr.ipa_enabled = 0;
/*
 * Obtain a command buffer from the write channel (may block in
 * qeth_wait_for_buffer) and initialize the embedded IPA command
 * header for the given command and protocol version.
 */
5055 static struct qeth_cmd_buffer *
5056 qeth_get_ipacmd_buffer(struct qeth_card *card, enum qeth_ipa_cmds ipacmd,
5057 enum qeth_prot_versions prot)
5059 struct qeth_cmd_buffer *iob;
5060 struct qeth_ipa_cmd *cmd;
5062 iob = qeth_wait_for_buffer(&card->write);
5063 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
5064 qeth_fill_ipacmd_header(card, cmd, ipacmd, prot);
/*
 * Send a SETIPM/DELIPM command for a multicast address: copy the MAC
 * and the v4 or v6 address (chosen by addr->proto) into the setdelipm
 * payload and submit via qeth_send_ipa_cmd with no reply callback.
 */
5070 qeth_send_setdelmc(struct qeth_card *card, struct qeth_ipaddr *addr, int ipacmd)
5073 struct qeth_cmd_buffer *iob;
5074 struct qeth_ipa_cmd *cmd;
5076 QETH_DBF_TEXT(trace,4,"setdelmc");
5078 iob = qeth_get_ipacmd_buffer(card, ipacmd, addr->proto);
5079 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
5080 memcpy(&cmd->data.setdelipm.mac,addr->mac, OSA_ADDR_LEN);
5081 if (addr->proto == QETH_PROT_IPV6)
5082 memcpy(cmd->data.setdelipm.ip6, &addr->u.a6.addr,
5083 sizeof(struct in6_addr));
5085 memcpy(&cmd->data.setdelipm.ip4, &addr->u.a4.addr,4);
5087 rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
/*
 * Expand an IPv6 prefix length into a 16-byte netmask: each byte is
 * 0xFF up to the prefix boundary, with the boundary byte derived
 * from 0xFF00 >> j.  (Loop internals partially elided here.)
 */
5092 qeth_fill_netmask(u8 *netmask, unsigned int len)
5095 for (i=0;i<16;i++) {
5100 netmask[i] = (u8)(0xFF00>>j);
/*
 * Send a SETIP/DELIP command for a unicast address: fill the v6
 * payload (address + netmask built from the prefix length + flags)
 * or the v4 payload (address + mask + flags) depending on
 * addr->proto, then submit via qeth_send_ipa_cmd.
 */
5107 qeth_send_setdelip(struct qeth_card *card, struct qeth_ipaddr *addr,
5108 int ipacmd, unsigned int flags)
5111 struct qeth_cmd_buffer *iob;
5112 struct qeth_ipa_cmd *cmd;
5115 QETH_DBF_TEXT(trace,4,"setdelip");
5116 QETH_DBF_TEXT_(trace,4,"flags%02X", flags);
5118 iob = qeth_get_ipacmd_buffer(card, ipacmd, addr->proto);
5119 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
5120 if (addr->proto == QETH_PROT_IPV6) {
5121 memcpy(cmd->data.setdelip6.ip_addr, &addr->u.a6.addr,
5122 sizeof(struct in6_addr));
5123 qeth_fill_netmask(netmask,addr->u.a6.pfxlen);
5124 memcpy(cmd->data.setdelip6.mask, netmask,
5125 sizeof(struct in6_addr));
5126 cmd->data.setdelip6.flags = flags;
5128 memcpy(cmd->data.setdelip4.ip_addr, &addr->u.a4.addr, 4);
5129 memcpy(cmd->data.setdelip4.mask, &addr->u.a4.mask, 4);
5130 cmd->data.setdelip4.flags = flags;
5133 rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
/*
 * Register one IP address entry with the card, retrying on failure
 * (do/while loop bounded by 'cnt').  Multicast entries go through
 * SETIPM, unicast through SETIP.
 */
5139 qeth_register_addr_entry(struct qeth_card *card, struct qeth_ipaddr *addr)
5145 if (addr->proto == QETH_PROT_IPV4) {
5146 QETH_DBF_TEXT(trace, 2,"setaddr4");
5147 QETH_DBF_HEX(trace, 3, &addr->u.a4.addr, sizeof(int));
5148 } else if (addr->proto == QETH_PROT_IPV6) {
5149 QETH_DBF_TEXT(trace, 2, "setaddr6");
/* dump the 128-bit address in two 8-byte halves */
5150 QETH_DBF_HEX(trace,3,&addr->u.a6.addr,8);
5151 QETH_DBF_HEX(trace,3,((char *)&addr->u.a6.addr)+8,8);
5153 QETH_DBF_TEXT(trace, 2, "setaddr?");
5154 QETH_DBF_HEX(trace, 3, addr, sizeof(struct qeth_ipaddr));
5157 if (addr->is_multicast)
5158 rc = qeth_send_setdelmc(card, addr, IPA_CMD_SETIPM);
5160 rc = qeth_send_setdelip(card, addr, IPA_CMD_SETIP,
5163 QETH_DBF_TEXT(trace, 2, "failed");
/* retry until success or the retry budget is exhausted */
5164 } while ((--cnt > 0) && rc);
5166 QETH_DBF_TEXT(trace, 2, "FAILED");
5167 /* TODO: re-activate this warning as soon as we have a
5169 qeth_ipaddr_to_string(addr->proto, (u8 *)&addr->u, buf);
5170 PRINT_WARN("Could not register IP address %s (rc=%x)\n",
/*
 * Deregister one IP address entry from the card: DELIPM for multicast,
 * DELIP for unicast.  Mirror image of qeth_register_addr_entry().
 */
5178 qeth_deregister_addr_entry(struct qeth_card *card, struct qeth_ipaddr *addr)
5183 if (addr->proto == QETH_PROT_IPV4) {
5184 QETH_DBF_TEXT(trace, 2,"deladdr4");
5185 QETH_DBF_HEX(trace, 3, &addr->u.a4.addr, sizeof(int));
5186 } else if (addr->proto == QETH_PROT_IPV6) {
5187 QETH_DBF_TEXT(trace, 2, "deladdr6");
5188 QETH_DBF_HEX(trace,3,&addr->u.a6.addr,8);
5189 QETH_DBF_HEX(trace,3,((char *)&addr->u.a6.addr)+8,8);
5191 QETH_DBF_TEXT(trace, 2, "deladdr?");
5192 QETH_DBF_HEX(trace, 3, addr, sizeof(struct qeth_ipaddr));
5194 if (addr->is_multicast)
5195 rc = qeth_send_setdelmc(card, addr, IPA_CMD_DELIPM);
5197 rc = qeth_send_setdelip(card, addr, IPA_CMD_DELIP,
5200 QETH_DBF_TEXT(trace, 2, "failed");
5201 /* TODO: re-activate this warning as soon as we have a
5203 qeth_ipaddr_to_string(addr->proto, (u8 *)&addr->u, buf);
5204 PRINT_WARN("Could not deregister IP address %s (rc=%x)\n",
/*
 * net_device init callback: wire up all 2.6-era netdev function
 * pointers (open/stop/xmit/ioctl/...), apply card-type dependent
 * flags, MTU and header length.
 */
5212 qeth_netdev_init(struct net_device *dev)
5214 struct qeth_card *card;
5216 card = (struct qeth_card *) dev->priv;
5218 QETH_DBF_TEXT(trace,3,"initdev");
5220 dev->tx_timeout = &qeth_tx_timeout;
5221 dev->watchdog_timeo = QETH_TX_TIMEOUT;
5222 dev->open = qeth_open;
5223 dev->stop = qeth_stop;
5224 dev->hard_start_xmit = qeth_hard_start_xmit;
5225 dev->do_ioctl = qeth_do_ioctl;
5226 dev->get_stats = qeth_get_stats;
5227 dev->change_mtu = qeth_change_mtu;
5228 dev->neigh_setup = qeth_neigh_setup;
5229 dev->set_multicast_list = qeth_set_multicast_list;
5230 #ifdef CONFIG_QETH_VLAN
5231 dev->vlan_rx_register = qeth_vlan_rx_register;
5232 dev->vlan_rx_kill_vid = qeth_vlan_rx_kill_vid;
/* NOARP card types (e.g. HiperSockets) have no L2 header handling */
5234 if (qeth_get_netdev_flags(card->info.type) & IFF_NOARP) {
5235 dev->rebuild_header = NULL;
5236 dev->hard_header = NULL;
5237 dev->header_cache_update = NULL;
5238 dev->hard_header_cache = NULL;
5240 #ifdef CONFIG_QETH_IPV6
5241 /*IPv6 address autoconfiguration stuff*/
5242 card->dev->dev_id = card->info.unique_id & 0xffff;
/* only let the stack derive an EUI-64 if the card provided the id */
5243 if (!(card->info.unique_id & UNIQUE_ID_NOT_BY_CARD))
5244 card->dev->generate_eui64 = qeth_ipv6_generate_eui64;
5248 dev->hard_header_parse = NULL;
5249 dev->set_mac_address = NULL;
5250 dev->flags |= qeth_get_netdev_flags(card->info.type);
5251 if ((card->options.fake_broadcast) ||
5252 (card->info.broadcast_capable))
5253 dev->flags |= IFF_BROADCAST;
5255 dev->hard_header_len =
5256 qeth_get_hlen(card->info.link_type) + card->options.add_hhlen;
5257 dev->addr_len = OSA_ADDR_LEN;
5258 dev->mtu = card->info.initial_mtu;
5260 SET_MODULE_OWNER(dev);
/*
 * Select the IDX function level from the card type (IQD vs OSAE)
 * and whether IP address takeover (IPAT) is enabled.
 */
5265 qeth_init_func_level(struct qeth_card *card)
5267 if (card->ipato.enabled) {
5268 if (card->info.type == QETH_CARD_TYPE_IQD)
5269 card->info.func_level =
5270 QETH_IDX_FUNC_LEVEL_IQD_ENA_IPAT;
5272 card->info.func_level =
5273 QETH_IDX_FUNC_LEVEL_OSAE_ENA_IPAT;
5275 if (card->info.type == QETH_CARD_TYPE_IQD)
5276 card->info.func_level =
5277 QETH_IDX_FUNC_LEVEL_IQD_DIS_IPAT;
5279 card->info.func_level =
5280 QETH_IDX_FUNC_LEVEL_OSAE_DIS_IPAT;
/*
 * hardsetup card, initialize MPC and QDIO stuff
 *
 * Cycles the three ccw devices offline/online on retry, clears QDIO,
 * activates the read and write IDX channels, initializes MPC, and
 * allocates/initializes the net_device on the first set_online.
 * -ERESTARTSYS from any step means the user interrupted the setup.
 */
5288 qeth_hardsetup_card(struct qeth_card *card)
5293 QETH_DBF_TEXT(setup, 2, "hrdsetup");
5297 PRINT_WARN("Retrying to do IDX activates.\n");
/* bounce all three subchannels to get a clean state for the retry */
5298 ccw_device_set_offline(CARD_DDEV(card));
5299 ccw_device_set_offline(CARD_WDEV(card));
5300 ccw_device_set_offline(CARD_RDEV(card));
5301 ccw_device_set_online(CARD_RDEV(card));
5302 ccw_device_set_online(CARD_WDEV(card));
5303 ccw_device_set_online(CARD_DDEV(card));
5305 rc = qeth_qdio_clear_card(card,card->info.type==QETH_CARD_TYPE_OSAE);
5306 if (rc == -ERESTARTSYS) {
5307 QETH_DBF_TEXT(setup, 2, "break1");
5310 QETH_DBF_TEXT_(setup, 2, "1err%d", rc);
5316 if ((rc = qeth_get_unitaddr(card))){
5317 QETH_DBF_TEXT_(setup, 2, "2err%d", rc);
5320 qeth_init_tokens(card);
5321 qeth_init_func_level(card);
5322 rc = qeth_idx_activate_channel(&card->read, qeth_idx_read_cb);
5323 if (rc == -ERESTARTSYS) {
5324 QETH_DBF_TEXT(setup, 2, "break2");
5327 QETH_DBF_TEXT_(setup, 2, "3err%d", rc);
5333 rc = qeth_idx_activate_channel(&card->write, qeth_idx_write_cb);
5334 if (rc == -ERESTARTSYS) {
5335 QETH_DBF_TEXT(setup, 2, "break3");
5338 QETH_DBF_TEXT_(setup, 2, "4err%d", rc);
5344 if ((rc = qeth_mpc_initialize(card))){
5345 QETH_DBF_TEXT_(setup, 2, "5err%d", rc);
5348 /* at first set_online allocate netdev */
5350 card->dev = qeth_get_netdevice(card->info.type,
5351 card->info.link_type);
/* netdev allocation failed: undo the QDIO setup before bailing out */
5353 qeth_qdio_clear_card(card, card->info.type ==
5354 QETH_CARD_TYPE_OSAE);
5356 QETH_DBF_TEXT_(setup, 2, "6err%d", rc);
5359 card->dev->priv = card;
5360 card->dev->type = qeth_get_arphdr_type(card->info.type,
5361 card->info.link_type);
5362 card->dev->init = qeth_netdev_init;
5366 PRINT_ERR("Initialization in hardsetup failed! rc=%d\n", rc);
/*
 * Default reply callback for SETASSPARMS commands: propagate the
 * assist-specific return code into the IPA header, record the enabled
 * IPA function mask per protocol, and capture the checksum mask when
 * the inbound-checksum assist was started.
 */
5371 qeth_default_setassparms_cb(struct qeth_card *card, struct qeth_reply *reply,
5374 struct qeth_ipa_cmd *cmd;
5376 QETH_DBF_TEXT(trace,4,"defadpcb");
5378 cmd = (struct qeth_ipa_cmd *) data;
5379 if (cmd->hdr.return_code == 0){
/* surface the inner (assist) return code to the caller */
5380 cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
5381 if (cmd->hdr.prot_version == QETH_PROT_IPV4)
5382 card->options.ipa4.enabled_funcs = cmd->hdr.ipa_enabled;
5383 #ifdef CONFIG_QETH_IPV6
5384 if (cmd->hdr.prot_version == QETH_PROT_IPV6)
5385 card->options.ipa6.enabled_funcs = cmd->hdr.ipa_enabled;
5388 if (cmd->data.setassparms.hdr.assist_no == IPA_INBOUND_CHECKSUM &&
5389 cmd->data.setassparms.hdr.command_code == IPA_CMD_ASS_START) {
5390 card->info.csum_mask = cmd->data.setassparms.data.flags_32bit;
5391 QETH_DBF_TEXT_(trace, 3, "csum:%d", card->info.csum_mask);
/*
 * Default reply callback for SETADAPTERPARMS commands: copy the
 * adapter-parms return code into the IPA header return code.
 */
5397 qeth_default_setadapterparms_cb(struct qeth_card *card,
5398 struct qeth_reply *reply,
5401 struct qeth_ipa_cmd *cmd;
5403 QETH_DBF_TEXT(trace,4,"defadpcb");
5405 cmd = (struct qeth_ipa_cmd *) data;
5406 if (cmd->hdr.return_code == 0)
5407 cmd->hdr.return_code = cmd->data.setadapterparms.hdr.return_code;
/*
 * Reply callback for QUERY_COMMANDS_SUPPORTED: remember the reported
 * link type and the mask of supported adapter commands, then fall
 * through to the default adapter-parms handling.
 */
5412 qeth_query_setadapterparms_cb(struct qeth_card *card, struct qeth_reply *reply,
5415 struct qeth_ipa_cmd *cmd;
5417 QETH_DBF_TEXT(trace,3,"quyadpcb");
5419 cmd = (struct qeth_ipa_cmd *) data;
/* low 7 bits carry the lan type; 0 means "not reported" */
5420 if (cmd->data.setadapterparms.data.query_cmds_supp.lan_type & 0x7f)
5421 card->info.link_type =
5422 cmd->data.setadapterparms.data.query_cmds_supp.lan_type;
5423 card->options.adp.supported_funcs =
5424 cmd->data.setadapterparms.data.query_cmds_supp.supported_cmds;
5425 return qeth_default_setadapterparms_cb(card, reply, (unsigned long)cmd);
/*
 * Ask the card which SETADAPTERPARMS sub-commands it supports;
 * the result is recorded by qeth_query_setadapterparms_cb().
 */
5429 qeth_query_setadapterparms(struct qeth_card *card)
5432 struct qeth_cmd_buffer *iob;
5434 QETH_DBF_TEXT(trace,3,"queryadp");
5435 iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_COMMANDS_SUPPORTED,
5436 sizeof(struct qeth_ipacmd_setadpparms));
5437 rc = qeth_send_ipa_cmd(card, iob, qeth_query_setadapterparms_cb, NULL);
/*
 * Reply callback for ALTER_MAC_ADDRESS (read): copy the MAC address
 * returned by the card into the net_device.
 */
5442 qeth_setadpparms_change_macaddr_cb(struct qeth_card *card,
5443 struct qeth_reply *reply,
5446 struct qeth_ipa_cmd *cmd;
5448 QETH_DBF_TEXT(trace,4,"chgmaccb");
5450 cmd = (struct qeth_ipa_cmd *) data;
5451 memcpy(card->dev->dev_addr,
5452 &cmd->data.setadapterparms.data.change_addr.addr,OSA_ADDR_LEN);
5453 qeth_default_setadapterparms_cb(card, reply, (unsigned long) cmd);
/*
 * Issue ALTER_MAC_ADDRESS with CHANGE_ADDR_READ_MAC to read the
 * card's MAC address; the callback stores it in the net_device.
 */
5458 qeth_setadpparms_change_macaddr(struct qeth_card *card)
5461 struct qeth_cmd_buffer *iob;
5462 struct qeth_ipa_cmd *cmd;
5464 QETH_DBF_TEXT(trace,4,"chgmac");
5466 iob = qeth_get_adapter_cmd(card,IPA_SETADP_ALTER_MAC_ADDRESS,
5467 sizeof(struct qeth_ipacmd_setadpparms));
5468 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
5469 cmd->data.setadapterparms.data.change_addr.cmd = CHANGE_ADDR_READ_MAC;
5470 cmd->data.setadapterparms.data.change_addr.addr_size = OSA_ADDR_LEN;
/* current dev_addr is sent along; the reply overwrites it */
5471 memcpy(&cmd->data.setadapterparms.data.change_addr.addr,
5472 card->dev->dev_addr, OSA_ADDR_LEN);
5473 rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_change_macaddr_cb,
/*
 * Generic helper: send a SETADAPTERPARMS sub-command that takes a
 * single 32-bit mode value (e.g. broadcast or macaddr mode).
 */
5479 qeth_send_setadp_mode(struct qeth_card *card, __u32 command, __u32 mode)
5482 struct qeth_cmd_buffer *iob;
5483 struct qeth_ipa_cmd *cmd;
5485 QETH_DBF_TEXT(trace,4,"adpmode");
5487 iob = qeth_get_adapter_cmd(card, command,
5488 sizeof(struct qeth_ipacmd_setadpparms));
5489 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
5490 cmd->data.setadapterparms.data.mode = mode;
5491 rc = qeth_send_ipa_cmd(card, iob, qeth_default_setadapterparms_cb,
/*
 * Token-ring (HSTR) specific adapter setup: program broadcast mode
 * and MAC address mode if the SET_BROADCAST_MODE adapter command is
 * supported, otherwise warn about falling back to the defaults.
 */
5497 qeth_setadapter_hstr(struct qeth_card *card)
5501 QETH_DBF_TEXT(trace,4,"adphstr");
5503 if (qeth_adp_supported(card,IPA_SETADP_SET_BROADCAST_MODE)) {
5504 rc = qeth_send_setadp_mode(card, IPA_SETADP_SET_BROADCAST_MODE,
5505 card->options.broadcast_mode);
5507 PRINT_WARN("couldn't set broadcast mode on "
5509 CARD_BUS_ID(card), rc);
/* NOTE(review): macaddr mode is set via IPA_SETADP_ALTER_MAC_ADDRESS
 * here — verify against the full source that this is intended and not
 * a copy/paste of the wrong sub-command constant. */
5510 rc = qeth_send_setadp_mode(card, IPA_SETADP_ALTER_MAC_ADDRESS,
5511 card->options.macaddr_mode);
5513 PRINT_WARN("couldn't set macaddr mode on "
5514 "device %s: x%x\n", CARD_BUS_ID(card), rc);
/* unsupported: warn only when the user asked for non-default modes */
5517 if (card->options.broadcast_mode == QETH_TR_BROADCAST_LOCAL)
5518 PRINT_WARN("set adapter parameters not available "
5519 "to set broadcast mode, using ALLRINGS "
5520 "on device %s:\n", CARD_BUS_ID(card));
5521 if (card->options.macaddr_mode == QETH_TR_MACADDR_CANONICAL)
5522 PRINT_WARN("set adapter parameters not available "
5523 "to set macaddr mode, using NONCANONICAL "
5524 "on device %s:\n", CARD_BUS_ID(card));
/*
 * Adapter-parameter setup phase: query supported adapter commands,
 * read the MAC address if possible, and do token-ring specific setup
 * for HSTR / LANE-TR link types.
 */
5529 qeth_setadapter_parms(struct qeth_card *card)
5533 QETH_DBF_TEXT(setup, 2, "setadprm");
5535 if (!qeth_is_supported(card, IPA_SETADAPTERPARMS)){
5536 PRINT_WARN("set adapter parameters not supported "
5539 QETH_DBF_TEXT(setup, 2, " notsupp");
5542 rc = qeth_query_setadapterparms(card);
5544 PRINT_WARN("couldn't set adapter parameters on device %s: "
5545 "x%x\n", CARD_BUS_ID(card), rc);
5548 if (qeth_adp_supported(card,IPA_SETADP_ALTER_MAC_ADDRESS)) {
5549 rc = qeth_setadpparms_change_macaddr(card);
5551 PRINT_WARN("couldn't get MAC address on "
5553 CARD_BUS_ID(card), rc);
5556 if ((card->info.link_type == QETH_LINK_TYPE_HSTR) ||
5557 (card->info.link_type == QETH_LINK_TYPE_LANE_TR))
5558 rc = qeth_setadapter_hstr(card);
/*
 * Send a STARTLAN or STOPLAN IPA command for the given protocol.
 */
5565 qeth_send_startstoplan(struct qeth_card *card, enum qeth_ipa_cmds ipacmd,
5566 enum qeth_prot_versions prot)
5569 struct qeth_cmd_buffer *iob;
5571 iob = qeth_get_ipacmd_buffer(card,ipacmd,prot);
5572 rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
/* Convenience wrapper: issue STARTLAN for the given protocol. */
5578 qeth_send_startlan(struct qeth_card *card, enum qeth_prot_versions prot)
5582 QETH_DBF_TEXT_(setup, 2, "strtlan%i", prot);
5584 rc = qeth_send_startstoplan(card, IPA_CMD_STARTLAN, prot);
/* Convenience wrapper: issue STOPLAN (IPv4 only). */
5589 qeth_send_stoplan(struct qeth_card *card)
5594 * TODO: according to the IPA format document page 14,
5595 * TCP/IP (we!) never issue a STOPLAN
5598 QETH_DBF_TEXT(trace, 2, "stoplan");
5600 rc = qeth_send_startstoplan(card, IPA_CMD_STOPLAN, QETH_PROT_IPV4);
/*
 * Reply callback for QIPASSIST: record the supported and enabled IPA
 * function masks for the protocol version reported in the reply.
 */
5605 qeth_query_ipassists_cb(struct qeth_card *card, struct qeth_reply *reply,
5608 struct qeth_ipa_cmd *cmd;
5610 QETH_DBF_TEXT(setup, 2, "qipasscb");
5612 cmd = (struct qeth_ipa_cmd *) data;
5613 if (cmd->hdr.prot_version == QETH_PROT_IPV4) {
5614 card->options.ipa4.supported_funcs = cmd->hdr.ipa_supported;
5615 card->options.ipa4.enabled_funcs = cmd->hdr.ipa_enabled;
5617 #ifdef CONFIG_QETH_IPV6
5618 card->options.ipa6.supported_funcs = cmd->hdr.ipa_supported;
5619 card->options.ipa6.enabled_funcs = cmd->hdr.ipa_enabled;
/*
 * Query the card's IP-assist capabilities for one protocol version;
 * the masks are stored by qeth_query_ipassists_cb().
 */
5626 qeth_query_ipassists(struct qeth_card *card, enum qeth_prot_versions prot)
5629 struct qeth_cmd_buffer *iob;
5631 QETH_DBF_TEXT_(setup, 2, "qipassi%i", prot);
5633 iob = qeth_get_ipacmd_buffer(card,IPA_CMD_QIPASSIST,prot);
5634 rc = qeth_send_ipa_cmd(card, iob, qeth_query_ipassists_cb, NULL);
/*
 * Build a SETASSPARMS command buffer for the given IPA assist,
 * command code, payload length and protocol version; the caller
 * fills in the assist-specific data before sending.
 */
5638 static struct qeth_cmd_buffer *
5639 qeth_get_setassparms_cmd(struct qeth_card *card, enum qeth_ipa_funcs ipa_func,
5640 __u16 cmd_code, __u16 len,
5641 enum qeth_prot_versions prot)
5643 struct qeth_cmd_buffer *iob;
5644 struct qeth_ipa_cmd *cmd;
5646 QETH_DBF_TEXT(trace,4,"getasscm");
5647 iob = qeth_get_ipacmd_buffer(card,IPA_CMD_SETASSPARMS,prot);
5649 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
5650 cmd->data.setassparms.hdr.assist_no = ipa_func;
/* 8 = size of the setassparms sub-header preceding the payload */
5651 cmd->data.setassparms.hdr.length = 8 + len;
5652 cmd->data.setassparms.hdr.command_code = cmd_code;
5653 cmd->data.setassparms.hdr.return_code = 0;
5654 cmd->data.setassparms.hdr.seq_no = 0;
/*
 * Fill the SETASSPARMS payload and send the command.  'data' is
 * overloaded: for len <= 4 it is the 32-bit value itself, otherwise
 * it is treated as a pointer to 'len' bytes of payload.
 */
5660 qeth_send_setassparms(struct qeth_card *card, struct qeth_cmd_buffer *iob,
5661 __u16 len, long data,
5663 (struct qeth_card *,struct qeth_reply *,unsigned long),
5667 struct qeth_ipa_cmd *cmd;
5669 QETH_DBF_TEXT(trace,4,"sendassp");
5671 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
5672 if (len <= sizeof(__u32))
5673 cmd->data.setassparms.data.flags_32bit = (__u32) data;
5674 else if (len > sizeof(__u32))
5675 memcpy(&cmd->data.setassparms.data, (void *) data, len);
5677 rc = qeth_send_ipa_cmd(card, iob, reply_cb, reply_param);
#ifdef CONFIG_QETH_IPV6
/*
 * Send a payload-less SETASSPARMS command over the IPv6 protocol
 * version, using the default reply callback.
 */
5683 qeth_send_simple_setassparms_ipv6(struct qeth_card *card,
5684 enum qeth_ipa_funcs ipa_func, __u16 cmd_code)
5688 struct qeth_cmd_buffer *iob;
5690 QETH_DBF_TEXT(trace,4,"simassp6");
5691 iob = qeth_get_setassparms_cmd(card, ipa_func, cmd_code,
5693 rc = qeth_send_setassparms(card, iob, 0, 0,
5694 qeth_default_setassparms_cb, NULL);
/*
 * Send a SETASSPARMS command (IPv4) carrying one 32-bit data value,
 * using the default reply callback.
 */
5700 qeth_send_simple_setassparms(struct qeth_card *card,
5701 enum qeth_ipa_funcs ipa_func,
5702 __u16 cmd_code, long data)
5706 struct qeth_cmd_buffer *iob;
5708 QETH_DBF_TEXT(trace,4,"simassp4");
5710 length = sizeof(__u32);
5711 iob = qeth_get_setassparms_cmd(card, ipa_func, cmd_code,
5712 length, QETH_PROT_IPV4);
5713 rc = qeth_send_setassparms(card, iob, length, data,
5714 qeth_default_setassparms_cb, NULL);
/*
 * Start the ARP-processing IPA assist if the card supports it;
 * failures are logged but non-fatal ("go on" semantics).
 */
5719 qeth_start_ipa_arp_processing(struct qeth_card *card)
5723 QETH_DBF_TEXT(trace,3,"ipaarp");
5725 if (!qeth_is_supported(card,IPA_ARP_PROCESSING)) {
5726 PRINT_WARN("ARP processing not supported "
5727 "on %s!\n", card->info.if_name);
5730 rc = qeth_send_simple_setassparms(card,IPA_ARP_PROCESSING,
5731 IPA_CMD_ASS_START, 0);
5733 PRINT_WARN("Could not start ARP processing "
5734 "assist on %s: 0x%x\n",
5735 card->info.if_name, rc);
/*
 * Start the hardware IP-fragmentation assist if supported;
 * failures are logged but non-fatal.
 */
5741 qeth_start_ipa_ip_fragmentation(struct qeth_card *card)
5745 QETH_DBF_TEXT(trace,3,"ipaipfrg");
5747 if (!qeth_is_supported(card, IPA_IP_FRAGMENTATION)) {
5748 PRINT_INFO("Hardware IP fragmentation not supported on %s\n",
5749 card->info.if_name);
5753 rc = qeth_send_simple_setassparms(card, IPA_IP_FRAGMENTATION,
5754 IPA_CMD_ASS_START, 0);
5756 PRINT_WARN("Could not start Hardware IP fragmentation "
5757 "assist on %s: 0x%x\n",
5758 card->info.if_name, rc);
5760 PRINT_INFO("Hardware IP fragmentation enabled \n");
/*
 * Start the inbound source-MAC assist, but only when fake_ll is
 * configured (the assist is needed for faked LL headers).
 */
5765 qeth_start_ipa_source_mac(struct qeth_card *card)
5769 QETH_DBF_TEXT(trace,3,"stsrcmac");
/* without fake_ll there is nothing to do */
5771 if (!card->options.fake_ll)
5774 if (!qeth_is_supported(card, IPA_SOURCE_MAC)) {
5775 PRINT_INFO("Inbound source address not "
5776 "supported on %s\n", card->info.if_name);
5780 rc = qeth_send_simple_setassparms(card, IPA_SOURCE_MAC,
5781 IPA_CMD_ASS_START, 0);
5783 PRINT_WARN("Could not start inbound source "
5784 "assist on %s: 0x%x\n",
5785 card->info.if_name, rc);
/*
 * Start VLAN support (compiled only with CONFIG_QETH_VLAN): starts
 * the VLAN-priority assist and advertises HW VLAN tx/rx features.
 */
5790 qeth_start_ipa_vlan(struct qeth_card *card)
5794 QETH_DBF_TEXT(trace,3,"strtvlan");
5796 #ifdef CONFIG_QETH_VLAN
5797 if (!qeth_is_supported(card, IPA_FULL_VLAN)) {
5798 PRINT_WARN("VLAN not supported on %s\n", card->info.if_name);
5802 rc = qeth_send_simple_setassparms(card, IPA_VLAN_PRIO,
5803 IPA_CMD_ASS_START,0);
5805 PRINT_WARN("Could not start vlan "
5806 "assist on %s: 0x%x\n",
5807 card->info.if_name, rc);
5809 PRINT_INFO("VLAN enabled \n");
5810 card->dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
5812 #endif /* QETH_VLAN */
/*
 * Start the multicast assist and, on success, flag the net_device
 * as multicast capable.
 */
5817 qeth_start_ipa_multicast(struct qeth_card *card)
5821 QETH_DBF_TEXT(trace,3,"stmcast");
5823 if (!qeth_is_supported(card, IPA_MULTICASTING)) {
5824 PRINT_WARN("Multicast not supported on %s\n",
5825 card->info.if_name);
5829 rc = qeth_send_simple_setassparms(card, IPA_MULTICASTING,
5830 IPA_CMD_ASS_START,0);
5832 PRINT_WARN("Could not start multicast "
5833 "assist on %s: rc=%i\n",
5834 card->info.if_name, rc);
5836 PRINT_INFO("Multicast enabled\n");
5837 card->dev->flags |= IFF_MULTICAST;
#ifdef CONFIG_QETH_IPV6
/*
 * IPv6 soft setup: STARTLAN for IPv6, query IPv6 assists, then start
 * the IPv6 assist (via IPv4 and IPv6 command variants) and enable
 * passthrough.  The queue is stopped around the startlan call.
 */
5844 qeth_softsetup_ipv6(struct qeth_card *card)
5848 QETH_DBF_TEXT(trace,3,"softipv6");
5850 netif_stop_queue(card->dev);
5851 rc = qeth_send_startlan(card, QETH_PROT_IPV6);
5853 PRINT_ERR("IPv6 startlan failed on %s\n",
5854 card->info.if_name);
5857 netif_wake_queue(card->dev);
5858 rc = qeth_query_ipassists(card,QETH_PROT_IPV6);
5860 PRINT_ERR("IPv6 query ipassist failed on %s\n",
5861 card->info.if_name);
/* start IPv6 assist via the IPv4 command path; data value 3 selects
 * the supported IP versions — confirm against the IPA spec */
5864 rc = qeth_send_simple_setassparms(card, IPA_IPV6,
5865 IPA_CMD_ASS_START, 3);
5867 PRINT_WARN("IPv6 start assist (version 4) failed "
5869 card->info.if_name, rc);
5872 rc = qeth_send_simple_setassparms_ipv6(card, IPA_IPV6,
5875 PRINT_WARN("IPV6 start assist (version 6) failed "
5877 card->info.if_name, rc);
5880 rc = qeth_send_simple_setassparms_ipv6(card, IPA_PASSTHRU,
5883 PRINT_WARN("Could not enable passthrough "
5885 card->info.if_name, rc);
5888 PRINT_INFO("IPV6 enabled \n");
/*
 * Start IPv6 support if the kernel (CONFIG_QETH_IPV6) and the card
 * both support it; delegates to qeth_softsetup_ipv6().
 */
5895 qeth_start_ipa_ipv6(struct qeth_card *card)
5898 #ifdef CONFIG_QETH_IPV6
5899 QETH_DBF_TEXT(trace,3,"strtipv6");
5901 if (!qeth_is_supported(card, IPA_IPV6)) {
5902 PRINT_WARN("IPv6 not supported on %s\n",
5903 card->info.if_name);
5906 rc = qeth_softsetup_ipv6(card);
/*
 * Enable broadcast filtering: start the filtering assist, configure
 * it (mode 1), then enable echo filtering.  broadcast_capable is set
 * to WITH_ECHO or WITHOUT_ECHO depending on how far setup got, and
 * IFF_BROADCAST on the net_device is updated to match.
 */
5912 qeth_start_ipa_broadcast(struct qeth_card *card)
5916 QETH_DBF_TEXT(trace,3,"stbrdcst");
5917 card->info.broadcast_capable = 0;
5918 if (!qeth_is_supported(card, IPA_FILTERING)) {
5919 PRINT_WARN("Broadcast not supported on %s\n",
5920 card->info.if_name);
5924 rc = qeth_send_simple_setassparms(card, IPA_FILTERING,
5925 IPA_CMD_ASS_START, 0);
5927 PRINT_WARN("Could not enable broadcasting filtering "
5929 card->info.if_name, rc);
5933 rc = qeth_send_simple_setassparms(card, IPA_FILTERING,
5934 IPA_CMD_ASS_CONFIGURE, 1);
5936 PRINT_WARN("Could not set up broadcast filtering on %s: 0x%x\n",
5937 card->info.if_name, rc);
/* filtering configured: broadcasts work, echo still included */
5940 card->info.broadcast_capable = QETH_BROADCAST_WITH_ECHO;
5941 PRINT_INFO("Broadcast enabled \n");
5942 rc = qeth_send_simple_setassparms(card, IPA_FILTERING,
5943 IPA_CMD_ASS_ENABLE, 1);
5945 PRINT_WARN("Could not set up broadcast echo filtering on "
5946 "%s: 0x%x\n", card->info.if_name, rc);
/* echo filtering active: own broadcasts are no longer echoed back */
5949 card->info.broadcast_capable = QETH_BROADCAST_WITHOUT_ECHO;
5951 if (card->info.broadcast_capable)
5952 card->dev->flags |= IFF_BROADCAST;
5954 card->dev->flags &= ~IFF_BROADCAST;
/*
 * Two-step inbound HW checksum enablement: START the assist, then
 * ENABLE it with the csum_mask captured by the start reply callback.
 * On any failure the driver falls back to SW checksumming.
 */
5959 qeth_send_checksum_command(struct qeth_card *card)
5963 rc = qeth_send_simple_setassparms(card, IPA_INBOUND_CHECKSUM,
5964 IPA_CMD_ASS_START, 0);
5966 PRINT_WARN("Starting Inbound HW Checksumming failed on %s: "
5967 "0x%x,\ncontinuing using Inbound SW Checksumming\n",
5968 card->info.if_name, rc);
/* csum_mask was stored by qeth_default_setassparms_cb on START */
5971 rc = qeth_send_simple_setassparms(card, IPA_INBOUND_CHECKSUM,
5973 card->info.csum_mask);
5975 PRINT_WARN("Enabling Inbound HW Checksumming failed on %s: "
5976 "0x%x,\ncontinuing using Inbound SW Checksumming\n",
5977 card->info.if_name, rc);
/*
 * Apply the configured checksum policy: honor NO/SW checksumming
 * options, fall back to SW when the card lacks the inbound HW
 * checksum assist, otherwise enable HW checksumming.
 */
5984 qeth_start_ipa_checksum(struct qeth_card *card)
5988 QETH_DBF_TEXT(trace,3,"strtcsum");
5990 if (card->options.checksum_type == NO_CHECKSUMMING) {
5991 PRINT_WARN("Using no checksumming on %s.\n",
5992 card->info.if_name);
5995 if (card->options.checksum_type == SW_CHECKSUMMING) {
5996 PRINT_WARN("Using SW checksumming on %s.\n",
5997 card->info.if_name);
6000 if (!qeth_is_supported(card, IPA_INBOUND_CHECKSUM)) {
6001 PRINT_WARN("Inbound HW Checksumming not "
6002 "supported on %s,\ncontinuing "
6003 "using Inbound SW Checksumming\n",
6004 card->info.if_name);
6005 card->options.checksum_type = SW_CHECKSUMMING;
6008 rc = qeth_send_checksum_command(card);
6010 PRINT_INFO("HW Checksumming (inbound) enabled \n");
/*
 * Compose a one-line summary of the enabled IP assists into 'buf'.
 * NOTE(review): only the first entry ("ARP ") has a trailing space —
 * subsequent entries concatenate without separators (e.g.
 * "IP_FRAGSRC_MAC"); consider adding spaces in a follow-up fix.
 */
6017 qeth_print_ipassist_status(struct qeth_card *card)
6022 offset += sprintf(buf, "IPAssist options of %s: ", card->info.if_name);
6023 if (qeth_is_enabled(card, IPA_ARP_PROCESSING))
6024 offset += sprintf(buf+offset, "ARP ");
6025 if (qeth_is_enabled(card, IPA_IP_FRAGMENTATION))
6026 offset += sprintf(buf+offset, "IP_FRAG");
6027 if (qeth_is_enabled(card, IPA_SOURCE_MAC))
6028 offset += sprintf(buf+offset, "SRC_MAC");
6029 if (qeth_is_enabled(card, IPA_FULL_VLAN))
6030 offset += sprintf(buf+offset, "VLAN");
6031 if (qeth_is_enabled(card, IPA_VLAN_PRIO))
6032 offset += sprintf(buf+offset, "VLAN_PRIO");
/*
 * Start every IP assist in sequence.  Each starter logs and continues
 * on failure ("go on"), so a missing assist never aborts setup.
 */
6037 qeth_start_ipassists(struct qeth_card *card)
6039 QETH_DBF_TEXT(trace,3,"strtipas");
6040 qeth_start_ipa_arp_processing(card); /* go on*/
6041 qeth_start_ipa_ip_fragmentation(card); /* go on*/
6042 qeth_start_ipa_source_mac(card); /* go on*/
6043 qeth_start_ipa_vlan(card); /* go on*/
6044 qeth_start_ipa_multicast(card); /* go on*/
6045 qeth_start_ipa_ipv6(card); /* go on*/
6046 qeth_start_ipa_broadcast(card); /* go on*/
6047 qeth_start_ipa_checksum(card); /* go on*/
/*
 * Send a SETRTG command telling the card which routing role
 * (router/connector/none) to assume for the given protocol.
 */
6052 qeth_send_setrouting(struct qeth_card *card, enum qeth_routing_types type,
6053 enum qeth_prot_versions prot)
6056 struct qeth_ipa_cmd *cmd;
6057 struct qeth_cmd_buffer *iob;
6059 QETH_DBF_TEXT(trace,4,"setroutg");
6060 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETRTG, prot);
6061 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
6062 cmd->data.setrtg.type = (type);
6063 rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
/*
 * Sanity-check the user-requested routing type against card type and
 * supported IPA functions; downgrades unsupported combinations and
 * warns (the fallback assignment lines are missing from this view —
 * presumably *type is reset to NO_ROUTER; confirm in full source).
 */
6070 qeth_correct_routing_type(struct qeth_card *card, enum qeth_routing_types *type,
6071 enum qeth_prot_versions prot)
6073 if (card->info.type == QETH_CARD_TYPE_IQD) {
/* IQD (HiperSockets) supports the connector/multicast roles */
6076 case PRIMARY_CONNECTOR:
6077 case SECONDARY_CONNECTOR:
6078 case MULTICAST_ROUTER:
/* OSA supports the router roles; multicast only with MC_ROUTER */
6086 case PRIMARY_ROUTER:
6087 case SECONDARY_ROUTER:
6089 case MULTICAST_ROUTER:
6090 if (qeth_is_ipafunc_supported(card, prot,
6098 PRINT_WARN("Routing type '%s' not supported for interface %s.\n"
6099 "Router status set to 'no router'.\n",
6100 ((*type == PRIMARY_ROUTER)? "primary router" :
6101 (*type == SECONDARY_ROUTER)? "secondary router" :
6102 (*type == PRIMARY_CONNECTOR)? "primary connector" :
6103 (*type == SECONDARY_CONNECTOR)? "secondary connector" :
6104 (*type == MULTICAST_ROUTER)? "multicast router" :
/*
 * Validate and program the IPv4 routing type; on failure fall back
 * to NO_ROUTER and warn.
 */
6111 qeth_setrouting_v4(struct qeth_card *card)
6115 QETH_DBF_TEXT(trace,3,"setrtg4");
6117 qeth_correct_routing_type(card, &card->options.route4.type,
6120 rc = qeth_send_setrouting(card, card->options.route4.type,
6123 card->options.route4.type = NO_ROUTER;
6124 PRINT_WARN("Error (0x%04x) while setting routing type on %s. "
6125 "Type set to 'no router'.\n",
6126 rc, card->info.if_name);
/*
 * Validate and program the IPv6 routing type (only with
 * CONFIG_QETH_IPV6); skips the command for NO_ROUTER and for an
 * OSAE multicast-router request the card cannot honor.
 */
6132 qeth_setrouting_v6(struct qeth_card *card)
6136 QETH_DBF_TEXT(trace,3,"setrtg6");
6137 #ifdef CONFIG_QETH_IPV6
6139 qeth_correct_routing_type(card, &card->options.route6.type,
6142 if ((card->options.route6.type == NO_ROUTER) ||
6143 ((card->info.type == QETH_CARD_TYPE_OSAE) &&
6144 (card->options.route6.type == MULTICAST_ROUTER) &&
6145 !qeth_is_supported6(card,IPA_OSA_MC_ROUTER)))
6147 rc = qeth_send_setrouting(card, card->options.route6.type,
6150 card->options.route6.type = NO_ROUTER;
6151 PRINT_WARN("Error (0x%04x) while setting routing type on %s. "
6152 "Type set to 'no router'.\n",
6153 rc, card->info.if_name);
/*
 * softsetup card: init IPA stuff
 *
 * STARTLAN for IPv4, then adapter parms, IP assists and v4/v6
 * routing.  Errors are logged but setup continues; lan_online
 * records whether the LAN came up.
 * NOTE(review): the warning text "LAN on card %s if offline!" reads
 * like a typo for "is offline" — runtime string left untouched here.
 */
6163 qeth_softsetup_card(struct qeth_card *card)
6167 QETH_DBF_TEXT(setup, 2, "softsetp");
6169 if ((rc = qeth_send_startlan(card, QETH_PROT_IPV4))){
6170 QETH_DBF_TEXT_(setup, 2, "1err%d", rc);
6172 PRINT_WARN("LAN on card %s if offline! "
6173 "Continuing softsetup.\n",
6175 card->lan_online = 0;
6179 card->lan_online = 1;
6180 if ((rc = qeth_setadapter_parms(card)))
6181 QETH_DBF_TEXT_(setup, 2, "2err%d", rc);
6182 if ((rc = qeth_start_ipassists(card)))
6183 QETH_DBF_TEXT_(setup, 2, "3err%d", rc);
6184 if ((rc = qeth_setrouting_v4(card)))
6185 QETH_DBF_TEXT_(setup, 2, "4err%d", rc);
6186 if ((rc = qeth_setrouting_v6(card)))
6187 QETH_DBF_TEXT_(setup, 2, "5err%d", rc);
6188 netif_stop_queue(card->dev);
#ifdef CONFIG_QETH_IPV6
/*
 * Reply callback for CREATE_ADDR: extract the card-generated 16-bit
 * unique id (bytes 6..7 of the returned id); on failure fall back to
 * a default id and warn about possible duplicate IPv6 autoconfig
 * addresses across LPARs.
 */
6194 qeth_get_unique_id_cb(struct qeth_card *card, struct qeth_reply *reply,
6197 struct qeth_ipa_cmd *cmd;
6199 cmd = (struct qeth_ipa_cmd *) data;
6200 if (cmd->hdr.return_code == 0)
6201 card->info.unique_id = *((__u16 *)
6202 &cmd->data.create_destroy_addr.unique_id[6]);
6204 card->info.unique_id = UNIQUE_ID_IF_CREATE_ADDR_FAILED |
6205 UNIQUE_ID_NOT_BY_CARD;
6206 PRINT_WARN("couldn't get a unique id from the card on device "
6207 "%s (result=x%x), using default id. ipv6 "
6208 "autoconfig on other lpars may lead to duplicate "
6209 "ip addresses. please use manually "
6210 "configured ones.\n",
6211 CARD_BUS_ID(card), cmd->hdr.return_code);
/*
 * Return a card-generated unique id via DESTROY_ADDR (IPv6 builds
 * only).  Skipped when the id was not handed out by the card; on
 * failure the id is reset to the default "not by card" value.
 */
6218 qeth_put_unique_id(struct qeth_card *card)
6222 #ifdef CONFIG_QETH_IPV6
6223 struct qeth_cmd_buffer *iob;
6224 struct qeth_ipa_cmd *cmd;
6226 QETH_DBF_TEXT(trace,2,"puniqeid");
6228 if ((card->info.unique_id & UNIQUE_ID_NOT_BY_CARD) ==
6229 UNIQUE_ID_NOT_BY_CARD)
6231 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_DESTROY_ADDR,
6233 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
6234 *((__u16 *) &cmd->data.create_destroy_addr.unique_id[6]) =
6235 card->info.unique_id;
6236 memcpy(&cmd->data.create_destroy_addr.unique_id[0],
6237 card->dev->dev_addr, OSA_ADDR_LEN);
6238 rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
6240 card->info.unique_id = UNIQUE_ID_IF_CREATE_ADDR_FAILED |
6241 UNIQUE_ID_NOT_BY_CARD;
/*
 * Flush the card's IP address lists under ip_lock.  The todo list is
 * emptied; each registered address is deregistered with the lock
 * dropped around the IPA command.  With 'recover' set, non-multicast
 * entries are re-queued on the todo list for later re-registration.
 */
6250 qeth_clear_ip_list(struct qeth_card *card, int clean, int recover)
6252 struct qeth_ipaddr *addr, *tmp;
6253 unsigned long flags;
6255 QETH_DBF_TEXT(trace,4,"clearip");
6256 spin_lock_irqsave(&card->ip_lock, flags);
6257 /* clear todo list */
6258 list_for_each_entry_safe(addr, tmp, &card->ip_tbd_list, entry){
6259 list_del(&addr->entry);
6263 while (!list_empty(&card->ip_list)) {
6264 addr = list_entry(card->ip_list.next,
6265 struct qeth_ipaddr, entry);
6266 list_del_init(&addr->entry);
/* drop the lock: deregistering sends a (sleeping) IPA command */
6268 spin_unlock_irqrestore(&card->ip_lock, flags);
6269 qeth_deregister_addr_entry(card, addr);
6270 spin_lock_irqsave(&card->ip_lock, flags);
6272 if (!recover || addr->is_multicast) {
/* recovery: keep unicast addresses for re-registration */
6276 list_add_tail(&addr->entry, &card->ip_tbd_list);
6278 spin_unlock_irqrestore(&card->ip_lock, flags);
/*
 * Replace the mask of allowed background threads (optionally also
 * clearing pending start bits) and wake any waiters.
 */
6282 qeth_set_allowed_threads(struct qeth_card *card, unsigned long threads,
6283 int clear_start_mask)
6285 unsigned long flags;
6287 spin_lock_irqsave(&card->thread_mask_lock, flags);
6288 card->thread_allowed_mask = threads;
6289 if (clear_start_mask)
6290 card->thread_start_mask &= threads;
6291 spin_unlock_irqrestore(&card->thread_mask_lock, flags);
6292 wake_up(&card->wait_q);
/*
 * Return the subset of 'threads' currently running (non-zero means
 * at least one of them is still active).
 */
6296 qeth_threads_running(struct qeth_card *card, unsigned long threads)
6298 unsigned long flags;
6301 spin_lock_irqsave(&card->thread_mask_lock, flags);
6302 rc = (card->thread_running_mask & threads);
6303 spin_unlock_irqrestore(&card->thread_mask_lock, flags);
/*
 * Sleep (interruptibly) until none of the given threads is running;
 * returns -ERESTARTSYS if interrupted by a signal.
 */
6308 qeth_wait_for_threads(struct qeth_card *card, unsigned long threads)
6310 return wait_event_interruptible(card->wait_q,
6311 qeth_threads_running(card, threads) == 0);
/*
 * Tear the card down through its state machine: UP -> SOFTSETUP ->
 * HARDSETUP -> DOWN, skipping steps the card is not in.  With
 * use_hard_stop set, STOPLAN and the unique-id return are skipped.
 */
6315 qeth_stop_card(struct qeth_card *card)
6317 int recover_flag = 0;
6320 QETH_DBF_TEXT(setup ,2,"stopcard");
6321 QETH_DBF_HEX(setup, 2, &card, sizeof(void *));
/* forbid all threads except the recoverer, then wait them out */
6323 qeth_set_allowed_threads(card, 0, 1);
6324 if (qeth_wait_for_threads(card, ~QETH_RECOVER_THREAD))
6325 return -ERESTARTSYS;
6326 if (card->read.state == CH_STATE_UP &&
6327 card->write.state == CH_STATE_UP &&
6328 (card->state == CARD_STATE_UP)) {
6331 dev_close(card->dev);
6333 if (!card->use_hard_stop)
6334 if ((rc = qeth_send_stoplan(card)))
6335 QETH_DBF_TEXT_(setup, 2, "1err%d", rc);
6336 card->state = CARD_STATE_SOFTSETUP;
6338 if (card->state == CARD_STATE_SOFTSETUP) {
6339 qeth_clear_ip_list(card, !card->use_hard_stop, recover_flag);
6340 qeth_clear_ipacmd_list(card);
6341 card->state = CARD_STATE_HARDSETUP;
6343 if (card->state == CARD_STATE_HARDSETUP) {
6344 if (!card->use_hard_stop)
6345 if ((rc = qeth_put_unique_id(card)))
6346 QETH_DBF_TEXT_(setup, 2, "2err%d", rc);
6347 qeth_qdio_clear_card(card, 0);
6348 qeth_clear_qdio_buffers(card);
6349 qeth_clear_working_pool_list(card);
6350 card->state = CARD_STATE_DOWN;
6352 if (card->state == CARD_STATE_DOWN) {
6353 qeth_clear_cmd_buffers(&card->read);
6354 qeth_clear_cmd_buffers(&card->write);
6356 card->use_hard_stop = 0;
/*
 * Request a unique id from the card via CREATE_ADDR (IPv6 builds
 * only); without IPv6 assist support, or on failure, a default
 * "not by card" id is used instead.
 */
6362 qeth_get_unique_id(struct qeth_card *card)
6365 #ifdef CONFIG_QETH_IPV6
6366 struct qeth_cmd_buffer *iob;
6367 struct qeth_ipa_cmd *cmd;
6369 QETH_DBF_TEXT(setup, 2, "guniqeid");
6371 if (!qeth_is_supported(card,IPA_IPV6)) {
6372 card->info.unique_id = UNIQUE_ID_IF_CREATE_ADDR_FAILED |
6373 UNIQUE_ID_NOT_BY_CARD;
6377 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_CREATE_ADDR,
6379 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
6380 *((__u16 *) &cmd->data.create_destroy_addr.unique_id[6]) =
6381 card->info.unique_id;
6383 rc = qeth_send_ipa_cmd(card, iob, qeth_get_unique_id_cb, NULL);
6385 card->info.unique_id = UNIQUE_ID_IF_CREATE_ADDR_FAILED |
6386 UNIQUE_ID_NOT_BY_CARD;
/*
 * Print the device banner including the portname; the portname is
 * converted from EBCDIC to ASCII character by character first.
 */
6391 qeth_print_status_with_portname(struct qeth_card *card)
6396 sprintf(dbf_text, "%s", card->info.portname + 1);
6397 for (i = 0; i < 8; i++)
6399 (char) _ebcasc[(__u8) dbf_text[i]];
6401 printk("qeth: Device %s/%s/%s is a%s card%s%s%s\n"
6402 "with link type %s (portname: %s)\n",
6406 qeth_get_cardname(card),
6407 (card->info.mcl_level[0]) ? " (level: " : "",
6408 (card->info.mcl_level[0]) ? card->info.mcl_level : "",
6409 (card->info.mcl_level[0]) ? ")" : "",
6410 qeth_get_cardname_short(card),
/*
 * Print the device banner for cards that do not require a portname;
 * the message differs depending on whether a portname is configured
 * anyway (then: "no portname needed by interface").
 */
6416 qeth_print_status_no_portname(struct qeth_card *card)
6418 if (card->info.portname[0])
6419 printk("qeth: Device %s/%s/%s is a%s "
6420 "card%s%s%s\nwith link type %s "
6421 "(no portname needed by interface).\n",
6425 qeth_get_cardname(card),
6426 (card->info.mcl_level[0]) ? " (level: " : "",
6427 (card->info.mcl_level[0]) ? card->info.mcl_level : "",
6428 (card->info.mcl_level[0]) ? ")" : "",
6429 qeth_get_cardname_short(card));
6431 printk("qeth: Device %s/%s/%s is a%s "
6432 "card%s%s%s\nwith link type %s.\n",
6436 qeth_get_cardname(card),
6437 (card->info.mcl_level[0]) ? " (level: " : "",
6438 (card->info.mcl_level[0]) ? card->info.mcl_level : "",
6439 (card->info.mcl_level[0]) ? ")" : "",
6440 qeth_get_cardname_short(card));
/*
 * Normalize the microcode-level (mcl_level) string per card type,
 * then print the status banner with or without portname.
 */
6444 qeth_print_status_message(struct qeth_card *card)
6446 switch (card->info.type) {
6447 case QETH_CARD_TYPE_OSAE:
6448 /* VM will use a non-zero first character
6449 * to indicate a HiperSockets like reporting
6450 * of the level OSA sets the first character to zero
6452 if (!card->info.mcl_level[0]) {
6453 sprintf(card->info.mcl_level,"%02x%02x",
6454 card->info.mcl_level[2],
6455 card->info.mcl_level[3]);
6457 card->info.mcl_level[QETH_MCL_LENGTH] = 0;
6461 case QETH_CARD_TYPE_IQD:
/* HiperSockets reports the level in EBCDIC; convert to ASCII */
6462 card->info.mcl_level[0] = (char) _ebcasc[(__u8)
6463 card->info.mcl_level[0]];
6464 card->info.mcl_level[1] = (char) _ebcasc[(__u8)
6465 card->info.mcl_level[1]];
6466 card->info.mcl_level[2] = (char) _ebcasc[(__u8)
6467 card->info.mcl_level[2]];
6468 card->info.mcl_level[3] = (char) _ebcasc[(__u8)
6469 card->info.mcl_level[3]];
6470 card->info.mcl_level[QETH_MCL_LENGTH] = 0;
6473 memset(&card->info.mcl_level[0], 0, QETH_MCL_LENGTH + 1);
6475 if (card->info.portname_required)
6476 qeth_print_status_with_portname(card);
6478 qeth_print_status_no_portname(card);
/*
 * Register the card's net_device with the network stack (unless it is
 * already registered) and cache the assigned interface name in
 * card->info.if_name.  Return code handling lines are elided in this
 * extract.
 */
6482 qeth_register_netdev(struct qeth_card *card)
6486 QETH_DBF_TEXT(setup, 3, "regnetd");
/* Idempotence guard: skip if the netdev left the UNINITIALIZED state. */
6487 if (card->dev->reg_state != NETREG_UNINITIALIZED)
/* Tie the netdev to the ccwgroup device for sysfs parentage. */
6490 SET_NETDEV_DEV(card->dev, &card->gdev->dev);
6491 rc = register_netdev(card->dev);
6493 strcpy(card->info.if_name, card->dev->name);
/*
 * Recovery path helper: reopen the interface and kick off the multicast
 * worker thread after a card came back online.
 */
6499 qeth_start_again(struct qeth_card *card)
6501 QETH_DBF_TEXT(setup ,2, "startag");
6504 dev_open(card->dev);
/* Only schedule the starter if the MC thread bit was not already set. */
6506 if (qeth_set_thread_start_bit(card, QETH_SET_MC_THREAD) == 0)
6507 schedule_work(&card->kernel_thread_starter);
/*
 * ccwgroup set_online callback: bring all three subchannels online, then
 * walk the card through HARDSETUP -> SOFTSETUP, register the netdev and
 * init the QDIO queues.  On any failure it falls through to the teardown
 * tail that force-stops the card and sets every subchannel offline again.
 * NOTE(review): goto labels and "return rc" lines are elided in this
 * extract; the error tail at 6575+ is only reachable via those gotos.
 */
6511 qeth_set_online(struct ccwgroup_device *gdev)
6513 struct qeth_card *card = gdev->dev.driver_data;
6515 enum qeth_card_states recover_flag;
6518 QETH_DBF_TEXT(setup ,2, "setonlin");
6519 QETH_DBF_HEX(setup, 2, &card, sizeof(void *));
/* Allow only the recovery thread while we reconfigure; bail out if
 * other qeth threads refuse to drain (interrupted by the user). */
6521 qeth_set_allowed_threads(card, QETH_RECOVER_THREAD, 1);
6522 if (qeth_wait_for_threads(card, ~QETH_RECOVER_THREAD)){
6523 PRINT_WARN("set_online of card %s interrupted by user!\n",
6525 return -ERESTARTSYS;
/* Remember whether we are in a recovery cycle; decides the final state
 * on failure and whether qeth_start_again() runs on success. */
6528 recover_flag = card->state;
6529 if (ccw_device_set_online(CARD_RDEV(card)) ||
6530 ccw_device_set_online(CARD_WDEV(card)) ||
6531 ccw_device_set_online(CARD_DDEV(card))){
6532 QETH_DBF_TEXT_(setup, 2, "1err%d", rc);
6536 if ((rc = qeth_hardsetup_card(card))){
6537 QETH_DBF_TEXT_(setup, 2, "2err%d", rc);
6540 card->state = CARD_STATE_HARDSETUP;
6542 if ((rc = qeth_query_ipassists(card,QETH_PROT_IPV4))){
6543 QETH_DBF_TEXT_(setup, 2, "3err%d", rc);
6546 rc = qeth_get_unique_id(card);
6549 QETH_DBF_TEXT_(setup, 2, "4err%d", rc);
6552 qeth_print_status_message(card);
6553 if ((rc = qeth_register_netdev(card))){
6554 QETH_DBF_TEXT_(setup, 2, "5err%d", rc);
6557 if ((rc = qeth_softsetup_card(card))){
6558 QETH_DBF_TEXT_(setup, 2, "6err%d", rc);
6561 card->state = CARD_STATE_SOFTSETUP;
6563 if ((rc = qeth_init_qdio_queues(card))){
6564 QETH_DBF_TEXT_(setup, 2, "7err%d", rc);
6567 /*maybe it was set offline without ifconfig down
6568 * we can also use this state for recovery purposes*/
/* Success: re-enable all thread types and resume if recovering. */
6569 qeth_set_allowed_threads(card, 0xffffffff, 0);
6570 if (recover_flag == CARD_STATE_RECOVER)
6571 qeth_start_again(card);
6572 qeth_notify_processes();
/* --- error teardown tail (reached via elided gotos) --- */
6575 card->use_hard_stop = 1;
6576 qeth_stop_card(card);
6577 ccw_device_set_offline(CARD_DDEV(card));
6578 ccw_device_set_offline(CARD_WDEV(card));
6579 ccw_device_set_offline(CARD_RDEV(card));
6580 if (recover_flag == CARD_STATE_RECOVER)
6581 card->state = CARD_STATE_RECOVER;
6583 card->state = CARD_STATE_DOWN;
/*
 * Driver registration data: CCW device IDs handled by qeth (OSA-Express
 * cu_type 0x1731/0x01, HiperSockets 0x1731/0x05), the sysfs root device,
 * and the ccwgroup/ccw driver structures.  Uses old GNU-style
 * "field:value" initializers rather than C99 ".field = value".
 */
6587 static struct ccw_device_id qeth_ids[] = {
6588 {CCW_DEVICE(0x1731, 0x01), driver_info:QETH_CARD_TYPE_OSAE},
6589 {CCW_DEVICE(0x1731, 0x05), driver_info:QETH_CARD_TYPE_IQD},
6592 MODULE_DEVICE_TABLE(ccw, qeth_ids);
6594 struct device *qeth_root_dev = NULL;
6596 struct ccwgroup_driver qeth_ccwgroup_driver = {
6597 .owner = THIS_MODULE,
/* driver_id 0xD8C5E3C8 is "QETH" in EBCDIC -- TODO confirm. */
6599 .driver_id = 0xD8C5E3C8,
6600 .probe = qeth_probe_device,
6601 .remove = qeth_remove_device,
6602 .set_online = qeth_set_online,
6603 .set_offline = qeth_set_offline,
6606 struct ccw_driver qeth_ccw_driver = {
/* Individual CCW devices are claimed/released by the ccwgroup layer. */
6609 .probe = ccwgroup_probe_ccwdev,
6610 .remove = ccwgroup_remove_ccwdev,
/*
 * Tear down all s390 debug-facility areas.  The NULL guards (mostly
 * elided in this extract; one survives at 6627) make this safe to call
 * from the partially-failed registration path.
 */
6615 qeth_unregister_dbf_views(void)
6618 debug_unregister(qeth_dbf_setup);
6620 debug_unregister(qeth_dbf_qerr);
6622 debug_unregister(qeth_dbf_sense);
6624 debug_unregister(qeth_dbf_misc);
6626 debug_unregister(qeth_dbf_data);
6627 if (qeth_dbf_control)
6628 debug_unregister(qeth_dbf_control);
6630 debug_unregister(qeth_dbf_trace);
/*
 * Create the seven qeth debug-facility areas (setup, misc, data, control,
 * sense, qerr, trace), then attach a hex/ascii view and set the initial
 * level on each.  If any debug_register() failed, everything registered
 * so far is undone via qeth_unregister_dbf_views().
 */
6633 qeth_register_dbf_views(void)
6635 qeth_dbf_setup = debug_register(QETH_DBF_SETUP_NAME,
6636 QETH_DBF_SETUP_INDEX,
6637 QETH_DBF_SETUP_NR_AREAS,
6638 QETH_DBF_SETUP_LEN);
6639 qeth_dbf_misc = debug_register(QETH_DBF_MISC_NAME,
6640 QETH_DBF_MISC_INDEX,
6641 QETH_DBF_MISC_NR_AREAS,
6643 qeth_dbf_data = debug_register(QETH_DBF_DATA_NAME,
6644 QETH_DBF_DATA_INDEX,
6645 QETH_DBF_DATA_NR_AREAS,
6647 qeth_dbf_control = debug_register(QETH_DBF_CONTROL_NAME,
6648 QETH_DBF_CONTROL_INDEX,
6649 QETH_DBF_CONTROL_NR_AREAS,
6650 QETH_DBF_CONTROL_LEN);
6651 qeth_dbf_sense = debug_register(QETH_DBF_SENSE_NAME,
6652 QETH_DBF_SENSE_INDEX,
6653 QETH_DBF_SENSE_NR_AREAS,
6654 QETH_DBF_SENSE_LEN);
6655 qeth_dbf_qerr = debug_register(QETH_DBF_QERR_NAME,
6656 QETH_DBF_QERR_INDEX,
6657 QETH_DBF_QERR_NR_AREAS,
6659 qeth_dbf_trace = debug_register(QETH_DBF_TRACE_NAME,
6660 QETH_DBF_TRACE_INDEX,
6661 QETH_DBF_TRACE_NR_AREAS,
6662 QETH_DBF_TRACE_LEN);
/* All-or-nothing: one failure rolls back every registered area. */
6664 if ((qeth_dbf_setup == NULL) || (qeth_dbf_misc == NULL) ||
6665 (qeth_dbf_data == NULL) || (qeth_dbf_control == NULL) ||
6666 (qeth_dbf_sense == NULL) || (qeth_dbf_qerr == NULL) ||
6667 (qeth_dbf_trace == NULL)) {
6668 qeth_unregister_dbf_views();
6671 debug_register_view(qeth_dbf_setup, &debug_hex_ascii_view);
6672 debug_set_level(qeth_dbf_setup, QETH_DBF_SETUP_LEVEL);
6674 debug_register_view(qeth_dbf_misc, &debug_hex_ascii_view);
6675 debug_set_level(qeth_dbf_misc, QETH_DBF_MISC_LEVEL);
6677 debug_register_view(qeth_dbf_data, &debug_hex_ascii_view);
6678 debug_set_level(qeth_dbf_data, QETH_DBF_DATA_LEVEL);
6680 debug_register_view(qeth_dbf_control, &debug_hex_ascii_view);
6681 debug_set_level(qeth_dbf_control, QETH_DBF_CONTROL_LEVEL);
6683 debug_register_view(qeth_dbf_sense, &debug_hex_ascii_view);
6684 debug_set_level(qeth_dbf_sense, QETH_DBF_SENSE_LEVEL);
6686 debug_register_view(qeth_dbf_qerr, &debug_hex_ascii_view);
6687 debug_set_level(qeth_dbf_qerr, QETH_DBF_QERR_LEVEL);
6689 debug_register_view(qeth_dbf_trace, &debug_hex_ascii_view);
6690 debug_set_level(qeth_dbf_trace, QETH_DBF_TRACE_LEVEL);
/*
 * IPv6-build-only ARP bypass machinery: qeth hooks arp_tbl.constructor
 * (saving the original in qeth_old_arp_constructor) and substitutes
 * neigh_ops that send every output path straight to dev_queue_xmit,
 * i.e. no ARP resolution is performed for qeth devices.
 */
6695 #ifdef CONFIG_QETH_IPV6
6696 extern struct neigh_table arp_tbl;
6697 static struct neigh_ops *arp_direct_ops;
6698 static int (*qeth_old_arp_constructor) (struct neighbour *);
6700 static struct neigh_ops arp_direct_ops_template = {
6704 .error_report = NULL,
/* Every output hook bypasses neighbour resolution entirely. */
6705 .output = dev_queue_xmit,
6706 .connected_output = dev_queue_xmit,
6707 .hh_output = dev_queue_xmit,
6708 .queue_xmit = dev_queue_xmit
/*
 * Replacement arp_tbl.constructor: for neighbours on qeth devices, mark
 * the entry NUD_NOARP and install the direct-xmit ops so no ARP is done;
 * for all other devices, delegate to the saved original constructor.
 */
6712 qeth_arp_constructor(struct neighbour *neigh)
6714 struct net_device *dev = neigh->dev;
6715 struct in_device *in_dev;
6716 struct neigh_parms *parms;
/* Not one of ours -> fall back to the stock ARP constructor. */
6718 if (!qeth_verify_dev(dev)) {
6719 return qeth_old_arp_constructor(neigh);
6723 in_dev = rcu_dereference(__in_dev_get(dev));
6724 if (in_dev == NULL) {
/* Swap the neighbour's parms for the device's ARP parms. */
6729 parms = in_dev->arp_parms;
6730 __neigh_parms_put(neigh->parms);
6731 neigh->parms = neigh_parms_clone(parms);
6734 neigh->type = inet_addr_type(*(u32 *) neigh->primary_key);
/* NUD_NOARP: entry is permanently "reachable"; no resolution needed. */
6735 neigh->nud_state = NUD_NOARP;
6736 neigh->ops = arp_direct_ops;
6737 neigh->output = neigh->ops->queue_xmit;
6740 #endif /*CONFIG_QETH_IPV6*/
6743 * IP address takeover related functions
/*
 * Empty the card's IP-address-takeover (IPATO) entry list under the
 * ip_lock.  The kfree of each unlinked entry is elided in this extract.
 */
6746 qeth_clear_ipato_list(struct qeth_card *card)
6748 struct qeth_ipato_entry *ipatoe, *tmp;
6749 unsigned long flags;
6751 spin_lock_irqsave(&card->ip_lock, flags);
/* _safe variant: entries are deleted while iterating. */
6752 list_for_each_entry_safe(ipatoe, tmp, &card->ipato.entries, entry) {
6753 list_del(&ipatoe->entry);
6756 spin_unlock_irqrestore(&card->ip_lock, flags);
/*
 * Append @new to the card's IPATO list unless an entry with the same
 * protocol, address (4 bytes for IPv4, 16 for IPv6) and mask length
 * already exists.  Runs under ip_lock.
 */
6760 qeth_add_ipato_entry(struct qeth_card *card, struct qeth_ipato_entry *new)
6762 struct qeth_ipato_entry *ipatoe;
6763 unsigned long flags;
6766 QETH_DBF_TEXT(trace, 2, "addipato");
6767 spin_lock_irqsave(&card->ip_lock, flags);
6768 list_for_each_entry(ipatoe, &card->ipato.entries, entry){
6769 if (ipatoe->proto != new->proto)
/* Duplicate check: same address bytes and same mask width. */
6771 if (!memcmp(ipatoe->addr, new->addr,
6772 (ipatoe->proto == QETH_PROT_IPV4)? 4:16) &&
6773 (ipatoe->mask_bits == new->mask_bits)){
6774 PRINT_WARN("ipato entry already exists!\n");
6780 list_add_tail(&new->entry, &card->ipato.entries);
6782 spin_unlock_irqrestore(&card->ip_lock, flags);
/*
 * Remove the IPATO entry matching (@proto, @addr, @mask_bits) from the
 * card's list, under ip_lock.  Mirror image of qeth_add_ipato_entry();
 * the kfree of the unlinked entry is elided in this extract.
 */
6787 qeth_del_ipato_entry(struct qeth_card *card, enum qeth_prot_versions proto,
6788 u8 *addr, int mask_bits)
6790 struct qeth_ipato_entry *ipatoe, *tmp;
6791 unsigned long flags;
6793 QETH_DBF_TEXT(trace, 2, "delipato");
6794 spin_lock_irqsave(&card->ip_lock, flags);
6795 list_for_each_entry_safe(ipatoe, tmp, &card->ipato.entries, entry){
6796 if (ipatoe->proto != proto)
6798 if (!memcmp(ipatoe->addr, addr,
6799 (proto == QETH_PROT_IPV4)? 4:16) &&
6800 (ipatoe->mask_bits == mask_bits)){
6801 list_del(&ipatoe->entry);
6805 spin_unlock_irqrestore(&card->ip_lock, flags);
/*
 * Expand @len bytes of @addr into @bits, one 0/1 byte per bit (so @bits
 * must hold len*8 bytes).  Within each source byte, j counts down from 7
 * while the octet is consumed LSB-first (the ">>= 1" line is elided),
 * i.e. bit order within a byte is MSB-first in the output.
 */
6809 qeth_convert_addr_to_bits(u8 *addr, u8 *bits, int len)
6814 for (i = 0; i < len; ++i){
6816 for (j = 7; j >= 0; --j){
6817 bits[i*8 + j] = octet & 1;
/*
 * Decide whether @addr is subject to IP-address takeover on @card:
 * expand the address and each IPATO entry to bit arrays and compare the
 * first mask_bits bits (capped at 32 for IPv4, 128 for IPv6).  The
 * result can then be inverted per-protocol via ipato.invert4/invert6.
 * Returns 0 immediately when takeover is disabled on the card.
 */
6824 qeth_is_addr_covered_by_ipato(struct qeth_card *card, struct qeth_ipaddr *addr)
6826 struct qeth_ipato_entry *ipatoe;
6827 u8 addr_bits[128] = {0, };
6828 u8 ipatoe_bits[128] = {0, };
6831 if (!card->ipato.enabled)
6834 qeth_convert_addr_to_bits((u8 *) &addr->u, addr_bits,
6835 (addr->proto == QETH_PROT_IPV4)? 4:16);
6836 list_for_each_entry(ipatoe, &card->ipato.entries, entry){
6837 if (addr->proto != ipatoe->proto)
6839 qeth_convert_addr_to_bits(ipatoe->addr, ipatoe_bits,
6840 (ipatoe->proto==QETH_PROT_IPV4) ?
/* Prefix match: compare only the entry's mask_bits leading bits. */
6842 if (addr->proto == QETH_PROT_IPV4)
6843 rc = !memcmp(addr_bits, ipatoe_bits,
6844 min(32, ipatoe->mask_bits));
6846 rc = !memcmp(addr_bits, ipatoe_bits,
6847 min(128, ipatoe->mask_bits));
/* invert4/invert6 flip the match result for the whole protocol. */
6852 if ((addr->proto == QETH_PROT_IPV4) && card->ipato.invert4)
6854 else if ((addr->proto == QETH_PROT_IPV6) && card->ipato.invert6)
6861 * VIPA related functions
/*
 * Add a virtual IP address (VIPA) to @card.  Builds a qeth_ipaddr of
 * type VIPA with the VIPA set/del flags, rejects duplicates already on
 * the active or to-be-done IP lists, then queues it via qeth_add_ip()
 * and kicks the SET_IP worker thread.
 */
6864 qeth_add_vipa(struct qeth_card *card, enum qeth_prot_versions proto,
6867 struct qeth_ipaddr *ipaddr;
6868 unsigned long flags;
6871 ipaddr = qeth_get_addr_buffer(proto);
6873 if (proto == QETH_PROT_IPV4){
6874 QETH_DBF_TEXT(trace, 2, "addvipa4");
6875 memcpy(&ipaddr->u.a4.addr, addr, 4);
/* VIPA has no netmask/prefix of its own. */
6876 ipaddr->u.a4.mask = 0;
6877 #ifdef CONFIG_QETH_IPV6
6878 } else if (proto == QETH_PROT_IPV6){
6879 QETH_DBF_TEXT(trace, 2, "addvipa6");
6880 memcpy(&ipaddr->u.a6.addr, addr, 16);
6881 ipaddr->u.a6.pfxlen = 0;
6884 ipaddr->type = QETH_IP_TYPE_VIPA;
6885 ipaddr->set_flags = QETH_IPA_SETIP_VIPA_FLAG;
6886 ipaddr->del_flags = QETH_IPA_DELIP_VIPA_FLAG;
6889 spin_lock_irqsave(&card->ip_lock, flags);
6890 if (__qeth_address_exists_in_list(&card->ip_list, ipaddr, 0) ||
6891 __qeth_address_exists_in_list(&card->ip_tbd_list, ipaddr, 0))
6893 spin_unlock_irqrestore(&card->ip_lock, flags);
6895 PRINT_WARN("Cannot add VIPA. Address already exists!\n");
6898 if (!qeth_add_ip(card, ipaddr))
6900 if (qeth_set_thread_start_bit(card, QETH_SET_IP_THREAD) == 0)
6901 schedule_work(&card->kernel_thread_starter);
/*
 * Remove a virtual IP address (VIPA) from @card: build a matching
 * qeth_ipaddr of type VIPA, queue the delete via qeth_delete_ip(), and
 * kick the SET_IP worker thread.
 */
6906 qeth_del_vipa(struct qeth_card *card, enum qeth_prot_versions proto,
6909 struct qeth_ipaddr *ipaddr;
6911 ipaddr = qeth_get_addr_buffer(proto);
6913 if (proto == QETH_PROT_IPV4){
6914 QETH_DBF_TEXT(trace, 2, "delvipa4");
6915 memcpy(&ipaddr->u.a4.addr, addr, 4);
6916 ipaddr->u.a4.mask = 0;
6917 #ifdef CONFIG_QETH_IPV6
6918 } else if (proto == QETH_PROT_IPV6){
6919 QETH_DBF_TEXT(trace, 2, "delvipa6");
6920 memcpy(&ipaddr->u.a6.addr, addr, 16);
6921 ipaddr->u.a6.pfxlen = 0;
6924 ipaddr->type = QETH_IP_TYPE_VIPA;
6927 if (!qeth_delete_ip(card, ipaddr))
6929 if (qeth_set_thread_start_bit(card, QETH_SET_IP_THREAD) == 0)
6930 schedule_work(&card->kernel_thread_starter);
6934 * proxy ARP related functions
/*
 * Add a "receive IP" (proxy-ARP) address to @card.  Same shape as
 * qeth_add_vipa() but with type RXIP and the SETIP takeover flag;
 * duplicates on the active or to-be-done lists are rejected.
 */
6937 qeth_add_rxip(struct qeth_card *card, enum qeth_prot_versions proto,
6940 struct qeth_ipaddr *ipaddr;
6941 unsigned long flags;
6944 ipaddr = qeth_get_addr_buffer(proto);
6946 if (proto == QETH_PROT_IPV4){
6947 QETH_DBF_TEXT(trace, 2, "addrxip4");
6948 memcpy(&ipaddr->u.a4.addr, addr, 4);
6949 ipaddr->u.a4.mask = 0;
6950 #ifdef CONFIG_QETH_IPV6
6951 } else if (proto == QETH_PROT_IPV6){
6952 QETH_DBF_TEXT(trace, 2, "addrxip6");
6953 memcpy(&ipaddr->u.a6.addr, addr, 16);
6954 ipaddr->u.a6.pfxlen = 0;
6957 ipaddr->type = QETH_IP_TYPE_RXIP;
6958 ipaddr->set_flags = QETH_IPA_SETIP_TAKEOVER_FLAG;
6959 ipaddr->del_flags = 0;
6962 spin_lock_irqsave(&card->ip_lock, flags);
6963 if (__qeth_address_exists_in_list(&card->ip_list, ipaddr, 0) ||
6964 __qeth_address_exists_in_list(&card->ip_tbd_list, ipaddr, 0))
6966 spin_unlock_irqrestore(&card->ip_lock, flags);
6968 PRINT_WARN("Cannot add RXIP. Address already exists!\n");
6971 if (!qeth_add_ip(card, ipaddr))
6973 if (qeth_set_thread_start_bit(card, QETH_SET_IP_THREAD) == 0)
6974 schedule_work(&card->kernel_thread_starter);
/*
 * Remove a "receive IP" (proxy-ARP) address from @card: build a matching
 * qeth_ipaddr of type RXIP, queue the delete via qeth_delete_ip(), and
 * kick the SET_IP worker thread.
 *
 * Fix: the DBF trace tags were "addrxip4"/"addrxip6" -- copy-pasted from
 * qeth_add_rxip() -- so add and delete were indistinguishable in the
 * trace log.  They now read "delrxip4"/"delrxip6", matching the
 * "delvipa4"/"delvipa6" convention used by qeth_del_vipa().
 */
6979 qeth_del_rxip(struct qeth_card *card, enum qeth_prot_versions proto,
6982 struct qeth_ipaddr *ipaddr;
6984 ipaddr = qeth_get_addr_buffer(proto);
6986 if (proto == QETH_PROT_IPV4){
6987 QETH_DBF_TEXT(trace, 2, "delrxip4");
6988 memcpy(&ipaddr->u.a4.addr, addr, 4);
6989 ipaddr->u.a4.mask = 0;
6990 #ifdef CONFIG_QETH_IPV6
6991 } else if (proto == QETH_PROT_IPV6){
6992 QETH_DBF_TEXT(trace, 2, "delrxip6");
6993 memcpy(&ipaddr->u.a6.addr, addr, 16);
6994 ipaddr->u.a6.pfxlen = 0;
6997 ipaddr->type = QETH_IP_TYPE_RXIP;
7000 if (!qeth_delete_ip(card, ipaddr))
7002 if (qeth_set_thread_start_bit(card, QETH_SET_IP_THREAD) == 0)
7003 schedule_work(&card->kernel_thread_starter);
/*
 * inetaddr notifier callback: when an IPv4 address is added to/removed
 * from a qeth interface, mirror the change onto the card's IP list and
 * kick the SET_IP worker thread.  The switch on @event (NETDEV_UP/DOWN,
 * elided in this extract) selects between the add and delete paths.
 */
7010 qeth_ip_event(struct notifier_block *this,
7011 unsigned long event,void *ptr)
7013 struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
7014 struct net_device *dev =(struct net_device *) ifa->ifa_dev->dev;
7015 struct qeth_ipaddr *addr;
7016 struct qeth_card *card;
7018 QETH_DBF_TEXT(trace,3,"ipevent");
/* Ignore events for devices that are not qeth cards. */
7019 card = qeth_get_card_from_dev(dev);
7023 addr = qeth_get_addr_buffer(QETH_PROT_IPV4);
7025 addr->u.a4.addr = ifa->ifa_address;
7026 addr->u.a4.mask = ifa->ifa_mask;
7027 addr->type = QETH_IP_TYPE_NORMAL;
7033 if (!qeth_add_ip(card, addr))
7037 if (!qeth_delete_ip(card, addr))
7043 if (qeth_set_thread_start_bit(card, QETH_SET_IP_THREAD) == 0)
7044 schedule_work(&card->kernel_thread_starter);
/* Notifier block hooking qeth_ip_event into the inetaddr chain. */
7049 static struct notifier_block qeth_ip_notifier = {
7054 #ifdef CONFIG_QETH_IPV6
7056 * IPv6 event handler
/*
 * inet6addr notifier callback: IPv6 counterpart of qeth_ip_event().
 * Additionally skips cards that do not advertise the IPA_IPV6 assist.
 */
7059 qeth_ip6_event(struct notifier_block *this,
7060 unsigned long event,void *ptr)
7063 struct inet6_ifaddr *ifa = (struct inet6_ifaddr *)ptr;
7064 struct net_device *dev = (struct net_device *)ifa->idev->dev;
7065 struct qeth_ipaddr *addr;
7066 struct qeth_card *card;
7068 QETH_DBF_TEXT(trace,3,"ip6event");
7070 card = qeth_get_card_from_dev(dev);
/* Card must support the IPv6 IP-assist; otherwise ignore the event. */
7073 if (!qeth_is_supported(card, IPA_IPV6))
7076 addr = qeth_get_addr_buffer(QETH_PROT_IPV6);
7078 memcpy(&addr->u.a6.addr, &ifa->addr, sizeof(struct in6_addr));
7079 addr->u.a6.pfxlen = ifa->prefix_len;
7080 addr->type = QETH_IP_TYPE_NORMAL;
7086 if (!qeth_add_ip(card, addr))
7090 if (!qeth_delete_ip(card, addr))
7096 if (qeth_set_thread_start_bit(card, QETH_SET_IP_THREAD) == 0)
7097 schedule_work(&card->kernel_thread_starter);
/* Notifier block hooking qeth_ip6_event into the inet6addr chain. */
7102 static struct notifier_block qeth_ip6_notifier = {
/*
 * Reboot notifier: on shutdown/reboot, walk every qeth device bound to
 * the ccwgroup driver and clear its IP list and QDIO queues so the
 * hardware is left quiescent.  Iteration is protected by the driver
 * core's subsystem rwsem (pre-klist driver model API).
 */
7109 qeth_reboot_event(struct notifier_block *this, unsigned long event, void *ptr)
7112 struct device *entry;
7113 struct qeth_card *card;
7115 down_read(&qeth_ccwgroup_driver.driver.bus->subsys.rwsem);
7116 list_for_each_entry(entry, &qeth_ccwgroup_driver.driver.devices,
7118 card = (struct qeth_card *) entry->driver_data;
7119 qeth_clear_ip_list(card, 0, 0);
7120 qeth_qdio_clear_card(card, 0);
7122 up_read(&qeth_ccwgroup_driver.driver.bus->subsys.rwsem);
/* Notifier block hooking qeth_reboot_event into the reboot chain. */
7127 static struct notifier_block qeth_reboot_notifier = {
/*
 * Register the reboot, inetaddr and (if configured) inet6addr notifiers.
 * On failure, previously registered notifiers are unwound via the error
 * labels (goto targets are elided in this extract).
 */
7133 qeth_register_notifiers(void)
7137 QETH_DBF_TEXT(trace,5,"regnotif");
7138 if ((r = register_reboot_notifier(&qeth_reboot_notifier)))
7140 if ((r = register_inetaddr_notifier(&qeth_ip_notifier)))
7142 #ifdef CONFIG_QETH_IPV6
7143 if ((r = register_inet6addr_notifier(&qeth_ip6_notifier)))
/* --- unwind path (reached via elided gotos) --- */
7148 #ifdef CONFIG_QETH_IPV6
7150 unregister_inetaddr_notifier(&qeth_ip_notifier);
7153 unregister_reboot_notifier(&qeth_reboot_notifier);
7158 * unregister all event notifiers
/*
 * Unregister every notifier registered by qeth_register_notifiers().
 * BUG_ON: failure to unregister here would leave a callback pointing
 * into a module about to be unloaded.
 */
7161 qeth_unregister_notifiers(void)
7164 QETH_DBF_TEXT(trace,5,"unregnot");
7165 BUG_ON(unregister_reboot_notifier(&qeth_reboot_notifier));
7166 BUG_ON(unregister_inetaddr_notifier(&qeth_ip_notifier));
7167 #ifdef CONFIG_QETH_IPV6
7168 BUG_ON(unregister_inet6addr_notifier(&qeth_ip6_notifier));
7169 #endif /* QETH_IPV6 */
7173 #ifdef CONFIG_QETH_IPV6
/*
 * Install the qeth ARP constructor into the global arp_tbl (saving the
 * original), and heap-allocate a copy of the direct-xmit neigh_ops
 * template.  Returns nonzero on allocation failure (return lines are
 * elided in this extract).
 */
7175 qeth_ipv6_init(void)
7177 qeth_old_arp_constructor = arp_tbl.constructor;
/* Swap the constructor under arp_tbl's writer lock. */
7178 write_lock(&arp_tbl.lock);
7179 arp_tbl.constructor = qeth_arp_constructor;
7180 write_unlock(&arp_tbl.lock);
7182 arp_direct_ops = (struct neigh_ops*)
7183 kmalloc(sizeof(struct neigh_ops), GFP_KERNEL);
7184 if (!arp_direct_ops)
7187 memcpy(arp_direct_ops, &arp_direct_ops_template,
7188 sizeof(struct neigh_ops));
/*
 * Undo qeth_ipv6_init(): restore the original arp_tbl constructor under
 * the table lock and free the cloned neigh_ops.
 */
7194 qeth_ipv6_uninit(void)
7196 write_lock(&arp_tbl.lock);
7197 arp_tbl.constructor = qeth_old_arp_constructor;
7198 write_unlock(&arp_tbl.lock);
7199 kfree(arp_direct_ops);
7201 #endif /* CONFIG_QETH_IPV6 */
/*
 * Remove all sysfs/driver-core registrations in reverse order of
 * qeth_sysfs_register(): driver attributes, ccw driver, ccwgroup driver,
 * then the s390 root device.
 */
7204 qeth_sysfs_unregister(void)
7206 qeth_remove_driver_attributes();
7207 ccw_driver_unregister(&qeth_ccw_driver);
7208 ccwgroup_driver_unregister(&qeth_ccwgroup_driver);
7209 s390_root_dev_unregister(qeth_root_dev);
7212 * register qeth at sysfs
/*
 * Register the ccwgroup driver, the ccw driver, the driver attributes
 * and the "qeth" s390 root device, in that order.  Error unwinding
 * (goto labels / returns) is elided in this extract.
 */
7215 qeth_sysfs_register(void)
7219 rc = ccwgroup_driver_register(&qeth_ccwgroup_driver);
7222 rc = ccw_driver_register(&qeth_ccw_driver);
7225 rc = qeth_create_driver_attributes();
7228 qeth_root_dev = s390_root_dev_register("qeth");
/* s390_root_dev_register returns ERR_PTR on failure, not NULL. */
7229 if (IS_ERR(qeth_root_dev)) {
7230 rc = PTR_ERR(qeth_root_dev);
/*
 * Module init (body of qeth_init; its signature line was not sampled in
 * this extract): announce versions, initialize the global card/notify
 * lists and locks, then bring up DBF views, sysfs, IPv6 ARP hooks,
 * notifiers and procfs.  Each failure branches to unwind labels (elided)
 * that tear down everything registered so far.
 */
7245 PRINT_INFO("loading %s (%s/%s/%s/%s/%s/%s/%s %s %s)\n",
7246 version, VERSION_QETH_C, VERSION_QETH_H,
7247 VERSION_QETH_MPC_H, VERSION_QETH_MPC_C,
7248 VERSION_QETH_FS_H, VERSION_QETH_PROC_C,
7249 VERSION_QETH_SYS_C, QETH_VERSION_IPV6,
7252 INIT_LIST_HEAD(&qeth_card_list.list);
7253 INIT_LIST_HEAD(&qeth_notify_list);
7254 spin_lock_init(&qeth_notify_lock);
7255 rwlock_init(&qeth_card_list.rwlock);
7257 if (qeth_register_dbf_views())
7259 if (qeth_sysfs_register())
7262 #ifdef CONFIG_QETH_IPV6
7263 if (qeth_ipv6_init()) {
7264 PRINT_ERR("Out of memory during ipv6 init.\n")ERRATUM_REMOVED;
7267 #endif /* QETH_IPV6 */
7268 if (qeth_register_notifiers())
7270 if (qeth_create_procfs_entries())
/* --- error unwind tail (reached via elided gotos), reverse order --- */
7276 qeth_unregister_notifiers();
7278 #ifdef CONFIG_QETH_IPV6
7280 #endif /* QETH_IPV6 */
7282 qeth_sysfs_unregister();
7283 qeth_unregister_dbf_views();
7285 PRINT_ERR("Initialization failed");
/*
 * Module exit: soft-stop and remove every card ourselves (so we get a
 * clean shutdown rather than the hard stop qeth_remove_device would do),
 * then tear down IPv6 hooks, notifiers, procfs, sysfs and DBF views.
 * NOTE(review): the read lock is dropped before qeth_set_offline() and
 * the list_for_each_entry_safe restart lines are elided here -- the
 * lock/iterate interplay cannot be fully judged from this extract.
 */
7290 __exit qeth_exit(void)
7292 struct qeth_card *card, *tmp;
7293 unsigned long flags;
7295 QETH_DBF_TEXT(trace,1, "cleanup.");
7298 * We would not need to clean up our devices here, because the
7299 * common device layer calls qeth_remove_device for each device
7300 * as soon as we unregister our driver (done in qeth_sysfs_unregister).
7301 * But we do cleanup here so we can do a "soft" shutdown of our cards.
7302 * qeth_remove_device called by the common device layer would otherwise
7303 * do a "hard" shutdown (card->use_hard_stop is set to one in
7304 * qeth_remove_device).
7307 read_lock_irqsave(&qeth_card_list.rwlock, flags);
7308 list_for_each_entry_safe(card, tmp, &qeth_card_list.list, list){
7309 read_unlock_irqrestore(&qeth_card_list.rwlock, flags);
7310 qeth_set_offline(card->gdev);
7311 qeth_remove_device(card->gdev);
7314 read_unlock_irqrestore(&qeth_card_list.rwlock, flags);
7315 #ifdef CONFIG_QETH_IPV6
7318 qeth_unregister_notifiers();
7319 qeth_remove_procfs_entries();
7320 qeth_sysfs_unregister();
7321 qeth_unregister_dbf_views();
7322 printk("qeth: removed\n");
/* Module boilerplate: exported symbol, init/exit hooks, metadata. */
7325 EXPORT_SYMBOL(qeth_eyecatcher);
7326 module_init(qeth_init);
7327 module_exit(qeth_exit);
7328 MODULE_AUTHOR("Frank Pavlic <pavlic@de.ibm.com>");
7329 MODULE_DESCRIPTION("Linux on zSeries OSA Express and HiperSockets support\n" \
7330 "Copyright 2000,2003 IBM Corporation\n");
7332 MODULE_LICENSE("GPL");