3 * linux/drivers/s390/net/qeth_main.c ($Revision: 1.125 $)
5 * Linux on zSeries OSA Express and HiperSockets support
7 * Copyright 2000,2003 IBM Corporation
9 * Author(s): Original Code written by
10 * Utz Bacher (utz.bacher@de.ibm.com)
12 * Frank Pavlic (pavlic@de.ibm.com) and
13 * Thomas Spatzier <tspat@de.ibm.com>
15 * $Revision: 1.125 $ $Date: 2004/06/29 17:28:24 $
17 * This program is free software; you can redistribute it and/or modify
18 * it under the terms of the GNU General Public License as published by
19 * the Free Software Foundation; either version 2, or (at your option)
22 * This program is distributed in the hope that it will be useful,
23 * but WITHOUT ANY WARRANTY; without even the implied warranty of
24 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
25 * GNU General Public License for more details.
27 * You should have received a copy of the GNU General Public License
28 * along with this program; if not, write to the Free Software
29 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
34 * eye catcher; just for debugging purposes
42 #include <linux/config.h>
43 #include <linux/module.h>
44 #include <linux/moduleparam.h>
46 #include <linux/string.h>
47 #include <linux/errno.h>
51 #include <asm/ebcdic.h>
52 #include <linux/ctype.h>
53 #include <asm/semaphore.h>
54 #include <asm/timex.h>
56 #include <linux/inetdevice.h>
57 #include <linux/netdevice.h>
58 #include <linux/sched.h>
59 #include <linux/workqueue.h>
60 #include <linux/kernel.h>
61 #include <linux/slab.h>
62 #include <linux/interrupt.h>
63 #include <linux/tcp.h>
64 #include <linux/icmp.h>
65 #include <linux/skbuff.h>
66 #include <net/route.h>
69 #include <linux/igmp.h>
71 #include <asm/uaccess.h>
72 #include <linux/init.h>
73 #include <linux/reboot.h>
75 #include <linux/mii.h>
/* Driver version banner and module-wide state.
 * NOTE(review): this extract is fragmentary -- the leading integer on each
 * line is the original source line number; interior lines are missing. */
81 #define VERSION_QETH_C "$Revision: 1.125 $"
82 static const char *version = "qeth S/390 OSA-Express driver";
85 * Debug Facility Stuff
/* s390 debug-facility (s390dbf) areas, one per trace category; filled in
 * by the (not visible here) dbf registration code. */
87 static debug_info_t *qeth_dbf_setup = NULL;
88 static debug_info_t *qeth_dbf_data = NULL;
89 static debug_info_t *qeth_dbf_misc = NULL;
90 static debug_info_t *qeth_dbf_control = NULL;
91 static debug_info_t *qeth_dbf_trace = NULL;
92 static debug_info_t *qeth_dbf_sense = NULL;
93 static debug_info_t *qeth_dbf_qerr = NULL;
/* Per-CPU scratch buffer, presumably used by the QETH_DBF_TEXT_ format
 * macros -- TODO confirm against the macro definitions in the header. */
95 DEFINE_PER_CPU(char[256], qeth_dbf_txt_buf);
98 * some more definitions and declarations
/* Table of known device type/model tuples; columns [2]..[9] are indexed
 * by qeth_determine_card_type() below. */
100 static unsigned int known_devices[][10] = QETH_MODELLIST_ARRAY;
102 /* list of our cards */
103 struct qeth_card_list_struct qeth_card_list;
104 /*process list want to be notified*/
105 spinlock_t qeth_notify_lock;
106 struct list_head qeth_notify_list;
/* Default callback installed on every command buffer (see
 * qeth_setup_channel / qeth_release_buffer). */
108 static void qeth_send_control_data_cb(struct qeth_channel *,
109 struct qeth_cmd_buffer *);
/* Forward declarations. NOTE(review): the return-type lines ("static int",
 * "static void", ...) are missing from this extract; only the declarator
 * lines survived. */
112 * here we go with function implementation
115 qeth_init_qdio_info(struct qeth_card *card);
118 qeth_init_qdio_queues(struct qeth_card *card);
121 qeth_alloc_qdio_buffers(struct qeth_card *card);
124 qeth_free_qdio_buffers(struct qeth_card *);
127 qeth_clear_qdio_buffers(struct qeth_card *);
130 qeth_clear_ip_list(struct qeth_card *, int, int);
133 qeth_clear_ipacmd_list(struct qeth_card *);
136 qeth_qdio_clear_card(struct qeth_card *, int);
139 qeth_clear_working_pool_list(struct qeth_card *);
142 qeth_clear_cmd_buffers(struct qeth_channel *);
145 qeth_stop(struct net_device *);
148 qeth_clear_ipato_list(struct qeth_card *);
151 qeth_is_addr_covered_by_ipato(struct qeth_card *, struct qeth_ipaddr *);
154 qeth_irq_tasklet(unsigned long);
157 qeth_set_online(struct ccwgroup_device *);
159 static struct qeth_ipaddr *
160 qeth_get_addr_buffer(enum qeth_prot_versions);
/* qeth_notify_processes(): send the registered signal to every task on
 * qeth_notify_list, under qeth_notify_lock. */
163 qeth_notify_processes(void)
165 /*notify all registered processes */
166 struct qeth_notify_list_struct *n_entry;
168 QETH_DBF_TEXT(trace,3,"procnoti");
169 spin_lock(&qeth_notify_lock);
170 list_for_each_entry(n_entry, &qeth_notify_list, list) {
171 send_sig(n_entry->signum, n_entry->task, 1);
173 spin_unlock(&qeth_notify_lock);
/* qeth_notifier_unregister(): remove every notify-list entry belonging to
 * task p. Uses the _safe iterator because entries are deleted while
 * walking the list. NOTE(review): lines 186-190 (presumably kfree of the
 * removed entry and loop close) are missing from this extract. */
177 qeth_notifier_unregister(struct task_struct *p)
179 struct qeth_notify_list_struct *n_entry, *tmp;
181 QETH_DBF_TEXT(trace, 2, "notunreg");
182 spin_lock(&qeth_notify_lock);
183 list_for_each_entry_safe(n_entry, tmp, &qeth_notify_list, list) {
184 if (n_entry->task == p) {
185 list_del(&n_entry->list);
191 spin_unlock(&qeth_notify_lock);
/* qeth_notifier_register(): register task p for signal signum. If an
 * entry for p already exists, only its signum is updated; otherwise a new
 * entry is allocated and added. NOTE(review): lines 213-215 are missing --
 * presumably the kmalloc NULL check and the n_entry->task = p assignment;
 * verify against the full source. */
195 qeth_notifier_register(struct task_struct *p, int signum)
197 struct qeth_notify_list_struct *n_entry;
199 QETH_DBF_TEXT(trace, 2, "notreg");
200 /*check first if entry already exists*/
201 spin_lock(&qeth_notify_lock);
202 list_for_each_entry(n_entry, &qeth_notify_list, list) {
203 if (n_entry->task == p) {
204 n_entry->signum = signum;
205 spin_unlock(&qeth_notify_lock);
209 spin_unlock(&qeth_notify_lock);
211 n_entry = (struct qeth_notify_list_struct *)
212 kmalloc(sizeof(struct qeth_notify_list_struct),GFP_KERNEL);
216 n_entry->signum = signum;
217 spin_lock(&qeth_notify_lock);
218 list_add(&n_entry->list,&qeth_notify_list);
219 spin_unlock(&qeth_notify_lock);
225 * free channel command buffers
/* qeth_clean_channel(): free the data buffer of every command buffer
 * (iob) on the channel. kfree(NULL) is safe, so partially set-up
 * channels can be cleaned too. */
228 qeth_clean_channel(struct qeth_channel *channel)
232 QETH_DBF_TEXT(setup, 2, "freech");
233 for (cnt = 0; cnt < QETH_CMD_BUFFER_NO; cnt++)
234 kfree(channel->iob[cnt].data);
/* qeth_free_card(): tear down a card -- read/write channel buffers, the
 * net_device, IP and IPATO lists, and the QDIO buffers -- then
 * (presumably, in missing line 253) free the card structure itself. */
241 qeth_free_card(struct qeth_card *card)
244 QETH_DBF_TEXT(setup, 2, "freecrd");
245 QETH_DBF_HEX(setup, 2, &card, sizeof(void *));
246 qeth_clean_channel(&card->read);
247 qeth_clean_channel(&card->write);
249 free_netdev(card->dev);
250 qeth_clear_ip_list(card, 0, 0);
251 qeth_clear_ipato_list(card);
252 qeth_free_qdio_buffers(card);
257 * alloc memory for command buffer per channel
/* qeth_setup_channel(): allocate QETH_CMD_BUFFER_NO DMA-capable command
 * buffers and initialize channel state (iob lock, wait queue, irq
 * tasklet). On partial allocation failure (cnt < QETH_CMD_BUFFER_NO) the
 * already-allocated buffers are freed -- the backward-walking loop lines
 * are missing from this extract. */
260 qeth_setup_channel(struct qeth_channel *channel)
264 QETH_DBF_TEXT(setup, 2, "setupch");
265 for (cnt=0; cnt < QETH_CMD_BUFFER_NO; cnt++) {
266 channel->iob[cnt].data = (char *)
267 kmalloc(QETH_BUFSIZE, GFP_DMA|GFP_KERNEL);
268 if (channel->iob[cnt].data == NULL)
270 channel->iob[cnt].state = BUF_STATE_FREE;
271 channel->iob[cnt].channel = channel;
272 channel->iob[cnt].callback = qeth_send_control_data_cb;
273 channel->iob[cnt].rc = 0;
275 if (cnt < QETH_CMD_BUFFER_NO) {
277 kfree(channel->iob[cnt].data);
281 channel->io_buf_no = 0;
282 atomic_set(&channel->irq_pending, 0);
283 spin_lock_init(&channel->iob_lock);
285 init_waitqueue_head(&channel->wait_q);
286 channel->irq_tasklet.data = (unsigned long) channel;
287 channel->irq_tasklet.func = qeth_irq_tasklet;
292 * alloc memory for card structure
/* qeth_alloc_card(): kmalloc + zero a qeth_card and set up its read and
 * write channels; on failure the partially built card is freed (the
 * error-path lines are missing here). Returns the card or NULL. */
294 static struct qeth_card *
295 qeth_alloc_card(void)
297 struct qeth_card *card;
299 QETH_DBF_TEXT(setup, 2, "alloccrd");
300 card = (struct qeth_card *) kmalloc(sizeof(struct qeth_card),
304 QETH_DBF_HEX(setup, 2, &card, sizeof(void *));
305 memset(card, 0, sizeof(struct qeth_card));
306 if (qeth_setup_channel(&card->read)) {
310 if (qeth_setup_channel(&card->write)) {
311 qeth_clean_channel(&card->read);
/* __qeth_check_irb_error(): the irb pointer delivered to the interrupt
 * handler may be an ERR_PTR-encoded error; classify it (-EIO, -ETIMEDOUT,
 * or unknown) and log it. The switch-case labels themselves are missing
 * from this extract. */
319 __qeth_check_irb_error(struct ccw_device *cdev, struct irb *irb)
324 switch (PTR_ERR(irb)) {
326 PRINT_WARN("i/o-error on device %s\n", cdev->dev.bus_id);
327 QETH_DBF_TEXT(trace, 2, "ckirberr");
328 QETH_DBF_TEXT_(trace, 2, " rc%d", -EIO);
331 PRINT_WARN("timeout on device %s\n", cdev->dev.bus_id);
332 QETH_DBF_TEXT(trace, 2, "ckirberr");
333 QETH_DBF_TEXT_(trace, 2, " rc%d", -ETIMEDOUT);
336 PRINT_WARN("unknown error %ld on device %s\n", PTR_ERR(irb),
338 QETH_DBF_TEXT(trace, 2, "ckirberr");
339 QETH_DBF_TEXT(trace, 2, " rc???");
/* qeth_get_problem(): inspect channel status (cstat), device status
 * (dstat) and sense data of an irb and classify the problem. Checks, in
 * order: general channel checks, then unit-check sense patterns
 * (resetting event, command reject, 0xaf/0xfe "AFFE", all-zero sense).
 * The return statements for each branch are missing from this extract. */
345 qeth_get_problem(struct ccw_device *cdev, struct irb *irb)
350 sense = (char *) irb->ecw;
351 cstat = irb->scsw.cstat;
352 dstat = irb->scsw.dstat;
354 if (cstat & (SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK |
355 SCHN_STAT_CHN_DATA_CHK | SCHN_STAT_CHAIN_CHECK |
356 SCHN_STAT_PROT_CHECK | SCHN_STAT_PROG_CHECK)) {
357 QETH_DBF_TEXT(trace,2, "CGENCHK");
358 PRINT_WARN("check on device %s, dstat=x%x, cstat=x%x ",
359 cdev->dev.bus_id, dstat, cstat);
/* dump the first 64 bytes of the irb in two 32-byte chunks */
360 HEXDUMP16(WARN, "irb: ", irb);
361 HEXDUMP16(WARN, "irb: ", ((char *) irb) + 32);
365 if (dstat & DEV_STAT_UNIT_CHECK) {
366 if (sense[SENSE_RESETTING_EVENT_BYTE] &
367 SENSE_RESETTING_EVENT_FLAG) {
368 QETH_DBF_TEXT(trace,2,"REVIND");
371 if (sense[SENSE_COMMAND_REJECT_BYTE] &
372 SENSE_COMMAND_REJECT_FLAG) {
373 QETH_DBF_TEXT(trace,2,"CMDREJi");
376 if ((sense[2] == 0xaf) && (sense[3] == 0xfe)) {
377 QETH_DBF_TEXT(trace,2,"AFFE");
380 if ((!sense[0]) && (!sense[1]) && (!sense[2]) && (!sense[3])) {
381 QETH_DBF_TEXT(trace,2,"ZEROSEN");
384 QETH_DBF_TEXT(trace,2,"DGENCHK");
389 static int qeth_issue_next_read(struct qeth_card *);
/* qeth_irq(): CCW interrupt handler shared by the read, write and data
 * subchannels of a card (installed in qeth_probe_device). It:
 *  - bails out on ERR_PTR irbs via __qeth_check_irb_error();
 *  - maps cdev to the owning channel and clears irq_pending;
 *  - tracks STOPPED/HALTED channel state from the scsw function control;
 *  - short-circuits clear/halt completions and data-channel interrupts;
 *  - on unit check/exception, dumps sense data, calls qeth_get_problem()
 *    and (presumably on error, missing lines 458-463) schedules recovery;
 *  - otherwise marks the completed command buffer (intparm carries its
 *    physical address) PROCESSED, chains the next read on the read
 *    channel, and kicks the channel's irq tasklet.
 * NOTE(review): several control-flow lines (else/return/goto) are missing
 * from this extract. */
395 qeth_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
399 struct qeth_cmd_buffer *buffer;
400 struct qeth_channel *channel;
401 struct qeth_card *card;
403 QETH_DBF_TEXT(trace,5,"irq");
405 if (__qeth_check_irb_error(cdev, irb))
407 cstat = irb->scsw.cstat;
408 dstat = irb->scsw.dstat;
410 card = CARD_FROM_CDEV(cdev);
414 if (card->read.ccwdev == cdev){
415 channel = &card->read;
416 QETH_DBF_TEXT(trace,5,"read");
417 } else if (card->write.ccwdev == cdev) {
418 channel = &card->write;
419 QETH_DBF_TEXT(trace,5,"write");
421 channel = &card->data;
422 QETH_DBF_TEXT(trace,5,"data");
424 atomic_set(&channel->irq_pending, 0);
426 if (irb->scsw.fctl & (SCSW_FCTL_CLEAR_FUNC))
427 channel->state = CH_STATE_STOPPED;
429 if (irb->scsw.fctl & (SCSW_FCTL_HALT_FUNC))
430 channel->state = CH_STATE_HALTED;
432 /*let's wake up immediately on data channel*/
433 if ((channel == &card->data) && (intparm != 0))
436 if (intparm == QETH_CLEAR_CHANNEL_PARM) {
437 QETH_DBF_TEXT(trace, 6, "clrchpar");
438 /* we don't have to handle this further */
441 if (intparm == QETH_HALT_CHANNEL_PARM) {
442 QETH_DBF_TEXT(trace, 6, "hltchpar");
443 /* we don't have to handle this further */
446 if ((dstat & DEV_STAT_UNIT_EXCEP) ||
447 (dstat & DEV_STAT_UNIT_CHECK) ||
449 if (irb->esw.esw0.erw.cons) {
450 /* TODO: we should make this s390dbf */
451 PRINT_WARN("sense data available on channel %s.\n",
452 CHANNEL_ID(channel));
453 PRINT_WARN(" cstat 0x%X\n dstat 0x%X\n", cstat, dstat);
454 HEXDUMP16(WARN,"irb: ",irb);
455 HEXDUMP16(WARN,"sense data: ",irb->ecw);
457 rc = qeth_get_problem(cdev,irb);
459 qeth_schedule_recovery(card);
/* intparm is the physical address of the command buffer this interrupt
 * completes; translate back to a kernel virtual address. */
465 buffer = (struct qeth_cmd_buffer *) __va((addr_t)intparm);
466 buffer->state = BUF_STATE_PROCESSED;
468 if (channel == &card->data)
471 if (channel == &card->read &&
472 channel->state == CH_STATE_UP)
473 qeth_issue_next_read(card);
475 tasklet_schedule(&channel->irq_tasklet);
478 wake_up(&card->wait_q);
482 * tasklet function scheduled from irq handler
/* qeth_irq_tasklet(): process command buffers marked PROCESSED by
 * qeth_irq(), in ring order starting at channel->buf_no, invoking each
 * buffer's callback. NOTE(review): the line initializing iob (presumably
 * iob = channel->iob, original line ~494) is missing from this extract;
 * verify against the full source. */
485 qeth_irq_tasklet(unsigned long data)
487 struct qeth_card *card;
488 struct qeth_channel *channel;
489 struct qeth_cmd_buffer *iob;
492 QETH_DBF_TEXT(trace,5,"irqtlet");
493 channel = (struct qeth_channel *) data;
495 index = channel->buf_no;
496 card = CARD_FROM_CDEV(channel->ccwdev);
497 while (iob[index].state == BUF_STATE_PROCESSED) {
498 if (iob[index].callback !=NULL) {
499 iob[index].callback(channel,iob + index);
501 index = (index + 1) % QETH_CMD_BUFFER_NO;
503 channel->buf_no = index;
504 wake_up(&card->wait_q);
507 static int qeth_stop_card(struct qeth_card *);
/* qeth_set_offline(): ccwgroup "set offline" callback. Stops the card,
 * sets all three CCW devices offline, preserves CARD_STATE_RECOVER if the
 * card was UP (so a later set_online restores it), and notifies
 * registered user processes. */
510 qeth_set_offline(struct ccwgroup_device *cgdev)
512 struct qeth_card *card = (struct qeth_card *) cgdev->dev.driver_data;
513 enum qeth_card_states recover_flag;
515 QETH_DBF_TEXT(setup, 3, "setoffl");
516 QETH_DBF_HEX(setup, 3, &card, sizeof(void *));
518 recover_flag = card->state;
519 if (qeth_stop_card(card) == -ERESTARTSYS){
520 PRINT_WARN("Stopping card %s interrupted by user!\n",
/* set offline in reverse order of use: data, write, read */
524 ccw_device_set_offline(CARD_DDEV(card));
525 ccw_device_set_offline(CARD_WDEV(card));
526 ccw_device_set_offline(CARD_RDEV(card));
527 if (recover_flag == CARD_STATE_UP)
528 card->state = CARD_STATE_RECOVER;
529 qeth_notify_processes();
/* qeth_remove_device(): ccwgroup remove callback. Hard-stops an online
 * card, unlinks it from the global card list (under the rwlock),
 * unregisters the net_device and sysfs attributes, frees the card and
 * drops the device reference taken in qeth_probe_device(). */
534 qeth_remove_device(struct ccwgroup_device *cgdev)
536 struct qeth_card *card = (struct qeth_card *) cgdev->dev.driver_data;
539 QETH_DBF_TEXT(setup, 3, "rmdev");
540 QETH_DBF_HEX(setup, 3, &card, sizeof(void *));
545 if (cgdev->state == CCWGROUP_ONLINE){
546 card->use_hard_stop = 1;
547 qeth_set_offline(cgdev);
549 /* remove form our internal list */
550 write_lock_irqsave(&qeth_card_list.rwlock, flags);
551 list_del(&card->list);
552 write_unlock_irqrestore(&qeth_card_list.rwlock, flags);
554 unregister_netdev(card->dev);
555 qeth_remove_device_attributes(&cgdev->dev);
556 qeth_free_card(card);
557 cgdev->dev.driver_data = NULL;
558 put_device(&cgdev->dev);
/* Forward declarations (return-type lines missing from this extract). */
562 qeth_register_addr_entry(struct qeth_card *, struct qeth_ipaddr *);
564 qeth_deregister_addr_entry(struct qeth_card *, struct qeth_ipaddr *);
567 * Add/remove address to/from card's ip list, i.e. try to add or remove
568 * reference to/from an IP address that is already registered on the card.
570 * 0 address was on card and its reference count has been adjusted,
571 * but is still > 0, so nothing has to be done
572 * also returns 0 if card was not on card and the todo was to delete
573 * the address -> there is also nothing to be done
574 * 1 address was not on card and the todo is to add it to the card's ip
576 * -1 address was on card and its reference count has been decremented
577 * to <= 0 by the todo -> address must be removed from card
/* __qeth_ref_ip_on_card(): look up todo's address on card->ip_list
 * (matching proto, type and address/mask resp. prefix), adjust its
 * refcount by todo->users, and report via the return codes documented
 * above; *__addr receives the matched on-card entry. Caller presumably
 * holds card->ip_lock (see qeth_set_ip_addr_list) -- TODO confirm. */
580 __qeth_ref_ip_on_card(struct qeth_card *card, struct qeth_ipaddr *todo,
581 struct qeth_ipaddr **__addr)
583 struct qeth_ipaddr *addr;
586 list_for_each_entry(addr, &card->ip_list, entry) {
587 if ((addr->proto == QETH_PROT_IPV4) &&
588 (todo->proto == QETH_PROT_IPV4) &&
589 (addr->type == todo->type) &&
590 (addr->u.a4.addr == todo->u.a4.addr) &&
591 (addr->u.a4.mask == todo->u.a4.mask) ){
595 if ((addr->proto == QETH_PROT_IPV6) &&
596 (todo->proto == QETH_PROT_IPV6) &&
597 (addr->type == todo->type) &&
598 (addr->u.a6.pfxlen == todo->u.a6.pfxlen) &&
599 (memcmp(&addr->u.a6.addr, &todo->u.a6.addr,
600 sizeof(struct in6_addr)) == 0)) {
606 addr->users += todo->users;
607 if (addr->users <= 0){
611 /* for VIPA and RXIP limit refcount to 1 */
612 if (addr->type != QETH_IP_TYPE_NORMAL)
617 if (todo->users > 0){
618 /* for VIPA and RXIP limit refcount to 1 */
619 if (todo->type != QETH_IP_TYPE_NORMAL)
/* __qeth_address_exists_in_list(): return whether an address equal to
 * addr is on the given list; same_type selects whether the entry's type
 * must match (1) or must differ (0). The return statements are missing
 * from this extract. */
627 __qeth_address_exists_in_list(struct list_head *list, struct qeth_ipaddr *addr,
630 struct qeth_ipaddr *tmp;
632 list_for_each_entry(tmp, list, entry) {
633 if ((tmp->proto == QETH_PROT_IPV4) &&
634 (addr->proto == QETH_PROT_IPV4) &&
635 ((same_type && (tmp->type == addr->type)) ||
636 (!same_type && (tmp->type != addr->type)) ) &&
637 (tmp->u.a4.addr == addr->u.a4.addr) ){
640 if ((tmp->proto == QETH_PROT_IPV6) &&
641 (addr->proto == QETH_PROT_IPV6) &&
642 ((same_type && (tmp->type == addr->type)) ||
643 (!same_type && (tmp->type != addr->type)) ) &&
644 (memcmp(&tmp->u.a6.addr, &addr->u.a6.addr,
645 sizeof(struct in6_addr)) == 0) ) {
653 * Add IP to be added to todo list. If there is already an "add todo"
654 * in this list we just incremenent the reference count.
655 * Returns 0 if we just incremented reference count.
/* __qeth_insert_ip_todo(): merge addr into card->ip_tbd_list. If a
 * matching todo already exists its user count is adjusted (and the todo
 * dropped entirely when it reaches zero); otherwise addr is appended,
 * with the IPATO takeover flag set when applicable. add selects +1/-1
 * refcounting. Caller holds card->ip_lock (see qeth_delete_ip /
 * qeth_add_ip below). */
658 __qeth_insert_ip_todo(struct qeth_card *card, struct qeth_ipaddr *addr, int add)
660 struct qeth_ipaddr *tmp, *t;
663 list_for_each_entry_safe(tmp, t, &card->ip_tbd_list, entry) {
664 if ((tmp->proto == QETH_PROT_IPV4) &&
665 (addr->proto == QETH_PROT_IPV4) &&
666 (tmp->type == addr->type) &&
667 (tmp->is_multicast == addr->is_multicast) &&
668 (tmp->u.a4.addr == addr->u.a4.addr) &&
669 (tmp->u.a4.mask == addr->u.a4.mask) ){
673 if ((tmp->proto == QETH_PROT_IPV6) &&
674 (addr->proto == QETH_PROT_IPV6) &&
675 (tmp->type == addr->type) &&
676 (tmp->is_multicast == addr->is_multicast) &&
677 (tmp->u.a6.pfxlen == addr->u.a6.pfxlen) &&
678 (memcmp(&tmp->u.a6.addr, &addr->u.a6.addr,
679 sizeof(struct in6_addr)) == 0) ){
685 if (addr->users != 0)
686 tmp->users += addr->users;
688 tmp->users += add? 1:-1;
689 if (tmp->users == 0){
690 list_del(&tmp->entry);
695 if (addr->users == 0)
696 addr->users += add? 1:-1;
697 if (add && (addr->type == QETH_IP_TYPE_NORMAL) &&
698 qeth_is_addr_covered_by_ipato(card, addr)){
699 QETH_DBF_TEXT(trace, 2, "tkovaddr");
700 addr->set_flags |= QETH_IPA_SETIP_TAKEOVER_FLAG;
702 list_add_tail(&addr->entry, &card->ip_tbd_list);
708 * Remove IP address from list
/* qeth_delete_ip(): queue a "delete" todo for addr, under ip_lock. */
711 qeth_delete_ip(struct qeth_card *card, struct qeth_ipaddr *addr)
716 QETH_DBF_TEXT(trace,4,"delip");
717 if (addr->proto == QETH_PROT_IPV4)
718 QETH_DBF_HEX(trace,4,&addr->u.a4.addr,4);
720 QETH_DBF_HEX(trace,4,&addr->u.a6.addr,4);
721 QETH_DBF_HEX(trace,4,((char *)&addr->u.a6.addr)+4,4);
723 spin_lock_irqsave(&card->ip_lock, flags);
724 rc = __qeth_insert_ip_todo(card, addr, 0);
725 spin_unlock_irqrestore(&card->ip_lock, flags);
/* qeth_add_ip(): queue an "add" todo for addr, under ip_lock. */
730 qeth_add_ip(struct qeth_card *card, struct qeth_ipaddr *addr)
735 QETH_DBF_TEXT(trace,4,"addip");
736 if (addr->proto == QETH_PROT_IPV4)
737 QETH_DBF_HEX(trace,4,&addr->u.a4.addr,4);
739 QETH_DBF_HEX(trace,4,&addr->u.a6.addr,4);
740 QETH_DBF_HEX(trace,4,((char *)&addr->u.a6.addr)+4,4);
742 spin_lock_irqsave(&card->ip_lock, flags);
743 rc = __qeth_insert_ip_todo(card, addr, 1);
744 spin_unlock_irqrestore(&card->ip_lock, flags);
/* qeth_reinsert_todos(): re-queue todos that failed registration --
 * negative user counts become deletes, others become adds. */
749 qeth_reinsert_todos(struct qeth_card *card, struct list_head *todos)
751 struct qeth_ipaddr *todo, *tmp;
753 list_for_each_entry_safe(todo, tmp, todos, entry){
754 list_del_init(&todo->entry);
755 if (todo->users < 0) {
756 if (!qeth_delete_ip(card, todo))
759 if (!qeth_add_ip(card, todo))
/* qeth_set_ip_addr_list(): drain card->ip_tbd_list, synchronizing the
 * on-card IP registrations with the wanted state. For each todo,
 * __qeth_ref_ip_on_card() decides whether it is a no-op (rc 0), a new
 * registration (rc 1) or a deregistration (rc -1). The hardware calls
 * are made with ip_lock dropped and re-taken; entries that fail are
 * collected on failed_todos and re-queued afterwards.
 * NOTE(review): the kfree of consumed todo/addr entries sits on lines
 * missing from this extract. */
766 qeth_set_ip_addr_list(struct qeth_card *card)
768 struct list_head failed_todos;
769 struct qeth_ipaddr *todo, *addr;
773 QETH_DBF_TEXT(trace, 2, "sdiplist");
774 QETH_DBF_HEX(trace, 2, &card, sizeof(void *));
776 INIT_LIST_HEAD(&failed_todos);
778 spin_lock_irqsave(&card->ip_lock, flags);
779 while (!list_empty(&card->ip_tbd_list)) {
780 todo = list_entry(card->ip_tbd_list.next,
781 struct qeth_ipaddr, entry);
782 list_del_init(&todo->entry);
783 rc = __qeth_ref_ip_on_card(card, todo, &addr);
785 /* nothing to be done; only adjusted refcount */
787 } else if (rc == 1) {
788 /* new entry to be added to on-card list */
789 spin_unlock_irqrestore(&card->ip_lock, flags);
790 rc = qeth_register_addr_entry(card, todo);
791 spin_lock_irqsave(&card->ip_lock, flags);
793 list_add_tail(&todo->entry, &card->ip_list);
795 list_add_tail(&todo->entry, &failed_todos);
796 } else if (rc == -1) {
797 /* on-card entry to be removed */
798 list_del_init(&addr->entry);
799 spin_unlock_irqrestore(&card->ip_lock, flags);
800 rc = qeth_deregister_addr_entry(card, addr);
801 spin_lock_irqsave(&card->ip_lock, flags);
/* deregistration failed: restore the entry and retry later */
806 list_add_tail(&addr->entry, &card->ip_list);
807 list_add_tail(&todo->entry, &failed_todos);
811 spin_unlock_irqrestore(&card->ip_lock, flags);
812 qeth_reinsert_todos(card, &failed_todos);
815 static void qeth_delete_mc_addresses(struct qeth_card *);
816 static void qeth_add_multicast_ipv4(struct qeth_card *);
817 #ifdef CONFIG_QETH_IPV6
818 static void qeth_add_multicast_ipv6(struct qeth_card *);
/* qeth_set_thread_start_bit(): mark thread as wanting to start, unless
 * it is not allowed or already pending; all mask accesses are guarded by
 * thread_mask_lock. Returns nonzero (presumably -EPERM, on the missing
 * line 830) when refused. */
822 qeth_set_thread_start_bit(struct qeth_card *card, unsigned long thread)
826 spin_lock_irqsave(&card->thread_mask_lock, flags);
827 if ( !(card->thread_allowed_mask & thread) ||
828 (card->thread_start_mask & thread) ) {
829 spin_unlock_irqrestore(&card->thread_mask_lock, flags);
832 card->thread_start_mask |= thread;
833 spin_unlock_irqrestore(&card->thread_mask_lock, flags);
/* qeth_clear_thread_start_bit(): retract a pending start request and
 * wake waiters on card->wait_q. */
838 qeth_clear_thread_start_bit(struct qeth_card *card, unsigned long thread)
842 spin_lock_irqsave(&card->thread_mask_lock, flags);
843 card->thread_start_mask &= ~thread;
844 spin_unlock_irqrestore(&card->thread_mask_lock, flags);
845 wake_up(&card->wait_q);
/* qeth_clear_thread_running_bit(): mark a thread as finished and wake
 * waiters on card->wait_q. */
849 qeth_clear_thread_running_bit(struct qeth_card *card, unsigned long thread)
853 spin_lock_irqsave(&card->thread_mask_lock, flags);
854 card->thread_running_mask &= ~thread;
855 spin_unlock_irqrestore(&card->thread_mask_lock, flags);
856 wake_up(&card->wait_q);
/* __qeth_do_run_thread(): atomically transition thread from "start
 * requested" to "running" when allowed and not already running. The
 * rc assignments for the three outcomes sit on lines missing from this
 * extract; qeth_do_run_thread() below waits for rc >= 0. */
860 __qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
865 spin_lock_irqsave(&card->thread_mask_lock, flags);
866 if (card->thread_start_mask & thread){
867 if ((card->thread_allowed_mask & thread) &&
868 !(card->thread_running_mask & thread)){
870 card->thread_start_mask &= ~thread;
871 card->thread_running_mask |= thread;
875 spin_unlock_irqrestore(&card->thread_mask_lock, flags);
/* qeth_do_run_thread(): block until __qeth_do_run_thread() yields a
 * definitive answer (>= 0). */
880 qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
884 wait_event(card->wait_q,
885 (rc = __qeth_do_run_thread(card, thread)) >= 0);
/* qeth_register_mc_addresses(): kernel-thread entry point (spawned via
 * kernel_thread in qeth_start_kernel_thread) that rebuilds the card's
 * multicast registrations and pushes the resulting IP list to the card. */
890 qeth_register_mc_addresses(void *ptr)
892 struct qeth_card *card;
894 card = (struct qeth_card *) ptr;
895 daemonize("qeth_reg_mcaddrs");
896 QETH_DBF_TEXT(trace,4,"regmcth1");
897 if (!qeth_do_run_thread(card, QETH_SET_MC_THREAD))
899 QETH_DBF_TEXT(trace,4,"regmcth2");
900 qeth_delete_mc_addresses(card);
901 qeth_add_multicast_ipv4(card);
902 #ifdef CONFIG_QETH_IPV6
903 qeth_add_multicast_ipv6(card);
905 qeth_set_ip_addr_list(card);
906 qeth_clear_thread_running_bit(card, QETH_SET_MC_THREAD);
/* qeth_register_ip_address(): kernel-thread entry point that flushes the
 * pending IP todo list to the card. */
911 qeth_register_ip_address(void *ptr)
913 struct qeth_card *card;
915 card = (struct qeth_card *) ptr;
916 daemonize("qeth_reg_ip");
917 QETH_DBF_TEXT(trace,4,"regipth1");
918 if (!qeth_do_run_thread(card, QETH_SET_IP_THREAD))
920 QETH_DBF_TEXT(trace,4,"regipth2");
921 qeth_set_ip_addr_list(card);
922 qeth_clear_thread_running_bit(card, QETH_SET_IP_THREAD);
/* qeth_recover(): kernel-thread entry point that performs a hard
 * offline/online cycle of the card and reports the outcome. */
927 qeth_recover(void *ptr)
929 struct qeth_card *card;
932 card = (struct qeth_card *) ptr;
933 daemonize("qeth_recover");
934 QETH_DBF_TEXT(trace,2,"recover1");
935 QETH_DBF_HEX(trace, 2, &card, sizeof(void *));
936 if (!qeth_do_run_thread(card, QETH_RECOVER_THREAD))
938 QETH_DBF_TEXT(trace,2,"recover2");
939 PRINT_WARN("Recovery of device %s started ...\n",
941 card->use_hard_stop = 1;
942 qeth_set_offline(card->gdev);
943 rc = qeth_set_online(card->gdev);
945 PRINT_INFO("Device %s successfully recovered!\n",
948 PRINT_INFO("Device %s could not be recovered!\n",
950 /* don't run another scheduled recovery */
951 qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
952 qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD);
/* qeth_schedule_recovery(): request the recovery thread; the actual
 * spawn happens from the kernel_thread_starter work item. */
957 qeth_schedule_recovery(struct qeth_card *card)
959 QETH_DBF_TEXT(trace,2,"startrec");
961 if (qeth_set_thread_start_bit(card, QETH_RECOVER_THREAD) == 0)
962 schedule_work(&card->kernel_thread_starter);
/* qeth_do_start_thread(): test (under thread_mask_lock) whether a start
 * of the given thread has been requested. */
966 qeth_do_start_thread(struct qeth_card *card, unsigned long thread)
971 spin_lock_irqsave(&card->thread_mask_lock, flags);
972 QETH_DBF_TEXT_(trace, 4, " %02x%02x%02x",
973 (u8) card->thread_start_mask,
974 (u8) card->thread_allowed_mask,
975 (u8) card->thread_running_mask);
976 rc = (card->thread_start_mask & thread);
977 spin_unlock_irqrestore(&card->thread_mask_lock, flags);
/* qeth_start_kernel_thread(): work-queue handler (see INIT_WORK in
 * qeth_setup_card) that spawns whichever of the three worker threads has
 * a pending start request; bails out unless read/write channels are up. */
982 qeth_start_kernel_thread(struct qeth_card *card)
984 QETH_DBF_TEXT(trace , 2, "strthrd");
986 if (card->read.state != CH_STATE_UP &&
987 card->write.state != CH_STATE_UP)
990 if (qeth_do_start_thread(card, QETH_SET_IP_THREAD))
991 kernel_thread(qeth_register_ip_address, (void *) card, SIGCHLD);
992 if (qeth_do_start_thread(card, QETH_SET_MC_THREAD))
993 kernel_thread(qeth_register_mc_addresses, (void *)card,SIGCHLD);
994 if (qeth_do_start_thread(card, QETH_RECOVER_THREAD))
995 kernel_thread(qeth_recover, (void *) card, SIGCHLD);
/* qeth_set_intial_options() [sic]: install the default per-card option
 * values used until sysfs attributes override them. */
1000 qeth_set_intial_options(struct qeth_card *card)
1002 card->options.route4.type = NO_ROUTER;
1003 #ifdef CONFIG_QETH_IPV6
1004 card->options.route6.type = NO_ROUTER;
1005 #endif /* QETH_IPV6 */
1006 card->options.checksum_type = QETH_CHECKSUM_DEFAULT;
1007 card->options.broadcast_mode = QETH_TR_BROADCAST_ALLRINGS;
1008 card->options.macaddr_mode = QETH_TR_MACADDR_NONCANONICAL;
1009 card->options.fake_broadcast = 0;
1010 card->options.add_hhlen = DEFAULT_ADD_HHLEN;
1011 card->options.fake_ll = 0;
1015 * initialize channels ,card and all state machines
/* qeth_setup_card(): one-time initialization of a freshly allocated
 * card: channel/card state machines, locks, thread masks, the
 * kernel_thread_starter work item, IP/todo/command lists, wait queue,
 * default options, IPATO state and QDIO bookkeeping. */
1018 qeth_setup_card(struct qeth_card *card)
1021 QETH_DBF_TEXT(setup, 2, "setupcrd");
1022 QETH_DBF_HEX(setup, 2, &card, sizeof(void *));
1024 card->read.state = CH_STATE_DOWN;
1025 card->write.state = CH_STATE_DOWN;
1026 card->data.state = CH_STATE_DOWN;
1027 card->state = CARD_STATE_DOWN;
1028 card->lan_online = 0;
1029 card->use_hard_stop = 0;
1031 #ifdef CONFIG_QETH_VLAN
1032 spin_lock_init(&card->vlanlock);
1033 card->vlangrp = NULL;
1035 spin_lock_init(&card->ip_lock);
1036 spin_lock_init(&card->thread_mask_lock);
1037 card->thread_start_mask = 0;
1038 card->thread_allowed_mask = 0;
1039 card->thread_running_mask = 0;
1040 INIT_WORK(&card->kernel_thread_starter,
1041 (void *)qeth_start_kernel_thread,card);
1042 INIT_LIST_HEAD(&card->ip_list);
1043 INIT_LIST_HEAD(&card->ip_tbd_list);
1044 INIT_LIST_HEAD(&card->cmd_waiter_list);
1045 init_waitqueue_head(&card->wait_q);
1046 /* intial options */
1047 qeth_set_intial_options(card);
1048 /* IP address takeover */
1049 INIT_LIST_HEAD(&card->ipato.entries);
1050 card->ipato.enabled = 0;
1051 card->ipato.invert4 = 0;
1052 card->ipato.invert6 = 0;
1053 /* init QDIO stuff */
1054 qeth_init_qdio_info(card);
/* qeth_determine_card_type(): match the read device's CU type/model
 * against the known_devices table (columns [2]/[3]) and copy card type,
 * output-queue count and multicast behaviour (columns [4], [8], [9]);
 * falls through to QETH_CARD_TYPE_UNKNOWN with an error message. */
1059 qeth_determine_card_type(struct qeth_card *card)
1063 QETH_DBF_TEXT(setup, 2, "detcdtyp");
1065 while (known_devices[i][4]) {
1066 if ((CARD_RDEV(card)->id.dev_type == known_devices[i][2]) &&
1067 (CARD_RDEV(card)->id.dev_model == known_devices[i][3])) {
1068 card->info.type = known_devices[i][4];
1069 card->qdio.no_out_queues = known_devices[i][8];
1070 card->info.is_multicast_different = known_devices[i][9];
1075 card->info.type = QETH_CARD_TYPE_UNKNOWN;
1076 PRINT_ERR("unknown card type on device %s\n", CARD_BUS_ID(card));
/* qeth_probe_device(): ccwgroup probe callback. Takes a reference on the
 * group device, allocates and initializes a qeth_card, installs qeth_irq
 * as the interrupt handler on all three CCW devices, creates sysfs
 * attributes, wires up the read/write/data channels, determines the card
 * type and links the card into the global list. Error paths free the
 * card (the put_device/return lines sit on missing lines). */
1081 qeth_probe_device(struct ccwgroup_device *gdev)
1083 struct qeth_card *card;
1085 unsigned long flags;
1088 QETH_DBF_TEXT(setup, 2, "probedev");
1091 if (!get_device(dev))
1094 card = qeth_alloc_card();
1097 QETH_DBF_TEXT_(setup, 2, "1err%d", -ENOMEM);
1100 if ((rc = qeth_setup_card(card))){
1101 QETH_DBF_TEXT_(setup, 2, "2err%d", rc);
1103 qeth_free_card(card);
1106 gdev->dev.driver_data = card;
/* all three subchannels (read/write/data) share one interrupt handler */
1108 gdev->cdev[0]->handler = qeth_irq;
1109 gdev->cdev[1]->handler = qeth_irq;
1110 gdev->cdev[2]->handler = qeth_irq;
1112 rc = qeth_create_device_attributes(dev);
1115 qeth_free_card(card);
1118 card->read.ccwdev = gdev->cdev[0];
1119 card->write.ccwdev = gdev->cdev[1];
1120 card->data.ccwdev = gdev->cdev[2];
1121 if ((rc = qeth_determine_card_type(card))){
1122 PRINT_WARN("%s: not a valid card type\n", __func__);
1123 QETH_DBF_TEXT_(setup, 2, "3err%d", rc);
1125 qeth_free_card(card);
1128 /* insert into our internal list */
1129 write_lock_irqsave(&qeth_card_list.rwlock, flags);
1130 list_add_tail(&card->list, &qeth_card_list.list);
1131 write_unlock_irqrestore(&qeth_card_list.rwlock, flags);
/* qeth_get_unitaddr(): read configuration data from the data device and
 * extract CHPID, unit address, CULA and the VM guest-LAN indicator
 * (EBCDIC "VM" at offsets 0x10/0x11). */
1137 qeth_get_unitaddr(struct qeth_card *card)
1143 QETH_DBF_TEXT(setup, 2, "getunit");
1144 rc = read_conf_data(CARD_DDEV(card), (void **) &prcd, &length);
1146 PRINT_ERR("read_conf_data for device %s returned %i\n",
1147 CARD_DDEV_ID(card), rc);
1150 card->info.chpid = prcd[30];
1151 card->info.unit_addr2 = prcd[31];
1152 card->info.cula = prcd[63];
1153 card->info.guestlan = ((prcd[0x10] == _ascebc['V']) &&
1154 (prcd[0x11] == _ascebc['M']));
/* qeth_init_tokens(): seed the well-known token values used in the
 * CM/ULP handshake. */
1159 qeth_init_tokens(struct qeth_card *card)
1161 card->token.issuer_rm_w = 0x00010103UL;
1162 card->token.cm_filter_w = 0x00010108UL;
1163 card->token.cm_connection_w = 0x0001010aUL;
1164 card->token.ulp_filter_w = 0x0001010bUL;
1165 card->token.ulp_connection_w = 0x0001010dUL;
/* raw_devno_from_bus_id(): parse the trailing four hex digits of a bus
 * id (e.g. "0.0.4711") into a device number. */
1169 raw_devno_from_bus_id(char *id)
1171 id += (strlen(id) - 4);
1172 return (__u16) simple_strtoul(id, &id, 16);
/* qeth_setup_ccw(): prepare the channel's CCW -- READ_CCW template for
 * the read channel, WRITE_CCW otherwise -- pointing at iob with the given
 * length. */
1178 qeth_setup_ccw(struct qeth_channel *channel,unsigned char *iob, __u32 len)
1180 struct qeth_card *card;
1182 QETH_DBF_TEXT(trace, 4, "setupccw");
1183 card = CARD_FROM_CDEV(channel->ccwdev);
1184 if (channel == &card->read)
1185 memcpy(&channel->ccw, READ_CCW, sizeof(struct ccw1));
1187 memcpy(&channel->ccw, WRITE_CCW, sizeof(struct ccw1));
1188 channel->ccw.count = len;
/* CCW data address is a 31/32-bit physical address */
1189 channel->ccw.cda = (__u32) __pa(iob);
1193 * get free buffer for ccws (IDX activation, lancmds,ipassists...)
/* __qeth_get_buffer(): scan the iob ring starting at io_buf_no for a
 * FREE buffer; lock and zero it. Returns NULL (on a missing line) when
 * the ring wraps without a hit. Caller holds iob_lock (see
 * qeth_get_buffer below). */
1195 static struct qeth_cmd_buffer *
1196 __qeth_get_buffer(struct qeth_channel *channel)
1200 QETH_DBF_TEXT(trace, 6, "getbuff");
1201 index = channel->io_buf_no;
1203 if (channel->iob[index].state == BUF_STATE_FREE) {
1204 channel->iob[index].state = BUF_STATE_LOCKED;
1205 channel->io_buf_no = (channel->io_buf_no + 1) %
1207 memset(channel->iob[index].data, 0, QETH_BUFSIZE);
1208 return channel->iob + index;
1210 index = (index + 1) % QETH_CMD_BUFFER_NO;
1211 } while(index != channel->io_buf_no);
1217 * release command buffer
/* qeth_release_buffer(): return iob to the FREE state (zeroed, default
 * callback restored) under iob_lock; waiters in qeth_wait_for_buffer are
 * presumably woken on a missing line -- verify against the full source. */
1220 qeth_release_buffer(struct qeth_channel *channel, struct qeth_cmd_buffer *iob)
1222 unsigned long flags;
1224 QETH_DBF_TEXT(trace, 6, "relbuff");
1225 spin_lock_irqsave(&channel->iob_lock, flags);
1226 memset(iob->data, 0, QETH_BUFSIZE);
1227 iob->state = BUF_STATE_FREE;
1228 iob->callback = qeth_send_control_data_cb;
1230 spin_unlock_irqrestore(&channel->iob_lock, flags);
/* qeth_get_buffer(): locked wrapper around __qeth_get_buffer(). */
1233 static struct qeth_cmd_buffer *
1234 qeth_get_buffer(struct qeth_channel *channel)
1236 struct qeth_cmd_buffer *buffer = NULL;
1237 unsigned long flags;
1239 spin_lock_irqsave(&channel->iob_lock, flags);
1240 buffer = __qeth_get_buffer(channel);
1241 spin_unlock_irqrestore(&channel->iob_lock, flags);
/* qeth_wait_for_buffer(): block on channel->wait_q until a command
 * buffer becomes available. */
1245 static struct qeth_cmd_buffer *
1246 qeth_wait_for_buffer(struct qeth_channel *channel)
1248 struct qeth_cmd_buffer *buffer;
1249 wait_event(channel->wait_q,
1250 ((buffer = qeth_get_buffer(channel)) != NULL));
/* qeth_clear_cmd_buffers(): force-release every command buffer and reset
 * the ring indices. */
1255 qeth_clear_cmd_buffers(struct qeth_channel *channel)
1259 for (cnt=0; cnt < QETH_CMD_BUFFER_NO; cnt++)
1260 qeth_release_buffer(channel,&channel->iob[cnt]);
1261 channel->buf_no = 0;
1262 channel->io_buf_no = 0;
1266 * start IDX for read and write channel
1269 qeth_idx_activate_get_answer(struct qeth_channel *channel,
1270 void (*idx_reply_cb)(struct qeth_channel *,
1271 struct qeth_cmd_buffer *))
1273 struct qeth_cmd_buffer *iob;
1274 unsigned long flags;
1276 struct qeth_card *card;
1278 QETH_DBF_TEXT(setup, 2, "idxanswr");
1279 card = CARD_FROM_CDEV(channel->ccwdev);
1280 iob = qeth_get_buffer(channel);
1281 iob->callback = idx_reply_cb;
1282 memcpy(&channel->ccw, READ_CCW, sizeof(struct ccw1));
1283 channel->ccw.count = QETH_BUFSIZE;
1284 channel->ccw.cda = (__u32) __pa(iob->data);
1286 wait_event(card->wait_q,
1287 atomic_compare_and_swap(0,1,&channel->irq_pending) == 0);
1288 QETH_DBF_TEXT(setup, 6, "noirqpnd");
1289 spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
1290 rc = ccw_device_start(channel->ccwdev,
1291 &channel->ccw,(addr_t) iob, 0, 0);
1292 spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
1295 PRINT_ERR("qeth: Error2 in activating channel rc=%d\n",rc);
1296 QETH_DBF_TEXT_(setup, 2, "2err%d", rc);
1297 atomic_set(&channel->irq_pending, 0);
1298 wake_up(&card->wait_q);
1301 rc = wait_event_interruptible_timeout(card->wait_q,
1302 channel->state == CH_STATE_UP, QETH_TIMEOUT);
1303 if (rc == -ERESTARTSYS)
1305 if (channel->state != CH_STATE_UP){
1307 QETH_DBF_TEXT_(setup, 2, "3err%d", rc);
/*
 * Send an IDX ACTIVATE request on the given channel (read or write) and
 * wait for the channel to enter ACTIVATING state; the actual IDX reply is
 * then fetched via qeth_idx_activate_get_answer() with idx_reply_cb.
 *
 * NOTE(review): this extraction is missing several original lines (the
 * return type, declarations of 'rc' and 'temp', 'else' branches and
 * error-path braces), so the flow below is not contiguous -- confirm
 * against the complete source file.
 */
1314 qeth_idx_activate_channel(struct qeth_channel *channel,
1315 void (*idx_reply_cb)(struct qeth_channel *,
1316 struct qeth_cmd_buffer *))
1318 struct qeth_card *card;
1319 struct qeth_cmd_buffer *iob;
1320 unsigned long flags;
1324 card = CARD_FROM_CDEV(channel->ccwdev);
1326 QETH_DBF_TEXT(setup, 2, "idxactch");
1328 iob = qeth_get_buffer(channel);
1329 iob->callback = idx_reply_cb;
/* build the CCW pointing at the IDX_ACTIVATE command buffer */
1330 memcpy(&channel->ccw, WRITE_CCW, sizeof(struct ccw1));
1331 channel->ccw.count = IDX_ACTIVATE_SIZE;
1332 channel->ccw.cda = (__u32) __pa(iob->data);
/* write channel gets IDX_ACTIVATE_WRITE, read channel IDX_ACTIVATE_READ */
1333 if (channel == &card->write) {
1334 memcpy(iob->data, IDX_ACTIVATE_WRITE, IDX_ACTIVATE_SIZE);
1335 memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data),
1336 &card->seqno.trans_hdr, QETH_SEQ_NO_LENGTH);
/* only the write channel advances the transport-header sequence number */
1337 card->seqno.trans_hdr++;
1339 memcpy(iob->data, IDX_ACTIVATE_READ, IDX_ACTIVATE_SIZE);
1340 memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data),
1341 &card->seqno.trans_hdr, QETH_SEQ_NO_LENGTH);
1343 memcpy(QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data),
1344 &card->token.issuer_rm_w,QETH_MPC_TOKEN_LENGTH);
1345 memcpy(QETH_IDX_ACT_FUNC_LEVEL(iob->data),
1346 &card->info.func_level,sizeof(__u16));
/* NOTE(review): qeth_ulp_setup() below calls this helper as
 * __raw_devno_from_bus_id() -- confirm which spelling is correct here */
1347 temp = raw_devno_from_bus_id(CARD_DDEV_ID(card));
1348 memcpy(QETH_IDX_ACT_QDIO_DEV_CUA(iob->data), &temp, 2);
1349 temp = (card->info.cula << 8) + card->info.unit_addr2;
1350 memcpy(QETH_IDX_ACT_QDIO_DEV_REALADDR(iob->data), &temp, 2);
/* serialize channel use: claim irq_pending before starting I/O */
1352 wait_event(card->wait_q,
1353 atomic_compare_and_swap(0,1,&channel->irq_pending) == 0);
1354 QETH_DBF_TEXT(setup, 6, "noirqpnd");
1355 spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
1356 rc = ccw_device_start(channel->ccwdev,
1357 &channel->ccw,(addr_t) iob, 0, 0);
1358 spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
/* start failed: release the irq_pending claim and wake any waiters */
1361 PRINT_ERR("qeth: Error1 in activating channel. rc=%d\n",rc);
1362 QETH_DBF_TEXT_(setup, 2, "1err%d", rc);
1363 atomic_set(&channel->irq_pending, 0);
1364 wake_up(&card->wait_q);
/* wait (interruptibly, bounded by QETH_TIMEOUT) for the IRQ handler to
 * move the channel into ACTIVATING state */
1367 rc = wait_event_interruptible_timeout(card->wait_q,
1368 channel->state == CH_STATE_ACTIVATING, QETH_TIMEOUT);
1369 if (rc == -ERESTARTSYS)
1371 if (channel->state != CH_STATE_ACTIVATING) {
1372 PRINT_WARN("qeth: IDX activate timed out!\n");
1373 QETH_DBF_TEXT_(setup, 2, "2err%d", -ETIME);
1376 return qeth_idx_activate_get_answer(channel,idx_reply_cb);
/*
 * qeth_peer_func_level - map a local function level onto the value the
 * peer (adapter microcode) is expected to answer with.
 *
 * Two known encodings are translated; any other level is passed through
 * unchanged.
 * NOTE(review): the trailing pass-through return was reconstructed -- the
 * extraction is missing the function's final line; confirm against the
 * full source.
 */
static inline int
qeth_peer_func_level(int level)
{
	int low_byte = level & 0xff;

	if (low_byte == 8)
		return low_byte + 0x400;
	if (((level >> 8) & 3) == 1)
		return low_byte + 0x200;
	return level;
}
/*
 * IDX reply callback for the write channel: validate the IDX ACTIVATE
 * response and, on success, move the channel to CH_STATE_UP.
 *
 * NOTE(review): error-path lines ('temp' declaration, 'goto out' and the
 * out label) are missing from this extraction.
 */
1390 qeth_idx_write_cb(struct qeth_channel *channel, struct qeth_cmd_buffer *iob)
1392 struct qeth_card *card;
1395 QETH_DBF_TEXT(setup ,2, "idxwrcb");
/* first interrupt after channel start: record that activation began */
1397 if (channel->state == CH_STATE_DOWN) {
1398 channel->state = CH_STATE_ACTIVATING;
1401 card = CARD_FROM_CDEV(channel->ccwdev);
1403 if (!(QETH_IS_IDX_ACT_POS_REPLY(iob->data))) {
1404 PRINT_ERR("IDX_ACTIVATE on write channel device %s: negative "
1405 "reply\n", CARD_WDEV_ID(card));
/* bit 0x0100 of the received level is ignored in the comparison */
1408 memcpy(&temp, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
1409 if ((temp & ~0x0100) != qeth_peer_func_level(card->info.func_level)) {
1410 PRINT_WARN("IDX_ACTIVATE on write channel device %s: "
1411 "function level mismatch "
1412 "(sent: 0x%x, received: 0x%x)\n",
1413 CARD_WDEV_ID(card), card->info.func_level, temp);
1416 channel->state = CH_STATE_UP;
1418 qeth_release_buffer(channel, iob);
/*
 * Check an inbound control buffer for an IDX TERMINATE indication
 * (flag bits 0xc0 in byte 2).  The cause code is in byte 4; 0x22 suggests
 * a wrong portname was configured.  The trace entry records -EIO for the
 * terminate case.
 *
 * NOTE(review): the PRINT_WARN argument carrying the cause code and the
 * function's return statements are missing from this extraction --
 * presumably it returns non-zero on terminate, 0 otherwise; confirm.
 */
1422 qeth_check_idx_response(unsigned char *buffer)
1427 QETH_DBF_HEX(control, 2, buffer, QETH_DBF_CONTROL_LEN);
1428 if ((buffer[2] & 0xc0) == 0xc0) {
1429 PRINT_WARN("received an IDX TERMINATE "
1430 "with cause code 0x%02x%s\n",
1432 ((buffer[4] == 0x22) ?
1433 " -- try another portname" : ""));
1434 QETH_DBF_TEXT(trace, 2, "ckidxres");
1435 QETH_DBF_TEXT(trace, 2, " idxterm");
1436 QETH_DBF_TEXT_(trace, 2, " rc%d", -EIO);
/*
 * IDX reply callback for the read channel: validate the IDX ACTIVATE
 * response, record portname requirement, the issuer RM token and the
 * microcode level, then move the channel to CH_STATE_UP.
 *
 * NOTE(review): error-path lines ('temp' declaration, gotos and labels)
 * are missing from this extraction.
 */
1443 qeth_idx_read_cb(struct qeth_channel *channel, struct qeth_cmd_buffer *iob)
1445 struct qeth_card *card;
1448 QETH_DBF_TEXT(setup , 2, "idxrdcb");
/* first interrupt after channel start: record that activation began */
1449 if (channel->state == CH_STATE_DOWN) {
1450 channel->state = CH_STATE_ACTIVATING;
1454 card = CARD_FROM_CDEV(channel->ccwdev);
1455 if (qeth_check_idx_response(iob->data)) {
1458 if (!(QETH_IS_IDX_ACT_POS_REPLY(iob->data))) {
1459 PRINT_ERR("IDX_ACTIVATE on read channel device %s: negative "
1460 "reply\n", CARD_RDEV_ID(card));
/*
 * temporary fix for a microcode bug: treat every OSA Express card as
 * needing a portname, not only those flagged by the adapter.
 * To revert the workaround, replace the OR below by an AND.
 */
1468 if ( (!QETH_IDX_NO_PORTNAME_REQUIRED(iob->data)) ||
1469 (card->info.type == QETH_CARD_TYPE_OSAE) )
1470 card->info.portname_required = 1;
1472 memcpy(&temp, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
1473 if (temp != qeth_peer_func_level(card->info.func_level)) {
1474 PRINT_WARN("IDX_ACTIVATE on read channel device %s: function "
1475 "level mismatch (sent: 0x%x, received: 0x%x)\n",
1476 CARD_RDEV_ID(card), card->info.func_level, temp);
/* remember the peer's issuer RM token for later control commands */
1479 memcpy(&card->token.issuer_rm_r,
1480 QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data),
1481 QETH_MPC_TOKEN_LENGTH);
1482 memcpy(&card->info.mcl_level[0],
1483 QETH_IDX_REPLY_LEVEL(iob->data), QETH_MCL_LENGTH);
1484 channel->state = CH_STATE_UP;
1486 qeth_release_buffer(channel,iob);
/*
 * Start the next read CCW on the card's read channel so the adapter can
 * deliver the next inbound control message.  On failure the card is
 * scheduled for recovery.
 *
 * NOTE(review): the 'rc' declaration, early returns and the NULL-iob
 * branch structure are missing from this extraction.
 */
1490 qeth_issue_next_read(struct qeth_card *card)
1493 struct qeth_cmd_buffer *iob;
1495 QETH_DBF_TEXT(trace,5,"issnxrd");
/* only meaningful while the read channel is up */
1496 if (card->read.state != CH_STATE_UP)
1498 iob = qeth_get_buffer(&card->read);
1500 PRINT_WARN("issue_next_read failed: no iob available!\n");
1503 qeth_setup_ccw(&card->read, iob->data, QETH_BUFSIZE);
/* claim irq_pending on the read channel before starting I/O */
1504 wait_event(card->wait_q,
1505 atomic_compare_and_swap(0,1,&card->read.irq_pending) == 0);
1506 QETH_DBF_TEXT(trace, 6, "noirqpnd");
1507 rc = ccw_device_start(card->read.ccwdev, &card->read.ccw,
1508 (addr_t) iob, 0, 0);
/* on failure: log, drop the claim, and trigger recovery */
1510 PRINT_ERR("Error in starting next read ccw! rc=%i\n", rc);
1511 atomic_set(&card->read.irq_pending, 0);
1512 qeth_schedule_recovery(card);
1513 wake_up(&card->wait_q);
/*
 * Allocate and zero a qeth_reply tracking structure (GFP_ATOMIC, so it is
 * safe from IRQ context).  The reference count starts at 1 for the caller.
 * NOTE(review): the NULL-check branch, card back-pointer assignment and
 * return statement are missing from this extraction.
 */
1518 static struct qeth_reply *
1519 qeth_alloc_reply(struct qeth_card *card)
1521 struct qeth_reply *reply;
1523 reply = kmalloc(sizeof(struct qeth_reply), GFP_ATOMIC);
1525 memset(reply, 0, sizeof(struct qeth_reply));
1526 atomic_set(&reply->refcnt, 1);
/* Take an additional reference on a reply object (refcnt must be > 0). */
1533 qeth_get_reply(struct qeth_reply *reply)
1535 WARN_ON(atomic_read(&reply->refcnt) <= 0);
1536 atomic_inc(&reply->refcnt);
/*
 * Drop a reference on a reply object; the last reference frees it.
 * NOTE(review): the kfree() on the drop-to-zero path is missing from this
 * extraction.
 */
1540 qeth_put_reply(struct qeth_reply *reply)
1542 WARN_ON(atomic_read(&reply->refcnt) <= 0);
1543 if (atomic_dec_and_test(&reply->refcnt))
/*
 * Timer callback for a control command that got no answer in time:
 * if the reply is still on the card's waiter list, unlink it, mark it
 * received (so the sender stops waiting) and wake the sender.
 * 'data' is the qeth_reply pointer cast through unsigned long (old
 * timer-API convention).
 */
1548 qeth_cmd_timeout(unsigned long data)
1550 struct qeth_reply *reply, *list_reply, *r;
1551 unsigned long flags;
1553 reply = (struct qeth_reply *) data;
1554 spin_lock_irqsave(&reply->card->lock, flags);
1555 list_for_each_entry_safe(list_reply, r,
1556 &reply->card->cmd_waiter_list, list) {
/* only act if this reply is still queued (not already answered) */
1557 if (reply == list_reply){
1558 qeth_get_reply(reply);
1559 list_del_init(&reply->list);
/* drop the lock before waking the sleeper */
1560 spin_unlock_irqrestore(&reply->card->lock, flags);
1562 reply->received = 1;
1563 wake_up(&reply->wait_q);
1564 qeth_put_reply(reply);
1568 spin_unlock_irqrestore(&reply->card->lock, flags);
/*
 * Drop all IP addresses registered with the card and kick the kernel
 * threads that re-register IP and multicast addresses.
 */
1572 qeth_reset_ip_addresses(struct qeth_card *card)
1574 QETH_DBF_TEXT(trace, 2, "rstipadd");
1576 qeth_clear_ip_list(card, 0, 1);
/* start the worker only if at least one thread bit was newly set */
1577 if ( (qeth_set_thread_start_bit(card, QETH_SET_IP_THREAD) == 0) ||
1578 (qeth_set_thread_start_bit(card, QETH_SET_MC_THREAD) == 0) )
1579 schedule_work(&card->kernel_thread_starter);
/*
 * Inspect an inbound control buffer: if it carries an IPA command, either
 * return it (for replies, to be matched against a waiter) or handle
 * adapter-initiated commands (STOPLAN/STARTLAN link events etc.) here.
 * Returns the IPA reply command, or NULL when there is nothing for a
 * waiter to consume.
 *
 * NOTE(review): several lines (PRINT_* arguments with CHPID, break/return
 * statements, closing braces) are missing from this extraction.
 */
1582 static struct qeth_ipa_cmd *
1583 qeth_check_ipa_data(struct qeth_card *card, struct qeth_cmd_buffer *iob)
1585 struct qeth_ipa_cmd *cmd = NULL;
1587 QETH_DBF_TEXT(trace,5,"chkipad");
1588 if (IS_IPA(iob->data)){
1589 cmd = (struct qeth_ipa_cmd *) PDU_ENCAPSULATION(iob->data);
1590 if (IS_IPA_REPLY(cmd))
/* not a reply: adapter-initiated command, handled in place */
1593 switch (cmd->hdr.command) {
1594 case IPA_CMD_STOPLAN:
1595 PRINT_WARN("Link failure on %s (CHPID 0x%X) - "
1596 "there is a network problem or "
1597 "someone pulled the cable or "
1598 "disabled the port.\n",
/* link went down: stop the queue so the stack stops sending */
1601 card->lan_online = 0;
1602 if (netif_carrier_ok(card->dev)) {
1603 netif_carrier_off(card->dev);
1604 netif_stop_queue(card->dev);
1607 case IPA_CMD_STARTLAN:
1608 PRINT_INFO("Link reestablished on %s "
1609 "(CHPID 0x%X). Scheduling "
1610 "IP address reset.\n",
1613 card->lan_online = 1;
1614 if (!netif_carrier_ok(card->dev)) {
1615 netif_carrier_on(card->dev);
1616 netif_wake_queue(card->dev);
/* link back up: re-register all IP/multicast addresses */
1618 qeth_reset_ip_addresses(card);
1620 case IPA_CMD_REGISTER_LOCAL_ADDR:
1621 QETH_DBF_TEXT(trace,3, "irla");
1623 case IPA_CMD_UNREGISTER_LOCAL_ADDR:
1624 PRINT_WARN("probably problem on %s: "
1625 "received IPA command 0x%X\n",
1630 PRINT_WARN("Received data is IPA "
1631 "but not a reply!\n");
/*
 * Wake all waiting IPA commands: unlink every pending reply from the
 * card's waiter list and mark it received, so every sender blocked in
 * qeth_send_control_data() returns (used on channel errors/recovery).
 */
1643 qeth_clear_ipacmd_list(struct qeth_card *card)
1645 struct qeth_reply *reply, *r;
1646 unsigned long flags;
1648 QETH_DBF_TEXT(trace, 4, "clipalst");
1650 spin_lock_irqsave(&card->lock, flags);
1651 list_for_each_entry_safe(reply, r, &card->cmd_waiter_list, list) {
/* hold a temporary reference while unlinking and waking */
1652 qeth_get_reply(reply);
1654 reply->received = 1;
1655 list_del_init(&reply->list);
1656 wake_up(&reply->wait_q);
1657 qeth_put_reply(reply);
1659 spin_unlock_irqrestore(&card->lock, flags);
/*
 * Channel callback for completed control reads: match the inbound buffer
 * against the pending replies on the card's waiter list (by IDX seqno or
 * IPA command seqno), invoke the reply callback, and either re-queue the
 * reply (callback asked to keep it) or complete it and wake the sender.
 * Finally record the peer's PDU-header ack sequence number.
 *
 * NOTE(review): several lines (keep_reply declaration, goto/labels,
 * callback argument details, closing braces) are missing from this
 * extraction; the control flow shown is not contiguous.
 */
1663 qeth_send_control_data_cb(struct qeth_channel *channel,
1664 struct qeth_cmd_buffer *iob)
1666 struct qeth_card *card;
1667 struct qeth_reply *reply, *r;
1668 struct qeth_ipa_cmd *cmd;
1669 unsigned long flags;
1672 QETH_DBF_TEXT(trace,4,"sndctlcb");
1674 card = CARD_FROM_CDEV(channel->ccwdev);
/* IDX TERMINATE: fail all waiters and start recovery */
1675 if (qeth_check_idx_response(iob->data)) {
1676 qeth_clear_ipacmd_list(card);
1677 qeth_schedule_recovery(card);
/* NULL cmd => adapter-initiated data was consumed by check_ipa_data */
1681 cmd = qeth_check_ipa_data(card, iob);
1682 if ((cmd == NULL) && (card->state != CARD_STATE_DOWN))
1685 spin_lock_irqsave(&card->lock, flags);
1686 list_for_each_entry_safe(reply, r, &card->cmd_waiter_list, list) {
1687 if ((reply->seqno == QETH_IDX_COMMAND_SEQNO) ||
1688 ((cmd) && (reply->seqno == cmd->hdr.seqno))) {
1689 qeth_get_reply(reply);
1690 list_del_init(&reply->list);
/* drop the lock while running the reply callback */
1691 spin_unlock_irqrestore(&card->lock, flags);
1693 if (reply->callback != NULL) {
1695 reply->offset = (__u16)((char*)cmd -
1697 keep_reply = reply->callback(card,
1699 (unsigned long)cmd);
1702 keep_reply = reply->callback(card,
1704 (unsigned long)iob);
/* propagate the return code from the IPA header or the iob */
1707 reply->rc = (s16) cmd->hdr.return_code;
1709 reply->rc = iob->rc;
/* callback wants more data: put the reply back on the list */
1711 spin_lock_irqsave(&card->lock, flags);
1712 list_add_tail(&reply->list,
1713 &card->cmd_waiter_list);
1714 spin_unlock_irqrestore(&card->lock, flags);
1716 reply->received = 1;
1717 wake_up(&reply->wait_q);
1719 qeth_put_reply(reply);
1723 spin_unlock_irqrestore(&card->lock, flags);
/* remember the highest PDU-header seqno acknowledged by the peer */
1725 memcpy(&card->seqno.pdu_hdr_ack,
1726 QETH_PDU_HEADER_SEQ_NO(iob->data),
1727 QETH_SEQ_NO_LENGTH);
1728 qeth_release_buffer(channel,iob);
/*
 * Send a control command of 'len' bytes on the write channel and block
 * until the reply arrives (reply_cb is invoked with reply_param from the
 * read-channel callback) or the command timer fires.
 *
 * Returns 0 on successful submission/completion, negative errno on
 * failure to start the channel program.
 *
 * NOTE(review): several original lines are missing from this extraction
 * (rc declaration, init_timer/add_timer/del_timer calls, error returns,
 * the final 'rc = reply->rc' path) -- confirm against the full source.
 */
1732 qeth_send_control_data(struct qeth_card *card, int len,
1733 struct qeth_cmd_buffer *iob,
1735 (struct qeth_card *, struct qeth_reply*, unsigned long),
1740 unsigned long flags;
1741 struct qeth_reply *reply;
1742 struct timer_list timer;
1744 QETH_DBF_TEXT(trace, 2, "sendctl");
1746 qeth_setup_ccw(&card->write,iob->data,len);
/* stamp transport- and PDU-header sequence numbers into the buffer */
1748 memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data),
1749 &card->seqno.trans_hdr, QETH_SEQ_NO_LENGTH);
1750 card->seqno.trans_hdr++;
1752 memcpy(QETH_PDU_HEADER_SEQ_NO(iob->data),
1753 &card->seqno.pdu_hdr, QETH_SEQ_NO_LENGTH);
1754 card->seqno.pdu_hdr++;
1755 memcpy(QETH_PDU_HEADER_ACK_SEQ_NO(iob->data),
1756 &card->seqno.pdu_hdr_ack, QETH_SEQ_NO_LENGTH);
/* buffer is recycled automatically once the write completes */
1757 iob->callback = qeth_release_buffer;
1759 reply = qeth_alloc_reply(card);
1761 PRINT_WARN("Could no alloc qeth_reply!\n");
1764 reply->callback = reply_cb;
1765 reply->param = reply_param;
/* before the card is up, replies are matched by the IDX pseudo-seqno */
1766 if (card->state == CARD_STATE_DOWN)
1767 reply->seqno = QETH_IDX_COMMAND_SEQNO;
1769 reply->seqno = card->seqno.ipa++;
1771 timer.function = qeth_cmd_timeout;
1772 timer.data = (unsigned long) reply;
/* IPA commands get a longer timeout than raw MPC commands */
1773 if (IS_IPA(iob->data))
1774 timer.expires = jiffies + QETH_IPA_TIMEOUT;
1776 timer.expires = jiffies + QETH_TIMEOUT;
1777 init_waitqueue_head(&reply->wait_q);
1778 spin_lock_irqsave(&card->lock, flags);
1779 list_add_tail(&reply->list, &card->cmd_waiter_list);
1780 spin_unlock_irqrestore(&card->lock, flags);
1781 QETH_DBF_HEX(control, 2, iob->data, QETH_DBF_CONTROL_LEN);
/* claim irq_pending on the write channel before starting I/O */
1782 wait_event(card->wait_q,
1783 atomic_compare_and_swap(0,1,&card->write.irq_pending) == 0);
1784 QETH_DBF_TEXT(trace, 6, "noirqpnd");
1785 spin_lock_irqsave(get_ccwdev_lock(card->write.ccwdev), flags);
1786 rc = ccw_device_start(card->write.ccwdev, &card->write.ccw,
1787 (addr_t) iob, 0, 0);
1788 spin_unlock_irqrestore(get_ccwdev_lock(card->write.ccwdev), flags);
/* start failed: undo everything done above */
1790 PRINT_WARN("qeth_send_control_data: "
1791 "ccw_device_start rc = %i\n", rc);
1792 QETH_DBF_TEXT_(trace, 2, " err%d", rc);
1793 spin_lock_irqsave(&card->lock, flags);
1794 list_del_init(&reply->list);
1795 qeth_put_reply(reply);
1796 spin_unlock_irqrestore(&card->lock, flags);
1797 qeth_release_buffer(iob->channel, iob);
1798 atomic_set(&card->write.irq_pending, 0);
1799 wake_up(&card->wait_q);
/* block until the read-side callback (or the timer) marks us done */
1803 wait_event(reply->wait_q, reply->received);
1806 qeth_put_reply(reply);
/*
 * Wrap an IPA command buffer with the standard IPA PDU header and the
 * ULP connection token, then send it via qeth_send_control_data().
 * NOTE(review): rc declaration and final return are missing from this
 * extraction.
 */
1811 qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
1813 (struct qeth_card *,struct qeth_reply*, unsigned long),
1818 QETH_DBF_TEXT(trace,4,"sendipa");
1820 memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE);
1821 memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data),
1822 &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
1824 rc = qeth_send_control_data(card, IPA_CMD_LENGTH, iob,
1825 reply_cb, reply_param);
/*
 * Reply callback for CM ENABLE: store the filter token returned by the
 * peer for use in the subsequent CM SETUP exchange.
 */
1831 qeth_cm_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
1834 struct qeth_cmd_buffer *iob;
1836 QETH_DBF_TEXT(setup, 2, "cmenblcb");
/* 'data' is the raw command buffer passed by send_control_data_cb */
1838 iob = (struct qeth_cmd_buffer *) data;
1839 memcpy(&card->token.cm_filter_r,
1840 QETH_CM_ENABLE_RESP_FILTER_TOKEN(iob->data),
1841 QETH_MPC_TOKEN_LENGTH);
1842 QETH_DBF_TEXT_(setup, 2, " rc%d", iob->rc);
/*
 * Send the CM ENABLE command (MPC handshake step): announce our issuer RM
 * token and filter token to the adapter.
 * NOTE(review): rc declaration and final return are missing from this
 * extraction.
 */
1847 qeth_cm_enable(struct qeth_card *card)
1850 struct qeth_cmd_buffer *iob;
1852 QETH_DBF_TEXT(setup,2,"cmenable");
1854 iob = qeth_wait_for_buffer(&card->write);
1855 memcpy(iob->data, CM_ENABLE, CM_ENABLE_SIZE);
1856 memcpy(QETH_CM_ENABLE_ISSUER_RM_TOKEN(iob->data),
1857 &card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH);
1858 memcpy(QETH_CM_ENABLE_FILTER_TOKEN(iob->data),
1859 &card->token.cm_filter_w, QETH_MPC_TOKEN_LENGTH);
1861 rc = qeth_send_control_data(card, CM_ENABLE_SIZE, iob,
1862 qeth_cm_enable_cb, NULL);
/*
 * Reply callback for CM SETUP: store the connection token returned by the
 * peer for the following ULP exchanges.
 */
1867 qeth_cm_setup_cb(struct qeth_card *card, struct qeth_reply *reply,
1871 struct qeth_cmd_buffer *iob;
1873 QETH_DBF_TEXT(setup, 2, "cmsetpcb");
1875 iob = (struct qeth_cmd_buffer *) data;
1876 memcpy(&card->token.cm_connection_r,
1877 QETH_CM_SETUP_RESP_DEST_ADDR(iob->data),
1878 QETH_MPC_TOKEN_LENGTH);
1879 QETH_DBF_TEXT_(setup, 2, " rc%d", iob->rc);
/*
 * Send the CM SETUP command (MPC handshake step): exchange connection and
 * filter tokens with the adapter.
 * NOTE(review): rc declaration and final return are missing from this
 * extraction.
 */
1884 qeth_cm_setup(struct qeth_card *card)
1887 struct qeth_cmd_buffer *iob;
1889 QETH_DBF_TEXT(setup,2,"cmsetup");
1891 iob = qeth_wait_for_buffer(&card->write);
1892 memcpy(iob->data, CM_SETUP, CM_SETUP_SIZE);
1893 memcpy(QETH_CM_SETUP_DEST_ADDR(iob->data),
1894 &card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH);
1895 memcpy(QETH_CM_SETUP_CONNECTION_TOKEN(iob->data),
1896 &card->token.cm_connection_w, QETH_MPC_TOKEN_LENGTH);
1897 memcpy(QETH_CM_SETUP_FILTER_TOKEN(iob->data),
1898 &card->token.cm_filter_r, QETH_MPC_TOKEN_LENGTH);
1899 rc = qeth_send_control_data(card, CM_SETUP_SIZE, iob,
1900 qeth_cm_setup_cb, NULL);
/*
 * Reply callback for ULP ENABLE: store the ULP filter token, derive the
 * card's MTU settings (either from the framesize reported by the adapter
 * or from per-card-type defaults) and record the link type if the reply
 * is long enough to carry it.
 *
 * NOTE(review): declarations of 'len' and 'link_type' and some branch
 * lines are missing from this extraction.
 */
1906 qeth_ulp_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
1910 __u16 mtu, framesize;
1913 struct qeth_cmd_buffer *iob;
1915 QETH_DBF_TEXT(setup, 2, "ulpenacb");
1917 iob = (struct qeth_cmd_buffer *) data;
1918 memcpy(&card->token.ulp_filter_r,
1919 QETH_ULP_ENABLE_RESP_FILTER_TOKEN(iob->data),
1920 QETH_MPC_TOKEN_LENGTH);
/* some card types report their MTU via the MPC framesize field */
1921 if (qeth_get_mtu_out_of_mpc(card->info.type)) {
1922 memcpy(&framesize, QETH_ULP_ENABLE_RESP_MAX_MTU(iob->data), 2);
1923 mtu = qeth_get_mtu_outof_framesize(framesize);
1926 QETH_DBF_TEXT_(setup, 2, " rc%d", iob->rc);
1929 card->info.max_mtu = mtu;
1930 card->info.initial_mtu = mtu;
/* inbound buffers need headroom beyond the MTU */
1931 card->qdio.in_buf_size = mtu + 2 * PAGE_SIZE;
1933 card->info.initial_mtu = qeth_get_initial_mtu_for_card(card);
1934 card->info.max_mtu = qeth_get_max_mtu_for_card(card->info.type);
1935 card->qdio.in_buf_size = QETH_IN_BUF_SIZE_DEFAULT;
/* link type is present only when the DIFINFO block is long enough */
1938 memcpy(&len, QETH_ULP_ENABLE_RESP_DIFINFO_LEN(iob->data), 2);
1939 if (len >= QETH_MPC_DIFINFO_LEN_INDICATES_LINK_TYPE) {
1941 QETH_ULP_ENABLE_RESP_LINK_TYPE(iob->data), 1);
1942 card->info.link_type = link_type;
1944 card->info.link_type = 0;
1945 QETH_DBF_TEXT_(setup, 2, " rc%d", iob->rc);
/*
 * Send the ULP ENABLE command: select the port number and portname and
 * hand over our ULP filter token.
 * NOTE(review): rc declaration and final return are missing from this
 * extraction.
 */
1950 qeth_ulp_enable(struct qeth_card *card)
1953 struct qeth_cmd_buffer *iob;
1955 /*FIXME: trace view callbacks*/
1956 QETH_DBF_TEXT(setup,2,"ulpenabl");
1958 iob = qeth_wait_for_buffer(&card->write);
1959 memcpy(iob->data, ULP_ENABLE, ULP_ENABLE_SIZE);
/* link number selects the physical port on multi-port adapters */
1961 *(QETH_ULP_ENABLE_LINKNUM(iob->data)) =
1962 (__u8) card->info.portno;
1964 memcpy(QETH_ULP_ENABLE_DEST_ADDR(iob->data),
1965 &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
1966 memcpy(QETH_ULP_ENABLE_FILTER_TOKEN(iob->data),
1967 &card->token.ulp_filter_w, QETH_MPC_TOKEN_LENGTH);
/* 9 bytes: length/level byte plus the 8-character portname */
1968 memcpy(QETH_ULP_ENABLE_PORTNAME_AND_LL(iob->data),
1969 card->info.portname, 9);
1970 rc = qeth_send_control_data(card, ULP_ENABLE_SIZE, iob,
1971 qeth_ulp_enable_cb, NULL);
1977 __raw_devno_from_bus_id(char *id)
1979 id += (strlen(id) - 4);
1980 return (__u16) simple_strtoul(id, &id, 16);
/*
 * Reply callback for ULP SETUP: store the ULP connection token used as
 * destination address for all subsequent IPA commands.
 */
1984 qeth_ulp_setup_cb(struct qeth_card *card, struct qeth_reply *reply,
1987 struct qeth_cmd_buffer *iob;
1989 QETH_DBF_TEXT(setup, 2, "ulpstpcb");
1991 iob = (struct qeth_cmd_buffer *) data;
1992 memcpy(&card->token.ulp_connection_r,
1993 QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(iob->data),
1994 QETH_MPC_TOKEN_LENGTH);
1995 QETH_DBF_TEXT_(setup, 2, " rc%d", iob->rc);
/*
 * Send the ULP SETUP command: exchange ULP tokens and tell the adapter
 * the data device's CUA and real device address.
 * NOTE(review): declarations of 'rc' and 'temp' and the final return are
 * missing from this extraction.
 */
2000 qeth_ulp_setup(struct qeth_card *card)
2004 struct qeth_cmd_buffer *iob;
2006 QETH_DBF_TEXT(setup,2,"ulpsetup");
2008 iob = qeth_wait_for_buffer(&card->write);
2009 memcpy(iob->data, ULP_SETUP, ULP_SETUP_SIZE);
2011 memcpy(QETH_ULP_SETUP_DEST_ADDR(iob->data),
2012 &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
2013 memcpy(QETH_ULP_SETUP_CONNECTION_TOKEN(iob->data),
2014 &card->token.ulp_connection_w, QETH_MPC_TOKEN_LENGTH);
2015 memcpy(QETH_ULP_SETUP_FILTER_TOKEN(iob->data),
2016 &card->token.ulp_filter_r, QETH_MPC_TOKEN_LENGTH);
/* device number of the data (qdio) device, parsed from its bus id */
2018 temp = __raw_devno_from_bus_id(CARD_DDEV_ID(card));
2019 memcpy(QETH_ULP_SETUP_CUA(iob->data), &temp, 2);
2020 temp = (card->info.cula << 8) + card->info.unit_addr2;
2021 memcpy(QETH_ULP_SETUP_REAL_DEVADDR(iob->data), &temp, 2);
2022 rc = qeth_send_control_data(card, ULP_SETUP_SIZE, iob,
2023 qeth_ulp_setup_cb, NULL);
/*
 * Check an inbound QDIO buffer for qdio/siga errors; on error the SBAL
 * flags of elements 14 and 15 are logged for diagnosis.
 * NOTE(review): the return-value lines are missing from this extraction
 * -- presumably non-zero when an error was detected; confirm.
 */
2028 qeth_check_for_inbound_error(struct qeth_qdio_buffer *buf,
2029 unsigned int qdio_error,
2030 unsigned int siga_error)
2034 if (qdio_error || siga_error) {
2035 QETH_DBF_TEXT(trace, 2, "qdinerr");
2036 QETH_DBF_TEXT(qerr, 2, "qdinerr");
2037 QETH_DBF_TEXT_(qerr, 2, " F15=%02X",
2038 buf->buffer->element[15].flags & 0xff);
2039 QETH_DBF_TEXT_(qerr, 2, " F14=%02X",
2040 buf->buffer->element[14].flags & 0xff);
2041 QETH_DBF_TEXT_(qerr, 2, " qerr=%X", qdio_error);
2042 QETH_DBF_TEXT_(qerr, 2, " serr=%X", siga_error);
/*
 * Allocate an skb of the given length; with VLAN support compiled in,
 * reserve VLAN_HLEN of headroom so a VLAN tag can be pushed later.
 * NOTE(review): the #else/#endif and return lines are missing from this
 * extraction.
 */
2048 static inline struct sk_buff *
2049 qeth_get_skb(unsigned int length)
2051 struct sk_buff* skb;
2052 #ifdef CONFIG_QETH_VLAN
2053 if ((skb = dev_alloc_skb(length + VLAN_HLEN)))
2054 skb_reserve(skb, VLAN_HLEN);
2056 skb = dev_alloc_skb(length);
/*
 * Extract the next packet from a QDIO inbound buffer: read the qeth_hdr
 * at (*__element, *__offset), allocate an skb of the announced length and
 * copy the packet data, walking across buffer elements as needed.
 * Updates *__element/*__offset to just past the consumed packet and
 * stores the header pointer in *hdr.  Returns the skb, or NULL when the
 * buffer is exhausted or allocation fails.
 *
 * NOTE(review): declarations (skb_len, data_ptr, data_len), loop braces,
 * element-advance lines and error labels are missing from this
 * extraction; the flow shown is not contiguous.
 */
2061 static inline struct sk_buff *
2062 qeth_get_next_skb(struct qeth_card *card, struct qdio_buffer *buffer,
2063 struct qdio_buffer_element **__element, int *__offset,
2064 struct qeth_hdr **hdr)
2066 struct qdio_buffer_element *element = *__element;
2067 int offset = *__offset;
2068 struct sk_buff *skb = NULL;
2073 QETH_DBF_TEXT(trace,6,"nextskb");
/* qeth_hdr must not cross element boundaries */
2075 if (element->length < offset + sizeof(struct qeth_hdr)){
2076 if (qeth_is_last_sbale(element))
2080 if (element->length < sizeof(struct qeth_hdr))
2083 *hdr = element->addr + offset;
2085 offset += sizeof(struct qeth_hdr);
2086 skb_len = (*hdr)->length;
/* with fake_ll, reserve room for a synthetic ethernet header */
2089 if (card->options.fake_ll){
2090 if (!(skb = qeth_get_skb(skb_len + QETH_FAKE_LL_LEN)))
2092 skb_pull(skb, QETH_FAKE_LL_LEN);
2093 } else if (!(skb = qeth_get_skb(skb_len)))
2095 data_ptr = element->addr + offset;
/* copy element by element until skb_len bytes are gathered */
2097 data_len = min(skb_len, (int)(element->length - offset));
2099 memcpy(skb_put(skb, data_len), data_ptr, data_len);
2100 skb_len -= data_len;
/* more data expected but this was the last element: truncated packet */
2102 if (qeth_is_last_sbale(element)){
2103 QETH_DBF_TEXT(trace,4,"unexeob");
2104 QETH_DBF_TEXT_(trace,4,"%s",CARD_BUS_ID(card));
2105 QETH_DBF_TEXT(qerr,2,"unexeob");
2106 QETH_DBF_TEXT_(qerr,2,"%s",CARD_BUS_ID(card));
2107 QETH_DBF_HEX(misc,4,buffer,sizeof(*buffer));
2108 dev_kfree_skb_irq(skb);
2109 card->stats.rx_errors++;
2114 data_ptr = element->addr;
2119 *__element = element;
/* allocation failure path: rate-limited warning, count the drop */
2123 if (net_ratelimit()){
2124 PRINT_WARN("No memory for packet received on %s.\n",
2125 card->info.if_name);
2126 QETH_DBF_TEXT(trace,2,"noskbmem");
2127 QETH_DBF_TEXT_(trace,2,"%s",CARD_BUS_ID(card));
2129 card->stats.rx_dropped++;
/*
 * Determine the packet type and protocol of a raw inbound frame, like
 * eth_type_trans()/tr_type_trans(): token-ring link types are delegated
 * to tr_type_trans(); otherwise the ethernet header is parsed in place.
 * Returns the protocol in network byte order.
 *
 * NOTE(review): the 'eth' declaration, #ifdef CONFIG_TR opener and some
 * braces are missing from this extraction.
 */
2133 static inline unsigned short
2134 qeth_type_trans(struct sk_buff *skb, struct net_device *dev)
2137 struct qeth_card *card;
2139 QETH_DBF_TEXT(trace,5,"typtrans");
2141 card = (struct qeth_card *)dev->priv;
2143 if ((card->info.link_type == QETH_LINK_TYPE_HSTR) ||
2144 (card->info.link_type == QETH_LINK_TYPE_LANE_TR))
2145 return tr_type_trans(skb,dev);
2146 #endif /* CONFIG_TR */
2148 skb->mac.raw = skb->data;
/* strip the 14-byte ethernet header (2 MACs + type field) */
2149 skb_pull(skb, ETH_ALEN * 2 + sizeof (short));
2150 eth = skb->mac.ethernet;
/* multicast bit set in the destination MAC? */
2152 if (*eth->h_dest & 1) {
2153 if (memcmp(eth->h_dest, dev->broadcast, ETH_ALEN) == 0)
2154 skb->pkt_type = PACKET_BROADCAST;
2156 skb->pkt_type = PACKET_MULTICAST;
2158 skb->pkt_type = PACKET_OTHERHOST;
/* values >= 1536 are EtherType; below that it is an 802.3 length */
2160 if (ntohs(eth->h_proto) >= 1536)
2161 return eth->h_proto;
2162 if (*(unsigned short *) (skb->data) == 0xFFFF)
2163 return htons(ETH_P_802_3);
2164 return htons(ETH_P_802_2);
/*
 * Synthesize a fake ethernet (link-layer) header in the headroom reserved
 * by qeth_get_next_skb() for the fake_ll option: fill in destination MAC
 * according to packet type, source MAC from the qeth header if provided,
 * and the protocol field from skb->protocol.
 *
 * NOTE(review): several break statements, case labels and closing braces
 * are missing from this extraction.
 */
2168 qeth_rebuild_skb_fake_ll(struct qeth_card *card, struct sk_buff *skb,
2169 struct qeth_hdr *hdr)
2171 struct ethhdr *fake_hdr;
2172 struct iphdr *ip_hdr;
2174 QETH_DBF_TEXT(trace,5,"skbfake");
2175 skb->mac.raw = skb->data - QETH_FAKE_LL_LEN;
/* this is a fake ethernet header */
2177 fake_hdr = (struct ethhdr *) skb->mac.raw;
/* the destination MAC address */
2180 switch (skb->pkt_type){
2181 case PACKET_MULTICAST:
2182 switch (skb->protocol){
2183 #ifdef CONFIG_QETH_IPV6
2184 case __constant_htons(ETH_P_IPV6):
/* derive the IPv6 multicast MAC via neighbour discovery mapping */
2185 ndisc_mc_map((struct in6_addr *)
2186 skb->data + QETH_FAKE_LL_V6_ADDR_POS,
2187 fake_hdr->h_dest, card->dev, 0);
2189 #endif /* CONFIG_QETH_IPV6 */
2190 case __constant_htons(ETH_P_IP):
2191 ip_hdr = (struct iphdr *)skb->data;
/* token ring and ethernet use different multicast MAC mappings */
2192 if (card->dev->type == ARPHRD_IEEE802_TR)
2193 ip_tr_mc_map(ip_hdr->daddr, fake_hdr->h_dest);
2195 ip_eth_mc_map(ip_hdr->daddr, fake_hdr->h_dest);
2198 memcpy(fake_hdr->h_dest, card->dev->dev_addr, ETH_ALEN);
2201 case PACKET_BROADCAST:
2202 memset(fake_hdr->h_dest, 0xff, ETH_ALEN);
2205 memcpy(fake_hdr->h_dest, card->dev->dev_addr, ETH_ALEN);
/* the source MAC address */
2208 if (hdr->ext_flags & QETH_HDR_EXT_SRC_MAC_ADDR)
2209 memcpy(fake_hdr->h_source, &hdr->dest_addr[2], ETH_ALEN);
2211 memset(fake_hdr->h_source, 0, ETH_ALEN);
2213 fake_hdr->h_proto = skb->protocol;
/*
 * If the qeth header marks the packet as a VLAN frame, push the VLAN tag
 * (id + encapsulated protocol) in front of the payload and set the
 * protocol to 802.1Q.  No-op without CONFIG_QETH_VLAN.
 * NOTE(review): the vlan_tag declaration is missing from this extraction.
 */
2217 qeth_rebuild_skb_vlan(struct qeth_card *card, struct sk_buff *skb,
2218 struct qeth_hdr *hdr)
2220 #ifdef CONFIG_QETH_VLAN
2223 if (hdr->ext_flags & QETH_HDR_EXT_VLAN_FRAME) {
2224 vlan_tag = (u16 *) skb_push(skb, VLAN_HLEN);
2225 *vlan_tag = hdr->vlan_id;
/* second halfword of the tag carries the original protocol */
2226 *(vlan_tag + 1) = skb->protocol;
2227 skb->protocol = __constant_htons(ETH_P_8021Q);
2229 #endif /* CONFIG_QETH_VLAN */
/*
 * Fill in the link-layer independent skb metadata for an inbound packet:
 * protocol (IPv4/IPv6/passthru), packet type from the qeth cast flags,
 * optional fake link-layer header, checksum status and VLAN handling.
 *
 * NOTE(review): break statements, a return and some braces are missing
 * from this extraction.
 */
2233 qeth_rebuild_skb(struct qeth_card *card, struct sk_buff *skb,
2234 struct qeth_hdr *hdr)
2236 #ifdef CONFIG_QETH_IPV6
/* passthru frames carry their own link-layer header: parse it */
2237 if (hdr->flags & QETH_HDR_PASSTHRU){
2238 skb->protocol = qeth_type_trans(skb, card->dev);
2241 #endif /* CONFIG_QETH_IPV6 */
2242 skb->protocol = htons((hdr->flags & QETH_HDR_IPV6)? ETH_P_IPV6 :
2244 switch (hdr->flags & QETH_HDR_CAST_MASK){
2245 case QETH_CAST_UNICAST:
2246 skb->pkt_type = PACKET_HOST;
2248 case QETH_CAST_MULTICAST:
2249 skb->pkt_type = PACKET_MULTICAST;
2250 card->stats.multicast++;
2252 case QETH_CAST_BROADCAST:
2253 skb->pkt_type = PACKET_BROADCAST;
2254 card->stats.multicast++;
2256 case QETH_CAST_ANYCAST:
2257 case QETH_CAST_NOCAST:
2259 skb->pkt_type = PACKET_HOST;
2261 if (card->options.fake_ll)
2262 qeth_rebuild_skb_fake_ll(card, skb, hdr);
2264 skb->mac.raw = skb->data;
2265 skb->ip_summed = card->options.checksum_type;
/* HW checksumming: trust the frame only if both header and transport
 * checksums were verified by the adapter */
2266 if (card->options.checksum_type == HW_CHECKSUMMING){
2267 if ( (hdr->ext_flags &
2268 (QETH_HDR_EXT_CSUM_HDR_REQ |
2269 QETH_HDR_EXT_CSUM_TRANSP_REQ)) ==
2270 (QETH_HDR_EXT_CSUM_HDR_REQ |
2271 QETH_HDR_EXT_CSUM_TRANSP_REQ) )
2272 skb->ip_summed = CHECKSUM_UNNECESSARY;
2274 skb->ip_summed = SW_CHECKSUMMING;
2276 qeth_rebuild_skb_vlan(card, skb, hdr);
/*
 * Deliver all packets contained in one inbound QDIO buffer to the stack:
 * iterate qeth_get_next_skb(), rebuild skb metadata, and hand each skb to
 * netif_rx().  Packets arriving while the device is down are dropped.
 *
 * NOTE(review): declarations (offset, rxrc), loop braces and the
 * #endif of the perf-stats block are missing from this extraction.
 */
2280 qeth_process_inbound_buffer(struct qeth_card *card,
2281 struct qeth_qdio_buffer *buf, int index)
2283 struct qdio_buffer_element *element;
2285 struct sk_buff *skb;
2286 struct qeth_hdr *hdr;
/* get first element of current buffer */
2290 element = (struct qdio_buffer_element *)&buf->buffer->element[0];
2292 #ifdef CONFIG_QETH_PERF_STATS
2293 card->perf_stats.bufs_rec++;
2295 while((skb = qeth_get_next_skb(card, buf->buffer, &element,
2297 qeth_rebuild_skb(card, skb, hdr);
/* is device UP ? */
2299 if (!(card->dev->flags & IFF_UP)){
2300 dev_kfree_skb_irq(skb);
2303 skb->dev = card->dev;
2304 rxrc = netif_rx(skb);
2305 card->dev->last_rx = jiffies;
2306 card->stats.rx_packets++;
2307 card->stats.rx_bytes += skb->len;
/*
 * Take the first free entry off the card's inbound buffer pool list.
 * NOTE(review): the return statements (entry / NULL when empty) are
 * missing from this extraction.
 */
2311 static inline struct qeth_buffer_pool_entry *
2312 qeth_get_buffer_pool_entry(struct qeth_card *card)
2314 struct qeth_buffer_pool_entry *entry, *tmp;
2316 QETH_DBF_TEXT(trace, 6, "gtbfplen");
2318 list_for_each_entry_safe(entry, tmp,
2319 &card->qdio.in_buf_pool.entry_list, list){
2320 list_del_init(&entry->list);
/*
 * (Re)arm one inbound QDIO buffer: attach a pool entry and point every
 * SBAL element at one page of that entry, marking the last element.
 */
2327 qeth_init_input_buffer(struct qeth_card *card, struct qeth_qdio_buffer *buf)
2329 struct qeth_buffer_pool_entry *pool_entry;
2332 pool_entry = qeth_get_buffer_pool_entry(card);
/*
 * since the buffer is accessed only from the input_tasklet
 * there shouldn't be a need to synchronize; also, since we use
 * the QETH_IN_BUF_REQUEUE_THRESHOLD we should never run out of
 * pool entries
 */
2339 BUG_ON(!pool_entry);
2341 buf->pool_entry = pool_entry;
2342 for(i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i){
2343 buf->buffer->element[i].length = PAGE_SIZE;
2344 buf->buffer->element[i].addr = pool_entry->elements[i];
2345 if (i == QETH_MAX_BUFFER_ELEMENTS(card) - 1)
2346 buf->buffer->element[i].flags = SBAL_FLAGS_LAST_ENTRY;
2348 buf->buffer->element[i].flags = 0;
2350 buf->state = QETH_QDIO_BUF_EMPTY;
/*
 * Reset an outbound QDIO buffer after completion (or after a failed
 * do_QDIO): free all queued skbs, zero the SBAL elements, adjust the
 * queue's PCI-flag count and mark the buffer empty again.
 */
2354 qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
2355 struct qeth_qdio_out_buffer *buf)
2358 struct sk_buff *skb;
/* is PCI flag set on buffer? */
2361 if (buf->buffer->element[0].flags & 0x40)
2362 atomic_dec(&queue->set_pci_flags_count);
2364 for(i = 0; i < QETH_MAX_BUFFER_ELEMENTS(queue->card); ++i){
2365 buf->buffer->element[i].length = 0;
2366 buf->buffer->element[i].addr = NULL;
2367 buf->buffer->element[i].flags = 0;
/* release every skb still attached to this buffer */
2368 while ((skb = skb_dequeue(&buf->skb_list))){
2369 atomic_dec(&skb->users);
2370 dev_kfree_skb_irq(skb);
2373 buf->next_element_to_fill = 0;
2374 atomic_set(&buf->state, QETH_QDIO_BUF_EMPTY);
/*
 * Hand processed inbound buffers back to the hardware.  Buffers are only
 * requeued in batches once the number of free buffers reaches the
 * requeue threshold, to reduce the number of SIGA instructions.
 *
 * NOTE(review): declarations (count, i, rc) and some braces are missing
 * from this extraction.
 */
2378 qeth_queue_input_buffer(struct qeth_card *card, int index)
2380 struct qeth_qdio_q *queue = card->qdio.in_q;
2385 QETH_DBF_TEXT(trace,6,"queinbuf");
/* number of processed buffers between next_buf_to_init and index,
 * accounting for ring wrap-around */
2386 count = (index < queue->next_buf_to_init)?
2387 card->qdio.in_buf_pool.buf_count -
2388 (queue->next_buf_to_init - index) :
2389 card->qdio.in_buf_pool.buf_count -
2390 (queue->next_buf_to_init + QDIO_MAX_BUFFERS_PER_Q - index);
/* only requeue at a certain threshold to avoid SIGAs */
2392 if (count >= QETH_IN_BUF_REQUEUE_THRESHOLD(card)){
2393 for (i = queue->next_buf_to_init;
2394 i < queue->next_buf_to_init + count; ++i)
2395 qeth_init_input_buffer(card,
2396 &queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q]);
/*
 * according to old code it should be avoided to requeue all
 * 128 buffers in order to benefit from PCI avoidance.
 * this function keeps at least one buffer (the buffer at
 * 'index') un-requeued -> this buffer is the first buffer that
 * will be requeued the next time
 */
2404 #ifdef CONFIG_QETH_PERF_STATS
2405 card->perf_stats.inbound_do_qdio_cnt++;
2406 card->perf_stats.inbound_do_qdio_start_time = qeth_get_micros();
2408 rc = do_QDIO(CARD_DDEV(card),
2409 QDIO_FLAG_SYNC_INPUT | QDIO_FLAG_UNDER_INTERRUPT,
2410 0, queue->next_buf_to_init, count, NULL);
2411 #ifdef CONFIG_QETH_PERF_STATS
2412 card->perf_stats.inbound_do_qdio_time += qeth_get_micros() -
2413 card->perf_stats.inbound_do_qdio_start_time;
2416 PRINT_WARN("qeth_queue_input_buffer's do_QDIO "
2417 "return %i (device %s).\n",
2418 rc, CARD_DDEV_ID(card));
2419 QETH_DBF_TEXT(trace,2,"qinberr");
2420 QETH_DBF_TEXT_(trace,2,"%s",CARD_BUS_ID(card));
2422 queue->next_buf_to_init = (queue->next_buf_to_init + count) %
2423 QDIO_MAX_BUFFERS_PER_Q;
/* Return a buffer pool entry to the card's free list for reuse. */
2428 qeth_put_buffer_pool_entry(struct qeth_card *card,
2429 struct qeth_buffer_pool_entry *entry)
2431 QETH_DBF_TEXT(trace, 6, "ptbfplen")
2432 list_add_tail(&entry->list, &card->qdio.in_buf_pool.entry_list);
/*
 * QDIO inbound interrupt handler: on an activate-check condition trigger
 * recovery; otherwise process each signalled buffer (error check, packet
 * delivery) and requeue it to the hardware.
 *
 * NOTE(review): declarations (index, i) and some #endif lines are
 * missing from this extraction.
 */
2436 qeth_qdio_input_handler(struct ccw_device * ccwdev, unsigned int status,
2437 unsigned int qdio_err, unsigned int siga_err,
2438 unsigned int queue, int first_element, int count,
2439 unsigned long card_ptr)
2441 struct net_device *net_dev;
2442 struct qeth_card *card;
2443 struct qeth_qdio_buffer *buffer;
2447 QETH_DBF_TEXT(trace, 6, "qdinput");
2448 card = (struct qeth_card *) card_ptr;
2449 net_dev = card->dev;
2450 #ifdef CONFIG_QETH_PERF_STATS
2451 card->perf_stats.inbound_cnt++;
2452 card->perf_stats.inbound_start_time = qeth_get_micros();
2454 if (status & QDIO_STATUS_LOOK_FOR_ERROR) {
/* fatal condition: the queue needs a full recovery */
2455 if (status & QDIO_STATUS_ACTIVATE_CHECK_CONDITION){
2456 QETH_DBF_TEXT(trace, 1,"qdinchk");
2457 QETH_DBF_TEXT_(trace,1,"%s",CARD_BUS_ID(card));
2458 QETH_DBF_TEXT_(trace,1,"%04X%04X",first_element,count);
2459 QETH_DBF_TEXT_(trace,1,"%04X%04X", queue, status);
2460 qeth_schedule_recovery(card);
2464 for (i = first_element; i < (first_element + count); ++i) {
2465 index = i % QDIO_MAX_BUFFERS_PER_Q;
2466 buffer = &card->qdio.in_q->bufs[index];
/* deliver packets only if the buffer carries no qdio/siga error */
2467 if (!((status == QDIO_STATUS_LOOK_FOR_ERROR) &&
2468 qeth_check_for_inbound_error(buffer, qdio_err, siga_err)))
2469 qeth_process_inbound_buffer(card, buffer, index);
/* clear buffer and give back to hardware */
2471 qeth_put_buffer_pool_entry(card, buffer->pool_entry);
2472 qeth_queue_input_buffer(card, index);
2474 #ifdef CONFIG_QETH_PERF_STATS
2475 card->perf_stats.inbound_time += qeth_get_micros() -
2476 card->perf_stats.inbound_start_time;
/*
 * Classify an outbound completion error from the SIGA condition code
 * (low two bits of siga_err) and SBAL flags byte 15 into one of the
 * QETH_SEND_ERROR_* outcomes (none / retry / link failure / kick it).
 *
 * NOTE(review): the switch(cc) statement, several case labels and braces
 * are missing from this extraction; the mapping shown is not contiguous.
 */
2481 qeth_handle_send_error(struct qeth_card *card,
2482 struct qeth_qdio_out_buffer *buffer,
2483 int qdio_err, int siga_err)
2485 int sbalf15 = buffer->buffer->element[15].flags & 0xff;
/* condition code is in the low two bits of the SIGA error */
2486 int cc = siga_err & 3;
2488 QETH_DBF_TEXT(trace, 6, "hdsnderr");
2492 QETH_DBF_TEXT(trace, 1,"lnkfail");
2493 QETH_DBF_TEXT_(trace,1,"%s",CARD_BUS_ID(card));
2494 QETH_DBF_TEXT_(trace,1,"%04x %02x",
2495 (u16)qdio_err, (u8)sbalf15);
2496 return QETH_SEND_ERROR_LINK_FAILURE;
2498 return QETH_SEND_ERROR_NONE;
2500 if (siga_err & QDIO_SIGA_ERROR_B_BIT_SET) {
2501 QETH_DBF_TEXT(trace, 1, "SIGAcc2B");
2502 QETH_DBF_TEXT_(trace,1,"%s",CARD_BUS_ID(card));
2503 return QETH_SEND_ERROR_KICK_IT;
/* SBALF15 values 15..31 indicate a retryable condition */
2505 if ((sbalf15 >= 15) && (sbalf15 <= 31))
2506 return QETH_SEND_ERROR_RETRY;
2507 return QETH_SEND_ERROR_LINK_FAILURE;
/* look at qdio_error and sbalf 15 */
2510 QETH_DBF_TEXT(trace, 1, "SIGAcc1");
2511 QETH_DBF_TEXT_(trace,1,"%s",CARD_BUS_ID(card));
2512 return QETH_SEND_ERROR_LINK_FAILURE;
2514 QETH_DBF_TEXT(trace, 1, "SIGAcc3");
2515 QETH_DBF_TEXT_(trace,1,"%s",CARD_BUS_ID(card));
2516 return QETH_SEND_ERROR_KICK_IT;
2518 return QETH_SEND_ERROR_LINK_FAILURE;
/*
 * Submit 'count' filled outbound buffers starting at 'index' to the
 * hardware via do_QDIO.  For IQD (HiperSockets) devices the PCI request
 * flag (0x40 in element 0) is managed to guarantee a future interrupt
 * that can flush lingering packed buffers.  On do_QDIO failure the
 * buffers are cleared and next_buf_to_fill is rewound so they can be
 * refilled.
 *
 * NOTE(review): declarations (i, rc), some braces and #endif lines are
 * missing from this extraction; the flow shown is not contiguous.
 */
2522 qeth_flush_buffers(struct qeth_qdio_out_q *queue, int under_int,
2523 int index, int count)
2525 struct qeth_qdio_out_buffer *buf;
2529 QETH_DBF_TEXT(trace, 6, "flushbuf");
2531 for (i = index; i < index + count; ++i) {
2532 buf = &queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q];
/* terminate the SBAL chain at the last filled element */
2533 buf->buffer->element[buf->next_element_to_fill - 1].flags |=
2534 SBAL_FLAGS_LAST_ENTRY;
2536 if (queue->card->info.type == QETH_CARD_TYPE_IQD)
2539 if (!queue->do_pack){
/* nearing the packing watermark with no PCI outstanding:
 * request a PCI interrupt */
2540 if ((atomic_read(&queue->used_buffers) >=
2541 (QETH_HIGH_WATERMARK_PACK -
2542 QETH_WATERMARK_PACK_FUZZ)) &&
2543 !atomic_read(&queue->set_pci_flags_count)){
/* it's likely that we'll go to packing
 * mode soon */
2546 atomic_inc(&queue->set_pci_flags_count);
2547 buf->buffer->element[0].flags |= 0x40;
2550 if (!atomic_read(&queue->set_pci_flags_count)){
/*
 * there's no outstanding PCI any more, so we
 * have to request a PCI to be sure the PCI
 * will wake at some time in the future then we
 * can flush packed buffers that might still be
 * hanging around, which can happen if no
 * further send was requested by the stack
 */
2559 atomic_inc(&queue->set_pci_flags_count);
2560 buf->buffer->element[0].flags |= 0x40;
2562 #ifdef CONFIG_QETH_PERF_STATS
2563 queue->card->perf_stats.bufs_sent_pack++;
2568 queue->card->dev->trans_start = jiffies;
2569 #ifdef CONFIG_QETH_PERF_STATS
2570 queue->card->perf_stats.outbound_do_qdio_cnt++;
2571 queue->card->perf_stats.outbound_do_qdio_start_time = qeth_get_micros();
/* under_int selects whether the submission is flagged as running in
 * interrupt context */
2574 rc = do_QDIO(CARD_DDEV(queue->card),
2575 QDIO_FLAG_SYNC_OUTPUT | QDIO_FLAG_UNDER_INTERRUPT,
2576 queue->queue_no, index, count, NULL);
2578 rc = do_QDIO(CARD_DDEV(queue->card), QDIO_FLAG_SYNC_OUTPUT,
2579 queue->queue_no, index, count, NULL);
2580 #ifdef CONFIG_QETH_PERF_STATS
2581 queue->card->perf_stats.outbound_do_qdio_time += qeth_get_micros() -
2582 queue->card->perf_stats.outbound_do_qdio_start_time;
2585 QETH_DBF_SPRINTF(trace, 0, "qeth_flush_buffers: do_QDIO "
2586 "returned error (%i) on device %s.",
2587 rc, CARD_DDEV_ID(queue->card));
2588 QETH_DBF_TEXT(trace, 2, "flushbuf");
2589 QETH_DBF_TEXT_(trace, 2, " err%d", rc);
2590 queue->card->stats.tx_errors += count;
/* ok, since do_QDIO went wrong the buffers have not been given
 * to the hardware. they still belong to us, so we can clear
 * them and reuse them, i.e. set back next_buf_to_fill */
2594 for (i = index; i < index + count; ++i) {
2595 buf = &queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q];
2596 qeth_clear_output_buffer(queue, buf);
2598 queue->next_buf_to_fill = index;
2601 atomic_add(count, &queue->used_buffers);
2602 #ifdef CONFIG_QETH_PERF_STATS
2603 queue->card->perf_stats.bufs_sent += count;
/*
2608 * switches between PACKING and non-PACKING state if needed.
2609 * has to be called holding queue->lock
 *
 * NOTE(review): truncated extract - braces, the return statement and some
 * lines are missing; the visible logic compares used_buffers against the
 * high/low packing watermarks and primes a partially-filled packing buffer
 * when leaving packing mode.
 */
2612 qeth_switch_packing_state(struct qeth_qdio_out_q *queue)
2614 struct qeth_qdio_out_buffer *buffer;
2615 int flush_count = 0;
2617 QETH_DBF_TEXT(trace, 6, "swipack");
2618 if (!queue->do_pack) {
2619 if (atomic_read(&queue->used_buffers)
2620 >= QETH_HIGH_WATERMARK_PACK){
2621 /* switch non-PACKING -> PACKING */
2622 QETH_DBF_TEXT(trace, 6, "np->pack");
2623 #ifdef CONFIG_QETH_PERF_STATS
2624 queue->card->perf_stats.sc_dp_p++;
2629 if (atomic_read(&queue->used_buffers)
2630 <= QETH_LOW_WATERMARK_PACK) {
2631 /* switch PACKING -> non-PACKING */
2632 QETH_DBF_TEXT(trace, 6, "pack->np");
2633 #ifdef CONFIG_QETH_PERF_STATS
2634 queue->card->perf_stats.sc_p_dp++;
2637 /* flush packing buffers */
2638 buffer = &queue->bufs[queue->next_buf_to_fill];
/* an "empty" buffer that already has elements filled is a half-full
 * packing buffer: prime it so it gets flushed */
2639 if ((atomic_read(&buffer->state) ==
2640 QETH_QDIO_BUF_EMPTY) &&
2641 (buffer->next_element_to_fill > 0)) {
2642 atomic_set(&buffer->state,QETH_QDIO_BUF_PRIMED);
2644 queue->next_buf_to_fill =
2645 (queue->next_buf_to_fill + 1) %
2646 QDIO_MAX_BUFFERS_PER_Q;
/*
 * Flush a half-filled packing buffer when no PCI request is outstanding,
 * so that packed data does not linger without an interrupt to drain it.
 * NOTE(review): truncated extract - declarations and braces missing.
 */
2654 qeth_flush_buffers_on_no_pci(struct qeth_qdio_out_q *queue, int under_int)
2656 struct qeth_qdio_out_buffer *buffer;
2659 index = queue->next_buf_to_fill;
2660 buffer = &queue->bufs[index];
/* "empty" state but elements already filled => half-full packing buffer */
2661 if((atomic_read(&buffer->state) == QETH_QDIO_BUF_EMPTY) &&
2662 (buffer->next_element_to_fill > 0)){
2663 /* it's a packing buffer */
2664 atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
2665 queue->next_buf_to_fill =
2666 (queue->next_buf_to_fill + 1) % QDIO_MAX_BUFFERS_PER_Q;
2667 qeth_flush_buffers(queue, under_int, index, 1);
/*
 * QDIO outbound completion handler: reclaims completed output buffers,
 * triggers recovery on fatal send errors, and wakes the tx queue.
 * NOTE(review): truncated extract - braces, returns and some lines are
 * missing from this view.
 */
2672 qeth_qdio_output_handler(struct ccw_device * ccwdev, unsigned int status,
2673 unsigned int qdio_error, unsigned int siga_error,
2674 unsigned int __queue, int first_element, int count,
2675 unsigned long card_ptr)
2677 struct qeth_card *card = (struct qeth_card *) card_ptr;
2678 struct qeth_qdio_out_q *queue = card->qdio.out_qs[__queue];
2679 struct qeth_qdio_out_buffer *buffer;
2682 QETH_DBF_TEXT(trace, 6, "qdouhdl");
/* fatal activate-check condition: stop tx and schedule card recovery */
2683 if (status & QDIO_STATUS_LOOK_FOR_ERROR) {
2684 if (status & QDIO_STATUS_ACTIVATE_CHECK_CONDITION){
2685 QETH_DBF_SPRINTF(trace, 2, "On device %s: "
2686 "received active check "
2687 "condition (0x%08x).",
2688 CARD_BUS_ID(card), status);
2689 QETH_DBF_TEXT(trace, 2, "chkcond");
2690 QETH_DBF_TEXT_(trace, 2, "%08x", status);
2691 netif_stop_queue(card->dev);
2692 qeth_schedule_recovery(card);
2697 #ifdef CONFIG_QETH_PERF_STATS
2698 card->perf_stats.outbound_handler_cnt++;
2699 card->perf_stats.outbound_handler_start_time = qeth_get_micros();
/* walk the completed buffers, check per-buffer send errors, then reclaim */
2701 for(i = first_element; i < (first_element + count); ++i){
2702 buffer = &queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q];
2703 /*we only handle the KICK_IT error by doing a recovery */
2704 if (qeth_handle_send_error(card, buffer, qdio_error, siga_error)
2705 == QETH_SEND_ERROR_KICK_IT){
2706 netif_stop_queue(card->dev);
2707 qeth_schedule_recovery(card);
2710 qeth_clear_output_buffer(queue, buffer);
2712 atomic_sub(count, &queue->used_buffers);
2714 netif_wake_queue(card->dev);
2715 #ifdef CONFIG_QETH_PERF_STATS
2716 card->perf_stats.outbound_handler_time += qeth_get_micros() -
2717 card->perf_stats.outbound_handler_start_time;
2722 qeth_create_qib_param_field(struct qeth_card *card)
2726 param_field = kmalloc(QDIO_MAX_BUFFERS_PER_Q * sizeof(char),
2731 memset(param_field, 0, QDIO_MAX_BUFFERS_PER_Q * sizeof(char));
2733 param_field[0] = _ascebc['P'];
2734 param_field[1] = _ascebc['C'];
2735 param_field[2] = _ascebc['I'];
2736 param_field[3] = _ascebc['T'];
2737 *((unsigned int *) (¶m_field[4])) = QETH_PCI_THRESHOLD_A(card);
2738 *((unsigned int *) (¶m_field[8])) = QETH_PCI_THRESHOLD_B(card);
2739 *((unsigned int *) (¶m_field[12])) = QETH_PCI_TIMER_VALUE(card);
2745 qeth_initialize_working_pool_list(struct qeth_card *card)
2747 struct qeth_buffer_pool_entry *entry;
2749 QETH_DBF_TEXT(trace,5,"inwrklst");
2751 list_for_each_entry(entry,
2752 &card->qdio.init_pool.entry_list, init_list) {
2753 qeth_put_buffer_pool_entry(card,entry);
2758 qeth_clear_working_pool_list(struct qeth_card *card)
2760 struct qeth_buffer_pool_entry *pool_entry, *tmp;
2762 QETH_DBF_TEXT(trace,5,"clwrklst");
2763 list_for_each_entry_safe(pool_entry, tmp,
2764 &card->qdio.in_buf_pool.entry_list, list){
2765 list_del(&pool_entry->list);
2770 qeth_free_buffer_pool(struct qeth_card *card)
2772 struct qeth_buffer_pool_entry *pool_entry, *tmp;
2774 QETH_DBF_TEXT(trace,5,"freepool");
2775 list_for_each_entry_safe(pool_entry, tmp,
2776 &card->qdio.init_pool.entry_list, init_list){
2777 for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i)
2778 free_page((unsigned long)pool_entry->elements[i]);
2779 list_del(&pool_entry->init_list);
/*
 * Allocate init_pool.buf_count pool entries, each holding
 * QETH_MAX_BUFFER_ELEMENTS(card) single pages, and link them onto the
 * init pool list.  On any allocation failure the partially built pool is
 * torn down again via qeth_free_buffer_pool().
 * NOTE(review): truncated extract - the failure-path returns and closing
 * braces are missing from this view.
 */
2785 qeth_alloc_buffer_pool(struct qeth_card *card)
2787 struct qeth_buffer_pool_entry *pool_entry;
2791 QETH_DBF_TEXT(trace,5,"clwkpool");
2792 for (i = 0; i < card->qdio.init_pool.buf_count; ++i){
2793 pool_entry = kmalloc(sizeof(*pool_entry), GFP_KERNEL);
/* entry allocation failed: undo everything built so far */
2795 qeth_free_buffer_pool(card);
2798 for(j = 0; j < QETH_MAX_BUFFER_ELEMENTS(card); ++j){
2799 ptr = (void *) __get_free_page(GFP_KERNEL);
/* page allocation failed: free the pages of this entry, then the pool */
2802 free_page((unsigned long)
2803 pool_entry->elements[--j]);
2805 qeth_free_buffer_pool(card);
2808 pool_entry->elements[j] = ptr;
2810 list_add(&pool_entry->init_list,
2811 &card->qdio.init_pool.entry_list);
/*
 * Resize the inbound buffer pool to <bufcnt> entries.  Only permitted
 * while the card is DOWN or in RECOVER state; otherwise the request is
 * rejected (the early-out return is missing from this truncated extract).
 */
2817 qeth_realloc_buffer_pool(struct qeth_card *card, int bufcnt)
2819 QETH_DBF_TEXT(trace, 2, "realcbp");
2821 if ((card->state != CARD_STATE_DOWN) &&
2822 (card->state != CARD_STATE_RECOVER))
2825 /* TODO: steal/add buffers from/to a running card's buffer pool (?) */
/* drop the old pool completely, then rebuild it at the new size */
2826 qeth_clear_working_pool_list(card);
2827 qeth_free_buffer_pool(card);
2828 card->qdio.in_buf_pool.buf_count = bufcnt;
2829 card->qdio.init_pool.buf_count = bufcnt;
2830 return qeth_alloc_buffer_pool(card);
/*
 * Allocate everything QDIO needs for this card: the inbound queue, the
 * inbound buffer pool, and one outbound queue struct per configured
 * output queue.  Idempotent: returns early if already ALLOCATED.
 * NOTE(review): truncated extract - error returns and closing braces are
 * missing from this view; each failure path unwinds prior allocations.
 */
2834 qeth_alloc_qdio_buffers(struct qeth_card *card)
2838 QETH_DBF_TEXT(setup, 2, "allcqdbf");
2840 if (card->qdio.state == QETH_QDIO_ALLOCATED)
2843 card->qdio.in_q = kmalloc(sizeof(struct qeth_qdio_q), GFP_KERNEL);
2844 if (!card->qdio.in_q)
2846 QETH_DBF_TEXT(setup, 2, "inq");
2847 QETH_DBF_HEX(setup, 2, &card->qdio.in_q, sizeof(void *));
2848 memset(card->qdio.in_q, 0, sizeof(struct qeth_qdio_q));
2849 /* give inbound qeth_qdio_buffers their qdio_buffers */
2850 for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i)
2851 card->qdio.in_q->bufs[i].buffer =
2852 &card->qdio.in_q->qdio_bufs[i];
2853 /* inbound buffer pool */
2854 if (qeth_alloc_buffer_pool(card)){
2855 kfree(card->qdio.in_q);
/* outbound queue pointer array, one slot per output queue */
2860 kmalloc(card->qdio.no_out_queues *
2861 sizeof(struct qeth_qdio_out_q *), GFP_KERNEL);
2862 if (!card->qdio.out_qs){
2863 qeth_free_buffer_pool(card);
2866 for (i = 0; i < card->qdio.no_out_queues; ++i){
2867 card->qdio.out_qs[i] = kmalloc(sizeof(struct qeth_qdio_out_q),
2869 if (!card->qdio.out_qs[i]){
/* free the queues allocated so far, then the pointer array */
2871 kfree(card->qdio.out_qs[--i]);
2872 kfree(card->qdio.out_qs);
2875 QETH_DBF_TEXT_(setup, 2, "outq %i", i);
2876 QETH_DBF_HEX(setup, 2, &card->qdio.out_qs[i], sizeof(void *));
2877 memset(card->qdio.out_qs[i], 0, sizeof(struct qeth_qdio_out_q));
2878 card->qdio.out_qs[i]->queue_no = i;
2879 /* give outbound qeth_qdio_buffers their qdio_buffers */
2880 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j){
2881 card->qdio.out_qs[i]->bufs[j].buffer =
2882 &card->qdio.out_qs[i]->qdio_bufs[j];
2883 skb_queue_head_init(&card->qdio.out_qs[i]->bufs[j].
2887 card->qdio.state = QETH_QDIO_ALLOCATED;
/*
 * Free everything qeth_alloc_qdio_buffers() allocated: inbound queue,
 * buffer pool, and all outbound queues (clearing any skbs still attached
 * to outbound buffers first).  No-op when state is UNINITIALIZED.
 * NOTE(review): truncated extract - the early return and braces missing.
 */
2892 qeth_free_qdio_buffers(struct qeth_card *card)
2896 QETH_DBF_TEXT(trace, 2, "freeqdbf");
2897 if (card->qdio.state == QETH_QDIO_UNINITIALIZED)
2899 kfree(card->qdio.in_q);
2900 /* inbound buffer pool */
2901 qeth_free_buffer_pool(card);
2902 /* free outbound qdio_qs */
2903 for (i = 0; i < card->qdio.no_out_queues; ++i){
/* release skbs still referenced by each outbound buffer before freeing */
2904 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j)
2905 qeth_clear_output_buffer(card->qdio.out_qs[i],
2906 &card->qdio.out_qs[i]->bufs[j]);
2907 kfree(card->qdio.out_qs[i]);
2909 kfree(card->qdio.out_qs);
2910 card->qdio.state = QETH_QDIO_UNINITIALIZED;
/*
 * Drop the skbs attached to every outbound buffer of every queue without
 * freeing the queue structures themselves (unlike qeth_free_qdio_buffers).
 * NOTE(review): truncated extract - declarations and braces missing.
 */
2914 qeth_clear_qdio_buffers(struct qeth_card *card)
2918 QETH_DBF_TEXT(trace, 2, "clearqdbf");
2919 /* clear outbound buffers to free skbs */
2920 for (i = 0; i < card->qdio.no_out_queues; ++i)
2921 if (card->qdio.out_qs[i]){
2922 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j)
2923 qeth_clear_output_buffer(card->qdio.out_qs[i],
2924 &card->qdio.out_qs[i]->bufs[j]);
/*
 * Set the card's QDIO bookkeeping to its power-on defaults: state
 * UNINITIALIZED, default inbound buffer size/count, empty pool lists,
 * and the default priority-queueing settings.
 */
2929 qeth_init_qdio_info(struct qeth_card *card)
2931 QETH_DBF_TEXT(setup, 4, "intqdinf");
2932 card->qdio.state = QETH_QDIO_UNINITIALIZED;
2934 card->qdio.in_buf_size = QETH_IN_BUF_SIZE_DEFAULT;
2935 card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_DEFAULT;
2936 card->qdio.in_buf_pool.buf_count = card->qdio.init_pool.buf_count;
2937 INIT_LIST_HEAD(&card->qdio.in_buf_pool.entry_list);
2938 INIT_LIST_HEAD(&card->qdio.init_pool.entry_list);
2940 card->qdio.do_prio_queueing = QETH_PRIOQ_DEFAULT;
2941 card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;
/*
 * Prime the allocated QDIO queues for operation: zero the inbound SBALs,
 * populate input buffers from the pool and hand them to the hardware via
 * do_QDIO(SYNC_INPUT), then reset every outbound queue to a clean state.
 * NOTE(review): truncated extract - error returns/braces missing.
 */
2945 qeth_init_qdio_queues(struct qeth_card *card)
2950 QETH_DBF_TEXT(setup, 2, "initqdqs");
2953 memset(card->qdio.in_q->qdio_bufs, 0,
2954 QDIO_MAX_BUFFERS_PER_Q * sizeof(struct qdio_buffer));
2955 qeth_initialize_working_pool_list(card);
2956 /*give only as many buffers to hardware as we have buffer pool entries*/
2957 for (i = 0; i < card->qdio.in_buf_pool.buf_count - 1; ++i)
2958 qeth_init_input_buffer(card, &card->qdio.in_q->bufs[i]);
2959 card->qdio.in_q->next_buf_to_init = card->qdio.in_buf_pool.buf_count - 1;
2960 rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0, 0,
2961 card->qdio.in_buf_pool.buf_count - 1, NULL);
2963 QETH_DBF_TEXT_(setup, 2, "1err%d", rc);
2966 rc = qdio_synchronize(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0);
2968 QETH_DBF_TEXT_(setup, 2, "2err%d", rc);
2971 /* outbound queue */
2972 for (i = 0; i < card->qdio.no_out_queues; ++i){
2973 memset(card->qdio.out_qs[i]->qdio_bufs, 0,
2974 QDIO_MAX_BUFFERS_PER_Q * sizeof(struct qdio_buffer));
2975 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j){
2976 qeth_clear_output_buffer(card->qdio.out_qs[i],
2977 &card->qdio.out_qs[i]->bufs[j]);
/* reset per-queue state: owner, fill cursor, packing and counters */
2979 card->qdio.out_qs[i]->card = card;
2980 card->qdio.out_qs[i]->next_buf_to_fill = 0;
2981 card->qdio.out_qs[i]->do_pack = 0;
2982 atomic_set(&card->qdio.out_qs[i]->used_buffers,0);
2983 atomic_set(&card->qdio.out_qs[i]->set_pci_flags_count, 0);
2984 spin_lock_init(&card->qdio.out_qs[i]->lock);
/*
 * Establish the QDIO queues with the hardware: build the QIB parameter
 * field and the input/output SBAL address arrays, fill a
 * struct qdio_initialize and call qdio_initialize().  The three temporary
 * arrays are freed again before returning; on success the card moves to
 * QETH_QDIO_ESTABLISHED.
 * NOTE(review): truncated extract - some returns and braces are missing.
 */
2990 qeth_qdio_establish(struct qeth_card *card)
2992 struct qdio_initialize init_data;
2993 char *qib_param_field;
2994 struct qdio_buffer **in_sbal_ptrs;
2995 struct qdio_buffer **out_sbal_ptrs;
2999 QETH_DBF_TEXT(setup, 2, "qdioest");
3000 qib_param_field = qeth_create_qib_param_field(card);
3001 if (!qib_param_field)
3004 in_sbal_ptrs = kmalloc(QDIO_MAX_BUFFERS_PER_Q * sizeof(void *),
3006 if (!in_sbal_ptrs) {
3007 kfree(qib_param_field);
/* physical addresses of all inbound SBALs */
3010 for(i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i)
3011 in_sbal_ptrs[i] = (struct qdio_buffer *)
3012 virt_to_phys(card->qdio.in_q->bufs[i].buffer);
3015 kmalloc(card->qdio.no_out_queues * QDIO_MAX_BUFFERS_PER_Q *
3016 sizeof(void *), GFP_KERNEL);
3017 if (!out_sbal_ptrs) {
3018 kfree(in_sbal_ptrs);
3019 kfree(qib_param_field);
/* physical addresses of all outbound SBALs, flattened over all queues */
3022 for(i = 0, k = 0; i < card->qdio.no_out_queues; ++i)
3023 for(j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j, ++k){
3024 out_sbal_ptrs[k] = (struct qdio_buffer *)
3025 virt_to_phys(card->qdio.out_qs[i]->
3029 memset(&init_data, 0, sizeof(struct qdio_initialize));
3030 init_data.cdev = CARD_DDEV(card);
3031 init_data.q_format = qeth_get_qdio_q_format(card);
3032 init_data.qib_param_field_format = 0;
3033 init_data.qib_param_field = qib_param_field;
3034 init_data.min_input_threshold = QETH_MIN_INPUT_THRESHOLD;
3035 init_data.max_input_threshold = QETH_MAX_INPUT_THRESHOLD;
3036 init_data.min_output_threshold = QETH_MIN_OUTPUT_THRESHOLD;
3037 init_data.max_output_threshold = QETH_MAX_OUTPUT_THRESHOLD;
3038 init_data.no_input_qs = 1;
3039 init_data.no_output_qs = card->qdio.no_out_queues;
3040 init_data.input_handler = (qdio_handler_t *)
3041 qeth_qdio_input_handler;
3042 init_data.output_handler = (qdio_handler_t *)
3043 qeth_qdio_output_handler;
3044 init_data.int_parm = (unsigned long) card;
3045 init_data.flags = QDIO_INBOUND_0COPY_SBALS |
3046 QDIO_OUTBOUND_0COPY_SBALS |
3047 QDIO_USE_OUTBOUND_PCIS;
3048 init_data.input_sbal_addr_array = (void **) in_sbal_ptrs;
3049 init_data.output_sbal_addr_array = (void **) out_sbal_ptrs;
3051 if (!(rc = qdio_initialize(&init_data)))
3052 card->qdio.state = QETH_QDIO_ESTABLISHED;
/* the arrays are copied by qdio_initialize(); safe to free here */
3054 kfree(out_sbal_ptrs);
3055 kfree(in_sbal_ptrs);
3056 kfree(qib_param_field);
3061 qeth_qdio_activate(struct qeth_card *card)
3063 QETH_DBF_TEXT(setup,3,"qdioact");
3064 return qdio_activate(CARD_DDEV(card), 0);
/*
 * Issue a CCW clear on the channel (under the ccwdev lock) and wait up to
 * QETH_TIMEOUT for the channel to reach CH_STATE_STOPPED, then mark it
 * CH_STATE_DOWN.  Returns -ERESTARTSYS if interrupted.
 * NOTE(review): truncated extract - the rc checks/returns are missing.
 */
3068 qeth_clear_channel(struct qeth_channel *channel)
3070 unsigned long flags;
3071 struct qeth_card *card;
3074 QETH_DBF_TEXT(trace,3,"clearch");
3075 card = CARD_FROM_CDEV(channel->ccwdev);
3076 spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
3077 rc = ccw_device_clear(channel->ccwdev, QETH_CLEAR_CHANNEL_PARM);
3078 spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
/* the interrupt handler moves channel->state; wait for it here */
3082 rc = wait_event_interruptible_timeout(card->wait_q,
3083 channel->state==CH_STATE_STOPPED, QETH_TIMEOUT);
3084 if (rc == -ERESTARTSYS)
3086 if (channel->state != CH_STATE_STOPPED)
3088 channel->state = CH_STATE_DOWN;
/*
 * Issue a CCW halt on the channel (under the ccwdev lock) and wait up to
 * QETH_TIMEOUT for the channel to reach CH_STATE_HALTED.  Returns
 * -ERESTARTSYS if interrupted.
 * NOTE(review): truncated extract - the rc checks/returns are missing.
 */
3093 qeth_halt_channel(struct qeth_channel *channel)
3095 unsigned long flags;
3096 struct qeth_card *card;
3099 QETH_DBF_TEXT(trace,3,"haltch");
3100 card = CARD_FROM_CDEV(channel->ccwdev);
3101 spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
3102 rc = ccw_device_halt(channel->ccwdev, QETH_HALT_CHANNEL_PARM);
3103 spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
/* the interrupt handler moves channel->state; wait for it here */
3107 rc = wait_event_interruptible_timeout(card->wait_q,
3108 channel->state==CH_STATE_HALTED, QETH_TIMEOUT);
3109 if (rc == -ERESTARTSYS)
3111 if (channel->state != CH_STATE_HALTED)
3117 qeth_halt_channels(struct qeth_card *card)
3121 QETH_DBF_TEXT(trace,3,"haltchs");
3122 if ((rc = qeth_halt_channel(&card->read)))
3124 if ((rc = qeth_halt_channel(&card->write)))
3126 return qeth_halt_channel(&card->data);
3129 qeth_clear_channels(struct qeth_card *card)
3133 QETH_DBF_TEXT(trace,3,"clearchs");
3134 if ((rc = qeth_clear_channel(&card->read)))
3136 if ((rc = qeth_clear_channel(&card->write)))
3138 return qeth_clear_channel(&card->data);
3142 qeth_clear_halt_card(struct qeth_card *card, int halt)
3146 QETH_DBF_TEXT(trace,3,"clhacrd");
3147 QETH_DBF_HEX(trace, 3, &card, sizeof(void *));
3150 rc = qeth_halt_channels(card);
3153 return qeth_clear_channels(card);
/*
 * Tear down the QDIO connection: clean up an established QDIO setup
 * (HALT for HiperSockets/IQD, CLEAR otherwise), then halt/clear the CCW
 * channels and mark the card DOWN.
 * NOTE(review): truncated extract - declarations and braces missing.
 */
3157 qeth_qdio_clear_card(struct qeth_card *card, int use_halt)
3161 QETH_DBF_TEXT(trace,3,"qdioclr");
3162 if (card->qdio.state == QETH_QDIO_ESTABLISHED){
/* IQD devices are cleaned up with HALT, OSA devices with CLEAR */
3163 qdio_cleanup(CARD_DDEV(card),
3164 (card->info.type == QETH_CARD_TYPE_IQD) ?
3165 QDIO_FLAG_CLEANUP_USING_HALT :
3166 QDIO_FLAG_CLEANUP_USING_CLEAR);
3167 card->qdio.state = QETH_QDIO_ALLOCATED;
3169 rc = qeth_clear_halt_card(card, use_halt);
3170 card->state = CARD_STATE_DOWN;
/*
 * Send the DM_ACT (activate) control command: copy the command template
 * into a write-channel buffer, patch in the CM and ULP connection tokens,
 * and submit it synchronously via qeth_send_control_data().
 */
3175 qeth_dm_act(struct qeth_card *card)
3178 struct qeth_cmd_buffer *iob;
3180 QETH_DBF_TEXT(setup,2,"dmact");
3182 iob = qeth_wait_for_buffer(&card->write);
3183 memcpy(iob->data, DM_ACT, DM_ACT_SIZE);
3185 memcpy(QETH_DM_ACT_DEST_ADDR(iob->data),
3186 &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
3187 memcpy(QETH_DM_ACT_CONNECTION_TOKEN(iob->data),
3188 &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
3189 rc = qeth_send_control_data(card, DM_ACT_SIZE, iob, NULL, NULL);
/*
 * Full MPC bring-up sequence for the card: issue the first read, enable
 * and set up the CM and ULP layers, allocate/establish/activate QDIO and
 * finally send DM_ACT.  Each step logs its rc to the setup debug area;
 * on failure the QDIO connection is cleared again (see the final call).
 * NOTE(review): truncated extract - the goto/return error paths between
 * the steps are missing from this view.
 */
3194 qeth_mpc_initialize(struct qeth_card *card)
3198 QETH_DBF_TEXT(setup,2,"mpcinit");
3200 if ((rc = qeth_issue_next_read(card))){
3201 QETH_DBF_TEXT_(setup, 2, "1err%d", rc);
3204 if ((rc = qeth_cm_enable(card))){
3205 QETH_DBF_TEXT_(setup, 2, "2err%d", rc);
3208 if ((rc = qeth_cm_setup(card))){
3209 QETH_DBF_TEXT_(setup, 2, "3err%d", rc);
3212 if ((rc = qeth_ulp_enable(card))){
3213 QETH_DBF_TEXT_(setup, 2, "4err%d", rc);
3216 if ((rc = qeth_ulp_setup(card))){
3217 QETH_DBF_TEXT_(setup, 2, "5err%d", rc);
/* note: the "5err" tag below duplicates the one above (present as-is in
 * the driver); only the call site distinguishes the two */
3220 if ((rc = qeth_alloc_qdio_buffers(card))){
3221 QETH_DBF_TEXT_(setup, 2, "5err%d", rc);
3224 if ((rc = qeth_qdio_establish(card))){
3225 QETH_DBF_TEXT_(setup, 2, "6err%d", rc);
3226 qeth_free_qdio_buffers(card);
3229 if ((rc = qeth_qdio_activate(card))){
3230 QETH_DBF_TEXT_(setup, 2, "7err%d", rc);
3233 if ((rc = qeth_dm_act(card))){
3234 QETH_DBF_TEXT_(setup, 2, "8err%d", rc);
/* error path: tear the QDIO connection down again (halt for OSAE) */
3240 qeth_qdio_clear_card(card, card->info.type==QETH_CARD_TYPE_OSAE);
/*
 * Allocate the right kind of net_device for the card/link type:
 * token ring for LANE_TR/HSTR links (when CONFIG_TR), "hsi%d" for
 * HiperSockets (IQD), plain ethernet otherwise.
 * NOTE(review): truncated extract - switch braces, breaks and the
 * return are missing from this view.
 */
3244 static struct net_device *
3245 qeth_get_netdevice(enum qeth_card_types type, enum qeth_link_types linktype)
3247 struct net_device *dev = NULL;
3250 case QETH_CARD_TYPE_OSAE:
3252 case QETH_LINK_TYPE_LANE_TR:
3253 case QETH_LINK_TYPE_HSTR:
3255 dev = alloc_trdev(0);
3256 #endif /* CONFIG_TR */
3259 dev = alloc_etherdev(0);
3262 case QETH_CARD_TYPE_IQD:
/* HiperSockets interfaces are named hsi<n> */
3263 dev = alloc_netdev(0, "hsi%d", ether_setup);
3266 dev = alloc_etherdev(0);
/* forward declaration - qeth_send_packet() is defined further down */
3272 qeth_send_packet(struct qeth_card *, struct sk_buff *);
/*
 * net_device hard_start_xmit entry point: validates card state, counts
 * the packet as dropped/errored when the card is not UP or has no
 * carrier, otherwise stops the queue, sends via qeth_send_packet() and
 * re-wakes the queue on success.
 * NOTE(review): truncated extract - returns and braces missing.
 */
3275 qeth_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
3278 struct qeth_card *card;
3280 QETH_DBF_TEXT(trace, 6, "hrdstxmi");
3281 card = (struct qeth_card *)dev->priv;
/* NOTE(review): presumably guarded by a NULL-skb check on a missing
 * line - confirm against the untruncated source */
3283 card->stats.tx_dropped++;
3284 card->stats.tx_errors++;
3287 if ((card->state != CARD_STATE_UP) || !netif_carrier_ok(dev)) {
3288 card->stats.tx_dropped++;
3289 card->stats.tx_errors++;
3290 card->stats.tx_carrier_errors++;
3293 #ifdef CONFIG_QETH_PERF_STATS
3294 card->perf_stats.outbound_cnt++;
3295 card->perf_stats.outbound_start_time = qeth_get_micros();
3298 * dev_queue_xmit should ensure that we are called packet
3301 netif_stop_queue(dev);
3302 if (!(rc = qeth_send_packet(card, skb)))
3303 netif_wake_queue(dev);
3305 #ifdef CONFIG_QETH_PERF_STATS
3306 card->perf_stats.outbound_time += qeth_get_micros() -
3307 card->perf_stats.outbound_start_time;
/*
 * Check whether <dev> is one of the VLAN devices registered on this
 * card's vlan group; returns QETH_VLAN_CARD if so (0 otherwise - the
 * default-return lines are missing from this truncated extract).
 */
3313 qeth_verify_vlan_dev(struct net_device *dev, struct qeth_card *card)
3316 #ifdef CONFIG_QETH_VLAN
3317 struct vlan_group *vg;
3320 if (!(vg = card->vlangrp))
3323 for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++){
3324 if (vg->vlan_devices[i] == dev){
3325 rc = QETH_VLAN_CARD;
/*
 * Classify a net_device: walk the global card list (under its rwlock)
 * and return QETH_REAL_CARD when <dev> is a card's own device, or the
 * qeth_verify_vlan_dev() result when it is a VLAN child device.
 * NOTE(review): truncated extract - break statements/return missing.
 */
3334 qeth_verify_dev(struct net_device *dev)
3336 struct qeth_card *card;
3337 unsigned long flags;
3340 read_lock_irqsave(&qeth_card_list.rwlock, flags);
3341 list_for_each_entry(card, &qeth_card_list.list, list){
3342 if (card->dev == dev){
3343 rc = QETH_REAL_CARD;
3346 rc = qeth_verify_vlan_dev(dev, card);
3350 read_unlock_irqrestore(&qeth_card_list.rwlock, flags);
/*
 * Map a net_device back to its owning qeth_card: directly via dev->priv
 * for a real card device, or via the VLAN real_dev for a VLAN child.
 * Returns NULL when the device is neither (card stays NULL).
 */
3355 static struct qeth_card *
3356 qeth_get_card_from_dev(struct net_device *dev)
3358 struct qeth_card *card = NULL;
3361 rc = qeth_verify_dev(dev);
3362 if (rc == QETH_REAL_CARD)
3363 card = (struct qeth_card *)dev->priv;
3364 else if (rc == QETH_VLAN_CARD)
3365 card = (struct qeth_card *)
3366 VLAN_DEV_INFO(dev)->real_dev->priv;
3368 QETH_DBF_TEXT_(trace, 4, "%d", rc);
3373 qeth_tx_timeout(struct net_device *dev)
3375 struct qeth_card *card;
3377 card = (struct qeth_card *) dev->priv;
3378 card->stats.tx_errors++;
3379 qeth_schedule_recovery(card);
/*
 * net_device open hook: refuses unless the card reached SOFTSETUP, then
 * marks interface/card/channel UP and starts the tx queue; if the LAN is
 * offline the carrier is dropped and the queue stopped again.
 * NOTE(review): truncated extract - return statements missing.
 */
3383 qeth_open(struct net_device *dev)
3385 struct qeth_card *card;
3387 QETH_DBF_TEXT(trace, 4, "qethopen");
3389 card = (struct qeth_card *) dev->priv;
3391 if (card->state != CARD_STATE_SOFTSETUP)
3394 card->dev->flags |= IFF_UP;
3395 netif_start_queue(dev);
3396 card->data.state = CH_STATE_UP;
3397 card->state = CARD_STATE_UP;
3399 if (!card->lan_online){
/* LAN reported offline: reflect it via carrier state, stop tx */
3400 if (netif_carrier_ok(dev))
3401 netif_carrier_off(dev);
3402 netif_stop_queue(dev);
3408 qeth_stop(struct net_device *dev)
3410 struct qeth_card *card;
3412 QETH_DBF_TEXT(trace, 4, "qethstop");
3414 card = (struct qeth_card *) dev->priv;
3416 netif_stop_queue(dev);
3417 card->dev->flags &= ~IFF_UP;
3418 if (card->state == CARD_STATE_UP)
3419 card->state = CARD_STATE_SOFTSETUP;
/*
 * Determine the cast type (unicast/multicast/broadcast/anycast) of an
 * outgoing skb: first from the neighbour entry, then from the IP header
 * (v6 ff00::/8, v4 224/4), then from the link-layer destination address.
 * NOTE(review): truncated extract - returns, braces and the hdr_mac
 * declaration are missing from this view.
 */
3424 qeth_get_cast_type(struct qeth_card *card, struct sk_buff *skb)
3426 int cast_type = RTN_UNSPEC;
/* preferred source: the routing neighbour's type */
3428 if (skb->dst && skb->dst->neighbour){
3429 cast_type = skb->dst->neighbour->type;
3430 if ((cast_type == RTN_BROADCAST) ||
3431 (cast_type == RTN_MULTICAST) ||
3432 (cast_type == RTN_ANYCAST))
3437 /* try something else */
3438 if (skb->protocol == ETH_P_IPV6)
3439 return (skb->nh.raw[24] == 0xff) ? RTN_MULTICAST : 0;
3440 else if (skb->protocol == ETH_P_IP)
3441 return ((skb->nh.raw[16] & 0xf0) == 0xe0) ? RTN_MULTICAST : 0;
/* non-IP: fall back to the link-layer destination address */
3443 if (!memcmp(skb->nh.raw, skb->dev->broadcast, 6))
3444 return RTN_BROADCAST;
3448 hdr_mac = *((u16 *)skb->nh.raw);
3450 switch (card->info.link_type) {
3451 case QETH_LINK_TYPE_HSTR:
3452 case QETH_LINK_TYPE_LANE_TR:
/* token ring multicast group addresses */
3453 if ((hdr_mac == QETH_TR_MAC_NC) ||
3454 (hdr_mac == QETH_TR_MAC_C))
3455 return RTN_MULTICAST;
3456 /* eth or so multicast? */
3458 if ((hdr_mac == QETH_ETH_MAC_V4) ||
3459 (hdr_mac == QETH_ETH_MAC_V6))
3460 return RTN_MULTICAST;
/*
 * Choose the outbound queue for an skb: non-IP on OSA-Express goes to the
 * default queue; multicast may be steered to a dedicated queue; otherwise
 * the IPv4 TOS bits (or precedence field) select the priority queue.
 * NOTE(review): truncated extract - several returns/case labels missing.
 */
3467 qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb,
3468 int ipv, int cast_type)
3470 if (!ipv && (card->info.type == QETH_CARD_TYPE_OSAE))
3471 return card->qdio.default_out_queue;
3472 switch (card->qdio.no_out_queues) {
3474 if (cast_type && card->info.is_multicast_different)
3475 return card->info.is_multicast_different &
3476 (card->qdio.no_out_queues - 1);
3477 if (card->qdio.do_prio_queueing && (ipv == 4)) {
/* TOS-based steering: each TOS bit maps to a queue number */
3478 if (card->qdio.do_prio_queueing==QETH_PRIO_Q_ING_TOS){
3479 if (skb->nh.iph->tos & IP_TOS_NOTIMPORTANT)
3481 if (skb->nh.iph->tos & IP_TOS_HIGHRELIABILITY)
3483 if (skb->nh.iph->tos & IP_TOS_HIGHTHROUGHPUT)
3485 if (skb->nh.iph->tos & IP_TOS_LOWDELAY)
/* precedence-based steering: top two TOS bits, inverted */
3488 if (card->qdio.do_prio_queueing==QETH_PRIO_Q_ING_PREC)
3489 return 3 - (skb->nh.iph->tos >> 6);
3490 } else if (card->qdio.do_prio_queueing && (ipv == 6)) {
3493 return card->qdio.default_out_queue;
/*
 * Map skb->protocol to an IP version number.
 * NOTE(review): the body of this function is almost entirely missing
 * from this truncated extract - presumably cases for ETH_P_IP (4) and
 * ETH_P_IPV6 (6) with a 0 default; confirm against the full source.
 */
3500 qeth_get_ip_version(struct sk_buff *skb)
3502 switch (skb->protocol) {
/*
 * Prepare an skb for transmission: guarantee headroom for the qeth_hdr
 * (reallocating if needed), inline the VLAN tag for IPv6 passthrough
 * frames, push the qeth_hdr, and reject packets whose header region
 * would straddle a 4k page boundary.
 * NOTE(review): truncated extract - returns and braces are missing.
 */
3513 qeth_prepare_skb(struct qeth_card *card, struct sk_buff **skb,
3514 struct qeth_hdr **hdr, int ipv)
3516 struct sk_buff *new_skb;
3517 #ifdef CONFIG_QETH_VLAN
3521 QETH_DBF_TEXT(trace, 6, "prepskb");
3522 if (skb_headroom(*skb) < sizeof(struct qeth_hdr)){
3523 new_skb = skb_realloc_headroom(*skb, sizeof(struct qeth_hdr));
3525 PRINT_ERR("qeth_prepare_skb: could "
3526 "not realloc headroom for qeth_hdr "
3527 "on interface %s", card->info.if_name);
3532 #ifdef CONFIG_QETH_VLAN
3533 if (card->vlangrp && vlan_tx_tag_present(*skb) && (ipv == 6)){
/*
3535 * Move the mac addresses (6 bytes src, 6 bytes dest)
3536 * to the beginning of the new header.  We are using three
3537 * memcpys instead of one memmove to save cycles.
 */
3539 skb_push(*skb, VLAN_HLEN);
3540 memcpy((*skb)->data, (*skb)->data + 4, 4);
3541 memcpy((*skb)->data + 4, (*skb)->data + 8, 4);
3542 memcpy((*skb)->data + 8, (*skb)->data + 12, 4);
/* NOTE(review): this computes ((u16*)data) + 12, i.e. byte offset 24,
 * not byte offset 12 - looks like a precedence bug in the original;
 * confirm against the untruncated source before changing */
3543 tag = (u16 *) (*skb)->data + 12;
/*
3545 * first two bytes = ETH_P_8021Q (0x8100)
3546 * second two bytes = VLANID
 */
3548 *tag = __constant_htons(ETH_P_8021Q);
3549 *(tag + 1) = vlan_tx_tag_get(*skb);
3550 *(tag + 1) = htons(*(tag + 1));
3553 *hdr = (struct qeth_hdr *) skb_push(*skb, sizeof(struct qeth_hdr));
/*
3555 * sanity check, the Linux memory allocation scheme should
3556 * never present us cases like this one (the 32bytes header plus
3557 * the first 40 bytes of the packet cross a 4k boundary)
 */
3559 if ((((unsigned long) *hdr) & (~(PAGE_SIZE - 1))) !=
3560 (((unsigned long) *hdr + sizeof(struct qeth_hdr) +
3561 QETH_IP_HEADER_SIZE) & (~(PAGE_SIZE - 1)))) {
3562 PRINT_ERR("qeth_prepare_skb: misaligned "
3563 "packet on interface %s. Discarded.",
3564 card->info.if_name);
3571 qeth_get_qeth_hdr_flags4(int cast_type)
3573 if (cast_type == RTN_MULTICAST)
3574 return QETH_CAST_MULTICAST;
3575 if (cast_type == RTN_BROADCAST)
3576 return QETH_CAST_BROADCAST;
3577 return QETH_CAST_UNICAST;
3581 qeth_get_qeth_hdr_flags6(int cast_type)
3583 u8 ct = QETH_HDR_PASSTHRU | QETH_HDR_IPV6;
3584 if (cast_type == RTN_MULTICAST)
3585 return ct | QETH_CAST_MULTICAST;
3586 if (cast_type == RTN_ANYCAST)
3587 return ct | QETH_CAST_ANYCAST;
3588 if (cast_type == RTN_BROADCAST)
3589 return ct | QETH_CAST_BROADCAST;
3590 return ct | QETH_CAST_UNICAST;
/*
 * Fill the qeth_hdr in front of the packet: VLAN tag (if any), payload
 * length, cast flags and the destination address taken from the
 * neighbour's primary key or, failing that, from the IP header itself.
 * NOTE(review): truncated extract - closing braces are missing.
 */
3594 qeth_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
3595 struct sk_buff *skb, int ipv, int cast_type)
3600 QETH_DBF_TEXT(trace, 6, "fillhdr");
3601 #ifdef CONFIG_QETH_VLAN
/*
3603 * before we're going to overwrite this location with next hop ip.
3604 * v6 uses passthrough, v4 sets the tag in the QDIO header.
 */
3606 if (card->vlangrp && vlan_tx_tag_present(skb)) {
3607 hdr->ext_flags = (ipv == 4)? QETH_EXT_HDR_VLAN_FRAME :
3608 QETH_EXT_HDR_INCLUDE_VLAN_TAG;
3609 hdr->vlan_id = vlan_tx_tag_get(skb);
3611 #endif /* CONFIG_QETH_VLAN */
3612 hdr->length = skb->len - sizeof(struct qeth_hdr);
3613 if (ipv == 4) { /* IPv4 */
3614 hdr->flags = qeth_get_qeth_hdr_flags4(cast_type);
/* v4 address lives in the last 4 of the 16 dest_addr bytes */
3615 memset(hdr->dest_addr, 0, 12);
3616 if ((skb->dst) && (skb->dst->neighbour)) {
3617 *((u32 *) (&hdr->dest_addr[12])) =
3618 *((u32 *) skb->dst->neighbour->primary_key);
3620 /* fill in destination address used in ip header */
3621 *((u32 *) (&hdr->dest_addr[12])) = skb->nh.iph->daddr;
3623 } else if (ipv == 6) { /* IPv6 or passthru */
3624 hdr->flags = qeth_get_qeth_hdr_flags6(cast_type);
3625 if ((skb->dst) && (skb->dst->neighbour)) {
3626 memcpy(hdr->dest_addr,
3627 skb->dst->neighbour->primary_key, 16);
3629 /* fill in destination address used in ip header */
3630 memcpy(hdr->dest_addr, &skb->nh.ipv6h->daddr, 16);
3632 } else { /* passthrough */
3633 if (!memcmp(skb->data + sizeof(struct qeth_hdr),
3634 skb->dev->broadcast, 6)) { /* broadcast? */
3635 hdr->flags = QETH_CAST_BROADCAST | QETH_HDR_PASSTHRU;
3637 hdr->flags = (cast_type == RTN_MULTICAST) ?
3638 QETH_CAST_MULTICAST | QETH_HDR_PASSTHRU :
3639 QETH_CAST_UNICAST | QETH_HDR_PASSTHRU;
/*
 * Scatter one skb into the SBAL elements of an output buffer, splitting
 * at page boundaries and setting FIRST/MIDDLE/LAST fragment flags.  In
 * non-packing mode the buffer is immediately primed; in packing mode it
 * is only primed once full.
 * NOTE(review): truncated extract - declarations, the element++ and
 * some braces/returns are missing from this view.
 */
3645 qeth_fill_buffer(struct qeth_qdio_out_q *queue, struct qeth_qdio_out_buffer *buf,
3646 char *data, struct sk_buff *skb)
3648 struct qdio_buffer *buffer;
3649 int length = skb->len;
3654 QETH_DBF_TEXT(trace, 6, "qdfillbf");
3655 buffer = buf->buffer;
/* keep a reference; the skb is released when the buffer completes */
3656 atomic_inc(&skb->users);
3657 skb_queue_tail(&buf->skb_list, skb);
3658 element = buf->next_element_to_fill;
3659 while (length > 0) {
3660 /* length_here is the remaining amount of data in this page */
3661 length_here = PAGE_SIZE - ((unsigned long) data % PAGE_SIZE);
3662 if (length < length_here)
3663 length_here = length;
3664 buffer->element[element].addr = data;
3665 buffer->element[element].length = length_here;
3666 length -= length_here;
/* fragment flags: 0 for single-element, else FIRST/MIDDLE/LAST */
3669 buffer->element[element].flags = 0;
3671 buffer->element[element].flags =
3672 SBAL_FLAGS_LAST_FRAG;
3675 buffer->element[element].flags =
3676 SBAL_FLAGS_FIRST_FRAG;
3678 buffer->element[element].flags =
3679 SBAL_FLAGS_MIDDLE_FRAG;
3681 data += length_here;
3685 buf->next_element_to_fill = element;
3686 if (!queue->do_pack) {
3687 QETH_DBF_TEXT(trace, 6, "fillbfnp");
3688 /* set state to PRIMED -> will be flushed */
3689 atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
3691 QETH_DBF_TEXT(trace, 6, "fillbfpa");
3692 #ifdef CONFIG_QETH_PERF_STATS
3693 queue->card->perf_stats.skbs_sent_pack++;
3695 if (buf->next_element_to_fill >=
3696 QETH_MAX_BUFFER_ELEMENTS(queue->card)) {
/*
3698 * packed buffer if full -> set state PRIMED
3699 * -> will be flushed
 */
3701 atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
/*
 * Fast (non-packing) send path, used for IQD/HiperSockets: fill exactly
 * one buffer and flush it immediately, all under queue->lock.  Drops the
 * packet (tx_dropped) when the next buffer is not yet EMPTY.
 * NOTE(review): truncated extract - declarations and returns missing.
 */
3708 qeth_do_send_packet_fast(struct qeth_card *card, struct qeth_qdio_out_q *queue,
3709 struct sk_buff *skb, struct qeth_hdr *hdr,
3710 int elements_needed)
3712 struct qeth_qdio_out_buffer *buffer;
3715 QETH_DBF_TEXT(trace, 6, "dosndpfa");
3717 spin_lock(&queue->lock);
3718 index = queue->next_buf_to_fill;
3719 buffer = &queue->bufs[queue->next_buf_to_fill];
/*
3721 * check if buffer is empty to make sure that we do not 'overtake'
3722 * ourselves and try to fill a buffer that is already primed
 */
3724 if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY) {
3725 card->stats.tx_dropped++;
3726 spin_unlock(&queue->lock);
3729 queue->next_buf_to_fill = (queue->next_buf_to_fill + 1) %
3730 QDIO_MAX_BUFFERS_PER_Q;
3731 qeth_fill_buffer(queue, buffer, (char *)hdr, skb);
3732 qeth_flush_buffers(queue, 0, index, 1);
3733 spin_unlock(&queue->lock);
/*
 * Regular send path with packing support: append the skb to the current
 * buffer, advancing to the next buffer when the packet does not fit,
 * then flush primed buffers and re-evaluate the packing state - all
 * under queue->lock.
 * NOTE(review): truncated extract - declarations, returns and some
 * braces are missing from this view.
 */
3738 qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
3739 struct sk_buff *skb, struct qeth_hdr *hdr,
3740 int elements_needed)
3742 struct qeth_qdio_out_buffer *buffer;
3744 int flush_count = 0;
3747 QETH_DBF_TEXT(trace, 6, "dosndpkt");
3749 spin_lock(&queue->lock);
3750 start_index = queue->next_buf_to_fill;
3751 buffer = &queue->bufs[queue->next_buf_to_fill];
/*
3753 * check if buffer is empty to make sure that we do not 'overtake'
3754 * ourselves and try to fill a buffer that is already primed
 */
3756 if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY){
3757 card->stats.tx_dropped++;
3758 spin_unlock(&queue->lock);
3761 if (queue->do_pack){
3762 /* does packet fit in current buffer? */
3763 if((QETH_MAX_BUFFER_ELEMENTS(card) -
3764 buffer->next_element_to_fill) < elements_needed){
3765 /* ... no -> set state PRIMED */
3766 atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
3768 queue->next_buf_to_fill =
3769 (queue->next_buf_to_fill + 1) %
3770 QDIO_MAX_BUFFERS_PER_Q;
3771 buffer = &queue->bufs[queue->next_buf_to_fill];
3772 /* we did a step forward, so check buffer state again */
3773 if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY){
3774 card->stats.tx_dropped++;
3775 qeth_flush_buffers(queue, 0, start_index, 1);
3776 spin_unlock(&queue->lock);
3777 /* return EBUSY because we sent old packet, not
3778 * the current one */
3783 qeth_fill_buffer(queue, buffer, (char *)hdr, skb);
3784 if (atomic_read(&buffer->state) == QETH_QDIO_BUF_PRIMED){
3785 /* next time fill the next buffer */
3787 queue->next_buf_to_fill = (queue->next_buf_to_fill + 1) %
3788 QDIO_MAX_BUFFERS_PER_Q;
3790 /* check if we need to switch packing state of this queue */
3791 flush_count += qeth_switch_packing_state(queue);
3794 qeth_flush_buffers(queue, 0, start_index, flush_count);
/* nothing flushed and no PCI pending: force a flush so data moves */
3796 if (!atomic_read(&queue->set_pci_flags_count))
3797 qeth_flush_buffers_on_no_pci(queue, 0);
3799 spin_unlock(&queue->lock);
/*
 * Top-level per-packet send: determine IP version and cast type, pick
 * the output queue, prepare the skb and qeth_hdr, compute how many SBAL
 * elements are needed, and dispatch to the packing or fast send path.
 * On success updates tx byte/packet stats.
 * NOTE(review): truncated extract - returns and the element computation
 * divisor line are missing from this view.
 */
3804 qeth_send_packet(struct qeth_card *card, struct sk_buff *skb)
3808 struct qeth_qdio_out_q *queue;
3809 struct qeth_hdr *hdr;
3810 int elements_needed;
3813 QETH_DBF_TEXT(trace, 6, "sendpkt");
3815 ipv = qeth_get_ip_version(skb);
3816 cast_type = qeth_get_cast_type(card, skb);
3817 queue = card->qdio.out_qs
3818 [qeth_get_priority_queue(card, skb, ipv, cast_type)];
3820 if ((rc = qeth_prepare_skb(card, &skb, &hdr, ipv))){
3821 QETH_DBF_TEXT_(trace, 4, "1err%d", rc);
3824 qeth_fill_header(card, hdr, skb, ipv, cast_type);
/* one element plus one per page the (hdr + payload) spans */
3825 elements_needed = 1 + (((((unsigned long) hdr) % PAGE_SIZE) + skb->len)
3827 if (elements_needed > QETH_MAX_BUFFER_ELEMENTS(card)){
3828 PRINT_ERR("qeth_do_send_packet: invalid size of "
3829 "IP packet. Discarded.");
/* IQD (HiperSockets) uses the non-packing fast path */
3833 if (card->info.type != QETH_CARD_TYPE_IQD)
3834 rc = qeth_do_send_packet(card, queue, skb, hdr,
3837 rc = qeth_do_send_packet_fast(card, queue, skb, hdr,
3841 card->stats.tx_packets++;
3842 card->stats.tx_bytes += skb->len;
/*
 * Fake MII PHY register reads for ethtool/mii-tool support: the OSA
 * hardware has no real MDIO bus, so plausible constant register values
 * (and a PHY id derived from the MAC address) are synthesized.
 * NOTE(review): truncated extract - break statements and the final
 * return are missing from this view.
 */
3848 qeth_mdio_read(struct net_device *dev, int phy_id, int regnum)
3850 struct qeth_card *card = (struct qeth_card *) dev->priv;
3854 case MII_BMCR: /* Basic mode control register */
/* report 100 Mbit unless the link is gigabit ethernet */
3856 if(card->info.link_type != QETH_LINK_TYPE_GBIT_ETH)
3857 rc |= BMCR_SPEED100;
3859 case MII_BMSR: /* Basic mode status register */
3860 rc = BMSR_ERCAP | BMSR_ANEGCOMPLETE | BMSR_LSTATUS |
3861 BMSR_10HALF | BMSR_10FULL | BMSR_100HALF | BMSR_100FULL |
3864 case MII_PHYSID1: /* PHYS ID 1 */
/* synthesize a PHY id from the device's MAC address bytes */
3865 rc = (dev->dev_addr[0] << 16) | (dev->dev_addr[1] << 8) |
3867 rc = (rc >> 5) & 0xFFFF;
3869 case MII_PHYSID2: /* PHYS ID 2 */
3870 rc = (dev->dev_addr[2] << 10) & 0xFFFF;
3872 case MII_ADVERTISE: /* Advertisement control reg */
3875 case MII_LPA: /* Link partner ability reg */
3876 rc = LPA_10HALF | LPA_10FULL | LPA_100HALF | LPA_100FULL |
3877 LPA_100BASE4 | LPA_LPACK;
3879 case MII_EXPANSION: /* Expansion register */
3881 case MII_DCOUNTER: /* disconnect counter */
3883 case MII_FCSCOUNTER: /* false carrier counter */
3885 case MII_NWAYTEST: /* N-way auto-neg test register */
3887 case MII_RERRCOUNTER: /* rx error counter */
3888 rc = card->stats.rx_errors;
3890 case MII_SREVISION: /* silicon revision */
3892 case MII_RESV1: /* reserved 1 */
3894 case MII_LBRERROR: /* loopback, rx, bypass error */
3896 case MII_PHYADDR: /* physical address */
3898 case MII_RESV2: /* reserved 2 */
3900 case MII_TPISTATUS: /* TPI status for 10mbps */
3902 case MII_NCONFIG: /* network interface config */
/*
 * qeth_mdio_write - emulated MII register write for SIOCSMIIREG.
 * All registers are listed but no register is actually writable on this
 * hardware; the function is intentionally a no-op for every case
 * (see the "remove return if qeth_mdio_write does something" TODO at
 * its call site in qeth_do_ioctl).
 */
3912 qeth_mdio_write(struct net_device *dev, int phy_id, int regnum, int value)
3915 	case MII_BMCR: /* Basic mode control register */
3916 	case MII_BMSR: /* Basic mode status register */
3917 	case MII_PHYSID1: /* PHYS ID 1 */
3918 	case MII_PHYSID2: /* PHYS ID 2 */
3919 	case MII_ADVERTISE: /* Advertisement control reg */
3920 	case MII_LPA: /* Link partner ability reg */
3921 	case MII_EXPANSION: /* Expansion register */
3922 	case MII_DCOUNTER: /* disconnect counter */
3923 	case MII_FCSCOUNTER: /* false carrier counter */
3924 	case MII_NWAYTEST: /* N-way auto-neg test register */
3925 	case MII_RERRCOUNTER: /* rx error counter */
3926 	case MII_SREVISION: /* silicon revision */
3927 	case MII_RESV1: /* reserved 1 */
3928 	case MII_LBRERROR: /* loopback, rx, bypass error */
3929 	case MII_PHYADDR: /* physical address */
3930 	case MII_RESV2: /* reserved 2 */
3931 	case MII_TPISTATUS: /* TPI status for 10mbps */
3932 	case MII_NCONFIG: /* network interface config */
/*
 * qeth_arp_get_error_cause - map an IPA ARP return code to a message.
 * Returns a static human-readable string for the given *rc; the pointer
 * parameter suggests *rc may also be rewritten to an errno in the
 * missing lines of each case (NOTE(review): those lines are not visible
 * in this excerpt — confirm against full source).
 */
3938 static inline const char *
3939 qeth_arp_get_error_cause(int *rc)
3942 	case QETH_IPA_ARP_RC_FAILED:
3944 		return "operation failed";
3945 	case QETH_IPA_ARP_RC_NOTSUPP:
3947 		return "operation not supported";
3948 	case QETH_IPA_ARP_RC_OUT_OF_RANGE:
3950 		return "argument out of range";
3951 	case QETH_IPA_ARP_RC_Q_NOTSUPP:
3953 		return "query operation not supported";
3954 	case QETH_IPA_ARP_RC_Q_NO_DATA:
3956 		return "no query data available";
3958 		return "unknown error";
3963 qeth_send_simple_setassparms(struct qeth_card *, enum qeth_ipa_funcs,
/*
 * qeth_arp_set_no_entries - set the size of the card's ARP table via the
 * IPA_CMD_ASS_ARP_SET_NO_ENTRIES assist command.
 * Rejected for GuestLAN and for cards without IPA_ARP_PROCESSING support;
 * failures are reported with a translated cause string.
 */
3967 qeth_arp_set_no_entries(struct qeth_card *card, int no_entries)
3972 	QETH_DBF_TEXT(trace,3,"arpstnoe");
3974 	/* TODO: really not supported by GuestLAN? */
3975 	if (card->info.guestlan)
3977 	if (!qeth_is_supported(card,IPA_ARP_PROCESSING)) {
3978 		PRINT_WARN("ARP processing not supported "
3979 			   "on %s!\n", card->info.if_name);
3982 	rc = qeth_send_simple_setassparms(card, IPA_ARP_PROCESSING,
3983 					  IPA_CMD_ASS_ARP_SET_NO_ENTRIES,
3987 		PRINT_WARN("Could not set number of ARP entries on %s: "
3989 			card->info.if_name, qeth_arp_get_error_cause(&rc),
/*
 * qeth_copy_arp_entries_stripped - copy ARP query entries to the user
 * buffer, dropping the leading 32 bytes of "media specific information"
 * from each entry (entry_size is the on-wire size, uentry_size the
 * stripped size expected by userspace).
 */
3996 qeth_copy_arp_entries_stripped(struct qeth_arp_query_info *qinfo,
3997 		struct qeth_arp_query_data *qdata,
3998 		int entry_size, int uentry_size)
4004 	entry_ptr = (char *)&qdata->data;
4005 	uentry_ptr = (char *)(qinfo->udata + qinfo->udata_offset);
4006 	for (i = 0; i < qdata->no_entries; ++i){
4007 		/* strip off 32 bytes "media specific information" */
4008 		memcpy(uentry_ptr, (entry_ptr + 32), entry_size - 32);
4009 		entry_ptr += entry_size;
4010 		uentry_ptr += uentry_size;
/*
 * qeth_arp_query_cb - reply callback for the ARP cache query command.
 * Accumulates returned ARP entries into the kernel-side staging buffer
 * (qinfo->udata), optionally stripping media-specific information, and
 * tracks progress across multi-part replies via seq_no/number_of_replies.
 * When the final reply arrives, the entry count and reply_bits mask are
 * written into the buffer header.
 * NOTE(review): some lines (braces, returns, a switch case label) are
 * missing from this excerpt; code kept verbatim.
 */
4015 qeth_arp_query_cb(struct qeth_card *card, struct qeth_reply *reply,
4018 	struct qeth_ipa_cmd *cmd;
4019 	struct qeth_arp_query_data *qdata;
4020 	struct qeth_arp_query_info *qinfo;
4025 	QETH_DBF_TEXT(trace,4,"arpquecb");
4027 	qinfo = (struct qeth_arp_query_info *) reply->param;
4028 	cmd = (struct qeth_ipa_cmd *) data;
4029 	if (cmd->hdr.return_code) {
4030 		QETH_DBF_TEXT_(trace,4,"qaer1%i", cmd->hdr.return_code);
	/* propagate an assist-level error up into the main return code */
4033 	if (cmd->data.setassparms.hdr.return_code) {
4034 		cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
4035 		QETH_DBF_TEXT_(trace,4,"qaer2%i", cmd->hdr.return_code);
4038 	qdata = &cmd->data.setassparms.data.query_arp;
	/* entry layout depends on the link type reported in reply_bits */
4039 	switch(qdata->reply_bits){
4041 		uentry_size = entry_size = sizeof(struct qeth_arp_qi_entry5);
4042 		if (qinfo->mask_bits & QETH_QARP_STRIP_ENTRIES)
4043 			uentry_size = sizeof(struct qeth_arp_qi_entry5_short);
4046 		/* fall through to default */
4048 		/* tr is the same as eth -> entry7 */
4049 		uentry_size = entry_size = sizeof(struct qeth_arp_qi_entry7);
4050 		if (qinfo->mask_bits & QETH_QARP_STRIP_ENTRIES)
4051 			uentry_size = sizeof(struct qeth_arp_qi_entry7_short);
4054 	/* check if there is enough room in userspace */
4055 	if ((qinfo->udata_len - qinfo->udata_offset) <
4056 			qdata->no_entries * uentry_size){
4057 		QETH_DBF_TEXT_(trace, 4, "qaer3%i", -ENOMEM);
4058 		cmd->hdr.return_code = -ENOMEM;
4059 		PRINT_WARN("query ARP user space buffer is too small for "
4060 			   "the returned number of ARP entries. "
4061 			   "Aborting query!\n");
4064 	QETH_DBF_TEXT_(trace, 4, "anore%i",
4065 		       cmd->data.setassparms.hdr.number_of_replies);
4066 	QETH_DBF_TEXT_(trace, 4, "aseqn%i", cmd->data.setassparms.hdr.seq_no);
4067 	QETH_DBF_TEXT_(trace, 4, "anoen%i", qdata->no_entries);
4069 	if (qinfo->mask_bits & QETH_QARP_STRIP_ENTRIES) {
4070 		/* strip off "media specific information" */
4071 		qeth_copy_arp_entries_stripped(qinfo, qdata, entry_size,
4074 		/*copy entries to user buffer*/
4075 		memcpy(qinfo->udata + qinfo->udata_offset,
4076 		       (char *)&qdata->data, qdata->no_entries*uentry_size);
4078 	qinfo->no_entries += qdata->no_entries;
4079 	qinfo->udata_offset += (qdata->no_entries*uentry_size);
4080 	/* check if all replies received ... */
4081 	if (cmd->data.setassparms.hdr.seq_no <
4082 	    cmd->data.setassparms.hdr.number_of_replies)
	/* last reply: record total entry count at the buffer start */
4084 	memcpy(qinfo->udata, &qinfo->no_entries, 4);
4085 	/* keep STRIP_ENTRIES flag so the user program can distinguish
4086 	 * stripped entries from normal ones */
4087 	if (qinfo->mask_bits & QETH_QARP_STRIP_ENTRIES)
4088 		qdata->reply_bits |= QETH_QARP_STRIP_ENTRIES;
4089 	memcpy(qinfo->udata + QETH_QARP_MASK_OFFSET,&qdata->reply_bits,2);
4093 	memcpy(qinfo->udata, &i, 4);
/*
 * qeth_send_ipa_arp_cmd - wrap an ARP assist command in an IPA PDU
 * header, fill in the ULP connection token, and submit it over the
 * control channel with the given reply callback.
 */
4098 qeth_send_ipa_arp_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
4099 		      int len, int (*reply_cb)(struct qeth_card *,
4100 					       struct qeth_reply *,
4104 	QETH_DBF_TEXT(trace,4,"sendarp");
4106 	memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE);
4107 	memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data),
4108 	       &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
4109 	return qeth_send_control_data(card, IPA_PDU_HEADER_SIZE + len, iob,
4110 				      reply_cb, reply_param);
/*
 * qeth_send_ipa_snmp_cmd - like qeth_send_ipa_arp_cmd, but additionally
 * patches the four PDU length fields in the IPA header because SNMP
 * payloads are variable-length.  s1 is the total length including the
 * header; s2 (set in a line missing from this excerpt) covers the
 * command-only lengths.
 */
4114 qeth_send_ipa_snmp_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
4115 		      int len, int (*reply_cb)(struct qeth_card *,
4116 					       struct qeth_reply *,
4122 	QETH_DBF_TEXT(trace,4,"sendsnmp");
4124 	memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE);
4125 	memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data),
4126 	       &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
4127 	/* adjust PDU length fields in IPA_PDU_HEADER */
4128 	s1 = (u32) IPA_PDU_HEADER_SIZE + len;
4130 	memcpy(QETH_IPA_PDU_LEN_TOTAL(iob->data), &s1, 2);
4131 	memcpy(QETH_IPA_PDU_LEN_PDU1(iob->data), &s2, 2);
4132 	memcpy(QETH_IPA_PDU_LEN_PDU2(iob->data), &s2, 2);
4133 	memcpy(QETH_IPA_PDU_LEN_PDU3(iob->data), &s2, 2);
4134 	return qeth_send_control_data(card, IPA_PDU_HEADER_SIZE + len, iob,
4135 				      reply_cb, reply_param);
4138 static struct qeth_cmd_buffer *
4139 qeth_get_setassparms_cmd(struct qeth_card *, enum qeth_ipa_funcs,
4140 __u16, __u16, enum qeth_prot_versions);
/*
 * qeth_arp_query - SIOC_QETH_ARP_QUERY_INFO backend.
 * Reads the userspace buffer size and mask bits, allocates a zeroed
 * kernel staging buffer, issues the ARP query assist command (results
 * collected by qeth_arp_query_cb), and copies the result — or just the
 * 4-byte count on error — back to userspace.
 * NOTE(review): the copy_to_user return values are ignored here; the
 * missing lines of this excerpt may or may not handle that — verify
 * against the full source.
 */
4142 qeth_arp_query(struct qeth_card *card, char *udata)
4144 	struct qeth_cmd_buffer *iob;
4145 	struct qeth_arp_query_info qinfo = {0, };
4149 	QETH_DBF_TEXT(trace,3,"arpquery");
4152 	 * currently GuestLAN does only deliver all zeros on query arp,
4153 	 * even though arp processing is supported (according to IPA supp.
4154 	 * funcs flags); since all zeros is no valueable information,
4155 	 * we say EOPNOTSUPP for all ARP functions
4157 	if (card->info.guestlan)
4159 	if (!qeth_is_supported(card,/*IPA_QUERY_ARP_ADDR_INFO*/
4160 			       IPA_ARP_PROCESSING)) {
4161 		PRINT_WARN("ARP processing not supported "
4162 			   "on %s!\n", card->info.if_name);
4165 	/* get size of userspace buffer and mask_bits -> 6 bytes */
4166 	if (copy_from_user(&qinfo, udata, 6))
4168 	if (!(qinfo.udata = kmalloc(qinfo.udata_len, GFP_KERNEL)))
4170 	memset(qinfo.udata, 0, qinfo.udata_len);
4171 	qinfo.udata_offset = QETH_QARP_ENTRIES_OFFSET;
4172 	iob = qeth_get_setassparms_cmd(card, IPA_ARP_PROCESSING,
4173 				       IPA_CMD_ASS_ARP_QUERY_INFO,
4174 				       sizeof(int),QETH_PROT_IPV4);
4176 	rc = qeth_send_ipa_arp_cmd(card, iob,
4177 				   QETH_SETASS_BASE_LEN+QETH_ARP_CMD_LEN,
4178 				   qeth_arp_query_cb, (void *)&qinfo);
4181 		PRINT_WARN("Error while querying ARP cache on %s: %s "
4183 			   card->info.if_name, qeth_arp_get_error_cause(&rc),
		/* on failure, still return the entry count header */
4185 		copy_to_user(udata, qinfo.udata, 4);
4187 		copy_to_user(udata, qinfo.udata, qinfo.udata_len);
4194 * SNMP command callback
/*
 * qeth_snmp_command_cb - reply callback for SNMP adapter commands.
 * Extracts the SNMP payload length from the PDU header, checks that the
 * userspace staging buffer has room, and appends the data.  The first
 * reply (seq_no == 1) includes the qeth_snmp_cmd preamble; follow-up
 * replies carry only the request portion.  Completion is detected by
 * comparing seq_no against used_total.
 * NOTE(review): qeth_arp_query_info is reused here as a generic staging
 * buffer descriptor, despite its ARP-specific name.
 */
4197 qeth_snmp_command_cb(struct qeth_card *card, struct qeth_reply *reply,
4198 		     unsigned long sdata)
4200 	struct qeth_ipa_cmd *cmd;
4201 	struct qeth_arp_query_info *qinfo;
4202 	struct qeth_snmp_cmd *snmp;
4203 	unsigned char *data;
4206 	QETH_DBF_TEXT(trace,3,"snpcmdcb");
4208 	cmd = (struct qeth_ipa_cmd *) sdata;
	/* recover the start of the raw PDU from the reply offset */
4209 	data = (unsigned char *)((char *)cmd - reply->offset);
4210 	qinfo = (struct qeth_arp_query_info *) reply->param;
4211 	snmp = &cmd->data.setadapterparms.data.snmp;
4213 	if (cmd->hdr.return_code) {
4214 		QETH_DBF_TEXT_(trace,4,"scer1%i", cmd->hdr.return_code);
4217 	if (cmd->data.setadapterparms.hdr.return_code) {
4218 		cmd->hdr.return_code = cmd->data.setadapterparms.hdr.return_code;
4219 		QETH_DBF_TEXT_(trace,4,"scer2%i", cmd->hdr.return_code);
4222 	data_len = *((__u16*)QETH_IPA_PDU_LEN_PDU1(data));
4223 	if (cmd->data.setadapterparms.hdr.seq_no == 1)
4224 		data_len -= (__u16)((char *)&snmp->data - (char *)cmd);
4226 		data_len -= (__u16)((char*)&snmp->request - (char *)cmd);
4228 	/* check if there is enough room in userspace */
4229 	if ((qinfo->udata_len - qinfo->udata_offset) < data_len) {
4230 		QETH_DBF_TEXT_(trace, 4, "scer3%i", -ENOMEM);
4231 		cmd->hdr.return_code = -ENOMEM;
4234 	QETH_DBF_TEXT_(trace, 4, "snore%i",
4235 		       cmd->data.setadapterparms.hdr.used_total);
4236 	QETH_DBF_TEXT_(trace, 4, "sseqn%i", cmd->data.setadapterparms.hdr.seq_no);
4237 	/*copy entries to user buffer*/
4238 	if (cmd->data.setadapterparms.hdr.seq_no == 1) {
4239 		memcpy(qinfo->udata + qinfo->udata_offset,
4241 		       data_len + offsetof(struct qeth_snmp_cmd,data));
4242 		qinfo->udata_offset += offsetof(struct qeth_snmp_cmd, data);
4244 		memcpy(qinfo->udata + qinfo->udata_offset,
4245 		       (char *)&snmp->request, data_len);
4247 	qinfo->udata_offset += data_len;
4248 	/* check if all replies received ... */
4249 	QETH_DBF_TEXT_(trace, 4, "srtot%i",
4250 		       cmd->data.setadapterparms.hdr.used_total);
4251 	QETH_DBF_TEXT_(trace, 4, "srseq%i",
4252 		       cmd->data.setadapterparms.hdr.seq_no);
4253 	if (cmd->data.setadapterparms.hdr.seq_no <
4254 	    cmd->data.setadapterparms.hdr.used_total)
4259 static struct qeth_cmd_buffer *
4260 qeth_get_ipacmd_buffer(struct qeth_card *, enum qeth_ipa_cmds,
4261 enum qeth_prot_versions );
/*
 * qeth_get_adapter_cmd - allocate an IPA SETADAPTERPARMS command buffer
 * and pre-fill its setadapterparms header (single-part command: seq_no
 * and used_total both start at 1).
 */
4263 static struct qeth_cmd_buffer *
4264 qeth_get_adapter_cmd(struct qeth_card *card, __u32 command, __u32 cmdlen)
4266 	struct qeth_cmd_buffer *iob;
4267 	struct qeth_ipa_cmd *cmd;
4269 	iob = qeth_get_ipacmd_buffer(card,IPA_CMD_SETADAPTERPARMS,
4271 	cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
4272 	cmd->data.setadapterparms.hdr.cmdlength = cmdlen;
4273 	cmd->data.setadapterparms.hdr.command_code = command;
4274 	cmd->data.setadapterparms.hdr.used_total = 1;
4275 	cmd->data.setadapterparms.hdr.seq_no = 1;
4281 * function to send SNMP commands to OSA-E card
/*
 * qeth_snmp_command - SIOC_QETH_ADP_SET_SNMP_CONTROL backend.
 * Copies the user request (header + SNMP command) into the kernel,
 * allocates a staging buffer for the reply, issues the
 * IPA_SETADP_SET_SNMP_CONTROL adapter command (replies gathered by
 * qeth_snmp_command_cb) and copies the result back to userspace.
 * NOTE(review): req_len comes directly from userspace and is passed to
 * kmalloc/copy_from_user without an upper-bound check in the visible
 * lines — confirm validation in the missing lines of the full source.
 */
4284 qeth_snmp_command(struct qeth_card *card, char *udata)
4286 	struct qeth_cmd_buffer *iob;
4287 	struct qeth_ipa_cmd *cmd;
4288 	struct qeth_snmp_ureq *ureq;
4290 	struct qeth_arp_query_info qinfo = {0, };
4293 	QETH_DBF_TEXT(trace,3,"snmpcmd");
4295 	if (card->info.guestlan)
4297 	if (!qeth_adp_supported(card,IPA_SETADP_SET_SNMP_CONTROL)) {
4298 		PRINT_WARN("SNMP Query MIBS not supported "
4299 			   "on %s!\n", card->info.if_name);
4302 	/* skip 4 bytes (data_len struct member) to get req_len */
4303 	if (copy_from_user(&req_len, udata + sizeof(int), sizeof(int)))
4305 	ureq = kmalloc(req_len, GFP_KERNEL);
4307 		QETH_DBF_TEXT(trace, 2, "snmpnome");
4310 	if (copy_from_user(ureq, udata, req_len)){
4314 	qinfo.udata_len = ureq->hdr.data_len;
4315 	if (!(qinfo.udata = kmalloc(qinfo.udata_len, GFP_KERNEL))){
4319 	memset(qinfo.udata, 0, qinfo.udata_len);
4320 	qinfo.udata_offset = sizeof(struct qeth_snmp_ureq_hdr);
4322 	iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_SNMP_CONTROL,
4323 				   QETH_SNMP_SETADP_CMDLENGTH + req_len);
4324 	cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
4325 	memcpy(&cmd->data.setadapterparms.data.snmp, &ureq->cmd, req_len);
4326 	rc = qeth_send_ipa_snmp_cmd(card, iob, QETH_SETADP_BASE_LEN + req_len,
4327 				    qeth_snmp_command_cb, (void *)&qinfo);
4329 		PRINT_WARN("SNMP command failed on %s: (0x%x)\n",
4330 			   card->info.if_name, rc);
4332 		copy_to_user(udata, qinfo.udata, qinfo.udata_len);
4340 qeth_default_setassparms_cb(struct qeth_card *, struct qeth_reply *,
4344 qeth_send_setassparms(struct qeth_card *, struct qeth_cmd_buffer *,
4347 (struct qeth_card *, struct qeth_reply *, unsigned long),
/*
 * qeth_arp_add_entry - add a static entry to the card's ARP cache via
 * the IPA_CMD_ASS_ARP_ADD_ENTRY assist command.
 * Not available on GuestLAN or on cards without ARP processing support.
 * On failure the offending IPv4 address is printed with a translated
 * error cause.
 */
4351 qeth_arp_add_entry(struct qeth_card *card, struct qeth_arp_cache_entry *entry)
4353 	struct qeth_cmd_buffer *iob;
4358 	QETH_DBF_TEXT(trace,3,"arpadent");
4361 	 * currently GuestLAN does only deliver all zeros on query arp,
4362 	 * even though arp processing is supported (according to IPA supp.
4363 	 * funcs flags); since all zeros is no valueable information,
4364 	 * we say EOPNOTSUPP for all ARP functions
4366 	if (card->info.guestlan)
4368 	if (!qeth_is_supported(card,IPA_ARP_PROCESSING)) {
4369 		PRINT_WARN("ARP processing not supported "
4370 			   "on %s!\n", card->info.if_name);
4374 	iob = qeth_get_setassparms_cmd(card, IPA_ARP_PROCESSING,
4375 				       IPA_CMD_ASS_ARP_ADD_ENTRY,
4376 				       sizeof(struct qeth_arp_cache_entry),
4378 	rc = qeth_send_setassparms(card, iob,
4379 				   sizeof(struct qeth_arp_cache_entry),
4380 				   (unsigned long) entry,
4381 				   qeth_default_setassparms_cb, NULL);
4384 		qeth_ipaddr4_to_string((u8 *)entry->ipaddr, buf);
4385 		PRINT_WARN("Could not add ARP entry for address %s on %s: "
4387 			   buf, card->info.if_name,
4388 			   qeth_arp_get_error_cause(&rc), tmp, tmp);
/*
 * qeth_arp_remove_entry - delete a static entry from the card's ARP
 * cache via IPA_CMD_ASS_ARP_REMOVE_ENTRY.
 * Only the first 12 bytes of the entry (protocol + IP address portion)
 * are sent to the card; buf is reused afterwards to format the address
 * for the failure message.
 */
4394 qeth_arp_remove_entry(struct qeth_card *card, struct qeth_arp_cache_entry *entry)
4396 	struct qeth_cmd_buffer *iob;
4397 	char buf[16] = {0, };
4401 	QETH_DBF_TEXT(trace,3,"arprment");
4404 	 * currently GuestLAN does only deliver all zeros on query arp,
4405 	 * even though arp processing is supported (according to IPA supp.
4406 	 * funcs flags); since all zeros is no valueable information,
4407 	 * we say EOPNOTSUPP for all ARP functions
4409 	if (card->info.guestlan)
4411 	if (!qeth_is_supported(card,IPA_ARP_PROCESSING)) {
4412 		PRINT_WARN("ARP processing not supported "
4413 			   "on %s!\n", card->info.if_name);
4416 	memcpy(buf, entry, 12);
4417 	iob = qeth_get_setassparms_cmd(card, IPA_ARP_PROCESSING,
4418 				       IPA_CMD_ASS_ARP_REMOVE_ENTRY,
4421 	rc = qeth_send_setassparms(card, iob,
4422 				   12, (unsigned long)buf,
4423 				   qeth_default_setassparms_cb, NULL);
4427 		qeth_ipaddr4_to_string((u8 *)entry->ipaddr, buf);
4428 		PRINT_WARN("Could not delete ARP entry for address %s on %s: "
4430 			   buf, card->info.if_name,
4431 			   qeth_arp_get_error_cause(&rc), tmp, tmp);
/*
 * qeth_arp_flush_cache - clear the card's ARP cache via the
 * IPA_CMD_ASS_ARP_FLUSH_CACHE assist command.
 * Not supported on GuestLAN or HiperSockets (IQD) devices, nor on cards
 * lacking IPA_ARP_PROCESSING.
 */
4437 qeth_arp_flush_cache(struct qeth_card *card)
4442 	QETH_DBF_TEXT(trace,3,"arpflush");
4445 	 * currently GuestLAN does only deliver all zeros on query arp,
4446 	 * even though arp processing is supported (according to IPA supp.
4447 	 * funcs flags); since all zeros is no valueable information,
4448 	 * we say EOPNOTSUPP for all ARP functions
4450 	if (card->info.guestlan || (card->info.type == QETH_CARD_TYPE_IQD))
4452 	if (!qeth_is_supported(card,IPA_ARP_PROCESSING)) {
4453 		PRINT_WARN("ARP processing not supported "
4454 			   "on %s!\n", card->info.if_name);
4457 	rc = qeth_send_simple_setassparms(card, IPA_ARP_PROCESSING,
4458 					  IPA_CMD_ASS_ARP_FLUSH_CACHE, 0);
4461 		PRINT_WARN("Could not flush ARP cache on %s: %s (0x%x/%d)\n",
4462 			   card->info.if_name, qeth_arp_get_error_cause(&rc),
/*
 * qeth_do_ioctl - net_device ioctl handler.
 * Dispatches the qeth private ioctls (ARP manipulation, SNMP control,
 * card-type query) and the standard MII ioctls.  Privileged operations
 * require CAP_NET_ADMIN; all operations require the card to be UP.
 * NOTE(review): case labels for the MII ioctls (SIOCGMIIPHY/REG,
 * SIOCSMIIREG) fall in lines missing from this excerpt.
 */
4469 qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
4471 	struct qeth_card *card = (struct qeth_card *)dev->priv;
4472 	struct qeth_arp_cache_entry arp_entry;
4473 	struct mii_ioctl_data *mii_data;
4479 	if (card->state != CARD_STATE_UP)
4483 	case SIOC_QETH_ARP_SET_NO_ENTRIES:
4484 		if (!capable(CAP_NET_ADMIN)){
4488 		rc = qeth_arp_set_no_entries(card, rq->ifr_ifru.ifru_ivalue);
4490 	case SIOC_QETH_ARP_QUERY_INFO:
4491 		if (!capable(CAP_NET_ADMIN)){
4495 		rc = qeth_arp_query(card, rq->ifr_ifru.ifru_data);
4497 	case SIOC_QETH_ARP_ADD_ENTRY:
4498 		if (!capable(CAP_NET_ADMIN)){
4502 		if (copy_from_user(&arp_entry, rq->ifr_ifru.ifru_data,
4503 				   sizeof(struct qeth_arp_cache_entry)))
4506 		rc = qeth_arp_add_entry(card, &arp_entry);
4508 	case SIOC_QETH_ARP_REMOVE_ENTRY:
4509 		if (!capable(CAP_NET_ADMIN)){
4513 		if (copy_from_user(&arp_entry, rq->ifr_ifru.ifru_data,
4514 				   sizeof(struct qeth_arp_cache_entry)))
4517 		rc = qeth_arp_remove_entry(card, &arp_entry);
4519 	case SIOC_QETH_ARP_FLUSH_CACHE:
4520 		if (!capable(CAP_NET_ADMIN)){
4524 		rc = qeth_arp_flush_cache(card);
4526 	case SIOC_QETH_ADP_SET_SNMP_CONTROL:
4527 		rc = qeth_snmp_command(card, rq->ifr_ifru.ifru_data);
4529 	case SIOC_QETH_GET_CARD_TYPE:
4530 		if ((card->info.type == QETH_CARD_TYPE_OSAE) &&
4531 		    !card->info.guestlan)
		/* emulated MII: phy id is always 0 */
4536 		mii_data = if_mii(rq);
4537 		mii_data->phy_id = 0;
4540 		mii_data = if_mii(rq);
4541 		if (mii_data->phy_id != 0)
4544 			mii_data->val_out = qeth_mdio_read(dev,mii_data->phy_id,
4550 		/* TODO: remove return if qeth_mdio_write does something */
4551 		if (!capable(CAP_NET_ADMIN)){
4555 		mii_data = if_mii(rq);
4556 		if (mii_data->phy_id != 0)
4559 		qeth_mdio_write(dev, mii_data->phy_id, mii_data->reg_num,
4566 		QETH_DBF_TEXT_(trace, 2, "ioce%d", rc);
/*
 * qeth_get_stats - net_device get_stats hook; returns the statistics
 * structure embedded in the card.
 */
4570 static struct net_device_stats *
4571 qeth_get_stats(struct net_device *dev)
4573 	struct qeth_card *card;
4575 	card = (struct qeth_card *) (dev->priv);
4577 	QETH_DBF_TEXT(trace,5,"getstat");
4579 	return &card->stats;
/*
 * qeth_change_mtu - net_device change_mtu hook.
 * Rejects MTUs above 65535 and, when the card does not support IP
 * fragmentation, MTUs that qeth_mtu_is_valid() refuses for this card.
 * NOTE(review): the assignment of dev->mtu and return statements fall in
 * lines missing from this excerpt.
 */
4583 qeth_change_mtu(struct net_device *dev, int new_mtu)
4585 	struct qeth_card *card;
4588 	card = (struct qeth_card *) (dev->priv);
4590 	QETH_DBF_TEXT(trace,4,"chgmtu");
4591 	sprintf(dbf_text, "%8x", new_mtu);
4592 	QETH_DBF_TEXT(trace,4,dbf_text);
4596 	if (new_mtu > 65535)
4598 	if ((!qeth_is_supported(card,IPA_IP_FRAGMENTATION)) &&
4599 	    (!qeth_mtu_is_valid(card, new_mtu)))
4605 #ifdef CONFIG_QETH_VLAN
/*
 * qeth_vlan_rx_register - net_device vlan_rx_register hook; records the
 * VLAN group pointer under the card's vlanlock.
 */
4607 qeth_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
4609 	struct qeth_card *card;
4610 	unsigned long flags;
4612 	QETH_DBF_TEXT(trace,4,"vlanreg");
4614 	card = (struct qeth_card *) dev->priv;
4615 	spin_lock_irqsave(&card->vlanlock, flags);
4616 	card->vlangrp = grp;
4617 	spin_unlock_irqrestore(&card->vlanlock, flags);
/*
 * qeth_free_vlan_buffer - drop all queued skbs of a given VLAN id from
 * one output buffer.
 * Matching skbs have their refcount dropped; non-matching skbs are
 * parked on a temporary list and re-queued in original order afterwards.
 */
4621 qeth_free_vlan_buffer(struct qeth_card *card, struct qeth_qdio_out_buffer *buf,
4625 	struct sk_buff *skb;
4626 	struct sk_buff_head tmp_list;
4628 	skb_queue_head_init(&tmp_list);
4629 	for(i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i){
4630 		while ((skb = skb_dequeue(&buf->skb_list))){
4631 			if (vlan_tx_tag_present(skb) &&
4632 			    (vlan_tx_tag_get(skb) == vid)) {
4633 				atomic_dec(&skb->users);
4636 				skb_queue_tail(&tmp_list, skb);
4639 	while ((skb = skb_dequeue(&tmp_list)))
4640 		skb_queue_tail(&buf->skb_list, skb);
/*
 * qeth_free_vlan_skbs - purge skbs of a VLAN id from every buffer of
 * every output queue (used when a VLAN device is removed).
 */
4644 qeth_free_vlan_skbs(struct qeth_card *card, unsigned short vid)
4648 	QETH_DBF_TEXT(trace, 4, "frvlskbs");
4649 	for (i = 0; i < card->qdio.no_out_queues; ++i){
4650 		for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j)
4651 			qeth_free_vlan_buffer(card, &card->qdio.
4652 					      out_qs[i]->bufs[j], vid);
/*
 * qeth_free_vlan_addresses4 - deregister all IPv4 addresses configured
 * on the VLAN device identified by vid.
 * Walks the device's in_device address list and queues a delete for
 * each address via qeth_delete_ip().
 * NOTE(review): the in_dev_put() matching in_dev_get() lies in lines
 * missing from this excerpt.
 */
4657 qeth_free_vlan_addresses4(struct qeth_card *card, unsigned short vid)
4659 	struct in_device *in_dev;
4660 	struct in_ifaddr *ifa;
4661 	struct qeth_ipaddr *addr;
4663 	QETH_DBF_TEXT(trace, 4, "frvaddr4");
4666 	in_dev = in_dev_get(card->vlangrp->vlan_devices[vid]);
4669 	for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next){
4670 		addr = qeth_get_addr_buffer(QETH_PROT_IPV4);
4672 			addr->u.a4.addr = ifa->ifa_address;
4673 			addr->u.a4.mask = ifa->ifa_mask;
4674 			addr->type = QETH_IP_TYPE_NORMAL;
4675 			if (!qeth_delete_ip(card, addr))
/*
 * qeth_free_vlan_addresses6 - IPv6 counterpart of
 * qeth_free_vlan_addresses4: deregisters every IPv6 address of the
 * VLAN device identified by vid.
 */
4683 qeth_free_vlan_addresses6(struct qeth_card *card, unsigned short vid)
4685 	struct inet6_dev *in6_dev;
4686 	struct inet6_ifaddr *ifa;
4687 	struct qeth_ipaddr *addr;
4689 	QETH_DBF_TEXT(trace, 4, "frvaddr6");
4692 	in6_dev = in6_dev_get(card->vlangrp->vlan_devices[vid]);
4695 	for (ifa = in6_dev->addr_list; ifa; ifa = ifa->lst_next){
4696 		addr = qeth_get_addr_buffer(QETH_PROT_IPV6);
4698 			memcpy(&addr->u.a6.addr, &ifa->addr,
4699 			       sizeof(struct in6_addr));
4700 			addr->u.a6.pfxlen = ifa->prefix_len;
4701 			addr->type = QETH_IP_TYPE_NORMAL;
4702 			if (!qeth_delete_ip(card, addr))
4706 	in6_dev_put(in6_dev);
/*
 * qeth_vlan_rx_kill_vid - net_device vlan_rx_kill_vid hook.
 * Frees queued skbs of the dying VLAN, deregisters its IPv4/IPv6
 * addresses, clears the vlan_devices slot under vlanlock, then kicks
 * the SET_IP / SET_MC worker threads to apply the changes.
 */
4710 qeth_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
4712 	struct qeth_card *card;
4713 	unsigned long flags;
4715 	QETH_DBF_TEXT(trace,4,"vlkilvid");
4717 	card = (struct qeth_card *) dev->priv;
4718 	/* free all skbs for the vlan device */
4719 	qeth_free_vlan_skbs(card, vid);
4720 	spin_lock_irqsave(&card->vlanlock, flags);
4721 	/* unregister IP addresses of vlan device */
4722 	qeth_free_vlan_addresses4(card, vid);
4723 	qeth_free_vlan_addresses6(card, vid);
4725 		card->vlangrp->vlan_devices[vid] = NULL;
4726 	spin_unlock_irqrestore(&card->vlanlock, flags);
4727 	if ( (qeth_set_thread_start_bit(card, QETH_SET_IP_THREAD) == 0) ||
4728 	     (qeth_set_thread_start_bit(card, QETH_SET_MC_THREAD) == 0) )
4729 		schedule_work(&card->kernel_thread_starter);
/*
 * qeth_neigh_setup - net_device neigh_setup hook.
 * NOTE(review): the body is entirely missing from this excerpt;
 * presumably a trivial stub — confirm against the full source.
 */
4734 qeth_neigh_setup(struct net_device *dev, struct neigh_parms *np)
4739 #ifdef CONFIG_QETH_IPV6
/*
 * qeth_ipv6_generate_eui64 - build an EUI-64 interface identifier for
 * IPv6 autoconfiguration from the 6-byte MAC plus the 16-bit dev_id
 * (which makes the identifier unique per shared OSA port).
 * NOTE(review): the ARPHRD_ETHER case label and the return statements
 * fall in lines missing from this excerpt.
 */
4741 qeth_ipv6_generate_eui64(u8 * eui, struct net_device *dev)
4743 	switch (dev->type) {
4746 	case ARPHRD_IEEE802_TR:
4747 		if (dev->addr_len != ETH_ALEN)
4749 		memcpy(eui, dev->dev_addr, 3);
4750 		memcpy(eui + 5, dev->dev_addr + 3, 3);
		/* middle two bytes carry the card-unique device id */
4751 		eui[3] = (dev->dev_id >> 8) & 0xff;
4752 		eui[4] = dev->dev_id & 0xff;
/*
 * qeth_get_mac_for_ipm - map an IPv4 multicast address to a link-layer
 * multicast MAC, using the token-ring or ethernet mapping depending on
 * the device type.
 */
4761 qeth_get_mac_for_ipm(__u32 ipm, char *mac, struct net_device *dev)
4763 	if (dev->type == ARPHRD_IEEE802_TR)
4764 		ip_tr_mc_map(ipm, mac);
4766 		ip_eth_mc_map(ipm, mac);
/*
 * qeth_get_addr_buffer - allocate and zero a qeth_ipaddr (GFP_ATOMIC,
 * callable from non-sleeping context) preset to QETH_IP_TYPE_NORMAL.
 * Returns NULL (in a line missing from this excerpt) after warning when
 * allocation fails; callers must check.
 */
4769 static struct qeth_ipaddr *
4770 qeth_get_addr_buffer(enum qeth_prot_versions prot)
4772 	struct qeth_ipaddr *addr;
4774 	addr = kmalloc(sizeof(struct qeth_ipaddr), GFP_ATOMIC);
4776 		PRINT_WARN("Not enough memory to add address\n");
4779 	memset(addr,0,sizeof(struct qeth_ipaddr));
4780 	addr->type = QETH_IP_TYPE_NORMAL;
/*
 * qeth_delete_mc_addresses - queue deletion of every multicast address
 * currently on the card's ip_list.
 * For each multicast entry a copy with negated user count is put on the
 * todo list (negative users == delete request), under ip_lock.
 */
4786 qeth_delete_mc_addresses(struct qeth_card *card)
4788 	struct qeth_ipaddr *ipm, *iptodo;
4789 	unsigned long flags;
4791 	QETH_DBF_TEXT(trace,4,"delmc");
4792 	spin_lock_irqsave(&card->ip_lock, flags);
4793 	list_for_each_entry(ipm, &card->ip_list, entry){
4794 		if (!ipm->is_multicast)
4796 		iptodo = qeth_get_addr_buffer(ipm->proto);
4798 			QETH_DBF_TEXT(trace, 2, "dmcnomem");
4801 		memcpy(iptodo, ipm, sizeof(struct qeth_ipaddr));
		/* negative user count marks this todo entry as a delete */
4802 		iptodo->users = iptodo->users * -1;
4803 		if (!__qeth_insert_ip_todo(card, iptodo, 0))
4806 	spin_unlock_irqrestore(&card->ip_lock, flags);
/*
 * qeth_add_mc - register every IPv4 multicast group of the given
 * in_device with the card: each group address is mapped to its
 * link-layer MAC and added via qeth_add_ip().
 */
4810 qeth_add_mc(struct qeth_card *card, struct in_device *in4_dev)
4812 	struct qeth_ipaddr *ipm;
4813 	struct ip_mc_list *im4;
4814 	char buf[MAX_ADDR_LEN];
4816 	QETH_DBF_TEXT(trace,4,"addmc");
4817 	for (im4 = in4_dev->mc_list; im4; im4 = im4->next) {
4818 		qeth_get_mac_for_ipm(im4->multiaddr, buf, in4_dev->dev);
4819 		ipm = qeth_get_addr_buffer(QETH_PROT_IPV4);
4822 		ipm->u.a4.addr = im4->multiaddr;
4823 		memcpy(ipm->mac,buf,OSA_ADDR_LEN);
4824 		ipm->is_multicast = 1;
4825 		if (!qeth_add_ip(card,ipm))
/*
 * qeth_add_vlan_mc - register IPv4 multicast groups of all active VLAN
 * devices on the card (no-op unless IPA_FULL_VLAN is supported and a
 * VLAN group is registered; whole body compiled only with
 * CONFIG_QETH_VLAN).
 */
4831 qeth_add_vlan_mc(struct qeth_card *card)
4833 #ifdef CONFIG_QETH_VLAN
4834 	struct in_device *in_dev;
4835 	struct vlan_group *vg;
4838 	QETH_DBF_TEXT(trace,4,"addmcvl");
4839 	if (!qeth_is_supported(card,IPA_FULL_VLAN) ||
4840 	    (card->vlangrp == NULL))
4844 	for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
4845 		if (vg->vlan_devices[i] == NULL ||
4846 		    !(vg->vlan_devices[i]->flags & IFF_UP))
4848 		in_dev = in_dev_get(vg->vlan_devices[i]);
4851 		read_lock(&in_dev->lock);
4852 		qeth_add_mc(card,in_dev);
4853 		read_unlock(&in_dev->lock);
/*
 * qeth_add_multicast_ipv4 - register the IPv4 multicast groups of the
 * card's own net device plus all its VLAN devices, holding the
 * in_device read lock around the list walks.
 */
4860 qeth_add_multicast_ipv4(struct qeth_card *card)
4862 	struct in_device *in4_dev;
4864 	QETH_DBF_TEXT(trace,4,"chkmcv4");
4865 	in4_dev = in_dev_get(card->dev);
4866 	if (in4_dev == NULL)
4868 	read_lock(&in4_dev->lock);
4869 	qeth_add_mc(card, in4_dev);
4870 	qeth_add_vlan_mc(card);
4871 	read_unlock(&in4_dev->lock);
4872 	in_dev_put(in4_dev);
4875 #ifdef CONFIG_QETH_IPV6
/*
 * qeth_add_mc6 - IPv6 counterpart of qeth_add_mc: register every IPv6
 * multicast group of the given inet6_dev with the card, mapping each
 * group address to a MAC via ndisc_mc_map().
 */
4877 qeth_add_mc6(struct qeth_card *card, struct inet6_dev *in6_dev)
4879 	struct qeth_ipaddr *ipm;
4880 	struct ifmcaddr6 *im6;
4881 	char buf[MAX_ADDR_LEN];
4883 	QETH_DBF_TEXT(trace,4,"addmc6");
4884 	for (im6 = in6_dev->mc_list; im6 != NULL; im6 = im6->next) {
4885 		ndisc_mc_map(&im6->mca_addr, buf, in6_dev->dev, 0);
4886 		ipm = qeth_get_addr_buffer(QETH_PROT_IPV6);
4889 		ipm->is_multicast = 1;
4890 		memcpy(ipm->mac,buf,OSA_ADDR_LEN);
4891 		memcpy(&ipm->u.a6.addr,&im6->mca_addr.s6_addr,
4892 		       sizeof(struct in6_addr));
4893 		if (!qeth_add_ip(card,ipm))
/*
 * qeth_add_vlan_mc6 - register IPv6 multicast groups of all active VLAN
 * devices on the card (IPv6 counterpart of qeth_add_vlan_mc; body
 * compiled only with CONFIG_QETH_VLAN).
 */
4899 qeth_add_vlan_mc6(struct qeth_card *card)
4901 #ifdef CONFIG_QETH_VLAN
4902 	struct inet6_dev *in_dev;
4903 	struct vlan_group *vg;
4906 	QETH_DBF_TEXT(trace,4,"admc6vl");
4907 	if (!qeth_is_supported(card,IPA_FULL_VLAN) ||
4908 	    (card->vlangrp == NULL))
4912 	for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
4913 		if (vg->vlan_devices[i] == NULL ||
4914 		    !(vg->vlan_devices[i]->flags & IFF_UP))
4916 		in_dev = in6_dev_get(vg->vlan_devices[i]);
4919 		read_lock(&in_dev->lock);
4920 		qeth_add_mc6(card,in_dev);
4921 		read_unlock(&in_dev->lock);
4922 		in6_dev_put(in_dev);
4924 #endif /* CONFIG_QETH_VLAN */
/*
 * qeth_add_multicast_ipv6 - register the IPv6 multicast groups of the
 * card's net device and its VLAN devices; no-op when the card does not
 * support IPA_IPV6.
 */
4928 qeth_add_multicast_ipv6(struct qeth_card *card)
4930 	struct inet6_dev *in6_dev;
4932 	QETH_DBF_TEXT(trace,4,"chkmcv6");
4933 	if (!qeth_is_supported(card, IPA_IPV6))
4936 	in6_dev = in6_dev_get(card->dev);
4937 	if (in6_dev == NULL)
4939 	read_lock(&in6_dev->lock);
4940 	qeth_add_mc6(card, in6_dev);
4941 	qeth_add_vlan_mc6(card);
4942 	read_unlock(&in6_dev->lock);
4943 	in6_dev_put(in6_dev);
4945 #endif /* CONFIG_QETH_IPV6 */
4948 * set multicast address on card
/*
 * qeth_set_multicast_list - net_device set_multicast_list hook.
 * Multicast reprogramming cannot run in this (atomic) context, so it
 * just schedules the SET_MC worker thread.
 */
4951 qeth_set_multicast_list(struct net_device *dev)
4953 	struct qeth_card *card;
4955 	QETH_DBF_TEXT(trace,3,"setmulti");
4956 	card = (struct qeth_card *) dev->priv;
4958 	if (qeth_set_thread_start_bit(card, QETH_SET_MC_THREAD) == 0)
4959 		schedule_work(&card->kernel_thread_starter);
/*
 * qeth_fill_ipacmd_header - zero an IPA command structure and populate
 * its common header fields (command code, host initiator, sequence
 * number, adapter type/port, protocol version).
 */
4963 qeth_fill_ipacmd_header(struct qeth_card *card, struct qeth_ipa_cmd *cmd,
4964 			__u8 command, enum qeth_prot_versions prot)
4966 	memset(cmd, 0, sizeof (struct qeth_ipa_cmd));
4967 	cmd->hdr.command = command;
4968 	cmd->hdr.initiator = IPA_CMD_INITIATOR_HOST;
4969 	cmd->hdr.seqno = card->seqno.ipa;
4970 	cmd->hdr.adapter_type = qeth_get_ipa_adp_type(card->info.link_type);
4971 	cmd->hdr.rel_adapter_no = (__u8) card->info.portno;
4972 	cmd->hdr.prim_version_no = 1;
4973 	cmd->hdr.param_count = 1;
4974 	cmd->hdr.prot_version = prot;
4975 	cmd->hdr.ipa_supported = 0;
4976 	cmd->hdr.ipa_enabled = 0;
/*
 * qeth_get_ipacmd_buffer - obtain a command buffer from the write
 * channel (may block until one is free) and initialize the IPA command
 * header within it.
 */
4979 static struct qeth_cmd_buffer *
4980 qeth_get_ipacmd_buffer(struct qeth_card *card, enum qeth_ipa_cmds ipacmd,
4981 			enum qeth_prot_versions prot)
4983 	struct qeth_cmd_buffer *iob;
4984 	struct qeth_ipa_cmd *cmd;
4986 	iob = qeth_wait_for_buffer(&card->write);
4987 	cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
4988 	qeth_fill_ipacmd_header(card, cmd, ipacmd, prot);
/*
 * qeth_send_setdelmc - issue an IPA SETIPM/DELIPM command to register or
 * deregister one multicast address (MAC plus IPv4 or IPv6 group address)
 * on the card.
 */
4994 qeth_send_setdelmc(struct qeth_card *card, struct qeth_ipaddr *addr, int ipacmd)
4997 	struct qeth_cmd_buffer *iob;
4998 	struct qeth_ipa_cmd *cmd;
5000 	QETH_DBF_TEXT(trace,4,"setdelmc");
5002 	iob = qeth_get_ipacmd_buffer(card, ipacmd, addr->proto);
5003 	cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
5004 	memcpy(&cmd->data.setdelipm.mac,addr->mac, OSA_ADDR_LEN);
5005 	if (addr->proto == QETH_PROT_IPV6)
5006 		memcpy(cmd->data.setdelipm.ip6, &addr->u.a6.addr,
5007 		       sizeof(struct in6_addr));
5009 		memcpy(&cmd->data.setdelipm.ip4, &addr->u.a4.addr,4);
5011 	rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
/*
 * qeth_fill_netmask - expand an IPv6 prefix length into a 16-byte
 * netmask (full 0xFF bytes followed by one partial byte).
 * NOTE(review): most of the loop body (full-byte and zero-byte cases) is
 * missing from this excerpt; code kept verbatim.
 */
5016 qeth_fill_netmask(u8 *netmask, unsigned int len)
5019 	for (i=0;i<16;i++) {
5024 			netmask[i] = (u8)(0xFF00>>j);
/*
 * qeth_send_setdelip - issue an IPA SETIP/DELIP command for a unicast
 * address: IPv6 variants send address + netmask derived from the prefix
 * length, IPv4 variants send address + stored mask; flags select e.g.
 * VIPA/takeover semantics.
 */
5031 qeth_send_setdelip(struct qeth_card *card, struct qeth_ipaddr *addr,
5032 		   int ipacmd, unsigned int flags)
5035 	struct qeth_cmd_buffer *iob;
5036 	struct qeth_ipa_cmd *cmd;
5039 	QETH_DBF_TEXT(trace,4,"setdelip");
5040 	QETH_DBF_TEXT_(trace,4,"flags%02X", flags);
5042 	iob = qeth_get_ipacmd_buffer(card, ipacmd, addr->proto);
5043 	cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
5044 	if (addr->proto == QETH_PROT_IPV6) {
5045 		memcpy(cmd->data.setdelip6.ip_addr, &addr->u.a6.addr,
5046 		       sizeof(struct in6_addr));
5047 		qeth_fill_netmask(netmask,addr->u.a6.pfxlen);
5048 		memcpy(cmd->data.setdelip6.mask, netmask,
5049 		       sizeof(struct in6_addr));
5050 		cmd->data.setdelip6.flags = flags;
5052 		memcpy(cmd->data.setdelip4.ip_addr, &addr->u.a4.addr, 4);
5053 		memcpy(cmd->data.setdelip4.mask, &addr->u.a4.mask, 4);
5054 		cmd->data.setdelip4.flags = flags;
5057 	rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
/*
 * qeth_register_addr_entry - register one IP address (multicast via
 * SETIPM, unicast via SETIP) with the card, retrying while cnt
 * (initialized in a missing line) remains and the command keeps failing;
 * logs a warning with the formatted address if all attempts fail.
 */
5063 qeth_register_addr_entry(struct qeth_card *card, struct qeth_ipaddr *addr)
5069 	if (addr->proto == QETH_PROT_IPV4) {
5070 		QETH_DBF_TEXT(trace, 2,"setaddr4");
5071 		QETH_DBF_HEX(trace, 3, &addr->u.a4.addr, sizeof(int));
5072 	} else if (addr->proto == QETH_PROT_IPV6) {
5073 		QETH_DBF_TEXT(trace, 2, "setaddr6");
5074 		QETH_DBF_HEX(trace,3,&addr->u.a6.addr,8);
5075 		QETH_DBF_HEX(trace,3,((char *)&addr->u.a6.addr)+8,8);
5077 		QETH_DBF_TEXT(trace, 2, "setaddr?");
5078 		QETH_DBF_HEX(trace, 3, addr, sizeof(struct qeth_ipaddr));
5081 		if (addr->is_multicast)
5082 			rc =  qeth_send_setdelmc(card, addr, IPA_CMD_SETIPM);
5084 			rc = qeth_send_setdelip(card, addr, IPA_CMD_SETIP,
5087 			QETH_DBF_TEXT(trace, 2, "failed");
5088 	} while ((--cnt > 0) && rc);
5090 		QETH_DBF_TEXT(trace, 2, "FAILED");
5091 		/* TODO: re-activate this warning as soon as we have a
5093 		qeth_ipaddr_to_string(addr->proto, (u8 *)&addr->u, buf);
5094 		PRINT_WARN("Could not register IP address %s (rc=%x)\n",
/*
 * qeth_deregister_addr_entry - mirror of qeth_register_addr_entry:
 * remove one IP address from the card (DELIPM for multicast, DELIP for
 * unicast) and warn on failure.
 */
5102 qeth_deregister_addr_entry(struct qeth_card *card, struct qeth_ipaddr *addr)
5107 	if (addr->proto == QETH_PROT_IPV4) {
5108 		QETH_DBF_TEXT(trace, 2,"deladdr4");
5109 		QETH_DBF_HEX(trace, 3, &addr->u.a4.addr, sizeof(int));
5110 	} else if (addr->proto == QETH_PROT_IPV6) {
5111 		QETH_DBF_TEXT(trace, 2, "deladdr6");
5112 		QETH_DBF_HEX(trace,3,&addr->u.a6.addr,8);
5113 		QETH_DBF_HEX(trace,3,((char *)&addr->u.a6.addr)+8,8);
5115 		QETH_DBF_TEXT(trace, 2, "deladdr?");
5116 		QETH_DBF_HEX(trace, 3, addr, sizeof(struct qeth_ipaddr));
5118 	if (addr->is_multicast)
5119 		rc = qeth_send_setdelmc(card, addr, IPA_CMD_DELIPM);
5121 		rc = qeth_send_setdelip(card, addr, IPA_CMD_DELIP,
5124 		QETH_DBF_TEXT(trace, 2, "failed");
5125 		/* TODO: re-activate this warning as soon as we have a
5127 		qeth_ipaddr_to_string(addr->proto, (u8 *)&addr->u, buf);
5128 		PRINT_WARN("Could not deregister IP address %s (rc=%x)\n",
/*
 * qeth_netdev_init - net_device init hook: wire up all device
 * operations (xmit, ioctl, stats, MTU, multicast, VLAN hooks), clear
 * the header ops for NOARP card types, set IPv6 autoconfig parameters,
 * flags, header length, address length and initial MTU.
 */
5136 qeth_netdev_init(struct net_device *dev)
5138 	struct qeth_card *card;
5140 	card = (struct qeth_card *) dev->priv;
5142 	QETH_DBF_TEXT(trace,3,"initdev");
5144 	dev->tx_timeout = &qeth_tx_timeout;
5145 	dev->watchdog_timeo = QETH_TX_TIMEOUT;
5146 	dev->open = qeth_open;
5147 	dev->stop = qeth_stop;
5148 	dev->hard_start_xmit = qeth_hard_start_xmit;
5149 	dev->do_ioctl = qeth_do_ioctl;
5150 	dev->get_stats = qeth_get_stats;
5151 	dev->change_mtu = qeth_change_mtu;
5152 	dev->neigh_setup = qeth_neigh_setup;
5153 	dev->set_multicast_list = qeth_set_multicast_list;
5154 #ifdef CONFIG_QETH_VLAN
5155 	dev->vlan_rx_register = qeth_vlan_rx_register;
5156 	dev->vlan_rx_kill_vid = qeth_vlan_rx_kill_vid;
	/* NOARP link types (e.g. HiperSockets) have no L2 header handling */
5158 	if (qeth_get_netdev_flags(card->info.type) & IFF_NOARP) {
5159 		dev->rebuild_header = NULL;
5160 		dev->hard_header = NULL;
5161 		dev->header_cache_update = NULL;
5162 		dev->hard_header_cache = NULL;
5164 #ifdef CONFIG_QETH_IPV6
5165 	/*IPv6 address autoconfiguration stuff*/
5166 	card->dev->dev_id = card->info.unique_id & 0xffff;
5167 	if (!(card->info.unique_id & UNIQUE_ID_NOT_BY_CARD))
5168 		card->dev->generate_eui64 = qeth_ipv6_generate_eui64;
5172 	dev->hard_header_parse = NULL;
5173 	dev->set_mac_address = NULL;
5174 	dev->flags |= qeth_get_netdev_flags(card->info.type);
5175 	if ((card->options.fake_broadcast) ||
5176 	    (card->info.broadcast_capable))
5177 		dev->flags |= IFF_BROADCAST;
5179 	dev->hard_header_len =
5180 			qeth_get_hlen(card->info.link_type) + card->options.add_hhlen;
5181 	dev->addr_len = OSA_ADDR_LEN;
5182 	dev->mtu = card->info.initial_mtu;
5184 	SET_MODULE_OWNER(dev);
/*
 * Select the IDX function level from card type (IQD vs. OSAE) and
 * whether IP address takeover (IPATO) is enabled — four combinations.
 */
5189 qeth_init_func_level(struct qeth_card *card)
5191 if (card->ipato.enabled) {
5192 if (card->info.type == QETH_CARD_TYPE_IQD)
5193 card->info.func_level =
5194 QETH_IDX_FUNC_LEVEL_IQD_ENA_IPAT;
5196 card->info.func_level =
5197 QETH_IDX_FUNC_LEVEL_OSAE_ENA_IPAT;
5199 if (card->info.type == QETH_CARD_TYPE_IQD)
5200 card->info.func_level =
5201 QETH_IDX_FUNC_LEVEL_IQD_DIS_IPAT;
5203 card->info.func_level =
5204 QETH_IDX_FUNC_LEVEL_OSAE_DIS_IPAT;
5209 * hardsetup card, initialize MPC and QDIO stuff
/*
 * Hard setup: bounce the three CCW subchannels if retrying, clear the
 * QDIO queues, read the unit address, activate the read/write IDX
 * channels, initialize MPC, and on first set_online allocate/prepare
 * the net_device.  -ERESTARTSYS from any step aborts (user interrupt).
 * NOTE(review): retry-loop and error-label lines are elided here.
 */
5212 qeth_hardsetup_card(struct qeth_card *card)
5217 QETH_DBF_TEXT(setup, 2, "hrdsetup");
5221 PRINT_WARN("Retrying to do IDX activates.\n");
/* take all three subchannels offline, then online again, to reset */
5222 ccw_device_set_offline(CARD_DDEV(card));
5223 ccw_device_set_offline(CARD_WDEV(card));
5224 ccw_device_set_offline(CARD_RDEV(card));
5225 ccw_device_set_online(CARD_RDEV(card));
5226 ccw_device_set_online(CARD_WDEV(card));
5227 ccw_device_set_online(CARD_DDEV(card));
5229 rc = qeth_qdio_clear_card(card,card->info.type==QETH_CARD_TYPE_OSAE);
5230 if (rc == -ERESTARTSYS) {
5231 QETH_DBF_TEXT(setup, 2, "break1");
5234 QETH_DBF_TEXT_(setup, 2, "1err%d", rc);
5240 if ((rc = qeth_get_unitaddr(card))){
5241 QETH_DBF_TEXT_(setup, 2, "2err%d", rc);
5244 qeth_init_tokens(card);
5245 qeth_init_func_level(card);
5246 rc = qeth_idx_activate_channel(&card->read, qeth_idx_read_cb);
5247 if (rc == -ERESTARTSYS) {
5248 QETH_DBF_TEXT(setup, 2, "break2");
5251 QETH_DBF_TEXT_(setup, 2, "3err%d", rc);
5257 rc = qeth_idx_activate_channel(&card->write, qeth_idx_write_cb);
5258 if (rc == -ERESTARTSYS) {
5259 QETH_DBF_TEXT(setup, 2, "break3");
5262 QETH_DBF_TEXT_(setup, 2, "4err%d", rc);
5268 if ((rc = qeth_mpc_initialize(card))){
5269 QETH_DBF_TEXT_(setup, 2, "5err%d", rc);
5272 /* at first set_online allocate netdev */
5274 card->dev = qeth_get_netdevice(card->info.type,
5275 card->info.link_type);
/* netdev allocation failed: clear QDIO state again before bailing */
5277 qeth_qdio_clear_card(card, card->info.type ==
5278 QETH_CARD_TYPE_OSAE);
5280 QETH_DBF_TEXT_(setup, 2, "6err%d", rc);
5283 card->dev->priv = card;
5284 card->dev->type = qeth_get_arphdr_type(card->info.type,
5285 card->info.link_type);
5286 card->dev->init = qeth_netdev_init;
5290 PRINT_ERR("Initialization in hardsetup failed! rc=%d\n", rc);
/*
 * Default IPA SETASSPARMS reply callback: propagate the assist-level
 * return code into the IPA header, record the per-protocol enabled
 * function mask, and capture the checksum mask when an inbound
 * checksum START reply arrives.
 */
5295 qeth_default_setassparms_cb(struct qeth_card *card, struct qeth_reply *reply,
5298 struct qeth_ipa_cmd *cmd;
5300 QETH_DBF_TEXT(trace,4,"defadpcb");
5302 cmd = (struct qeth_ipa_cmd *) data;
5303 if (cmd->hdr.return_code == 0){
/* surface the nested setassparms return code to the caller */
5304 cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
5305 if (cmd->hdr.prot_version == QETH_PROT_IPV4)
5306 card->options.ipa4.enabled_funcs = cmd->hdr.ipa_enabled;
5307 #ifdef CONFIG_QETH_IPV6
5308 if (cmd->hdr.prot_version == QETH_PROT_IPV6)
5309 card->options.ipa6.enabled_funcs = cmd->hdr.ipa_enabled;
5312 if (cmd->data.setassparms.hdr.assist_no == IPA_INBOUND_CHECKSUM &&
5313 cmd->data.setassparms.hdr.command_code == IPA_CMD_ASS_START) {
5314 card->info.csum_mask = cmd->data.setassparms.data.flags_32bit;
5315 QETH_DBF_TEXT_(trace, 3, "csum:%d", card->info.csum_mask);
/*
 * Default SETADAPTERPARMS reply callback: propagate the nested
 * setadapterparms return code into the IPA header.
 * NOTE(review): the DBF tag "defadpcb" duplicates the one used by
 * qeth_default_setassparms_cb — possibly intentional, worth confirming.
 */
5321 qeth_default_setadapterparms_cb(struct qeth_card *card,
5322 struct qeth_reply *reply,
5325 struct qeth_ipa_cmd *cmd;
5327 QETH_DBF_TEXT(trace,4,"defadpcb");
5329 cmd = (struct qeth_ipa_cmd *) data;
5330 if (cmd->hdr.return_code == 0)
5331 cmd->hdr.return_code = cmd->data.setadapterparms.hdr.return_code;
/*
 * Reply callback for QUERY_COMMANDS_SUPPORTED: record the reported
 * link type (low 7 bits, if set) and the supported-command mask, then
 * delegate to the default setadapterparms handler.
 */
5336 qeth_query_setadapterparms_cb(struct qeth_card *card, struct qeth_reply *reply,
5339 struct qeth_ipa_cmd *cmd;
5341 QETH_DBF_TEXT(trace,3,"quyadpcb");
5343 cmd = (struct qeth_ipa_cmd *) data;
5344 if (cmd->data.setadapterparms.data.query_cmds_supp.lan_type & 0x7f)
5345 card->info.link_type =
5346 cmd->data.setadapterparms.data.query_cmds_supp.lan_type;
5347 card->options.adp.supported_funcs =
5348 cmd->data.setadapterparms.data.query_cmds_supp.supported_cmds;
5349 return qeth_default_setadapterparms_cb(card, reply, (unsigned long)cmd);
/*
 * Issue IPA_SETADP_QUERY_COMMANDS_SUPPORTED to learn which adapter
 * parameter commands the card supports (handled by the cb above).
 */
5353 qeth_query_setadapterparms(struct qeth_card *card)
5356 struct qeth_cmd_buffer *iob;
5358 QETH_DBF_TEXT(trace,3,"queryadp");
5359 iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_COMMANDS_SUPPORTED,
5360 sizeof(struct qeth_ipacmd_setadpparms));
5361 rc = qeth_send_ipa_cmd(card, iob, qeth_query_setadapterparms_cb, NULL);
/*
 * Reply callback for ALTER_MAC_ADDRESS (READ_MAC): copy the MAC
 * address returned by the card into the net_device, then run the
 * default setadapterparms handling.
 */
5366 qeth_setadpparms_change_macaddr_cb(struct qeth_card *card,
5367 struct qeth_reply *reply,
5370 struct qeth_ipa_cmd *cmd;
5372 QETH_DBF_TEXT(trace,4,"chgmaccb");
5374 cmd = (struct qeth_ipa_cmd *) data;
5375 memcpy(card->dev->dev_addr,
5376 &cmd->data.setadapterparms.data.change_addr.addr,OSA_ADDR_LEN);
5377 qeth_default_setadapterparms_cb(card, reply, (unsigned long) cmd);
/*
 * Read the card's MAC address via IPA_SETADP_ALTER_MAC_ADDRESS with
 * sub-command CHANGE_ADDR_READ_MAC; the reply cb stores it in dev_addr.
 */
5382 qeth_setadpparms_change_macaddr(struct qeth_card *card)
5385 struct qeth_cmd_buffer *iob;
5386 struct qeth_ipa_cmd *cmd;
5388 QETH_DBF_TEXT(trace,4,"chgmac");
5390 iob = qeth_get_adapter_cmd(card,IPA_SETADP_ALTER_MAC_ADDRESS,
5391 sizeof(struct qeth_ipacmd_setadpparms));
5392 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
5393 cmd->data.setadapterparms.data.change_addr.cmd = CHANGE_ADDR_READ_MAC;
5394 cmd->data.setadapterparms.data.change_addr.addr_size = OSA_ADDR_LEN;
5395 memcpy(&cmd->data.setadapterparms.data.change_addr.addr,
5396 card->dev->dev_addr, OSA_ADDR_LEN);
5397 rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_change_macaddr_cb,
/*
 * Generic helper: send a SETADAPTERPARMS command that takes a single
 * 32-bit mode value (e.g. broadcast or macaddr mode).
 */
5403 qeth_send_setadp_mode(struct qeth_card *card, __u32 command, __u32 mode)
5406 struct qeth_cmd_buffer *iob;
5407 struct qeth_ipa_cmd *cmd;
5409 QETH_DBF_TEXT(trace,4,"adpmode");
5411 iob = qeth_get_adapter_cmd(card, command,
5412 sizeof(struct qeth_ipacmd_setadpparms));
5413 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
5414 cmd->data.setadapterparms.data.mode = mode;
5415 rc = qeth_send_ipa_cmd(card, iob, qeth_default_setadapterparms_cb,
/*
 * Token-ring (HSTR) specific adapter setup: program broadcast and
 * macaddr modes if SET_BROADCAST_MODE is supported, otherwise warn
 * that the defaults (ALLRINGS / NONCANONICAL) remain in effect.
 */
5421 qeth_setadapter_hstr(struct qeth_card *card)
5425 QETH_DBF_TEXT(trace,4,"adphstr");
5427 if (qeth_adp_supported(card,IPA_SETADP_SET_BROADCAST_MODE)) {
5428 rc = qeth_send_setadp_mode(card, IPA_SETADP_SET_BROADCAST_MODE,
5429 card->options.broadcast_mode);
5431 PRINT_WARN("couldn't set broadcast mode on "
5433 CARD_BUS_ID(card), rc);
5434 rc = qeth_send_setadp_mode(card, IPA_SETADP_ALTER_MAC_ADDRESS,
5435 card->options.macaddr_mode);
5437 PRINT_WARN("couldn't set macaddr mode on "
5438 "device %s: x%x\n", CARD_BUS_ID(card), rc);
/* SET_BROADCAST_MODE not supported: warn about non-default requests */
5441 if (card->options.broadcast_mode == QETH_TR_BROADCAST_LOCAL)
5442 PRINT_WARN("set adapter parameters not available "
5443 "to set broadcast mode, using ALLRINGS "
5444 "on device %s:\n", CARD_BUS_ID(card));
5445 if (card->options.macaddr_mode == QETH_TR_MACADDR_CANONICAL)
5446 PRINT_WARN("set adapter parameters not available "
5447 "to set macaddr mode, using NONCANONICAL "
5448 "on device %s:\n", CARD_BUS_ID(card));
/*
 * Adapter-parameter setup during softsetup: query the supported
 * commands, read the MAC address if possible, and do token-ring
 * specific setup for HSTR / LANE-TR link types.
 */
5453 qeth_setadapter_parms(struct qeth_card *card)
5457 QETH_DBF_TEXT(setup, 2, "setadprm");
5459 if (!qeth_is_supported(card, IPA_SETADAPTERPARMS)){
5460 PRINT_WARN("set adapter parameters not supported "
5463 QETH_DBF_TEXT(setup, 2, " notsupp");
5466 rc = qeth_query_setadapterparms(card);
5468 PRINT_WARN("couldn't set adapter parameters on device %s: "
5469 "x%x\n", CARD_BUS_ID(card), rc);
5472 if (qeth_adp_supported(card,IPA_SETADP_ALTER_MAC_ADDRESS)) {
5473 rc = qeth_setadpparms_change_macaddr(card);
5475 PRINT_WARN("couldn't get MAC address on "
5477 CARD_BUS_ID(card), rc);
5480 if ((card->info.link_type == QETH_LINK_TYPE_HSTR) ||
5481 (card->info.link_type == QETH_LINK_TYPE_LANE_TR))
5482 rc = qeth_setadapter_hstr(card);
/*
 * Build and send a STARTLAN or STOPLAN IPA command for the given
 * protocol version; no reply callback is needed.
 */
5489 qeth_send_startstoplan(struct qeth_card *card, enum qeth_ipa_cmds ipacmd,
5490 enum qeth_prot_versions prot)
5493 struct qeth_cmd_buffer *iob;
5495 iob = qeth_get_ipacmd_buffer(card,ipacmd,prot);
5496 rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
/* Thin wrapper: issue STARTLAN for the given protocol version. */
5502 qeth_send_startlan(struct qeth_card *card, enum qeth_prot_versions prot)
5506 QETH_DBF_TEXT_(setup, 2, "strtlan%i", prot);
5508 rc = qeth_send_startstoplan(card, IPA_CMD_STARTLAN, prot);
/* Issue STOPLAN (IPv4) — see TODO below questioning whether TCP/IP
 * should ever send it per the IPA format document. */
5513 qeth_send_stoplan(struct qeth_card *card)
5518 * TODO: according to the IPA format document page 14,
5519 * TCP/IP (we!) never issue a STOPLAN
5522 QETH_DBF_TEXT(trace, 2, "stoplan");
5524 rc = qeth_send_startstoplan(card, IPA_CMD_STOPLAN, QETH_PROT_IPV4);
/*
 * Reply callback for QIPASSIST: store the supported/enabled IPA
 * function masks per protocol version (IPv6 branch compiled in only
 * with CONFIG_QETH_IPV6).
 */
5529 qeth_query_ipassists_cb(struct qeth_card *card, struct qeth_reply *reply,
5532 struct qeth_ipa_cmd *cmd;
5534 QETH_DBF_TEXT(setup, 2, "qipasscb");
5536 cmd = (struct qeth_ipa_cmd *) data;
5537 if (cmd->hdr.prot_version == QETH_PROT_IPV4) {
5538 card->options.ipa4.supported_funcs = cmd->hdr.ipa_supported;
5539 card->options.ipa4.enabled_funcs = cmd->hdr.ipa_enabled;
5541 #ifdef CONFIG_QETH_IPV6
5542 card->options.ipa6.supported_funcs = cmd->hdr.ipa_supported;
5543 card->options.ipa6.enabled_funcs = cmd->hdr.ipa_enabled;
/* Query the card's IP assist capabilities for one protocol version. */
5550 qeth_query_ipassists(struct qeth_card *card, enum qeth_prot_versions prot)
5553 struct qeth_cmd_buffer *iob;
5555 QETH_DBF_TEXT_(setup, 2, "qipassi%i", prot);
5557 iob = qeth_get_ipacmd_buffer(card,IPA_CMD_QIPASSIST,prot);
5558 rc = qeth_send_ipa_cmd(card, iob, qeth_query_ipassists_cb, NULL);
/*
 * Allocate and pre-fill a SETASSPARMS command buffer for the given
 * assist function / sub-command.  Header length is 8 (setassparms
 * header) plus the caller's payload length.
 */
5562 static struct qeth_cmd_buffer *
5563 qeth_get_setassparms_cmd(struct qeth_card *card, enum qeth_ipa_funcs ipa_func,
5564 __u16 cmd_code, __u16 len,
5565 enum qeth_prot_versions prot)
5567 struct qeth_cmd_buffer *iob;
5568 struct qeth_ipa_cmd *cmd;
5570 QETH_DBF_TEXT(trace,4,"getasscm");
5571 iob = qeth_get_ipacmd_buffer(card,IPA_CMD_SETASSPARMS,prot);
5573 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
5574 cmd->data.setassparms.hdr.assist_no = ipa_func;
5575 cmd->data.setassparms.hdr.length = 8 + len;
5576 cmd->data.setassparms.hdr.command_code = cmd_code;
5577 cmd->data.setassparms.hdr.return_code = 0;
5578 cmd->data.setassparms.hdr.seq_no = 0;
/*
 * Fill the payload of a prepared SETASSPARMS buffer and send it.
 * For len <= 4 the 'data' argument is the 32-bit value itself;
 * for larger payloads it is interpreted as a pointer to copy from.
 */
5584 qeth_send_setassparms(struct qeth_card *card, struct qeth_cmd_buffer *iob,
5585 __u16 len, long data,
5587 (struct qeth_card *,struct qeth_reply *,unsigned long),
5591 struct qeth_ipa_cmd *cmd;
5593 QETH_DBF_TEXT(trace,4,"sendassp");
5595 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
5596 if (len <= sizeof(__u32))
5597 cmd->data.setassparms.data.flags_32bit = (__u32) data;
5598 else if (len > sizeof(__u32))
/* 'data' carries a pointer in this case — dual-use long parameter */
5599 memcpy(&cmd->data.setassparms.data, (void *) data, len);
5601 rc = qeth_send_ipa_cmd(card, iob, reply_cb, reply_param);
5605 #ifdef CONFIG_QETH_IPV6
/* Convenience wrapper: SETASSPARMS with no payload, IPv6 variant. */
5607 qeth_send_simple_setassparms_ipv6(struct qeth_card *card,
5608 enum qeth_ipa_funcs ipa_func, __u16 cmd_code)
5612 struct qeth_cmd_buffer *iob;
5614 QETH_DBF_TEXT(trace,4,"simassp6");
5615 iob = qeth_get_setassparms_cmd(card, ipa_func, cmd_code,
5617 rc = qeth_send_setassparms(card, iob, 0, 0,
5618 qeth_default_setassparms_cb, NULL);
/* Convenience wrapper: SETASSPARMS with one 32-bit payload, IPv4. */
5624 qeth_send_simple_setassparms(struct qeth_card *card,
5625 enum qeth_ipa_funcs ipa_func,
5626 __u16 cmd_code, long data)
5630 struct qeth_cmd_buffer *iob;
5632 QETH_DBF_TEXT(trace,4,"simassp4");
5634 length = sizeof(__u32);
5635 iob = qeth_get_setassparms_cmd(card, ipa_func, cmd_code,
5636 length, QETH_PROT_IPV4);
5637 rc = qeth_send_setassparms(card, iob, length, data,
5638 qeth_default_setassparms_cb, NULL);
/* Start the ARP-processing IP assist, warning (not failing hard)
 * when the card does not support it or the start command fails. */
5643 qeth_start_ipa_arp_processing(struct qeth_card *card)
5647 QETH_DBF_TEXT(trace,3,"ipaarp");
5649 if (!qeth_is_supported(card,IPA_ARP_PROCESSING)) {
5650 PRINT_WARN("ARP processing not supported "
5651 "on %s!\n", card->info.if_name);
5654 rc = qeth_send_simple_setassparms(card,IPA_ARP_PROCESSING,
5655 IPA_CMD_ASS_START, 0);
5657 PRINT_WARN("Could not start ARP processing "
5658 "assist on %s: 0x%x\n",
5659 card->info.if_name, rc);
/* Start the IP-fragmentation assist if the card supports it. */
5665 qeth_start_ipa_ip_fragmentation(struct qeth_card *card)
5669 QETH_DBF_TEXT(trace,3,"ipaipfrg");
5671 if (!qeth_is_supported(card, IPA_IP_FRAGMENTATION)) {
5672 PRINT_INFO("IP fragmentation not supported on %s\n",
5673 card->info.if_name);
5677 rc = qeth_send_simple_setassparms(card, IPA_IP_FRAGMENTATION,
5678 IPA_CMD_ASS_START, 0);
5680 PRINT_WARN("Could not start IP fragmentation "
5681 "assist on %s: 0x%x\n",
5682 card->info.if_name, rc);
5684 PRINT_INFO("IP fragmentation enabled \n");
/* Start the inbound source-MAC assist; only needed when the driver
 * fakes a link-layer header (options.fake_ll). */
5689 qeth_start_ipa_source_mac(struct qeth_card *card)
5693 QETH_DBF_TEXT(trace,3,"stsrcmac");
5695 if (!card->options.fake_ll)
5698 if (!qeth_is_supported(card, IPA_SOURCE_MAC)) {
5699 PRINT_INFO("Inbound source address not "
5700 "supported on %s\n", card->info.if_name);
5704 rc = qeth_send_simple_setassparms(card, IPA_SOURCE_MAC,
5705 IPA_CMD_ASS_START, 0);
5707 PRINT_WARN("Could not start inbound source "
5708 "assist on %s: 0x%x\n",
5709 card->info.if_name, rc);
/*
 * Start VLAN support (compiled in only with CONFIG_QETH_VLAN):
 * requires the full-VLAN assist, starts VLAN priority handling and
 * advertises HW VLAN tag insertion/extraction on the net_device.
 */
5714 qeth_start_ipa_vlan(struct qeth_card *card)
5718 QETH_DBF_TEXT(trace,3,"strtvlan");
5720 #ifdef CONFIG_QETH_VLAN
5721 if (!qeth_is_supported(card, IPA_FULL_VLAN)) {
5722 PRINT_WARN("VLAN not supported on %s\n", card->info.if_name);
5726 rc = qeth_send_simple_setassparms(card, IPA_VLAN_PRIO,
5727 IPA_CMD_ASS_START,0);
5729 PRINT_WARN("Could not start vlan "
5730 "assist on %s: 0x%x\n",
5731 card->info.if_name, rc);
5733 PRINT_INFO("VLAN enabled \n");
5734 card->dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
5736 #endif /* QETH_VLAN */
/* Start the multicast assist and set IFF_MULTICAST on success. */
5741 qeth_start_ipa_multicast(struct qeth_card *card)
5745 QETH_DBF_TEXT(trace,3,"stmcast");
5747 if (!qeth_is_supported(card, IPA_MULTICASTING)) {
5748 PRINT_WARN("Multicast not supported on %s\n",
5749 card->info.if_name);
5753 rc = qeth_send_simple_setassparms(card, IPA_MULTICASTING,
5754 IPA_CMD_ASS_START,0);
5756 PRINT_WARN("Could not start multicast "
5757 "assist on %s: rc=%i\n",
5758 card->info.if_name, rc);
5760 PRINT_INFO("Multicast enabled\n");
5761 card->dev->flags |= IFF_MULTICAST;
5766 #ifdef CONFIG_QETH_IPV6
/*
 * IPv6 soft setup: STARTLAN for IPv6, query IPv6 assists, then start
 * the IPv6 assist for both protocol versions and enable passthrough.
 * The queue is stopped while the IPv6 startlan runs.
 * NOTE(review): magic value 3 to IPA_CMD_ASS_START presumably selects
 * IP versions 4+6 as a bitmask — confirm against the IPA spec.
 */
5768 qeth_softsetup_ipv6(struct qeth_card *card)
5772 QETH_DBF_TEXT(trace,3,"softipv6");
5774 netif_stop_queue(card->dev);
5775 rc = qeth_send_startlan(card, QETH_PROT_IPV6);
5777 PRINT_ERR("IPv6 startlan failed on %s\n",
5778 card->info.if_name);
5781 netif_wake_queue(card->dev);
5782 rc = qeth_query_ipassists(card,QETH_PROT_IPV6);
5784 PRINT_ERR("IPv6 query ipassist failed on %s\n",
5785 card->info.if_name);
5788 rc = qeth_send_simple_setassparms(card, IPA_IPV6,
5789 IPA_CMD_ASS_START, 3);
5791 PRINT_WARN("IPv6 start assist (version 4) failed "
5793 card->info.if_name, rc);
5796 rc = qeth_send_simple_setassparms_ipv6(card, IPA_IPV6,
5799 PRINT_WARN("IPV6 start assist (version 6) failed "
5801 card->info.if_name, rc);
5804 rc = qeth_send_simple_setassparms_ipv6(card, IPA_PASSTHRU,
5807 PRINT_WARN("Could not enable passthrough "
5809 card->info.if_name, rc);
5812 PRINT_INFO("IPV6 enabled \n");
/* Start IPv6 support if compiled in and supported by the card. */
5819 qeth_start_ipa_ipv6(struct qeth_card *card)
5822 #ifdef CONFIG_QETH_IPV6
5823 QETH_DBF_TEXT(trace,3,"strtipv6");
5825 if (!qeth_is_supported(card, IPA_IPV6)) {
5826 PRINT_WARN("IPv6 not supported on %s\n",
5827 card->info.if_name);
5830 rc = qeth_softsetup_ipv6(card);
/*
 * Start broadcast filtering: START, then CONFIGURE (mode 1) and
 * ENABLE (echo filtering).  broadcast_capable is downgraded from
 * WITH_ECHO to WITHOUT_ECHO if the echo-filter step fails, and the
 * net_device IFF_BROADCAST flag is set accordingly.
 */
5836 qeth_start_ipa_broadcast(struct qeth_card *card)
5840 QETH_DBF_TEXT(trace,3,"stbrdcst");
5841 card->info.broadcast_capable = 0;
5842 if (!qeth_is_supported(card, IPA_FILTERING)) {
5843 PRINT_WARN("Broadcast not supported on %s\n",
5844 card->info.if_name);
5848 rc = qeth_send_simple_setassparms(card, IPA_FILTERING,
5849 IPA_CMD_ASS_START, 0);
5851 PRINT_WARN("Could not enable broadcasting filtering "
5853 card->info.if_name, rc);
5857 rc = qeth_send_simple_setassparms(card, IPA_FILTERING,
5858 IPA_CMD_ASS_CONFIGURE, 1);
5860 PRINT_WARN("Could not set up broadcast filtering on %s: 0x%x\n",
5861 card->info.if_name, rc);
5864 card->info.broadcast_capable = QETH_BROADCAST_WITH_ECHO;
5865 PRINT_INFO("Broadcast enabled \n");
5866 rc = qeth_send_simple_setassparms(card, IPA_FILTERING,
5867 IPA_CMD_ASS_ENABLE, 1);
5869 PRINT_WARN("Could not set up broadcast echo filtering on "
5870 "%s: 0x%x\n", card->info.if_name, rc);
/* echo filtering failed: broadcasts work, but our own echo returns */
5873 card->info.broadcast_capable = QETH_BROADCAST_WITHOUT_ECHO;
5875 if (card->info.broadcast_capable)
5876 card->dev->flags |= IFF_BROADCAST;
5878 card->dev->flags &= ~IFF_BROADCAST;
/*
 * Start and enable inbound HW checksumming, passing the csum_mask
 * learned from the START reply to the ENABLE step; warns and falls
 * back to SW checksumming on failure.
 */
5883 qeth_send_checksum_command(struct qeth_card *card)
5887 rc = qeth_send_simple_setassparms(card, IPA_INBOUND_CHECKSUM,
5888 IPA_CMD_ASS_START, 0);
5890 PRINT_WARN("Starting Inbound HW Checksumming failed on %s: "
5891 "0x%x,\ncontinuing using Inbound SW Checksumming\n",
5892 card->info.if_name, rc);
5895 rc = qeth_send_simple_setassparms(card, IPA_INBOUND_CHECKSUM,
5897 card->info.csum_mask);
5899 PRINT_WARN("Enabling Inbound HW Checksumming failed on %s: "
5900 "0x%x,\ncontinuing using Inbound SW Checksumming\n",
5901 card->info.if_name, rc);
/*
 * Configure inbound checksumming per the user's checksum_type option:
 * NO/SW checksumming just logs; HW checksumming requires card support
 * and falls back to SW if the assist is unavailable.
 */
5908 qeth_start_ipa_checksum(struct qeth_card *card)
5912 QETH_DBF_TEXT(trace,3,"strtcsum");
5914 if (card->options.checksum_type == NO_CHECKSUMMING) {
5915 PRINT_WARN("Using no checksumming on %s.\n",
5916 card->info.if_name);
5919 if (card->options.checksum_type == SW_CHECKSUMMING) {
5920 PRINT_WARN("Using SW checksumming on %s.\n",
5921 card->info.if_name);
5924 if (!qeth_is_supported(card, IPA_INBOUND_CHECKSUM)) {
5925 PRINT_WARN("Inbound HW Checksumming not "
5926 "supported on %s,\ncontinuing "
5927 "using Inbound SW Checksumming\n",
5928 card->info.if_name);
5929 card->options.checksum_type = SW_CHECKSUMMING;
5932 rc = qeth_send_checksum_command(card);
5934 PRINT_INFO("HW Checksumming (inbound) enabled \n");
/*
 * Print the enabled IP-assist options of the interface as one line.
 * Fix: the individual tokens lacked trailing separators, so multiple
 * enabled assists were printed concatenated (e.g. "IP_FRAGSRC_MAC");
 * each token now ends with a space like "ARP " already did.
 */
5941 qeth_print_ipassist_status(struct qeth_card *card)
5946 offset += sprintf(buf, "IPAssist options of %s: ", card->info.if_name);
5947 if (qeth_is_enabled(card, IPA_ARP_PROCESSING))
5948 offset += sprintf(buf+offset, "ARP ");
5949 if (qeth_is_enabled(card, IPA_IP_FRAGMENTATION))
5950 offset += sprintf(buf+offset, "IP_FRAG ");
5951 if (qeth_is_enabled(card, IPA_SOURCE_MAC))
5952 offset += sprintf(buf+offset, "SRC_MAC ");
5953 if (qeth_is_enabled(card, IPA_FULL_VLAN))
5954 offset += sprintf(buf+offset, "VLAN ");
5955 if (qeth_is_enabled(card, IPA_VLAN_PRIO))
5956 offset += sprintf(buf+offset, "VLAN_PRIO ");
/*
 * Kick off all IP assists in sequence; each step is best-effort
 * ("go on" — failures are logged by the individual routines).
 */
5961 qeth_start_ipassists(struct qeth_card *card)
5963 QETH_DBF_TEXT(trace,3,"strtipas");
5964 qeth_start_ipa_arp_processing(card); /* go on*/
5965 qeth_start_ipa_ip_fragmentation(card); /* go on*/
5966 qeth_start_ipa_source_mac(card); /* go on*/
5967 qeth_start_ipa_vlan(card); /* go on*/
5968 qeth_start_ipa_multicast(card); /* go on*/
5969 qeth_start_ipa_ipv6(card); /* go on*/
5970 qeth_start_ipa_broadcast(card); /* go on*/
5971 qeth_start_ipa_checksum(card); /* go on*/
/* Send a SETRTG command carrying the requested routing type for the
 * given protocol version. */
5976 qeth_send_setrouting(struct qeth_card *card, enum qeth_routing_types type,
5977 enum qeth_prot_versions prot)
5980 struct qeth_ipa_cmd *cmd;
5981 struct qeth_cmd_buffer *iob;
5983 QETH_DBF_TEXT(trace,4,"setroutg");
5984 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETRTG, prot);
5985 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
5986 cmd->data.setrtg.type = (type);
5987 rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
/*
 * Validate the requested routing type against card type and assist
 * support; IQD cards take the connector/multicast set, OSA the
 * router set.  Unsupported combinations fall through to a warning
 * and the type is reset to 'no router'.
 * NOTE(review): switch braces, 'break's and the NO_ROUTER reset are
 * elided in this view — verify against the full source.
 */
5994 qeth_correct_routing_type(struct qeth_card *card, enum qeth_routing_types *type,
5995 enum qeth_prot_versions prot)
5997 if (card->info.type == QETH_CARD_TYPE_IQD) {
6000 case PRIMARY_CONNECTOR:
6001 case SECONDARY_CONNECTOR:
6002 case MULTICAST_ROUTER:
6010 case PRIMARY_ROUTER:
6011 case SECONDARY_ROUTER:
6013 case MULTICAST_ROUTER:
6014 if (qeth_is_ipafunc_supported(card, prot,
6022 PRINT_WARN("Routing type '%s' not supported for interface %s.\n"
6023 "Router status set to 'no router'.\n",
6024 ((*type == PRIMARY_ROUTER)? "primary router" :
6025 (*type == SECONDARY_ROUTER)? "secondary router" :
6026 (*type == PRIMARY_CONNECTOR)? "primary connector" :
6027 (*type == SECONDARY_CONNECTOR)? "secondary connector" :
6028 (*type == MULTICAST_ROUTER)? "multicast router" :
/* Validate and program the IPv4 routing type; on failure fall back
 * to NO_ROUTER and warn. */
6035 qeth_setrouting_v4(struct qeth_card *card)
6039 QETH_DBF_TEXT(trace,3,"setrtg4");
6041 qeth_correct_routing_type(card, &card->options.route4.type,
6044 rc = qeth_send_setrouting(card, card->options.route4.type,
6047 card->options.route4.type = NO_ROUTER;
6048 PRINT_WARN("Error (0x%04x) while setting routing type on %s. "
6049 "Type set to 'no router'.\n",
6050 rc, card->info.if_name);
/*
 * Validate and program the IPv6 routing type (CONFIG_QETH_IPV6 only).
 * Skipped for NO_ROUTER and for OSA multicast-router requests when
 * the card lacks the OSA-MC-router assist; falls back to NO_ROUTER
 * with a warning on send failure.
 */
6056 qeth_setrouting_v6(struct qeth_card *card)
6060 QETH_DBF_TEXT(trace,3,"setrtg6");
6061 #ifdef CONFIG_QETH_IPV6
6063 qeth_correct_routing_type(card, &card->options.route6.type,
6066 if ((card->options.route6.type == NO_ROUTER) ||
6067 ((card->info.type == QETH_CARD_TYPE_OSAE) &&
6068 (card->options.route6.type == MULTICAST_ROUTER) &&
6069 !qeth_is_supported6(card,IPA_OSA_MC_ROUTER)))
6071 rc = qeth_send_setrouting(card, card->options.route6.type,
6074 card->options.route6.type = NO_ROUTER;
6075 PRINT_WARN("Error (0x%04x) while setting routing type on %s. "
6076 "Type set to 'no router'.\n",
6077 rc, card->info.if_name);
6084 * softsetup card: init IPA stuff
/*
 * Soft setup: STARTLAN (IPv4), then adapter params, IP assists and
 * v4/v6 routing; each step's failure is recorded in the setup DBF
 * area but setup continues.  lan_online tracks STARTLAN success.
 * Fix: user-visible message typo "if offline" -> "is offline".
 */
6087 qeth_softsetup_card(struct qeth_card *card)
6091 QETH_DBF_TEXT(setup, 2, "softsetp");
6093 if ((rc = qeth_send_startlan(card, QETH_PROT_IPV4))){
6094 QETH_DBF_TEXT_(setup, 2, "1err%d", rc);
6096 PRINT_WARN("LAN on card %s is offline! "
6097 "Continuing softsetup.\n",
6099 card->lan_online = 0;
6103 card->lan_online = 1;
6104 if ((rc = qeth_setadapter_parms(card)))
6105 QETH_DBF_TEXT_(setup, 2, "2err%d", rc);
6106 if ((rc = qeth_start_ipassists(card)))
6107 QETH_DBF_TEXT_(setup, 2, "3err%d", rc);
6108 if ((rc = qeth_setrouting_v4(card)))
6109 QETH_DBF_TEXT_(setup, 2, "4err%d", rc);
6110 if ((rc = qeth_setrouting_v6(card)))
6111 QETH_DBF_TEXT_(setup, 2, "5err%d", rc);
6112 netif_stop_queue(card->dev);
6116 #ifdef CONFIG_QETH_IPV6
/*
 * Reply callback for CREATE_ADDR: on success store the 16-bit unique
 * id the card generated (bytes 6..7 of the returned id); on failure
 * fall back to a default id and warn about possible duplicate IPv6
 * autoconfig addresses across LPARs.
 */
6118 qeth_get_unique_id_cb(struct qeth_card *card, struct qeth_reply *reply,
6121 struct qeth_ipa_cmd *cmd;
6123 cmd = (struct qeth_ipa_cmd *) data;
6124 if (cmd->hdr.return_code == 0)
6125 card->info.unique_id = *((__u16 *)
6126 &cmd->data.create_destroy_addr.unique_id[6]);
6128 card->info.unique_id = UNIQUE_ID_IF_CREATE_ADDR_FAILED |
6129 UNIQUE_ID_NOT_BY_CARD;
6130 PRINT_WARN("couldn't get a unique id from the card on device "
6131 "%s (result=x%x), using default id. ipv6 "
6132 "autoconfig on other lpars may lead to duplicate "
6133 "ip addresses. please use manually "
6134 "configured ones.\n",
6135 CARD_BUS_ID(card), cmd->hdr.return_code);
/*
 * Return a card-generated unique id via DESTROY_ADDR (no-op when the
 * id was not assigned by the card).  On failure the id is reset to
 * the not-by-card default.  IPv6-only functionality.
 */
6142 qeth_put_unique_id(struct qeth_card *card)
6146 #ifdef CONFIG_QETH_IPV6
6147 struct qeth_cmd_buffer *iob;
6148 struct qeth_ipa_cmd *cmd;
6150 QETH_DBF_TEXT(trace,2,"puniqeid");
/* nothing to give back if the card never handed out an id */
6152 if ((card->info.unique_id & UNIQUE_ID_NOT_BY_CARD) ==
6153 UNIQUE_ID_NOT_BY_CARD)
6155 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_DESTROY_ADDR,
6157 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
6158 *((__u16 *) &cmd->data.create_destroy_addr.unique_id[6]) =
6159 card->info.unique_id;
6160 memcpy(&cmd->data.create_destroy_addr.unique_id[0],
6161 card->dev->dev_addr, OSA_ADDR_LEN);
6162 rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
6164 card->info.unique_id = UNIQUE_ID_IF_CREATE_ADDR_FAILED |
6165 UNIQUE_ID_NOT_BY_CARD;
/*
 * Clear the card's IP address bookkeeping under ip_lock: drop the
 * todo list, then deregister every entry on ip_list (lock released
 * around the command I/O).  With 'recover' set, non-multicast
 * entries are re-queued on the todo list for re-registration.
 */
6174 qeth_clear_ip_list(struct qeth_card *card, int clean, int recover)
6176 struct qeth_ipaddr *addr, *tmp;
6177 unsigned long flags;
6179 QETH_DBF_TEXT(trace,4,"clearip");
6180 spin_lock_irqsave(&card->ip_lock, flags);
6181 /* clear todo list */
6182 list_for_each_entry_safe(addr, tmp, &card->ip_tbd_list, entry){
6183 list_del(&addr->entry);
6187 while (!list_empty(&card->ip_list)) {
6188 addr = list_entry(card->ip_list.next,
6189 struct qeth_ipaddr, entry);
6190 list_del_init(&addr->entry);
/* drop the lock while the deregister command does channel I/O */
6192 spin_unlock_irqrestore(&card->ip_lock, flags);
6193 qeth_deregister_addr_entry(card, addr);
6194 spin_lock_irqsave(&card->ip_lock, flags);
6196 if (!recover || addr->is_multicast) {
6200 list_add_tail(&addr->entry, &card->ip_tbd_list);
6202 spin_unlock_irqrestore(&card->ip_lock, flags);
/*
 * Set which driver threads may run; optionally mask pending start
 * bits too, then wake any waiters on wait_q.
 */
6206 qeth_set_allowed_threads(struct qeth_card *card, unsigned long threads,
6207 int clear_start_mask)
6209 unsigned long flags;
6211 spin_lock_irqsave(&card->thread_mask_lock, flags);
6212 card->thread_allowed_mask = threads;
6213 if (clear_start_mask)
6214 card->thread_start_mask &= threads;
6215 spin_unlock_irqrestore(&card->thread_mask_lock, flags);
6216 wake_up(&card->wait_q);
/* Return the subset of 'threads' that is currently running (nonzero
 * means at least one of them is active). */
6220 qeth_threads_running(struct qeth_card *card, unsigned long threads)
6222 unsigned long flags;
6225 spin_lock_irqsave(&card->thread_mask_lock, flags);
6226 rc = (card->thread_running_mask & threads);
6227 spin_unlock_irqrestore(&card->thread_mask_lock, flags);
/* Sleep (interruptibly) until none of the given threads is running;
 * returns -ERESTARTSYS if interrupted by a signal. */
6232 qeth_wait_for_threads(struct qeth_card *card, unsigned long threads)
6234 return wait_event_interruptible(card->wait_q,
6235 qeth_threads_running(card, threads) == 0);
/*
 * Tear the card down state-by-state: UP -> SOFTSETUP (close netdev,
 * optional STOPLAN) -> HARDSETUP (clear IP/ipacmd lists, optionally
 * return the unique id) -> DOWN (clear QDIO buffers, pools, command
 * buffers).  use_hard_stop skips the polite STOPLAN/unique-id steps
 * and is reset at the end.  Returns -ERESTARTSYS if interrupted
 * while waiting for non-recovery threads to finish.
 */
6239 qeth_stop_card(struct qeth_card *card)
6241 int recover_flag = 0;
6244 QETH_DBF_TEXT(setup ,2,"stopcard");
6245 QETH_DBF_HEX(setup, 2, &card, sizeof(void *));
6247 qeth_set_allowed_threads(card, 0, 1);
6248 if (qeth_wait_for_threads(card, ~QETH_RECOVER_THREAD))
6249 return -ERESTARTSYS;
6250 if (card->read.state == CH_STATE_UP &&
6251 card->write.state == CH_STATE_UP &&
6252 (card->state == CARD_STATE_UP)) {
6255 dev_close(card->dev);
6257 if (!card->use_hard_stop)
6258 if ((rc = qeth_send_stoplan(card)))
6259 QETH_DBF_TEXT_(setup, 2, "1err%d", rc);
6260 card->state = CARD_STATE_SOFTSETUP;
6262 if (card->state == CARD_STATE_SOFTSETUP) {
6263 qeth_clear_ip_list(card, !card->use_hard_stop, recover_flag);
6264 qeth_clear_ipacmd_list(card);
6265 card->state = CARD_STATE_HARDSETUP;
6267 if (card->state == CARD_STATE_HARDSETUP) {
6268 if (!card->use_hard_stop)
6269 if ((rc = qeth_put_unique_id(card)))
6270 QETH_DBF_TEXT_(setup, 2, "2err%d", rc);
6271 qeth_qdio_clear_card(card, 0);
6272 qeth_clear_qdio_buffers(card);
6273 qeth_clear_working_pool_list(card);
6274 card->state = CARD_STATE_DOWN;
6276 if (card->state == CARD_STATE_DOWN) {
6277 qeth_clear_cmd_buffers(&card->read);
6278 qeth_clear_cmd_buffers(&card->write);
6280 card->use_hard_stop = 0;
/*
 * Obtain a unique id for IPv6 autoconfiguration via CREATE_ADDR
 * (IPv6-capable cards only); on unsupported cards or send failure
 * the default not-by-card id is used instead.
 */
6286 qeth_get_unique_id(struct qeth_card *card)
6289 #ifdef CONFIG_QETH_IPV6
6290 struct qeth_cmd_buffer *iob;
6291 struct qeth_ipa_cmd *cmd;
6293 QETH_DBF_TEXT(setup, 2, "guniqeid");
6295 if (!qeth_is_supported(card,IPA_IPV6)) {
6296 card->info.unique_id = UNIQUE_ID_IF_CREATE_ADDR_FAILED |
6297 UNIQUE_ID_NOT_BY_CARD;
6301 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_CREATE_ADDR,
6303 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
6304 *((__u16 *) &cmd->data.create_destroy_addr.unique_id[6]) =
6305 card->info.unique_id;
6307 rc = qeth_send_ipa_cmd(card, iob, qeth_get_unique_id_cb, NULL);
6309 card->info.unique_id = UNIQUE_ID_IF_CREATE_ADDR_FAILED |
6310 UNIQUE_ID_NOT_BY_CARD;
/*
 * Print the card identification banner including the portname, which
 * is converted from EBCDIC to ASCII character-by-character first.
 */
6315 qeth_print_status_with_portname(struct qeth_card *card)
6320 sprintf(dbf_text, "%s", card->info.portname + 1);
6321 for (i = 0; i < 8; i++)
6323 (char) _ebcasc[(__u8) dbf_text[i]];
6325 printk("qeth: Device %s/%s/%s is a%s card%s%s%s\n"
6326 "with link type %s (portname: %s)\n",
6330 qeth_get_cardname(card),
6331 (card->info.mcl_level[0]) ? " (level: " : "",
6332 (card->info.mcl_level[0]) ? card->info.mcl_level : "",
6333 (card->info.mcl_level[0]) ? ")" : "",
6334 qeth_get_cardname_short(card),
/*
 * Print the card identification banner when no portname is required;
 * the two printk branches differ only in the "(no portname needed by
 * interface)" suffix, chosen by whether a portname is set at all.
 */
6340 qeth_print_status_no_portname(struct qeth_card *card)
6342 if (card->info.portname[0])
6343 printk("qeth: Device %s/%s/%s is a%s "
6344 "card%s%s%s\nwith link type %s "
6345 "(no portname needed by interface).\n",
6349 qeth_get_cardname(card),
6350 (card->info.mcl_level[0]) ? " (level: " : "",
6351 (card->info.mcl_level[0]) ? card->info.mcl_level : "",
6352 (card->info.mcl_level[0]) ? ")" : "",
6353 qeth_get_cardname_short(card));
6355 printk("qeth: Device %s/%s/%s is a%s "
6356 "card%s%s%s\nwith link type %s.\n",
6360 qeth_get_cardname(card),
6361 (card->info.mcl_level[0]) ? " (level: " : "",
6362 (card->info.mcl_level[0]) ? card->info.mcl_level : "",
6363 (card->info.mcl_level[0]) ? ")" : "",
6364 qeth_get_cardname_short(card));
/*
 * Normalize the microcode-level string per card type (hex format for
 * OSA when VM reported a zero first byte, EBCDIC->ASCII for IQD,
 * cleared otherwise), then print the banner with or without the
 * portname depending on portname_required.
 */
6368 qeth_print_status_message(struct qeth_card *card)
6370 switch (card->info.type) {
6371 case QETH_CARD_TYPE_OSAE:
6372 /* VM will use a non-zero first character
6373 * to indicate a HiperSockets like reporting
6374 * of the level OSA sets the first character to zero
6376 if (!card->info.mcl_level[0]) {
6377 sprintf(card->info.mcl_level,"%02x%02x",
6378 card->info.mcl_level[2],
6379 card->info.mcl_level[3]);
6381 card->info.mcl_level[QETH_MCL_LENGTH] = 0;
6385 case QETH_CARD_TYPE_IQD:
6386 card->info.mcl_level[0] = (char) _ebcasc[(__u8)
6387 card->info.mcl_level[0]];
6388 card->info.mcl_level[1] = (char) _ebcasc[(__u8)
6389 card->info.mcl_level[1]];
6390 card->info.mcl_level[2] = (char) _ebcasc[(__u8)
6391 card->info.mcl_level[2]];
6392 card->info.mcl_level[3] = (char) _ebcasc[(__u8)
6393 card->info.mcl_level[3]];
6394 card->info.mcl_level[QETH_MCL_LENGTH] = 0;
6397 memset(&card->info.mcl_level[0], 0, QETH_MCL_LENGTH + 1);
6399 if (card->info.portname_required)
6400 qeth_print_status_with_portname(card);
6402 qeth_print_status_no_portname(card);
/*
 * Register the net_device (once — skipped if already registered),
 * attach it to the ccwgroup device in sysfs and cache the assigned
 * interface name in card->info.if_name.
 */
6406 qeth_register_netdev(struct qeth_card *card)
6410 QETH_DBF_TEXT(setup, 3, "regnetd");
6411 if (card->dev->reg_state != NETREG_UNINITIALIZED)
6414 SET_NETDEV_DEV(card->dev, &card->gdev->dev);
6415 rc = register_netdev(card->dev);
6417 strcpy(card->info.if_name, card->dev->name);
/* Recovery path: re-open the netdev and schedule the multicast
 * worker thread if its start bit was newly set. */
6423 qeth_start_again(struct qeth_card *card)
6425 QETH_DBF_TEXT(setup ,2, "startag");
6428 dev_open(card->dev);
6430 if (qeth_set_thread_start_bit(card, QETH_SET_MC_THREAD) == 0)
6431 schedule_work(&card->kernel_thread_starter);
/*
 * ccwgroup set_online callback: bring the three subchannels online,
 * then run hardsetup, query IPv4 assists, get the unique id, register
 * the netdev, softsetup and QDIO queue init.  On any failure the
 * card is hard-stopped, subchannels go offline again and the state
 * reverts to RECOVER or DOWN depending on how we entered.
 * NOTE(review): goto-error labels between steps are elided here.
 */
6435 qeth_set_online(struct ccwgroup_device *gdev)
6437 struct qeth_card *card = gdev->dev.driver_data;
6439 enum qeth_card_states recover_flag;
6442 QETH_DBF_TEXT(setup ,2, "setonlin");
6443 QETH_DBF_HEX(setup, 2, &card, sizeof(void *));
6445 qeth_set_allowed_threads(card, QETH_RECOVER_THREAD, 1);
6446 if (qeth_wait_for_threads(card, ~QETH_RECOVER_THREAD)){
6447 PRINT_WARN("set_online of card %s interrupted by user!\n",
6449 return -ERESTARTSYS;
6452 recover_flag = card->state;
6453 if (ccw_device_set_online(CARD_RDEV(card)) ||
6454 ccw_device_set_online(CARD_WDEV(card)) ||
6455 ccw_device_set_online(CARD_DDEV(card))){
6456 QETH_DBF_TEXT_(setup, 2, "1err%d", rc);
6460 if ((rc = qeth_hardsetup_card(card))){
6461 QETH_DBF_TEXT_(setup, 2, "2err%d", rc);
6464 card->state = CARD_STATE_HARDSETUP;
6466 if ((rc = qeth_query_ipassists(card,QETH_PROT_IPV4))){
6467 QETH_DBF_TEXT_(setup, 2, "3err%d", rc);
6470 rc = qeth_get_unique_id(card);
6473 QETH_DBF_TEXT_(setup, 2, "4err%d", rc);
6476 qeth_print_status_message(card);
6477 if ((rc = qeth_register_netdev(card))){
6478 QETH_DBF_TEXT_(setup, 2, "5err%d", rc);
6481 if ((rc = qeth_softsetup_card(card))){
6482 QETH_DBF_TEXT_(setup, 2, "6err%d", rc);
6485 card->state = CARD_STATE_SOFTSETUP;
6487 if ((rc = qeth_init_qdio_queues(card))){
6488 QETH_DBF_TEXT_(setup, 2, "7err%d", rc);
6491 /*maybe it was set offline without ifconfig down
6492 * we can also use this state for recovery purposes*/
6493 qeth_set_allowed_threads(card, 0xffffffff, 0);
6494 if (recover_flag == CARD_STATE_RECOVER)
6495 qeth_start_again(card);
6496 qeth_notify_processes();
/* error path: hard-stop and take all subchannels offline again */
6499 card->use_hard_stop = 1;
6500 qeth_stop_card(card);
6501 ccw_device_set_offline(CARD_DDEV(card));
6502 ccw_device_set_offline(CARD_WDEV(card));
6503 ccw_device_set_offline(CARD_RDEV(card));
6504 if (recover_flag == CARD_STATE_RECOVER)
6505 card->state = CARD_STATE_RECOVER;
6507 card->state = CARD_STATE_DOWN;
/* CCW device ids handled by this driver: OSA Express (0x1731/0x01)
 * and HiperSockets/IQD (0x1731/0x05).  Note: GNU-style 'field:'
 * initializers predate the .field designated-initializer syntax. */
6511 static struct ccw_device_id qeth_ids[] = {
6512 {CCW_DEVICE(0x1731, 0x01), driver_info:QETH_CARD_TYPE_OSAE},
6513 {CCW_DEVICE(0x1731, 0x05), driver_info:QETH_CARD_TYPE_IQD},
6516 MODULE_DEVICE_TABLE(ccw, qeth_ids);
/* root sysfs device for all qeth cards */
6518 struct device *qeth_root_dev = NULL;
6520 struct ccwgroup_driver qeth_ccwgroup_driver = {
6521 .owner = THIS_MODULE,
6523 .driver_id = 0xD8C5E3C8,
6524 .probe = qeth_probe_device,
6525 .remove = qeth_remove_device,
6526 .set_online = qeth_set_online,
6527 .set_offline = qeth_set_offline,
6530 struct ccw_driver qeth_ccw_driver = {
6533 .probe = ccwgroup_probe_ccwdev,
6534 .remove = ccwgroup_remove_ccwdev,
6539 qeth_unregister_dbf_views(void)
6542 debug_unregister(qeth_dbf_setup);
6544 debug_unregister(qeth_dbf_qerr);
6546 debug_unregister(qeth_dbf_sense);
6548 debug_unregister(qeth_dbf_misc);
6550 debug_unregister(qeth_dbf_data);
6551 if (qeth_dbf_control)
6552 debug_unregister(qeth_dbf_control);
6554 debug_unregister(qeth_dbf_trace);
6557 qeth_register_dbf_views(void)
6559 qeth_dbf_setup = debug_register(QETH_DBF_SETUP_NAME,
6560 QETH_DBF_SETUP_INDEX,
6561 QETH_DBF_SETUP_NR_AREAS,
6562 QETH_DBF_SETUP_LEN);
6563 qeth_dbf_misc = debug_register(QETH_DBF_MISC_NAME,
6564 QETH_DBF_MISC_INDEX,
6565 QETH_DBF_MISC_NR_AREAS,
6567 qeth_dbf_data = debug_register(QETH_DBF_DATA_NAME,
6568 QETH_DBF_DATA_INDEX,
6569 QETH_DBF_DATA_NR_AREAS,
6571 qeth_dbf_control = debug_register(QETH_DBF_CONTROL_NAME,
6572 QETH_DBF_CONTROL_INDEX,
6573 QETH_DBF_CONTROL_NR_AREAS,
6574 QETH_DBF_CONTROL_LEN);
6575 qeth_dbf_sense = debug_register(QETH_DBF_SENSE_NAME,
6576 QETH_DBF_SENSE_INDEX,
6577 QETH_DBF_SENSE_NR_AREAS,
6578 QETH_DBF_SENSE_LEN);
6579 qeth_dbf_qerr = debug_register(QETH_DBF_QERR_NAME,
6580 QETH_DBF_QERR_INDEX,
6581 QETH_DBF_QERR_NR_AREAS,
6583 qeth_dbf_trace = debug_register(QETH_DBF_TRACE_NAME,
6584 QETH_DBF_TRACE_INDEX,
6585 QETH_DBF_TRACE_NR_AREAS,
6586 QETH_DBF_TRACE_LEN);
6588 if ((qeth_dbf_setup == NULL) || (qeth_dbf_misc == NULL) ||
6589 (qeth_dbf_data == NULL) || (qeth_dbf_control == NULL) ||
6590 (qeth_dbf_sense == NULL) || (qeth_dbf_qerr == NULL) ||
6591 (qeth_dbf_trace == NULL)) {
6592 qeth_unregister_dbf_views();
6595 debug_register_view(qeth_dbf_setup, &debug_hex_ascii_view);
6596 debug_set_level(qeth_dbf_setup, QETH_DBF_SETUP_LEVEL);
6598 debug_register_view(qeth_dbf_misc, &debug_hex_ascii_view);
6599 debug_set_level(qeth_dbf_misc, QETH_DBF_MISC_LEVEL);
6601 debug_register_view(qeth_dbf_data, &debug_hex_ascii_view);
6602 debug_set_level(qeth_dbf_data, QETH_DBF_DATA_LEVEL);
6604 debug_register_view(qeth_dbf_control, &debug_hex_ascii_view);
6605 debug_set_level(qeth_dbf_control, QETH_DBF_CONTROL_LEVEL);
6607 debug_register_view(qeth_dbf_sense, &debug_hex_ascii_view);
6608 debug_set_level(qeth_dbf_sense, QETH_DBF_SENSE_LEVEL);
6610 debug_register_view(qeth_dbf_qerr, &debug_hex_ascii_view);
6611 debug_set_level(qeth_dbf_qerr, QETH_DBF_QERR_LEVEL);
6613 debug_register_view(qeth_dbf_trace, &debug_hex_ascii_view);
6614 debug_set_level(qeth_dbf_trace, QETH_DBF_TRACE_LEVEL);
6619 #ifdef CONFIG_QETH_IPV6
6620 extern struct neigh_table arp_tbl;
6621 static struct neigh_ops *arp_direct_ops;
6622 static int (*qeth_old_arp_constructor) (struct neighbour *);
6624 static struct neigh_ops arp_direct_ops_template = {
6628 .error_report = NULL,
6629 .output = dev_queue_xmit,
6630 .connected_output = dev_queue_xmit,
6631 .hh_output = dev_queue_xmit,
6632 .queue_xmit = dev_queue_xmit
6636 qeth_arp_constructor(struct neighbour *neigh)
6638 struct net_device *dev = neigh->dev;
6639 struct in_device *in_dev = in_dev_get(dev);
6643 if (!qeth_verify_dev(dev)) {
6645 return qeth_old_arp_constructor(neigh);
6648 neigh->type = inet_addr_type(*(u32 *) neigh->primary_key);
6649 if (in_dev->arp_parms)
6650 neigh->parms = in_dev->arp_parms;
6652 neigh->nud_state = NUD_NOARP;
6653 neigh->ops = arp_direct_ops;
6654 neigh->output = neigh->ops->queue_xmit;
6657 #endif /*CONFIG_QETH_IPV6*/
6660 * IP address takeover related functions
6663 qeth_clear_ipato_list(struct qeth_card *card)
6665 struct qeth_ipato_entry *ipatoe, *tmp;
6666 unsigned long flags;
6668 spin_lock_irqsave(&card->ip_lock, flags);
6669 list_for_each_entry_safe(ipatoe, tmp, &card->ipato.entries, entry) {
6670 list_del(&ipatoe->entry);
6673 spin_unlock_irqrestore(&card->ip_lock, flags);
6677 qeth_add_ipato_entry(struct qeth_card *card, struct qeth_ipato_entry *new)
6679 struct qeth_ipato_entry *ipatoe;
6680 unsigned long flags;
6683 QETH_DBF_TEXT(trace, 2, "addipato");
6684 spin_lock_irqsave(&card->ip_lock, flags);
6685 list_for_each_entry(ipatoe, &card->ipato.entries, entry){
6686 if (ipatoe->proto != new->proto)
6688 if (!memcmp(ipatoe->addr, new->addr,
6689 (ipatoe->proto == QETH_PROT_IPV4)? 4:16) &&
6690 (ipatoe->mask_bits == new->mask_bits)){
6691 PRINT_WARN("ipato entry already exists!\n");
6697 list_add_tail(&new->entry, &card->ipato.entries);
6699 spin_unlock_irqrestore(&card->ip_lock, flags);
6704 qeth_del_ipato_entry(struct qeth_card *card, enum qeth_prot_versions proto,
6705 u8 *addr, int mask_bits)
6707 struct qeth_ipato_entry *ipatoe, *tmp;
6708 unsigned long flags;
6710 QETH_DBF_TEXT(trace, 2, "delipato");
6711 spin_lock_irqsave(&card->ip_lock, flags);
6712 list_for_each_entry_safe(ipatoe, tmp, &card->ipato.entries, entry){
6713 if (ipatoe->proto != proto)
6715 if (!memcmp(ipatoe->addr, addr,
6716 (proto == QETH_PROT_IPV4)? 4:16) &&
6717 (ipatoe->mask_bits == mask_bits)){
6718 list_del(&ipatoe->entry);
6722 spin_unlock_irqrestore(&card->ip_lock, flags);
6726 qeth_convert_addr_to_bits(u8 *addr, u8 *bits, int len)
6731 for (i = 0; i < len; ++i){
6733 for (j = 7; j >= 0; --j){
6734 bits[i*8 + j] = octet & 1;
6741 qeth_is_addr_covered_by_ipato(struct qeth_card *card, struct qeth_ipaddr *addr)
6743 struct qeth_ipato_entry *ipatoe;
6744 u8 addr_bits[128] = {0, };
6745 u8 ipatoe_bits[128] = {0, };
6748 if (!card->ipato.enabled)
6751 qeth_convert_addr_to_bits((u8 *) &addr->u, addr_bits,
6752 (addr->proto == QETH_PROT_IPV4)? 4:16);
6753 list_for_each_entry(ipatoe, &card->ipato.entries, entry){
6754 if (addr->proto != ipatoe->proto)
6756 qeth_convert_addr_to_bits(ipatoe->addr, ipatoe_bits,
6757 (ipatoe->proto==QETH_PROT_IPV4) ?
6759 if (addr->proto == QETH_PROT_IPV4)
6760 rc = !memcmp(addr_bits, ipatoe_bits,
6761 min(32, ipatoe->mask_bits));
6763 rc = !memcmp(addr_bits, ipatoe_bits,
6764 min(128, ipatoe->mask_bits));
6769 if ((addr->proto == QETH_PROT_IPV4) && card->ipato.invert4)
6771 else if ((addr->proto == QETH_PROT_IPV6) && card->ipato.invert6)
6778 * VIPA related functions
6781 qeth_add_vipa(struct qeth_card *card, enum qeth_prot_versions proto,
6784 struct qeth_ipaddr *ipaddr;
6785 unsigned long flags;
6788 ipaddr = qeth_get_addr_buffer(proto);
6790 if (proto == QETH_PROT_IPV4){
6791 QETH_DBF_TEXT(trace, 2, "addvipa4");
6792 memcpy(&ipaddr->u.a4.addr, addr, 4);
6793 ipaddr->u.a4.mask = 0;
6794 #ifdef CONFIG_QETH_IPV6
6795 } else if (proto == QETH_PROT_IPV6){
6796 QETH_DBF_TEXT(trace, 2, "addvipa6");
6797 memcpy(&ipaddr->u.a6.addr, addr, 16);
6798 ipaddr->u.a6.pfxlen = 0;
6801 ipaddr->type = QETH_IP_TYPE_VIPA;
6802 ipaddr->set_flags = QETH_IPA_SETIP_VIPA_FLAG;
6803 ipaddr->del_flags = QETH_IPA_DELIP_VIPA_FLAG;
6806 spin_lock_irqsave(&card->ip_lock, flags);
6807 if (__qeth_address_exists_in_list(&card->ip_list, ipaddr, 0) ||
6808 __qeth_address_exists_in_list(&card->ip_tbd_list, ipaddr, 0))
6810 spin_unlock_irqrestore(&card->ip_lock, flags);
6812 PRINT_WARN("Cannot add VIPA. Address already exists!\n");
6815 if (!qeth_add_ip(card, ipaddr))
6817 if (qeth_set_thread_start_bit(card, QETH_SET_IP_THREAD) == 0)
6818 schedule_work(&card->kernel_thread_starter);
6823 qeth_del_vipa(struct qeth_card *card, enum qeth_prot_versions proto,
6826 struct qeth_ipaddr *ipaddr;
6828 ipaddr = qeth_get_addr_buffer(proto);
6830 if (proto == QETH_PROT_IPV4){
6831 QETH_DBF_TEXT(trace, 2, "delvipa4");
6832 memcpy(&ipaddr->u.a4.addr, addr, 4);
6833 ipaddr->u.a4.mask = 0;
6834 #ifdef CONFIG_QETH_IPV6
6835 } else if (proto == QETH_PROT_IPV6){
6836 QETH_DBF_TEXT(trace, 2, "delvipa6");
6837 memcpy(&ipaddr->u.a6.addr, addr, 16);
6838 ipaddr->u.a6.pfxlen = 0;
6841 ipaddr->type = QETH_IP_TYPE_VIPA;
6844 if (!qeth_delete_ip(card, ipaddr))
6846 if (qeth_set_thread_start_bit(card, QETH_SET_IP_THREAD) == 0)
6847 schedule_work(&card->kernel_thread_starter);
6851 * proxy ARP related functions
6854 qeth_add_rxip(struct qeth_card *card, enum qeth_prot_versions proto,
6857 struct qeth_ipaddr *ipaddr;
6858 unsigned long flags;
6861 ipaddr = qeth_get_addr_buffer(proto);
6863 if (proto == QETH_PROT_IPV4){
6864 QETH_DBF_TEXT(trace, 2, "addrxip4");
6865 memcpy(&ipaddr->u.a4.addr, addr, 4);
6866 ipaddr->u.a4.mask = 0;
6867 #ifdef CONFIG_QETH_IPV6
6868 } else if (proto == QETH_PROT_IPV6){
6869 QETH_DBF_TEXT(trace, 2, "addrxip6");
6870 memcpy(&ipaddr->u.a6.addr, addr, 16);
6871 ipaddr->u.a6.pfxlen = 0;
6874 ipaddr->type = QETH_IP_TYPE_RXIP;
6875 ipaddr->set_flags = QETH_IPA_SETIP_TAKEOVER_FLAG;
6876 ipaddr->del_flags = 0;
6879 spin_lock_irqsave(&card->ip_lock, flags);
6880 if (__qeth_address_exists_in_list(&card->ip_list, ipaddr, 0) ||
6881 __qeth_address_exists_in_list(&card->ip_tbd_list, ipaddr, 0))
6883 spin_unlock_irqrestore(&card->ip_lock, flags);
6885 PRINT_WARN("Cannot add RXIP. Address already exists!\n");
6888 if (!qeth_add_ip(card, ipaddr))
6890 if (qeth_set_thread_start_bit(card, QETH_SET_IP_THREAD) == 0)
6891 schedule_work(&card->kernel_thread_starter);
6896 qeth_del_rxip(struct qeth_card *card, enum qeth_prot_versions proto,
6899 struct qeth_ipaddr *ipaddr;
6901 ipaddr = qeth_get_addr_buffer(proto);
6903 if (proto == QETH_PROT_IPV4){
6904 QETH_DBF_TEXT(trace, 2, "addrxip4");
6905 memcpy(&ipaddr->u.a4.addr, addr, 4);
6906 ipaddr->u.a4.mask = 0;
6907 #ifdef CONFIG_QETH_IPV6
6908 } else if (proto == QETH_PROT_IPV6){
6909 QETH_DBF_TEXT(trace, 2, "addrxip6");
6910 memcpy(&ipaddr->u.a6.addr, addr, 16);
6911 ipaddr->u.a6.pfxlen = 0;
6914 ipaddr->type = QETH_IP_TYPE_RXIP;
6917 if (!qeth_delete_ip(card, ipaddr))
6919 if (qeth_set_thread_start_bit(card, QETH_SET_IP_THREAD) == 0)
6920 schedule_work(&card->kernel_thread_starter);
6927 qeth_ip_event(struct notifier_block *this,
6928 unsigned long event,void *ptr)
6930 struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
6931 struct net_device *dev =(struct net_device *) ifa->ifa_dev->dev;
6932 struct qeth_ipaddr *addr;
6933 struct qeth_card *card;
6935 QETH_DBF_TEXT(trace,3,"ipevent");
6936 card = qeth_get_card_from_dev(dev);
6940 addr = qeth_get_addr_buffer(QETH_PROT_IPV4);
6942 addr->u.a4.addr = ifa->ifa_address;
6943 addr->u.a4.mask = ifa->ifa_mask;
6944 addr->type = QETH_IP_TYPE_NORMAL;
6950 if (!qeth_add_ip(card, addr))
6954 if (!qeth_delete_ip(card, addr))
6960 if (qeth_set_thread_start_bit(card, QETH_SET_IP_THREAD) == 0)
6961 schedule_work(&card->kernel_thread_starter);
6966 static struct notifier_block qeth_ip_notifier = {
6971 #ifdef CONFIG_QETH_IPV6
6973 * IPv6 event handler
6976 qeth_ip6_event(struct notifier_block *this,
6977 unsigned long event,void *ptr)
6980 struct inet6_ifaddr *ifa = (struct inet6_ifaddr *)ptr;
6981 struct net_device *dev = (struct net_device *)ifa->idev->dev;
6982 struct qeth_ipaddr *addr;
6983 struct qeth_card *card;
6985 QETH_DBF_TEXT(trace,3,"ip6event");
6987 card = qeth_get_card_from_dev(dev);
6990 if (!qeth_is_supported(card, IPA_IPV6))
6993 addr = qeth_get_addr_buffer(QETH_PROT_IPV6);
6995 memcpy(&addr->u.a6.addr, &ifa->addr, sizeof(struct in6_addr));
6996 addr->u.a6.pfxlen = ifa->prefix_len;
6997 addr->type = QETH_IP_TYPE_NORMAL;
7003 if (!qeth_add_ip(card, addr))
7007 if (!qeth_delete_ip(card, addr))
7013 if (qeth_set_thread_start_bit(card, QETH_SET_IP_THREAD) == 0)
7014 schedule_work(&card->kernel_thread_starter);
7019 static struct notifier_block qeth_ip6_notifier = {
7026 qeth_reboot_event(struct notifier_block *this, unsigned long event, void *ptr)
7029 struct device *entry;
7030 struct qeth_card *card;
7032 down_read(&qeth_ccwgroup_driver.driver.bus->subsys.rwsem);
7033 list_for_each_entry(entry, &qeth_ccwgroup_driver.driver.devices,
7035 card = (struct qeth_card *) entry->driver_data;
7036 qeth_clear_ip_list(card, 0, 0);
7037 qeth_qdio_clear_card(card, 0);
7039 up_read(&qeth_ccwgroup_driver.driver.bus->subsys.rwsem);
7044 static struct notifier_block qeth_reboot_notifier = {
7050 qeth_register_notifiers(void)
7054 QETH_DBF_TEXT(trace,5,"regnotif");
7055 if ((r = register_reboot_notifier(&qeth_reboot_notifier)))
7057 if ((r = register_inetaddr_notifier(&qeth_ip_notifier)))
7059 #ifdef CONFIG_QETH_IPV6
7060 if ((r = register_inet6addr_notifier(&qeth_ip6_notifier)))
7065 #ifdef CONFIG_QETH_IPV6
7067 unregister_inetaddr_notifier(&qeth_ip_notifier);
7070 unregister_reboot_notifier(&qeth_reboot_notifier);
7075 * unregister all event notifiers
7078 qeth_unregister_notifiers(void)
7081 QETH_DBF_TEXT(trace,5,"unregnot");
7082 BUG_ON(unregister_reboot_notifier(&qeth_reboot_notifier));
7083 BUG_ON(unregister_inetaddr_notifier(&qeth_ip_notifier));
7084 #ifdef CONFIG_QETH_IPV6
7085 BUG_ON(unregister_inet6addr_notifier(&qeth_ip6_notifier));
7086 #endif /* QETH_IPV6 */
7090 #ifdef CONFIG_QETH_IPV6
7092 qeth_ipv6_init(void)
7094 qeth_old_arp_constructor = arp_tbl.constructor;
7095 write_lock(&arp_tbl.lock);
7096 arp_tbl.constructor = qeth_arp_constructor;
7097 write_unlock(&arp_tbl.lock);
7099 arp_direct_ops = (struct neigh_ops*)
7100 kmalloc(sizeof(struct neigh_ops), GFP_KERNEL);
7101 if (!arp_direct_ops)
7104 memcpy(arp_direct_ops, &arp_direct_ops_template,
7105 sizeof(struct neigh_ops));
7111 qeth_ipv6_uninit(void)
7113 write_lock(&arp_tbl.lock);
7114 arp_tbl.constructor = qeth_old_arp_constructor;
7115 write_unlock(&arp_tbl.lock);
7116 kfree(arp_direct_ops);
7118 #endif /* CONFIG_QETH_IPV6 */
7121 qeth_sysfs_unregister(void)
7123 qeth_remove_driver_attributes();
7124 ccw_driver_unregister(&qeth_ccw_driver);
7125 ccwgroup_driver_unregister(&qeth_ccwgroup_driver);
7126 s390_root_dev_unregister(qeth_root_dev);
7129 * register qeth at sysfs
7132 qeth_sysfs_register(void)
7136 rc = ccwgroup_driver_register(&qeth_ccwgroup_driver);
7139 rc = ccw_driver_register(&qeth_ccw_driver);
7142 rc = qeth_create_driver_attributes();
7145 qeth_root_dev = s390_root_dev_register("qeth");
7146 if (IS_ERR(qeth_root_dev)) {
7147 rc = PTR_ERR(qeth_root_dev);
7162 PRINT_INFO("loading %s (%s/%s/%s/%s/%s/%s/%s %s %s)\n",
7163 version, VERSION_QETH_C, VERSION_QETH_H,
7164 VERSION_QETH_MPC_H, VERSION_QETH_MPC_C,
7165 VERSION_QETH_FS_H, VERSION_QETH_PROC_C,
7166 VERSION_QETH_SYS_C, QETH_VERSION_IPV6,
7169 INIT_LIST_HEAD(&qeth_card_list.list);
7170 INIT_LIST_HEAD(&qeth_notify_list);
7171 spin_lock_init(&qeth_notify_lock);
7172 rwlock_init(&qeth_card_list.rwlock);
7174 if (qeth_register_dbf_views())
7176 if (qeth_sysfs_register())
7179 #ifdef CONFIG_QETH_IPV6
7180 if (qeth_ipv6_init()) {
7181 PRINT_ERR("Out of memory during ipv6 init.\n");
7184 #endif /* QETH_IPV6 */
7185 if (qeth_register_notifiers())
7187 if (qeth_create_procfs_entries())
7193 qeth_unregister_notifiers();
7195 #ifdef CONFIG_QETH_IPV6
7197 #endif /* QETH_IPV6 */
7199 qeth_sysfs_unregister();
7200 qeth_unregister_dbf_views();
7202 PRINT_ERR("Initialization failed");
7207 __exit qeth_exit(void)
7209 struct qeth_card *card, *tmp;
7210 unsigned long flags;
7212 QETH_DBF_TEXT(trace,1, "cleanup.");
7215 * Weed would not need to clean up our devices here, because the
7216 * common device layer calls qeth_remove_device for each device
7217 * as soon as we unregister our driver (done in qeth_sysfs_unregister).
7218 * But we do cleanup here so we can do a "soft" shutdown of our cards.
7219 * qeth_remove_device called by the common device layer would otherwise
7220 * do a "hard" shutdown (card->use_hard_stop is set to one in
7221 * qeth_remove_device).
7224 read_lock_irqsave(&qeth_card_list.rwlock, flags);
7225 list_for_each_entry_safe(card, tmp, &qeth_card_list.list, list){
7226 read_unlock_irqrestore(&qeth_card_list.rwlock, flags);
7227 qeth_set_offline(card->gdev);
7228 qeth_remove_device(card->gdev);
7231 read_unlock_irqrestore(&qeth_card_list.rwlock, flags);
7232 #ifdef CONFIG_QETH_IPV6
7235 qeth_unregister_notifiers();
7236 qeth_remove_procfs_entries();
7237 qeth_sysfs_unregister();
7238 qeth_unregister_dbf_views();
7239 printk("qeth: removed\n");
7242 EXPORT_SYMBOL(qeth_eyecatcher);
7243 module_init(qeth_init);
7244 module_exit(qeth_exit);
7245 MODULE_AUTHOR("Frank Pavlic <pavlic@de.ibm.com>");
7246 MODULE_DESCRIPTION("Linux on zSeries OSA Express and HiperSockets support\n" \
7247 "Copyright 2000,2003 IBM Corporation\n");
7249 MODULE_LICENSE("GPL");