*/
+#include <linux/config.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/string.h>
DEFINE_PER_CPU(char[256], qeth_dbf_txt_buf);
-static struct lock_class_key qdio_out_skb_queue_key;
-
/**
* some more definitions and declarations
*/
struct qeth_card *card;
QETH_DBF_TEXT(setup, 2, "alloccrd");
- card = kzalloc(sizeof(struct qeth_card), GFP_DMA|GFP_KERNEL);
+ card = (struct qeth_card *) kmalloc(sizeof(struct qeth_card),
+ GFP_DMA|GFP_KERNEL);
if (!card)
return NULL;
QETH_DBF_HEX(setup, 2, &card, sizeof(void *));
+ memset(card, 0, sizeof(struct qeth_card));
if (qeth_setup_channel(&card->read)) {
kfree(card);
return NULL;
QETH_DBF_TEXT(setup, 3, "setoffl");
QETH_DBF_HEX(setup, 3, &card, sizeof(void *));
-
+
if (card->dev && netif_carrier_ok(card->dev))
netif_carrier_off(card->dev);
recover_flag = card->state;
list_for_each_entry(addr, &card->ip_list, entry) {
if (card->options.layer2) {
if ((addr->type == todo->type) &&
- (memcmp(&addr->mac, &todo->mac,
+ (memcmp(&addr->mac, &todo->mac,
OSA_ADDR_LEN) == 0)) {
found = 1;
break;
}
continue;
- }
+ }
if ((addr->proto == QETH_PROT_IPV4) &&
(todo->proto == QETH_PROT_IPV4) &&
(addr->type == todo->type) &&
if (card->options.layer2) {
if ((tmp->type == addr->type) &&
(tmp->is_multicast == addr->is_multicast) &&
- (memcmp(&tmp->mac, &addr->mac,
+ (memcmp(&tmp->mac, &addr->mac,
OSA_ADDR_LEN) == 0)) {
found = 1;
break;
}
continue;
- }
+ }
if ((tmp->proto == QETH_PROT_IPV4) &&
(addr->proto == QETH_PROT_IPV4) &&
(tmp->type == addr->type) &&
card->options.layer2 = 1;
else
card->options.layer2 = 0;
- card->options.performance_stats = 1;
}
/**
"due to hardware limitations!\n");
card->qdio.no_out_queues = 1;
card->qdio.default_out_queue = 0;
- }
+ }
return 0;
}
i++;
return -ENODEV;
QETH_DBF_TEXT_(setup, 2, "%s", gdev->dev.bus_id);
-
+
card = qeth_alloc_card();
if (!card) {
put_device(dev);
put_device(dev);
qeth_free_card(card);
return rc;
- }
+ }
if ((rc = qeth_setup_card(card))){
QETH_DBF_TEXT_(setup, 2, "2err%d", rc);
put_device(dev);
static void
qeth_clear_cmd_buffers(struct qeth_channel *channel)
{
- int cnt;
+ int cnt = 0;
for (cnt=0; cnt < QETH_CMD_BUFFER_NO; cnt++)
qeth_release_buffer(channel,&channel->iob[cnt]);
{
struct qeth_reply *reply;
- reply = kzalloc(sizeof(struct qeth_reply), GFP_ATOMIC);
+ reply = kmalloc(sizeof(struct qeth_reply), GFP_ATOMIC);
if (reply){
+ memset(reply, 0, sizeof(struct qeth_reply));
atomic_set(&reply->refcnt, 1);
reply->card = card;
};
"IP address reset.\n",
QETH_CARD_IFNAME(card),
card->info.chpid);
- netif_carrier_on(card->dev);
qeth_schedule_recovery(card);
return NULL;
case IPA_CMD_MODCCID:
&card->seqno.pdu_hdr_ack, QETH_SEQ_NO_LENGTH);
QETH_DBF_HEX(control, 2, iob->data, QETH_DBF_CONTROL_LEN);
}
-
+
static int
qeth_send_control_data(struct qeth_card *card, int len,
struct qeth_cmd_buffer *iob,
wake_up(&card->wait_q);
}
return rc;
-}
+}
static inline void
qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
memcpy(QETH_IPA_PDU_LEN_PDU3(iob->data), &s2, 2);
return qeth_osn_send_control_data(card, s1, iob);
}
-
+
static int
qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
int (*reply_cb)
qeth_rebuild_skb_fake_ll_eth(card, skb, hdr);
}
+/*
+ * qeth_rebuild_skb_vlan - re-insert an 802.1Q VLAN tag into an inbound skb.
+ *
+ * Compiled only under CONFIG_QETH_VLAN; otherwise a no-op.  When the layer-3
+ * qeth header flags the frame as VLAN-tagged (QETH_HDR_EXT_VLAN_FRAME) or as
+ * carrying an embedded tag (QETH_HDR_EXT_INCLUDE_VLAN_TAG), the tag is pushed
+ * back in front of the packet data so the stack sees a normal tagged frame.
+ */
+static inline void
+qeth_rebuild_skb_vlan(struct qeth_card *card, struct sk_buff *skb,
+ struct qeth_hdr *hdr)
+{
+#ifdef CONFIG_QETH_VLAN
+ u16 *vlan_tag;
+
+ if (hdr->hdr.l3.ext_flags &
+ (QETH_HDR_EXT_VLAN_FRAME | QETH_HDR_EXT_INCLUDE_VLAN_TAG)) {
+ /* Make room for the 4-byte 802.1Q header in front of the data. */
+ vlan_tag = (u16 *) skb_push(skb, VLAN_HLEN);
+ /*
+ * Tag source depends on the flag: VLAN_FRAME carries it in the
+ * header's vlan_id field, otherwise it sits at offset 12 of
+ * dest_addr (NOTE(review): per the hardware header layout —
+ * confirm against the qeth_hdr definition).
+ */
+ *vlan_tag = (hdr->hdr.l3.ext_flags & QETH_HDR_EXT_VLAN_FRAME)?
+ hdr->hdr.l3.vlan_id : *((u16 *)&hdr->hdr.l3.dest_addr[12]);
+ /* Preserve the original protocol after the TCI, then mark 8021Q. */
+ *(vlan_tag + 1) = skb->protocol;
+ skb->protocol = __constant_htons(ETH_P_8021Q);
+ }
+#endif /* CONFIG_QETH_VLAN */
+}
+
static inline __u16
qeth_layer2_rebuild_skb(struct qeth_card *card, struct sk_buff *skb,
struct qeth_hdr *hdr)
return vlan_id;
}
-static inline __u16
+static inline void
qeth_rebuild_skb(struct qeth_card *card, struct sk_buff *skb,
struct qeth_hdr *hdr)
{
- unsigned short vlan_id = 0;
#ifdef CONFIG_QETH_IPV6
if (hdr->hdr.l3.flags & QETH_HDR_PASSTHRU) {
skb->pkt_type = PACKET_HOST;
skb->protocol = qeth_type_trans(skb, card->dev);
- return 0;
+ return;
}
#endif /* CONFIG_QETH_IPV6 */
skb->protocol = htons((hdr->hdr.l3.flags & QETH_HDR_IPV6)? ETH_P_IPV6 :
default:
skb->pkt_type = PACKET_HOST;
}
-
- if (hdr->hdr.l3.ext_flags &
- (QETH_HDR_EXT_VLAN_FRAME | QETH_HDR_EXT_INCLUDE_VLAN_TAG)) {
- vlan_id = (hdr->hdr.l3.ext_flags & QETH_HDR_EXT_VLAN_FRAME)?
- hdr->hdr.l3.vlan_id : *((u16 *)&hdr->hdr.l3.dest_addr[12]);
- }
-
+ qeth_rebuild_skb_vlan(card, skb, hdr);
if (card->options.fake_ll)
qeth_rebuild_skb_fake_ll(card, skb, hdr);
else
else
skb->ip_summed = SW_CHECKSUMMING;
}
- return vlan_id;
}
static inline void
int offset;
int rxrc;
__u16 vlan_tag = 0;
- __u16 *vlan_addr;
/* get first element of current buffer */
element = (struct qdio_buffer_element *)&buf->buffer->element[0];
offset = 0;
- if (card->options.performance_stats)
- card->perf_stats.bufs_rec++;
+#ifdef CONFIG_QETH_PERF_STATS
+ card->perf_stats.bufs_rec++;
+#endif
while((skb = qeth_get_next_skb(card, buf->buffer, &element,
&offset, &hdr))) {
skb->dev = card->dev;
if (hdr->hdr.l2.id == QETH_HEADER_TYPE_LAYER2)
vlan_tag = qeth_layer2_rebuild_skb(card, skb, hdr);
- else if (hdr->hdr.l3.id == QETH_HEADER_TYPE_LAYER3)
- vlan_tag = qeth_rebuild_skb(card, skb, hdr);
+ else if (hdr->hdr.l3.id == QETH_HEADER_TYPE_LAYER3)
+ qeth_rebuild_skb(card, skb, hdr);
else { /*in case of OSN*/
skb_push(skb, sizeof(struct qeth_hdr));
memcpy(skb->data, hdr, sizeof(struct qeth_hdr));
dev_kfree_skb_any(skb);
continue;
}
- if (card->info.type == QETH_CARD_TYPE_OSN)
- rxrc = card->osn_info.data_cb(skb);
- else
#ifdef CONFIG_QETH_VLAN
if (vlan_tag)
- if (card->vlangrp)
- vlan_hwaccel_rx(skb, card->vlangrp, vlan_tag);
- else {
- dev_kfree_skb_any(skb);
- continue;
- }
+ vlan_hwaccel_rx(skb, card->vlangrp, vlan_tag);
else
#endif
+ if (card->info.type == QETH_CARD_TYPE_OSN)
+ rxrc = card->osn_info.data_cb(skb);
+ else
rxrc = netif_rx(skb);
card->dev->last_rx = jiffies;
card->stats.rx_packets++;
{
struct qeth_buffer_pool_entry *pool_entry;
int i;
-
+
pool_entry = qeth_get_buffer_pool_entry(card);
/*
* since the buffer is accessed only from the input_tasklet
* 'index') un-requeued -> this buffer is the first buffer that
* will be requeued the next time
*/
- if (card->options.performance_stats) {
- card->perf_stats.inbound_do_qdio_cnt++;
- card->perf_stats.inbound_do_qdio_start_time =
- qeth_get_micros();
- }
+#ifdef CONFIG_QETH_PERF_STATS
+ card->perf_stats.inbound_do_qdio_cnt++;
+ card->perf_stats.inbound_do_qdio_start_time = qeth_get_micros();
+#endif
rc = do_QDIO(CARD_DDEV(card),
QDIO_FLAG_SYNC_INPUT | QDIO_FLAG_UNDER_INTERRUPT,
0, queue->next_buf_to_init, count, NULL);
- if (card->options.performance_stats)
- card->perf_stats.inbound_do_qdio_time +=
- qeth_get_micros() -
- card->perf_stats.inbound_do_qdio_start_time;
+#ifdef CONFIG_QETH_PERF_STATS
+ card->perf_stats.inbound_do_qdio_time += qeth_get_micros() -
+ card->perf_stats.inbound_do_qdio_start_time;
+#endif
if (rc){
PRINT_WARN("qeth_queue_input_buffer's do_QDIO "
"return %i (device %s).\n",
QETH_DBF_TEXT(trace, 6, "qdinput");
card = (struct qeth_card *) card_ptr;
net_dev = card->dev;
- if (card->options.performance_stats) {
- card->perf_stats.inbound_cnt++;
- card->perf_stats.inbound_start_time = qeth_get_micros();
- }
+#ifdef CONFIG_QETH_PERF_STATS
+ card->perf_stats.inbound_cnt++;
+ card->perf_stats.inbound_start_time = qeth_get_micros();
+#endif
if (status & QDIO_STATUS_LOOK_FOR_ERROR) {
if (status & QDIO_STATUS_ACTIVATE_CHECK_CONDITION){
QETH_DBF_TEXT(trace, 1,"qdinchk");
index = i % QDIO_MAX_BUFFERS_PER_Q;
buffer = &card->qdio.in_q->bufs[index];
if (!((status & QDIO_STATUS_LOOK_FOR_ERROR) &&
- qeth_check_qdio_errors(buffer->buffer,
+ qeth_check_qdio_errors(buffer->buffer,
qdio_err, siga_err,"qinerr")))
qeth_process_inbound_buffer(card, buffer, index);
/* clear buffer and give back to hardware */
qeth_put_buffer_pool_entry(card, buffer->pool_entry);
qeth_queue_input_buffer(card, index);
}
- if (card->options.performance_stats)
- card->perf_stats.inbound_time += qeth_get_micros() -
- card->perf_stats.inbound_start_time;
+#ifdef CONFIG_QETH_PERF_STATS
+ card->perf_stats.inbound_time += qeth_get_micros() -
+ card->perf_stats.inbound_start_time;
+#endif
}
static inline int
QETH_DBF_TEXT_(trace,1,"%s",CARD_BUS_ID(card));
return QETH_SEND_ERROR_LINK_FAILURE;
case 3:
- default:
QETH_DBF_TEXT(trace, 1, "SIGAcc3");
QETH_DBF_TEXT_(trace,1,"%s",CARD_BUS_ID(card));
return QETH_SEND_ERROR_KICK_IT;
}
+ return QETH_SEND_ERROR_LINK_FAILURE;
}
void
}
queue->card->dev->trans_start = jiffies;
- if (queue->card->options.performance_stats) {
- queue->card->perf_stats.outbound_do_qdio_cnt++;
- queue->card->perf_stats.outbound_do_qdio_start_time =
- qeth_get_micros();
- }
+#ifdef CONFIG_QETH_PERF_STATS
+ queue->card->perf_stats.outbound_do_qdio_cnt++;
+ queue->card->perf_stats.outbound_do_qdio_start_time = qeth_get_micros();
+#endif
if (under_int)
rc = do_QDIO(CARD_DDEV(queue->card),
QDIO_FLAG_SYNC_OUTPUT | QDIO_FLAG_UNDER_INTERRUPT,
else
rc = do_QDIO(CARD_DDEV(queue->card), QDIO_FLAG_SYNC_OUTPUT,
queue->queue_no, index, count, NULL);
- if (queue->card->options.performance_stats)
- queue->card->perf_stats.outbound_do_qdio_time +=
- qeth_get_micros() -
- queue->card->perf_stats.outbound_do_qdio_start_time;
+#ifdef CONFIG_QETH_PERF_STATS
+ queue->card->perf_stats.outbound_do_qdio_time += qeth_get_micros() -
+ queue->card->perf_stats.outbound_do_qdio_start_time;
+#endif
if (rc){
QETH_DBF_TEXT(trace, 2, "flushbuf");
QETH_DBF_TEXT_(trace, 2, " err%d", rc);
return;
}
atomic_add(count, &queue->used_buffers);
- if (queue->card->options.performance_stats)
- queue->card->perf_stats.bufs_sent += count;
+#ifdef CONFIG_QETH_PERF_STATS
+ queue->card->perf_stats.bufs_sent += count;
+#endif
}
/*
>= QETH_HIGH_WATERMARK_PACK){
/* switch non-PACKING -> PACKING */
QETH_DBF_TEXT(trace, 6, "np->pack");
- if (queue->card->options.performance_stats)
- queue->card->perf_stats.sc_dp_p++;
+#ifdef CONFIG_QETH_PERF_STATS
+ queue->card->perf_stats.sc_dp_p++;
+#endif
queue->do_pack = 1;
}
}
<= QETH_LOW_WATERMARK_PACK) {
/* switch PACKING -> non-PACKING */
QETH_DBF_TEXT(trace, 6, "pack->np");
- if (queue->card->options.performance_stats)
- queue->card->perf_stats.sc_p_dp++;
+#ifdef CONFIG_QETH_PERF_STATS
+ queue->card->perf_stats.sc_p_dp++;
+#endif
queue->do_pack = 0;
/* flush packing buffers */
buffer = &queue->bufs[queue->next_buf_to_fill];
queue->next_buf_to_fill =
(queue->next_buf_to_fill + 1) %
QDIO_MAX_BUFFERS_PER_Q;
- }
+ }
}
}
return flush_count;
!atomic_read(&queue->set_pci_flags_count))
flush_cnt +=
qeth_flush_buffers_on_no_pci(queue);
- if (queue->card->options.performance_stats &&
- q_was_packing)
+#ifdef CONFIG_QETH_PERF_STATS
+ if (q_was_packing)
queue->card->perf_stats.bufs_sent_pack +=
flush_cnt;
+#endif
if (flush_cnt)
qeth_flush_buffers(queue, 1, index, flush_cnt);
atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
return;
}
}
- if (card->options.performance_stats) {
- card->perf_stats.outbound_handler_cnt++;
- card->perf_stats.outbound_handler_start_time =
- qeth_get_micros();
- }
+#ifdef CONFIG_QETH_PERF_STATS
+ card->perf_stats.outbound_handler_cnt++;
+ card->perf_stats.outbound_handler_start_time = qeth_get_micros();
+#endif
for(i = first_element; i < (first_element + count); ++i){
buffer = &queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q];
/*we only handle the KICK_IT error by doing a recovery */
qeth_check_outbound_queue(queue);
netif_wake_queue(queue->card->dev);
- if (card->options.performance_stats)
- card->perf_stats.outbound_handler_time += qeth_get_micros() -
- card->perf_stats.outbound_handler_start_time;
+#ifdef CONFIG_QETH_PERF_STATS
+ card->perf_stats.outbound_handler_time += qeth_get_micros() -
+ card->perf_stats.outbound_handler_start_time;
+#endif
}
static void
QETH_DBF_TEXT(setup, 2, "allcqdbf");
- if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED,
- QETH_QDIO_ALLOCATED) != QETH_QDIO_UNINITIALIZED)
+ if (card->qdio.state == QETH_QDIO_ALLOCATED)
return 0;
- card->qdio.in_q = kmalloc(sizeof(struct qeth_qdio_q),
+ card->qdio.in_q = kmalloc(sizeof(struct qeth_qdio_q),
GFP_KERNEL|GFP_DMA);
if (!card->qdio.in_q)
- goto out_nomem;
+ return - ENOMEM;
QETH_DBF_TEXT(setup, 2, "inq");
QETH_DBF_HEX(setup, 2, &card->qdio.in_q, sizeof(void *));
memset(card->qdio.in_q, 0, sizeof(struct qeth_qdio_q));
card->qdio.in_q->bufs[i].buffer =
&card->qdio.in_q->qdio_bufs[i];
/* inbound buffer pool */
- if (qeth_alloc_buffer_pool(card))
- goto out_freeinq;
+ if (qeth_alloc_buffer_pool(card)){
+ kfree(card->qdio.in_q);
+ return -ENOMEM;
+ }
/* outbound */
card->qdio.out_qs =
kmalloc(card->qdio.no_out_queues *
sizeof(struct qeth_qdio_out_q *), GFP_KERNEL);
- if (!card->qdio.out_qs)
- goto out_freepool;
- for (i = 0; i < card->qdio.no_out_queues; ++i) {
+ if (!card->qdio.out_qs){
+ qeth_free_buffer_pool(card);
+ return -ENOMEM;
+ }
+ for (i = 0; i < card->qdio.no_out_queues; ++i){
card->qdio.out_qs[i] = kmalloc(sizeof(struct qeth_qdio_out_q),
GFP_KERNEL|GFP_DMA);
- if (!card->qdio.out_qs[i])
- goto out_freeoutq;
+ if (!card->qdio.out_qs[i]){
+ while (i > 0)
+ kfree(card->qdio.out_qs[--i]);
+ kfree(card->qdio.out_qs);
+ return -ENOMEM;
+ }
QETH_DBF_TEXT_(setup, 2, "outq %i", i);
QETH_DBF_HEX(setup, 2, &card->qdio.out_qs[i], sizeof(void *));
memset(card->qdio.out_qs[i], 0, sizeof(struct qeth_qdio_out_q));
&card->qdio.out_qs[i]->qdio_bufs[j];
skb_queue_head_init(&card->qdio.out_qs[i]->bufs[j].
skb_list);
- lockdep_set_class(
- &card->qdio.out_qs[i]->bufs[j].skb_list.lock,
- &qdio_out_skb_queue_key);
INIT_LIST_HEAD(&card->qdio.out_qs[i]->bufs[j].ctx_list);
}
}
+ card->qdio.state = QETH_QDIO_ALLOCATED;
return 0;
-
-out_freeoutq:
- while (i > 0)
- kfree(card->qdio.out_qs[--i]);
- kfree(card->qdio.out_qs);
-out_freepool:
- qeth_free_buffer_pool(card);
-out_freeinq:
- kfree(card->qdio.in_q);
-out_nomem:
- atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED);
- return -ENOMEM;
}
static void
int i, j;
QETH_DBF_TEXT(trace, 2, "freeqdbf");
- if (atomic_swap(&card->qdio.state, QETH_QDIO_UNINITIALIZED) ==
- QETH_QDIO_UNINITIALIZED)
+ if (card->qdio.state == QETH_QDIO_UNINITIALIZED)
return;
kfree(card->qdio.in_q);
/* inbound buffer pool */
kfree(card->qdio.out_qs[i]);
}
kfree(card->qdio.out_qs);
+ card->qdio.state = QETH_QDIO_UNINITIALIZED;
}
static void
qeth_init_qdio_info(struct qeth_card *card)
{
QETH_DBF_TEXT(setup, 4, "intqdinf");
- atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED);
+ card->qdio.state = QETH_QDIO_UNINITIALIZED;
/* inbound */
card->qdio.in_buf_size = QETH_IN_BUF_SIZE_DEFAULT;
card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_DEFAULT;
struct qdio_buffer **in_sbal_ptrs;
struct qdio_buffer **out_sbal_ptrs;
int i, j, k;
- int rc = 0;
+ int rc;
QETH_DBF_TEXT(setup, 2, "qdioest");
- qib_param_field = kzalloc(QDIO_MAX_BUFFERS_PER_Q * sizeof(char),
+ qib_param_field = kmalloc(QDIO_MAX_BUFFERS_PER_Q * sizeof(char),
GFP_KERNEL);
if (!qib_param_field)
return -ENOMEM;
+ memset(qib_param_field, 0, QDIO_MAX_BUFFERS_PER_Q * sizeof(char));
+
qeth_create_qib_param_field(card, qib_param_field);
qeth_create_qib_param_field_blkt(card, qib_param_field);
init_data.input_sbal_addr_array = (void **) in_sbal_ptrs;
init_data.output_sbal_addr_array = (void **) out_sbal_ptrs;
- if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ALLOCATED,
- QETH_QDIO_ESTABLISHED) == QETH_QDIO_ALLOCATED)
- if ((rc = qdio_initialize(&init_data)))
- atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
+ if (!(rc = qdio_initialize(&init_data)))
+ card->qdio.state = QETH_QDIO_ESTABLISHED;
kfree(out_sbal_ptrs);
kfree(in_sbal_ptrs);
rc3 = qeth_halt_channel(&card->data);
if (rc1)
return rc1;
- if (rc2)
+ if (rc2)
return rc2;
return rc3;
}
rc3 = qeth_clear_channel(&card->data);
if (rc1)
return rc1;
- if (rc2)
+ if (rc2)
return rc2;
return rc3;
}
int rc = 0;
QETH_DBF_TEXT(trace,3,"qdioclr");
- switch (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ESTABLISHED,
- QETH_QDIO_CLEANING)) {
- case QETH_QDIO_ESTABLISHED:
+ if (card->qdio.state == QETH_QDIO_ESTABLISHED){
if ((rc = qdio_cleanup(CARD_DDEV(card),
- (card->info.type == QETH_CARD_TYPE_IQD) ?
- QDIO_FLAG_CLEANUP_USING_HALT :
- QDIO_FLAG_CLEANUP_USING_CLEAR)))
+ (card->info.type == QETH_CARD_TYPE_IQD) ?
+ QDIO_FLAG_CLEANUP_USING_HALT :
+ QDIO_FLAG_CLEANUP_USING_CLEAR)))
QETH_DBF_TEXT_(trace, 3, "1err%d", rc);
- atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
- break;
- case QETH_QDIO_CLEANING:
- return rc;
- default:
- break;
+ card->qdio.state = QETH_QDIO_ALLOCATED;
}
if ((rc = qeth_clear_halt_card(card, use_halt)))
QETH_DBF_TEXT_(trace, 3, "2err%d", rc);
/* return OK; otherwise ksoftirqd goes to 100% */
return NETDEV_TX_OK;
}
- if (card->options.performance_stats) {
- card->perf_stats.outbound_cnt++;
- card->perf_stats.outbound_start_time = qeth_get_micros();
- }
+#ifdef CONFIG_QETH_PERF_STATS
+ card->perf_stats.outbound_cnt++;
+ card->perf_stats.outbound_start_time = qeth_get_micros();
+#endif
netif_stop_queue(dev);
if ((rc = qeth_send_packet(card, skb))) {
if (rc == -EBUSY) {
}
}
netif_wake_queue(dev);
- if (card->options.performance_stats)
- card->perf_stats.outbound_time += qeth_get_micros() -
- card->perf_stats.outbound_start_time;
+#ifdef CONFIG_QETH_PERF_STATS
+ card->perf_stats.outbound_time += qeth_get_micros() -
+ card->perf_stats.outbound_start_time;
+#endif
return rc;
}
QETH_DBF_TEXT(trace,4,"nomacadr");
return -EPERM;
}
- card->data.state = CH_STATE_UP;
- card->state = CARD_STATE_UP;
card->dev->flags |= IFF_UP;
netif_start_queue(dev);
+ card->data.state = CH_STATE_UP;
+ card->state = CARD_STATE_UP;
if (!card->lan_online && netif_carrier_ok(dev))
netif_carrier_off(dev);
card = (struct qeth_card *) dev->priv;
- netif_tx_disable(dev);
+ netif_stop_queue(dev);
card->dev->flags &= ~IFF_UP;
if (card->state == CARD_STATE_UP)
card->state = CARD_STATE_SOFTSETUP;
if ((hdr_mac == QETH_TR_MAC_NC) ||
(hdr_mac == QETH_TR_MAC_C))
return RTN_MULTICAST;
- break;
/* eth or so multicast? */
default:
if ((hdr_mac == QETH_ETH_MAC_V4) ||
}
}
-static inline struct qeth_hdr *
-__qeth_prepare_skb(struct qeth_card *card, struct sk_buff *skb, int ipv)
+static inline int
+qeth_prepare_skb(struct qeth_card *card, struct sk_buff **skb,
+ struct qeth_hdr **hdr, int ipv)
{
+ int rc = 0;
#ifdef CONFIG_QETH_VLAN
u16 *tag;
- if (card->vlangrp && vlan_tx_tag_present(skb) &&
+#endif
+
+ QETH_DBF_TEXT(trace, 6, "prepskb");
+ if (card->info.type == QETH_CARD_TYPE_OSN) {
+ *hdr = (struct qeth_hdr *)(*skb)->data;
+ return rc;
+ }
+ rc = qeth_realloc_headroom(card, skb, sizeof(struct qeth_hdr));
+ if (rc)
+ return rc;
+#ifdef CONFIG_QETH_VLAN
+ if (card->vlangrp && vlan_tx_tag_present(*skb) &&
((ipv == 6) || card->options.layer2) ) {
/*
* Move the mac addresses (6 bytes src, 6 bytes dest)
* to the beginning of the new header. We are using three
* memcpys instead of one memmove to save cycles.
*/
- skb_push(skb, VLAN_HLEN);
- memcpy(skb->data, skb->data + 4, 4);
- memcpy(skb->data + 4, skb->data + 8, 4);
- memcpy(skb->data + 8, skb->data + 12, 4);
- tag = (u16 *)(skb->data + 12);
+ skb_push(*skb, VLAN_HLEN);
+ memcpy((*skb)->data, (*skb)->data + 4, 4);
+ memcpy((*skb)->data + 4, (*skb)->data + 8, 4);
+ memcpy((*skb)->data + 8, (*skb)->data + 12, 4);
+ tag = (u16 *)((*skb)->data + 12);
/*
* first two bytes = ETH_P_8021Q (0x8100)
* second two bytes = VLANID
*/
*tag = __constant_htons(ETH_P_8021Q);
- *(tag + 1) = htons(vlan_tx_tag_get(skb));
+ *(tag + 1) = htons(vlan_tx_tag_get(*skb));
}
#endif
- return ((struct qeth_hdr *)
- qeth_push_skb(card, skb, sizeof(struct qeth_hdr)));
-}
-
-static inline void
-__qeth_free_new_skb(struct sk_buff *orig_skb, struct sk_buff *new_skb)
-{
- if (orig_skb != new_skb)
- dev_kfree_skb_any(new_skb);
-}
-
-static inline struct sk_buff *
-qeth_prepare_skb(struct qeth_card *card, struct sk_buff *skb,
- struct qeth_hdr **hdr, int ipv)
-{
- struct sk_buff *new_skb;
-
- QETH_DBF_TEXT(trace, 6, "prepskb");
-
- new_skb = qeth_realloc_headroom(card, skb, sizeof(struct qeth_hdr));
- if (new_skb == NULL)
- return NULL;
- *hdr = __qeth_prepare_skb(card, new_skb, ipv);
- if (*hdr == NULL) {
- __qeth_free_new_skb(skb, new_skb);
- return NULL;
- }
- return new_skb;
+ *hdr = (struct qeth_hdr *)
+ qeth_push_skb(card, skb, sizeof(struct qeth_hdr));
+ if (hdr == NULL)
+ return -EINVAL;
+ return 0;
}
static inline u8
}
} else { /* passthrough */
if((skb->dev->type == ARPHRD_IEEE802_TR) &&
- !memcmp(skb->data + sizeof(struct qeth_hdr) +
+ !memcmp(skb->data + sizeof(struct qeth_hdr) +
sizeof(__u16), skb->dev->broadcast, 6)) {
hdr->hdr.l3.flags = QETH_CAST_BROADCAST |
QETH_HDR_PASSTHRU;
flush_cnt = 1;
} else {
QETH_DBF_TEXT(trace, 6, "fillbfpa");
- if (queue->card->options.performance_stats)
- queue->card->perf_stats.skbs_sent_pack++;
+#ifdef CONFIG_QETH_PERF_STATS
+ queue->card->perf_stats.skbs_sent_pack++;
+#endif
if (buf->next_element_to_fill >=
QETH_MAX_BUFFER_ELEMENTS(queue->card)) {
/*
* check if buffer is empty to make sure that we do not 'overtake'
* ourselves and try to fill a buffer that is already primed
*/
- if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY)
- goto out;
+ if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY) {
+ card->stats.tx_dropped++;
+ atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
+ return -EBUSY;
+ }
if (ctx == NULL)
queue->next_buf_to_fill = (queue->next_buf_to_fill + 1) %
QDIO_MAX_BUFFERS_PER_Q;
else {
buffers_needed = qeth_eddp_check_buffers_for_context(queue,ctx);
- if (buffers_needed < 0)
- goto out;
+ if (buffers_needed < 0) {
+ card->stats.tx_dropped++;
+ atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
+ return -EBUSY;
+ }
queue->next_buf_to_fill =
(queue->next_buf_to_fill + buffers_needed) %
QDIO_MAX_BUFFERS_PER_Q;
qeth_flush_buffers(queue, 0, index, flush_cnt);
}
return 0;
-out:
- atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
- return -EBUSY;
}
static inline int
* check if buffer is empty to make sure that we do not 'overtake'
* ourselves and try to fill a buffer that is already primed
*/
- if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY) {
+ if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY){
+ card->stats.tx_dropped++;
atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
return -EBUSY;
}
* again */
if (atomic_read(&buffer->state) !=
QETH_QDIO_BUF_EMPTY){
+ card->stats.tx_dropped++;
qeth_flush_buffers(queue, 0, start_index, flush_count);
atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
return -EBUSY;
* free buffers) to handle eddp context */
if (qeth_eddp_check_buffers_for_context(queue,ctx) < 0){
printk("eddp tx_dropped 1\n");
+ card->stats.tx_dropped++;
rc = -EBUSY;
goto out;
}
tmp = qeth_eddp_fill_buffer(queue,ctx,queue->next_buf_to_fill);
if (tmp < 0) {
printk("eddp tx_dropped 2\n");
+ card->stats.tx_dropped++;
rc = - EBUSY;
goto out;
}
qeth_flush_buffers(queue, 0, start_index, flush_count);
}
/* at this point the queue is UNLOCKED again */
- if (queue->card->options.performance_stats && do_pack)
+#ifdef CONFIG_QETH_PERF_STATS
+ if (do_pack)
queue->card->perf_stats.bufs_sent_pack += flush_count;
+#endif /* CONFIG_QETH_PERF_STATS */
return rc;
}
static inline int
-qeth_get_elements_no(struct qeth_card *card, void *hdr,
+qeth_get_elements_no(struct qeth_card *card, void *hdr,
struct sk_buff *skb, int elems)
{
int elements_needed = 0;
- if (skb_shinfo(skb)->nr_frags > 0)
+ if (skb_shinfo(skb)->nr_frags > 0) {
elements_needed = (skb_shinfo(skb)->nr_frags + 1);
- if (elements_needed == 0)
+ }
+ if (elements_needed == 0 )
elements_needed = 1 + (((((unsigned long) hdr) % PAGE_SIZE)
+ skb->len) >> PAGE_SHIFT);
if ((elements_needed + elems) > QETH_MAX_BUFFER_ELEMENTS(card)){
- PRINT_ERR("Invalid size of IP packet "
- "(Number=%d / Length=%d). Discarded.\n",
+ PRINT_ERR("qeth_do_send_packet: invalid size of "
+ "IP packet (Number=%d / Length=%d). Discarded.\n",
(elements_needed+elems), skb->len);
return 0;
}
return elements_needed;
}
-
static inline int
qeth_send_packet(struct qeth_card *card, struct sk_buff *skb)
{
int elements_needed = 0;
enum qeth_large_send_types large_send = QETH_LARGE_SEND_NO;
struct qeth_eddp_context *ctx = NULL;
- int tx_bytes = skb->len;
- unsigned short nr_frags = skb_shinfo(skb)->nr_frags;
- unsigned short tso_size = skb_shinfo(skb)->gso_size;
- struct sk_buff *new_skb, *new_skb2;
int rc;
QETH_DBF_TEXT(trace, 6, "sendpkt");
- new_skb = skb;
- if ((card->info.type == QETH_CARD_TYPE_OSN) &&
- (skb->protocol == htons(ETH_P_IPV6)))
- return -EPERM;
- cast_type = qeth_get_cast_type(card, skb);
- if ((cast_type == RTN_BROADCAST) &&
- (card->info.broadcast_capable == 0))
- return -EPERM;
- queue = card->qdio.out_qs
- [qeth_get_priority_queue(card, skb, ipv, cast_type)];
if (!card->options.layer2) {
ipv = qeth_get_ip_version(skb);
if ((card->dev->hard_header == qeth_fake_header) && ipv) {
- new_skb = qeth_pskb_unshare(skb, GFP_ATOMIC);
- if (!new_skb)
- return -ENOMEM;
+ if ((skb = qeth_pskb_unshare(skb,GFP_ATOMIC)) == NULL) {
+ card->stats.tx_dropped++;
+ dev_kfree_skb_irq(skb);
+ return 0;
+ }
if(card->dev->type == ARPHRD_IEEE802_TR){
- skb_pull(new_skb, QETH_FAKE_LL_LEN_TR);
+ skb_pull(skb, QETH_FAKE_LL_LEN_TR);
} else {
- skb_pull(new_skb, QETH_FAKE_LL_LEN_ETH);
+ skb_pull(skb, QETH_FAKE_LL_LEN_ETH);
}
}
}
- if (skb_is_gso(skb))
+ if ((card->info.type == QETH_CARD_TYPE_OSN) &&
+ (skb->protocol == htons(ETH_P_IPV6))) {
+ dev_kfree_skb_any(skb);
+ return 0;
+ }
+ cast_type = qeth_get_cast_type(card, skb);
+ if ((cast_type == RTN_BROADCAST) &&
+ (card->info.broadcast_capable == 0)){
+ card->stats.tx_dropped++;
+ card->stats.tx_errors++;
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+ queue = card->qdio.out_qs
+ [qeth_get_priority_queue(card, skb, ipv, cast_type)];
+
+ if (skb_shinfo(skb)->tso_size)
large_send = card->options.large_send;
- /* check on OSN device*/
- if (card->info.type == QETH_CARD_TYPE_OSN)
- hdr = (struct qeth_hdr *)new_skb->data;
- /*are we able to do TSO ? */
+
+ /*are we able to do TSO ? If so ,prepare and send it from here */
if ((large_send == QETH_LARGE_SEND_TSO) &&
(cast_type == RTN_UNSPEC)) {
- rc = qeth_tso_prepare_packet(card, new_skb, ipv, cast_type);
+ rc = qeth_tso_prepare_packet(card, skb, ipv, cast_type);
if (rc) {
- __qeth_free_new_skb(skb, new_skb);
- return rc;
- }
+ card->stats.tx_dropped++;
+ card->stats.tx_errors++;
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
elements_needed++;
- } else if (card->info.type != QETH_CARD_TYPE_OSN) {
- new_skb2 = qeth_prepare_skb(card, new_skb, &hdr, ipv);
- if (!new_skb2) {
- __qeth_free_new_skb(skb, new_skb);
- return -EINVAL;
+ } else {
+ if ((rc = qeth_prepare_skb(card, &skb, &hdr, ipv))) {
+ QETH_DBF_TEXT_(trace, 4, "pskbe%d", rc);
+ return rc;
}
- if (new_skb != skb)
- __qeth_free_new_skb(new_skb2, new_skb);
- new_skb = new_skb2;
- qeth_fill_header(card, hdr, new_skb, ipv, cast_type);
+ if (card->info.type != QETH_CARD_TYPE_OSN)
+ qeth_fill_header(card, hdr, skb, ipv, cast_type);
}
+
if (large_send == QETH_LARGE_SEND_EDDP) {
- ctx = qeth_eddp_create_context(card, new_skb, hdr);
+ ctx = qeth_eddp_create_context(card, skb, hdr);
if (ctx == NULL) {
- __qeth_free_new_skb(skb, new_skb);
PRINT_WARN("could not create eddp context\n");
return -EINVAL;
}
} else {
- int elems = qeth_get_elements_no(card,(void*) hdr, new_skb,
+ int elems = qeth_get_elements_no(card,(void*) hdr, skb,
elements_needed);
- if (!elems) {
- __qeth_free_new_skb(skb, new_skb);
+ if (!elems)
return -EINVAL;
- }
elements_needed += elems;
}
if (card->info.type != QETH_CARD_TYPE_IQD)
- rc = qeth_do_send_packet(card, queue, new_skb, hdr,
+ rc = qeth_do_send_packet(card, queue, skb, hdr,
elements_needed, ctx);
else
- rc = qeth_do_send_packet_fast(card, queue, new_skb, hdr,
+ rc = qeth_do_send_packet_fast(card, queue, skb, hdr,
elements_needed, ctx);
- if (!rc) {
+ if (!rc){
card->stats.tx_packets++;
- card->stats.tx_bytes += tx_bytes;
- if (new_skb != skb)
- dev_kfree_skb_any(skb);
- if (card->options.performance_stats) {
- if (tso_size &&
- !(large_send == QETH_LARGE_SEND_NO)) {
- card->perf_stats.large_send_bytes += tx_bytes;
- card->perf_stats.large_send_cnt++;
- }
- if (nr_frags > 0) {
- card->perf_stats.sg_skbs_sent++;
- /* nr_frags + skb->data */
- card->perf_stats.sg_frags_sent +=
- nr_frags + 1;
- }
+ card->stats.tx_bytes += skb->len;
+#ifdef CONFIG_QETH_PERF_STATS
+ if (skb_shinfo(skb)->tso_size &&
+ !(large_send == QETH_LARGE_SEND_NO)) {
+ card->perf_stats.large_send_bytes += skb->len;
+ card->perf_stats.large_send_cnt++;
}
- } else {
- card->stats.tx_dropped++;
- __qeth_free_new_skb(skb, new_skb);
+ if (skb_shinfo(skb)->nr_frags > 0){
+ card->perf_stats.sg_skbs_sent++;
+ /* nr_frags + skb->data */
+ card->perf_stats.sg_frags_sent +=
+ skb_shinfo(skb)->nr_frags + 1;
+ }
+#endif /* CONFIG_QETH_PERF_STATS */
}
if (ctx != NULL) {
/* drop creator's reference */
qeth_eddp_put_context(ctx);
/* free skb; it's not referenced by a buffer */
- if (!rc)
- dev_kfree_skb_any(new_skb);
+ if (rc == 0)
+ dev_kfree_skb_any(skb);
+
}
return rc;
}
case MII_NCONFIG: /* network interface config */
break;
default:
+ rc = 0;
break;
}
return rc;
}
+/*
+ * qeth_mdio_write - MDIO register-write handler for the SIOCSMIIREG path.
+ *
+ * Every recognized MII register (and the default case) simply falls through
+ * to break, so this is currently a deliberate no-op for all registers: the
+ * switch enumerates the PHY register set but performs no write.  The ioctl
+ * caller elsewhere in this patch notes "remove return if qeth_mdio_write
+ * does something", i.e. this stub exists as a placeholder until real
+ * register writes are supported.
+ */
+static void
+qeth_mdio_write(struct net_device *dev, int phy_id, int regnum, int value)
+{
+ switch(regnum){
+ case MII_BMCR: /* Basic mode control register */
+ case MII_BMSR: /* Basic mode status register */
+ case MII_PHYSID1: /* PHYS ID 1 */
+ case MII_PHYSID2: /* PHYS ID 2 */
+ case MII_ADVERTISE: /* Advertisement control reg */
+ case MII_LPA: /* Link partner ability reg */
+ case MII_EXPANSION: /* Expansion register */
+ case MII_DCOUNTER: /* disconnect counter */
+ case MII_FCSCOUNTER: /* false carrier counter */
+ case MII_NWAYTEST: /* N-way auto-neg test register */
+ case MII_RERRCOUNTER: /* rx error counter */
+ case MII_SREVISION: /* silicon revision */
+ case MII_RESV1: /* reserved 1 */
+ case MII_LBRERROR: /* loopback, rx, bypass error */
+ case MII_PHYADDR: /* physical address */
+ case MII_RESV2: /* reserved 2 */
+ case MII_TPISTATUS: /* TPI status for 10mbps */
+ case MII_NCONFIG: /* network interface config */
+ default:
+ break;
+ }
+}
static inline const char *
qeth_arp_get_error_cause(int *rc)
qeth_get_setassparms_cmd(struct qeth_card *, enum qeth_ipa_funcs,
__u16, __u16, enum qeth_prot_versions);
static int
-qeth_arp_query(struct qeth_card *card, char __user *udata)
+qeth_arp_query(struct qeth_card *card, char *udata)
{
struct qeth_cmd_buffer *iob;
struct qeth_arp_query_info qinfo = {0, };
/* get size of userspace buffer and mask_bits -> 6 bytes */
if (copy_from_user(&qinfo, udata, 6))
return -EFAULT;
- if (!(qinfo.udata = kzalloc(qinfo.udata_len, GFP_KERNEL)))
+ if (!(qinfo.udata = kmalloc(qinfo.udata_len, GFP_KERNEL)))
return -ENOMEM;
+ memset(qinfo.udata, 0, qinfo.udata_len);
qinfo.udata_offset = QETH_QARP_ENTRIES_OFFSET;
iob = qeth_get_setassparms_cmd(card, IPA_ARP_PROCESSING,
IPA_CMD_ASS_ARP_QUERY_INFO,
* function to send SNMP commands to OSA-E card
*/
static int
-qeth_snmp_command(struct qeth_card *card, char __user *udata)
+qeth_snmp_command(struct qeth_card *card, char *udata)
{
struct qeth_cmd_buffer *iob;
struct qeth_ipa_cmd *cmd;
return -EFAULT;
}
qinfo.udata_len = ureq->hdr.data_len;
- if (!(qinfo.udata = kzalloc(qinfo.udata_len, GFP_KERNEL))){
+ if (!(qinfo.udata = kmalloc(qinfo.udata_len, GFP_KERNEL))){
kfree(ureq);
return -ENOMEM;
}
+ memset(qinfo.udata, 0, qinfo.udata_len);
qinfo.udata_offset = sizeof(struct qeth_snmp_ureq_hdr);
iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_SNMP_CONTROL,
mii_data->val_out = qeth_mdio_read(dev,mii_data->phy_id,
mii_data->reg_num);
break;
+ case SIOCSMIIREG:
+ rc = -EOPNOTSUPP;
+ break;
+ /* TODO: remove return if qeth_mdio_write does something */
+ if (!capable(CAP_NET_ADMIN)){
+ rc = -EPERM;
+ break;
+ }
+ mii_data = if_mii(rq);
+ if (mii_data->phy_id != 0)
+ rc = -EINVAL;
+ else
+ qeth_mdio_write(dev, mii_data->phy_id, mii_data->reg_num,
+ mii_data->val_in);
+ break;
default:
rc = -EOPNOTSUPP;
}
struct sk_buff_head tmp_list;
skb_queue_head_init(&tmp_list);
- lockdep_set_class(&tmp_list.lock, &qdio_out_skb_queue_key);
for(i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i){
while ((skb = skb_dequeue(&buf->skb_list))){
if (vlan_tx_tag_present(skb) &&
cmd = (struct qeth_ipa_cmd *) data;
if (cmd->hdr.return_code) {
PRINT_ERR("Error in processing VLAN %i on %s: 0x%x. "
- "Continuing\n",cmd->data.setdelvlan.vlan_id,
+ "Continuing\n",cmd->data.setdelvlan.vlan_id,
QETH_CARD_IFNAME(card), cmd->hdr.return_code);
QETH_DBF_TEXT_(trace, 2, "L2VL%4x", cmd->hdr.command);
QETH_DBF_TEXT_(trace, 2, "L2%s", CARD_BUS_ID(card));
iob = qeth_get_ipacmd_buffer(card, ipacmd, QETH_PROT_IPV4);
cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
cmd->data.setdelvlan.vlan_id = i;
- return qeth_send_ipa_cmd(card, iob,
+ return qeth_send_ipa_cmd(card, iob,
qeth_layer2_send_setdelvlan_cb, NULL);
}
* Examine hardware response to SET_PROMISC_MODE
*/
static int
-qeth_setadp_promisc_mode_cb(struct qeth_card *card,
+qeth_setadp_promisc_mode_cb(struct qeth_card *card,
struct qeth_reply *reply,
unsigned long data)
{
cmd = (struct qeth_ipa_cmd *) data;
setparms = &(cmd->data.setadapterparms);
-
+
qeth_default_setadapterparms_cb(card, reply, (unsigned long)cmd);
- if (cmd->hdr.return_code) {
- QETH_DBF_TEXT_(trace,4,"prmrc%2.2x",cmd->hdr.return_code);
+ if (cmd->hdr.return_code) {
+ QETH_DBF_TEXT_(trace,4,"prmrc%2.2x",cmd->hdr.return_code);
setparms->data.mode = SET_PROMISC_MODE_OFF;
}
card->info.promisc_mode = setparms->data.mode;
if (card->info.type == QETH_CARD_TYPE_OSN)
return ;
-
+
QETH_DBF_TEXT(trace, 3, "setmulti");
qeth_delete_mc_addresses(card);
if (card->options.layer2) {
{
struct qeth_ipaddr *addr;
- addr = kzalloc(sizeof(struct qeth_ipaddr), GFP_ATOMIC);
+ addr = kmalloc(sizeof(struct qeth_ipaddr), GFP_ATOMIC);
if (addr == NULL) {
PRINT_WARN("Not enough memory to add address\n");
return NULL;
}
+ memset(addr,0,sizeof(struct qeth_ipaddr));
addr->type = QETH_IP_TYPE_NORMAL;
addr->proto = prot;
return addr;
struct qeth_cmd_buffer *iob;
struct qeth_card *card;
int rc;
-
+
QETH_DBF_TEXT(trace, 2, "osnsdmc");
if (!dev)
return -ENODEV;
card->osn_info.data_cb = NULL;
return;
}
-
+
static void
qeth_delete_mc_addresses(struct qeth_card *card)
{
struct inet6_dev *in6_dev;
QETH_DBF_TEXT(trace,4,"chkmcv6");
- if (!qeth_is_supported(card, IPA_IPV6))
+ if (!qeth_is_supported(card, IPA_IPV6))
return ;
in6_dev = in6_dev_get(card->dev);
if (in6_dev == NULL)
dev->vlan_rx_kill_vid = qeth_vlan_rx_kill_vid;
dev->vlan_rx_add_vid = qeth_vlan_rx_add_vid;
#endif
+ dev->hard_header = card->orig_hard_header;
if (qeth_get_netdev_flags(card) & IFF_NOARP) {
dev->rebuild_header = NULL;
dev->hard_header = NULL;
+ if (card->options.fake_ll)
+ dev->hard_header = qeth_fake_header;
dev->header_cache_update = NULL;
dev->hard_header_cache = NULL;
}
if (!(card->info.unique_id & UNIQUE_ID_NOT_BY_CARD))
card->dev->dev_id = card->info.unique_id & 0xffff;
#endif
- if (card->options.fake_ll &&
- (qeth_get_netdev_flags(card) & IFF_NOARP))
- dev->hard_header = qeth_fake_header;
dev->hard_header_parse = NULL;
dev->set_mac_address = qeth_layer2_set_mac_address;
dev->flags |= qeth_get_netdev_flags(card);
/*network device will be recovered*/
if (card->dev) {
card->dev->hard_header = card->orig_hard_header;
- if (card->options.fake_ll &&
- (qeth_get_netdev_flags(card) & IFF_NOARP))
- card->dev->hard_header = qeth_fake_header;
return 0;
}
/* at first set_online allocate netdev */
cmd = (struct qeth_ipa_cmd *) data;
if (!card->options.layer2 || card->info.guestlan ||
- !(card->info.mac_bits & QETH_LAYER2_MAC_READ)) {
+ !(card->info.mac_bits & QETH_LAYER2_MAC_READ)) {
memcpy(card->dev->dev_addr,
&cmd->data.setadapterparms.data.change_addr.addr,
OSA_ADDR_LEN);
cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
if (len <= sizeof(__u32))
cmd->data.setassparms.data.flags_32bit = (__u32) data;
- else /* (len > sizeof(__u32)) */
+ else if (len > sizeof(__u32))
memcpy(&cmd->data.setassparms.data, (void *) data, len);
rc = qeth_send_ipa_cmd(card, iob, reply_cb, reply_param);
QETH_DBF_TEXT(trace,3,"softipv6");
+ netif_stop_queue(card->dev);
rc = qeth_send_startlan(card, QETH_PROT_IPV6);
if (rc) {
PRINT_ERR("IPv6 startlan failed on %s\n",
QETH_CARD_IFNAME(card));
return rc;
}
+ netif_wake_queue(card->dev);
rc = qeth_query_ipassists(card,QETH_PROT_IPV6);
if (rc) {
PRINT_ERR("IPv6 query ipassist failed on %s\n",
QETH_DBF_TEXT(trace,3,"setrtg6");
#ifdef CONFIG_QETH_IPV6
- if (!qeth_is_supported(card, IPA_IPV6))
- return 0;
qeth_correct_routing_type(card, &card->options.route6.type,
QETH_PROT_IPV6);
+ if ((card->options.route6.type == NO_ROUTER) ||
+ ((card->info.type == QETH_CARD_TYPE_OSAE) &&
+ (card->options.route6.type == MULTICAST_ROUTER) &&
+ !qeth_is_supported6(card,IPA_OSA_MC_ROUTER)))
+ return 0;
rc = qeth_send_setrouting(card, card->options.route6.type,
QETH_PROT_IPV6);
if (rc) {
card->options.large_send = type;
return 0;
}
- if (card->state == CARD_STATE_UP)
- netif_tx_disable(card->dev);
+ netif_stop_queue(card->dev);
card->options.large_send = type;
switch (card->options.large_send) {
case QETH_LARGE_SEND_EDDP:
card->dev->features &= ~(NETIF_F_TSO | NETIF_F_SG);
break;
}
- if (card->state == CARD_STATE_UP)
- netif_wake_queue(card->dev);
+ netif_wake_queue(card->dev);
return rc;
}
if ((rc = qeth_setrouting_v6(card)))
QETH_DBF_TEXT_(setup, 2, "5err%d", rc);
out:
- netif_tx_disable(card->dev);
+ netif_stop_queue(card->dev);
return 0;
}
if (card->read.state == CH_STATE_UP &&
card->write.state == CH_STATE_UP &&
(card->state == CARD_STATE_UP)) {
- if (recovery_mode &&
+ if (recovery_mode &&
card->info.type != QETH_CARD_TYPE_OSN) {
qeth_stop(card->dev);
} else {
qeth_register_netdev(struct qeth_card *card)
{
QETH_DBF_TEXT(setup, 3, "regnetd");
- if (card->dev->reg_state != NETREG_UNINITIALIZED)
+ if (card->dev->reg_state != NETREG_UNINITIALIZED) {
+ qeth_netdev_init(card->dev);
return 0;
+ }
/* sysfs magic */
SET_NETDEV_DEV(card->dev, &card->gdev->dev);
return register_netdev(card->dev);
{
QETH_DBF_TEXT(setup ,2, "startag");
- if (recovery_mode &&
+ if (recovery_mode &&
card->info.type != QETH_CARD_TYPE_OSN) {
qeth_open(card->dev);
} else {
QETH_DBF_TEXT_(setup, 2, "5err%d", rc);
goto out_remove;
}
+ card->state = CARD_STATE_SOFTSETUP;
if ((rc = qeth_init_qdio_queues(card))){
QETH_DBF_TEXT_(setup, 2, "6err%d", rc);
goto out_remove;
}
- card->state = CARD_STATE_SOFTSETUP;
netif_carrier_on(card->dev);
qeth_set_allowed_threads(card, 0xffffffff, 0);
}
static struct ccw_device_id qeth_ids[] = {
- {CCW_DEVICE(0x1731, 0x01), .driver_info = QETH_CARD_TYPE_OSAE},
- {CCW_DEVICE(0x1731, 0x05), .driver_info = QETH_CARD_TYPE_IQD},
- {CCW_DEVICE(0x1731, 0x06), .driver_info = QETH_CARD_TYPE_OSN},
+ {CCW_DEVICE(0x1731, 0x01), driver_info:QETH_CARD_TYPE_OSAE},
+ {CCW_DEVICE(0x1731, 0x05), driver_info:QETH_CARD_TYPE_IQD},
+ {CCW_DEVICE(0x1731, 0x06), driver_info:QETH_CARD_TYPE_OSN},
{},
};
MODULE_DEVICE_TABLE(ccw, qeth_ids);
static struct neigh_ops arp_direct_ops_template = {
.family = AF_INET,
+ .destructor = NULL,
.solicit = NULL,
.error_report = NULL,
.output = dev_queue_xmit,
static struct notifier_block qeth_ip_notifier = {
qeth_ip_event,
- NULL,
+ 0
};
#ifdef CONFIG_QETH_IPV6
static struct notifier_block qeth_ip6_notifier = {
qeth_ip6_event,
- NULL,
+ 0
};
#endif
static int
qeth_reboot_event(struct notifier_block *this, unsigned long event, void *ptr)
{
- int ret;
- ret = driver_for_each_device(&qeth_ccwgroup_driver.driver, NULL, NULL,
- __qeth_reboot_event_card);
- return ret ? NOTIFY_BAD : NOTIFY_DONE;
+ driver_for_each_device(&qeth_ccwgroup_driver.driver, NULL, NULL,
+ __qeth_reboot_event_card);
+ return NOTIFY_DONE;
}
static struct notifier_block qeth_reboot_notifier = {
qeth_reboot_event,
- NULL,
+ 0
};
static int
qeth_ipv6_init(void)
{
qeth_old_arp_constructor = arp_tbl.constructor;
- write_lock_bh(&arp_tbl.lock);
+ write_lock(&arp_tbl.lock);
arp_tbl.constructor = qeth_arp_constructor;
- write_unlock_bh(&arp_tbl.lock);
+ write_unlock(&arp_tbl.lock);
arp_direct_ops = (struct neigh_ops*)
kmalloc(sizeof(struct neigh_ops), GFP_KERNEL);
static void
qeth_ipv6_uninit(void)
{
- write_lock_bh(&arp_tbl.lock);
+ write_lock(&arp_tbl.lock);
arp_tbl.constructor = qeth_old_arp_constructor;
- write_unlock_bh(&arp_tbl.lock);
+ write_unlock(&arp_tbl.lock);
kfree(arp_direct_ops);
}
#endif /* CONFIG_QETH_IPV6 */
static void
qeth_sysfs_unregister(void)
{
- s390_root_dev_unregister(qeth_root_dev);
qeth_remove_driver_attributes();
ccw_driver_unregister(&qeth_ccw_driver);
ccwgroup_driver_unregister(&qeth_ccwgroup_driver);
+ s390_root_dev_unregister(qeth_root_dev);
}
-
/**
* register qeth at sysfs
*/
static int
qeth_sysfs_register(void)
{
- int rc;
+ int rc=0;
rc = ccwgroup_driver_register(&qeth_ccwgroup_driver);
if (rc)
- goto out;
-
+ return rc;
rc = ccw_driver_register(&qeth_ccw_driver);
if (rc)
- goto out_ccw_driver;
-
+ return rc;
rc = qeth_create_driver_attributes();
if (rc)
- goto out_qeth_attr;
-
+ return rc;
qeth_root_dev = s390_root_dev_register("qeth");
- rc = IS_ERR(qeth_root_dev) ? PTR_ERR(qeth_root_dev) : 0;
- if (!rc)
- goto out;
-
- qeth_remove_driver_attributes();
-out_qeth_attr:
- ccw_driver_unregister(&qeth_ccw_driver);
-out_ccw_driver:
- ccwgroup_driver_unregister(&qeth_ccwgroup_driver);
-out:
- return rc;
+ if (IS_ERR(qeth_root_dev)) {
+ rc = PTR_ERR(qeth_root_dev);
+ return rc;
+ }
+ return 0;
}
/***
static int __init
qeth_init(void)
{
- int rc;
+ int rc=0;
PRINT_INFO("loading %s\n", version);
spin_lock_init(&qeth_notify_lock);
rwlock_init(&qeth_card_list.rwlock);
- rc = qeth_register_dbf_views();
- if (rc)
+ if (qeth_register_dbf_views())
goto out_err;
-
- rc = qeth_sysfs_register();
- if (rc)
- goto out_dbf;
+ if (qeth_sysfs_register())
+ goto out_sysfs;
#ifdef CONFIG_QETH_IPV6
- rc = qeth_ipv6_init();
- if (rc) {
- PRINT_ERR("Out of memory during ipv6 init code = %d\n", rc);
+ if (qeth_ipv6_init()) {
+ PRINT_ERR("Out of memory during ipv6 init.\n");
goto out_sysfs;
}
#endif /* QETH_IPV6 */
- rc = qeth_register_notifiers();
- if (rc)
+ if (qeth_register_notifiers())
goto out_ipv6;
- rc = qeth_create_procfs_entries();
- if (rc)
+ if (qeth_create_procfs_entries())
goto out_notifiers;
return rc;
out_ipv6:
#ifdef CONFIG_QETH_IPV6
qeth_ipv6_uninit();
-out_sysfs:
#endif /* QETH_IPV6 */
+out_sysfs:
qeth_sysfs_unregister();
-out_dbf:
qeth_unregister_dbf_views();
out_err:
- PRINT_ERR("Initialization failed with code %d\n", rc);
+	PRINT_ERR("Initialization failed\n");
return rc;
}