/*
*
- * linux/drivers/s390/net/qeth_main.c ($Revision: 1.121 $)
+ * linux/drivers/s390/net/qeth_main.c ($Revision: 1.130 $)
*
* Linux on zSeries OSA Express and HiperSockets support
*
* Frank Pavlic (pavlic@de.ibm.com) and
* Thomas Spatzier <tspat@de.ibm.com>
*
- * $Revision: 1.121 $ $Date: 2004/06/11 16:32:15 $
+ * $Revision: 1.130 $ $Date: 2004/08/05 11:21:50 $
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
#include "qeth_mpc.h"
#include "qeth_fs.h"
-#define VERSION_QETH_C "$Revision: 1.121 $"
+#define VERSION_QETH_C "$Revision: 1.130 $"
static const char *version = "qeth S/390 OSA-Express driver";
/**
static void qeth_add_multicast_ipv6(struct qeth_card *);
#endif
+/*
+ * Request that the worker thread(s) named by @thread be started for @card.
+ *
+ * Returns 0 when the start bit was newly set, or -EPERM when the thread
+ * is not in thread_allowed_mask or its start bit is already set.  Callers
+ * use the return value to decide whether to schedule kernel_thread_starter.
+ * thread_mask_lock serializes all updates to the thread masks.
+ */
-static void
+static inline int
qeth_set_thread_start_bit(struct qeth_card *card, unsigned long thread)
{
unsigned long flags;
spin_lock_irqsave(&card->thread_mask_lock, flags);
+ if ( !(card->thread_allowed_mask & thread) ||
+ (card->thread_start_mask & thread) ) {
+ spin_unlock_irqrestore(&card->thread_mask_lock, flags);
+ return -EPERM;
+ }
card->thread_start_mask |= thread;
spin_unlock_irqrestore(&card->thread_mask_lock, flags);
+ return 0;
}
static void
{
QETH_DBF_TEXT(trace,2,"startrec");
- qeth_set_thread_start_bit(card, QETH_RECOVER_THREAD);
- schedule_work(&card->kernel_thread_starter);
+ if (qeth_set_thread_start_bit(card, QETH_RECOVER_THREAD) == 0)
+ schedule_work(&card->kernel_thread_starter);
}
static int
QETH_DBF_TEXT(trace, 2, "rstipadd");
qeth_clear_ip_list(card, 0, 1);
- qeth_set_thread_start_bit(card, QETH_SET_IP_THREAD);
- qeth_set_thread_start_bit(card, QETH_SET_MC_THREAD);
- schedule_work(&card->kernel_thread_starter);
+ if ( (qeth_set_thread_start_bit(card, QETH_SET_IP_THREAD) == 0) ||
+ (qeth_set_thread_start_bit(card, QETH_SET_MC_THREAD) == 0) )
+ schedule_work(&card->kernel_thread_starter);
}
static struct qeth_ipa_cmd *
}
add_timer(&timer);
wait_event(reply->wait_q, reply->received);
- del_timer(&timer);
+ del_timer_sync(&timer);
rc = reply->rc;
qeth_put_reply(reply);
return rc;
QETH_DBF_TEXT(qerr,2,"unexeob");
QETH_DBF_TEXT_(qerr,2,"%s",CARD_BUS_ID(card));
QETH_DBF_HEX(misc,4,buffer,sizeof(*buffer));
- dev_kfree_skb_irq(skb);
+ dev_kfree_skb_any(skb);
card->stats.rx_errors++;
return NULL;
}
qeth_rebuild_skb(card, skb, hdr);
/* is device UP ? */
if (!(card->dev->flags & IFF_UP)){
- dev_kfree_skb_irq(skb);
+ dev_kfree_skb_any(skb);
continue;
}
skb->dev = card->dev;
+/*
+ * Detach and return the first free buffer pool entry of the card's
+ * inbound pool, or NULL when the pool is empty.  The entry is removed
+ * from entry_list via list_del_init() before it is handed back.
+ */
static inline struct qeth_buffer_pool_entry *
qeth_get_buffer_pool_entry(struct qeth_card *card)
{
- struct qeth_buffer_pool_entry *entry, *tmp;
+ struct qeth_buffer_pool_entry *entry;
QETH_DBF_TEXT(trace, 6, "gtbfplen");
- entry = NULL;
- list_for_each_entry_safe(entry, tmp,
- &card->qdio.in_buf_pool.entry_list, list){
+ if (!list_empty(&card->qdio.in_buf_pool.entry_list)) {
+ entry = list_entry(card->qdio.in_buf_pool.entry_list.next,
+ struct qeth_buffer_pool_entry, list);
list_del_init(&entry->list);
- break;
+ return entry;
}
- return entry;
+ return NULL;
}
static inline void
buf->buffer->element[i].flags = 0;
while ((skb = skb_dequeue(&buf->skb_list))){
atomic_dec(&skb->users);
- dev_kfree_skb_irq(skb);
+ dev_kfree_skb_any(skb);
}
}
buf->next_element_to_fill = 0;
QETH_DBF_TEXT(trace, 2, "flushbuf");
QETH_DBF_TEXT_(trace, 2, " err%d", rc);
queue->card->stats.tx_errors += count;
- /* ok, since do_QDIO went wrong the buffers have not been given
- * to the hardware. they still belong to us, so we can clear
- * them and reuse then, i.e. set back next_buf_to_fill*/
- for (i = index; i < index + count; ++i) {
- buf = &queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q];
- qeth_clear_output_buffer(queue, buf);
- }
- queue->next_buf_to_fill = index;
+ /* This must not happen under normal circumstances. If it
+ * happens, something is really wrong -> recover */
+ qeth_schedule_recovery(queue->card);
return;
}
atomic_add(count, &queue->used_buffers);
}
/*
- * switches between PACKING and non-PACKING state if needed.
- * has to be called holding queue->lock
+ * Switches to packing state if the number of used buffers on a queue
+ * reaches a certain limit.
*/
-static inline int
-qeth_switch_packing_state(struct qeth_qdio_out_q *queue)
+static inline void
+qeth_switch_to_packing_if_needed(struct qeth_qdio_out_q *queue)
{
- struct qeth_qdio_out_buffer *buffer;
- int flush_count = 0;
-
- QETH_DBF_TEXT(trace, 6, "swipack");
if (!queue->do_pack) {
if (atomic_read(&queue->used_buffers)
>= QETH_HIGH_WATERMARK_PACK){
#endif
queue->do_pack = 1;
}
- } else {
+ }
+}
+
+/*
+ * Switches from packing to non-packing mode. If there is a packing
+ * buffer on the queue this buffer will be prepared to be flushed.
+ * In that case 1 is returned to inform the caller. If no buffer
+ * has to be flushed, zero is returned.
+ */
+static inline int
+qeth_switch_to_nonpacking_if_needed(struct qeth_qdio_out_q *queue)
+{
+ struct qeth_qdio_out_buffer *buffer;
+ int flush_count = 0;
+
+ if (queue->do_pack) {
if (atomic_read(&queue->used_buffers)
<= QETH_LOW_WATERMARK_PACK) {
/* switch PACKING -> non-PACKING */
return flush_count;
}
-static inline void
-qeth_flush_buffers_on_no_pci(struct qeth_qdio_out_q *queue, int under_int)
+/*
+ * Called to flush a packing buffer if no more pci flags are on the queue.
+ * Checks if there is a packing buffer and prepares it to be flushed.
+ * In that case returns 1, otherwise zero.
+ *
+ * NOTE(review): both call sites appear to invoke this while queue->state
+ * is held LOCKED/LOCKED_FLUSH, so next_buf_to_fill is stable here --
+ * confirm against the full file.
+ */
+static inline int
+qeth_flush_buffers_on_no_pci(struct qeth_qdio_out_q *queue)
{
struct qeth_qdio_out_buffer *buffer;
- int index;
- index = queue->next_buf_to_fill;
- buffer = &queue->bufs[index];
+ buffer = &queue->bufs[queue->next_buf_to_fill];
+ /* EMPTY state but partially filled elements => half-filled packing buf */
if((atomic_read(&buffer->state) == QETH_QDIO_BUF_EMPTY) &&
(buffer->next_element_to_fill > 0)){
/* it's a packing buffer */
atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
queue->next_buf_to_fill =
(queue->next_buf_to_fill + 1) % QDIO_MAX_BUFFERS_PER_Q;
- qeth_flush_buffers(queue, under_int, index, 1);
+ return 1;
+ }
+ return 0;
+}
+
+/*
+ * Outbound-completion housekeeping: decide whether the queue should
+ * leave packing mode or whether a half-filled packing buffer must be
+ * flushed to get another pci flag out.  Only acts if it can atomically
+ * move queue->state from UNLOCKED to LOCKED_FLUSH; otherwise
+ * do_send_packet owns the queue and will notice the state change.
+ */
+static inline void
+qeth_check_outbound_queue(struct qeth_qdio_out_q *queue)
+{
+ int index;
+ int flush_cnt = 0;
+
+ /*
+ * check if we have to switch to non-packing mode or if
+ * we have to get a pci flag out on the queue
+ */
+ if ((atomic_read(&queue->used_buffers) <= QETH_LOW_WATERMARK_PACK) ||
+ !atomic_read(&queue->set_pci_flags_count)){
+ if (atomic_swap(&queue->state, QETH_OUT_Q_LOCKED_FLUSH) ==
+ QETH_OUT_Q_UNLOCKED) {
+ /*
+ * If we get in here, there was no action in
+ * do_send_packet. So, we check if there is a
+ * packing buffer to be flushed here.
+ */
+ /* TODO: try if we get a performance improvement
+ * by calling netif_stop_queue here */
+ /* save start index for flushing */
+ index = queue->next_buf_to_fill;
+ flush_cnt += qeth_switch_to_nonpacking_if_needed(queue);
+ if (!flush_cnt &&
+ !atomic_read(&queue->set_pci_flags_count))
+ flush_cnt +=
+ qeth_flush_buffers_on_no_pci(queue);
+ /* we're done with updating critical queue members */
+ atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
+ /* flushing can be done outside the lock */
+ if (flush_cnt)
+ qeth_flush_buffers(queue, 1, index, flush_cnt);
+ }
}
}
qeth_clear_output_buffer(queue, buffer);
}
atomic_sub(count, &queue->used_buffers);
+ /* check if we need to do something on this outbound queue */
+ qeth_check_outbound_queue(queue);
netif_wake_queue(card->dev);
#ifdef CONFIG_QETH_PERF_STATS
card->qdio.out_qs[i]->do_pack = 0;
atomic_set(&card->qdio.out_qs[i]->used_buffers,0);
atomic_set(&card->qdio.out_qs[i]->set_pci_flags_count, 0);
- spin_lock_init(&card->qdio.out_qs[i]->lock);
+ atomic_set(&card->qdio.out_qs[i]->state,
+ QETH_OUT_Q_UNLOCKED);
}
return 0;
}
card->perf_stats.outbound_start_time = qeth_get_micros();
#endif
/*
- * dev_queue_xmit should ensure that we are called packet
- * after packet
+ * We only call netif_stop_queue in case of errors. Since we've
+ * got our own synchronization on queues we can keep the stack's
+ * queue running.
*/
- netif_stop_queue(dev);
- if (!(rc = qeth_send_packet(card, skb)))
- netif_wake_queue(dev);
+ if ((rc = qeth_send_packet(card, skb)))
+ netif_stop_queue(dev);
#ifdef CONFIG_QETH_PERF_STATS
card->perf_stats.outbound_time += qeth_get_micros() -
QETH_DBF_TEXT(trace, 6, "dosndpfa");
- spin_lock(&queue->lock);
+ /* spin until we get the queue ... */
+ while (atomic_compare_and_swap(QETH_OUT_Q_UNLOCKED,
+ QETH_OUT_Q_LOCKED,
+ &queue->state));
+ /* ... now we've got the queue */
index = queue->next_buf_to_fill;
buffer = &queue->bufs[queue->next_buf_to_fill];
/*
*/
if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY) {
card->stats.tx_dropped++;
- spin_unlock(&queue->lock);
+ atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
return -EBUSY;
}
queue->next_buf_to_fill = (queue->next_buf_to_fill + 1) %
QDIO_MAX_BUFFERS_PER_Q;
+ atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
qeth_fill_buffer(queue, buffer, (char *)hdr, skb);
qeth_flush_buffers(queue, 0, index, 1);
- spin_unlock(&queue->lock);
return 0;
}
QETH_DBF_TEXT(trace, 6, "dosndpkt");
- spin_lock(&queue->lock);
+ /* spin until we get the queue ... */
+ while (atomic_compare_and_swap(QETH_OUT_Q_UNLOCKED,
+ QETH_OUT_Q_LOCKED,
+ &queue->state));
start_index = queue->next_buf_to_fill;
buffer = &queue->bufs[queue->next_buf_to_fill];
/*
*/
if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY){
card->stats.tx_dropped++;
- spin_unlock(&queue->lock);
+ atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
return -EBUSY;
}
+ /* check if we need to switch packing state of this queue */
+ qeth_switch_to_packing_if_needed(queue);
if (queue->do_pack){
/* does packet fit in current buffer? */
if((QETH_MAX_BUFFER_ELEMENTS(card) -
/* we did a step forward, so check buffer state again */
if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY){
card->stats.tx_dropped++;
- qeth_flush_buffers(queue, 0, start_index, 1);
- spin_unlock(&queue->lock);
/* return EBUSY because we sent old packet, not
* the current one */
- return -EBUSY;
+ rc = -EBUSY;
+ goto out;
}
}
}
queue->next_buf_to_fill = (queue->next_buf_to_fill + 1) %
QDIO_MAX_BUFFERS_PER_Q;
}
- /* check if we need to switch packing state of this queue */
- flush_count += qeth_switch_packing_state(queue);
-
+ /*
+ * queue->state will go from LOCKED -> UNLOCKED or from
+ * LOCKED_FLUSH -> LOCKED if output_handler wanted to 'notify' us
+ * (switch packing state or flush buffer to get another pci flag out).
+ * In that case we will enter this loop
+ */
+ while (atomic_dec_return(&queue->state)){
+ /* check if we can go back to non-packing state */
+ flush_count += qeth_switch_to_nonpacking_if_needed(queue);
+ /*
+ * check if we need to flush a packing buffer to get a pci
+ * flag out on the queue
+ */
+ if (!flush_count && !atomic_read(&queue->set_pci_flags_count))
+ flush_count += qeth_flush_buffers_on_no_pci(queue);
+ }
+ /* at this point the queue is UNLOCKED again */
+out:
if (flush_count)
qeth_flush_buffers(queue, 0, start_index, flush_count);
- if (!atomic_read(&queue->set_pci_flags_count))
- qeth_flush_buffers_on_no_pci(queue, 0);
-
- spin_unlock(&queue->lock);
return rc;
}
switch(regnum){
case MII_BMCR: /* Basic mode control register */
rc = BMCR_FULLDPLX;
- if(card->info.link_type != QETH_LINK_TYPE_GBIT_ETH)
+ if ((card->info.link_type != QETH_LINK_TYPE_GBIT_ETH)&&
+ (card->info.link_type != QETH_LINK_TYPE_10GBIT_ETH))
rc |= BMCR_SPEED100;
break;
case MII_BMSR: /* Basic mode status register */
if (card->vlangrp)
card->vlangrp->vlan_devices[vid] = NULL;
spin_unlock_irqrestore(&card->vlanlock, flags);
- qeth_set_thread_start_bit(card, QETH_SET_IP_THREAD);
- /* delete mc addresses for this vlan dev */
- qeth_set_thread_start_bit(card, QETH_SET_MC_THREAD);
- schedule_work(&card->kernel_thread_starter);
+ if ( (qeth_set_thread_start_bit(card, QETH_SET_IP_THREAD) == 0) ||
+ (qeth_set_thread_start_bit(card, QETH_SET_MC_THREAD) == 0) )
+ schedule_work(&card->kernel_thread_starter);
}
#endif
QETH_DBF_TEXT(trace,3,"setmulti");
card = (struct qeth_card *) dev->priv;
- qeth_set_thread_start_bit(card, QETH_SET_MC_THREAD);
- schedule_work(&card->kernel_thread_starter);
+ if (qeth_set_thread_start_bit(card, QETH_SET_MC_THREAD) == 0)
+ schedule_work(&card->kernel_thread_starter);
}
static void
rtnl_lock();
dev_open(card->dev);
rtnl_unlock();
- qeth_set_thread_start_bit(card, QETH_SET_MC_THREAD);
- schedule_work(&card->kernel_thread_starter);
+ if (qeth_set_thread_start_bit(card, QETH_SET_MC_THREAD) == 0)
+ schedule_work(&card->kernel_thread_starter);
}
static int
}
if (!qeth_add_ip(card, ipaddr))
kfree(ipaddr);
- qeth_set_thread_start_bit(card, QETH_SET_IP_THREAD);
- schedule_work(&card->kernel_thread_starter);
+ if (qeth_set_thread_start_bit(card, QETH_SET_IP_THREAD) == 0)
+ schedule_work(&card->kernel_thread_starter);
return rc;
}
return;
if (!qeth_delete_ip(card, ipaddr))
kfree(ipaddr);
- qeth_set_thread_start_bit(card, QETH_SET_IP_THREAD);
- schedule_work(&card->kernel_thread_starter);
+ if (qeth_set_thread_start_bit(card, QETH_SET_IP_THREAD) == 0)
+ schedule_work(&card->kernel_thread_starter);
}
/*
}
if (!qeth_add_ip(card, ipaddr))
kfree(ipaddr);
- qeth_set_thread_start_bit(card, QETH_SET_IP_THREAD);
- schedule_work(&card->kernel_thread_starter);
+ if (qeth_set_thread_start_bit(card, QETH_SET_IP_THREAD) == 0)
+ schedule_work(&card->kernel_thread_starter);
return 0;
}
return;
if (!qeth_delete_ip(card, ipaddr))
kfree(ipaddr);
- qeth_set_thread_start_bit(card, QETH_SET_IP_THREAD);
- schedule_work(&card->kernel_thread_starter);
+ if (qeth_set_thread_start_bit(card, QETH_SET_IP_THREAD) == 0)
+ schedule_work(&card->kernel_thread_starter);
}
/**
default:
break;
}
- qeth_set_thread_start_bit(card, QETH_SET_IP_THREAD);
- schedule_work(&card->kernel_thread_starter);
+ if (qeth_set_thread_start_bit(card, QETH_SET_IP_THREAD) == 0)
+ schedule_work(&card->kernel_thread_starter);
out:
return NOTIFY_DONE;
}
default:
break;
}
- qeth_set_thread_start_bit(card, QETH_SET_IP_THREAD);
- schedule_work(&card->kernel_thread_starter);
+ if (qeth_set_thread_start_bit(card, QETH_SET_IP_THREAD) == 0)
+ schedule_work(&card->kernel_thread_starter);
out:
return NOTIFY_DONE;
}