+static int set_rxd_buffer_pointer(nic_t *sp, RxD_t *rxdp, buffAdd_t *ba,
+ struct sk_buff **skb, u64 *temp0, u64 *temp1,
+ u64 *temp2, int size)
+{
+ struct net_device *dev = sp->dev;
+ struct sk_buff *frag_list;
+
+ if ((sp->rxd_mode == RXD_MODE_1) && (rxdp->Host_Control == 0)) {
+ /* allocate skb */
+ if (*skb) {
+ DBG_PRINT(INFO_DBG, "SKB is not NULL\n");
+ /*
+ * As the Rx frames are not going to be processed,
+ * reuse the same mapped address for the RxD
+ * buffer pointer.
+ */
+ ((RxD1_t*)rxdp)->Buffer0_ptr = *temp0;
+ } else {
+ *skb = dev_alloc_skb(size);
+ if (!(*skb)) {
+ DBG_PRINT(ERR_DBG, "%s: Out of ", dev->name);
+ DBG_PRINT(ERR_DBG, "memory to allocate SKBs\n");
+ return -ENOMEM ;
+ }
+ /* Store the mapped address in a temporary variable
+ * so it can be reused for the next RxD whose
+ * Host_Control is NULL.
+ */
+ ((RxD1_t*)rxdp)->Buffer0_ptr = *temp0 =
+ pci_map_single(sp->pdev, (*skb)->data,
+ size - NET_IP_ALIGN,
+ PCI_DMA_FROMDEVICE);
+ rxdp->Host_Control = (unsigned long) (*skb);
+ }
+ } else if ((sp->rxd_mode == RXD_MODE_3B) && (rxdp->Host_Control == 0)) {
+ /* Two buffer Mode */
+ if (*skb) {
+ ((RxD3_t*)rxdp)->Buffer2_ptr = *temp2;
+ ((RxD3_t*)rxdp)->Buffer0_ptr = *temp0;
+ ((RxD3_t*)rxdp)->Buffer1_ptr = *temp1;
+ } else {
+ *skb = dev_alloc_skb(size);
+ if (!(*skb)) {
+ DBG_PRINT(ERR_DBG, "%s: dev_alloc_skb failed\n",
+ dev->name);
+ return -ENOMEM;
+ }
+ ((RxD3_t*)rxdp)->Buffer2_ptr = *temp2 =
+ pci_map_single(sp->pdev, (*skb)->data,
+ dev->mtu + 4,
+ PCI_DMA_FROMDEVICE);
+ ((RxD3_t*)rxdp)->Buffer0_ptr = *temp0 =
+ pci_map_single(sp->pdev, ba->ba_0, BUF0_LEN,
+ PCI_DMA_FROMDEVICE);
+ rxdp->Host_Control = (unsigned long) (*skb);
+
+ /* Buffer-1 is a dummy buffer that is not used */
+ ((RxD3_t*)rxdp)->Buffer1_ptr = *temp1 =
+ pci_map_single(sp->pdev, ba->ba_1, BUF1_LEN,
+ PCI_DMA_FROMDEVICE);
+ }
+ } else if (rxdp->Host_Control == 0) {
+ /* Three buffer mode */
+ if (*skb) {
+ ((RxD3_t*)rxdp)->Buffer0_ptr = *temp0;
+ ((RxD3_t*)rxdp)->Buffer1_ptr = *temp1;
+ ((RxD3_t*)rxdp)->Buffer2_ptr = *temp2;
+ } else {
+ *skb = dev_alloc_skb(size);
+ if (!(*skb)) {
+ DBG_PRINT(ERR_DBG, "%s: dev_alloc_skb failed\n",
+ dev->name);
+ return -ENOMEM;
+ }
+ ((RxD3_t*)rxdp)->Buffer0_ptr = *temp0 =
+ pci_map_single(sp->pdev, ba->ba_0, BUF0_LEN,
+ PCI_DMA_FROMDEVICE);
+ /* Buffer-1 receives L3/L4 headers */
+ ((RxD3_t*)rxdp)->Buffer1_ptr = *temp1 =
+ pci_map_single(sp->pdev, (*skb)->data,
+ l3l4hdr_size + 4,
+ PCI_DMA_FROMDEVICE);
+ /*
+ * skb_shinfo(skb)->frag_list will have L4
+ * data payload
+ */
+ skb_shinfo(*skb)->frag_list =
+ dev_alloc_skb(dev->mtu + ALIGN_SIZE);
+ if (skb_shinfo(*skb)->frag_list == NULL) {
+ DBG_PRINT(ERR_DBG, "%s: dev_alloc_skb failed\n",
+ dev->name);
+ return -ENOMEM;
+ }
+ frag_list = skb_shinfo(*skb)->frag_list;
+ frag_list->next = NULL;
+ /*
+ * Buffer-2 receives L4 data payload
+ */
+ ((RxD3_t*)rxdp)->Buffer2_ptr = *temp2 =
+ pci_map_single(sp->pdev, frag_list->data,
+ dev->mtu, PCI_DMA_FROMDEVICE);
+ }
+ }
+ return 0;
+}
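+
+/*
+ * A minimal usage sketch (illustrative only; it mirrors the loop in
+ * rxd_owner_bit_reset() below): the first call with *skb == NULL
+ * allocates and maps fresh buffers, and subsequent calls for RxDs
+ * whose Host_Control is 0 reuse the same mapped addresses through
+ * the temp variables, since the frames will never be processed.
+ *
+ *	struct sk_buff *skb = NULL;
+ *	u64 t0 = 0, t1 = 0, t2 = 0;
+ *
+ *	if (set_rxd_buffer_pointer(sp, rxdp, ba, &skb,
+ *				   &t0, &t1, &t2, size) == -ENOMEM)
+ *		return -ENOMEM;
+ */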
+
+static void set_rxd_buffer_size(nic_t *sp, RxD_t *rxdp, int size)
+{
+ struct net_device *dev = sp->dev;
+ if (sp->rxd_mode == RXD_MODE_1) {
+ rxdp->Control_2 = SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);
+ } else if (sp->rxd_mode == RXD_MODE_3B) {
+ rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
+ rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
+ rxdp->Control_2 |= SET_BUFFER2_SIZE_3(dev->mtu + 4);
+ } else {
+ rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
+ rxdp->Control_2 |= SET_BUFFER1_SIZE_3(l3l4hdr_size + 4);
+ rxdp->Control_2 |= SET_BUFFER2_SIZE_3(dev->mtu);
+ }
+}
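+
+/*
+ * Worked example (illustrative, assuming dev->mtu == 1500): in
+ * RXD_MODE_3B the descriptor advertises BUF0_LEN bytes for the
+ * Buffer-0 header area, 1 byte for the dummy Buffer-1 and
+ * 1500 + 4 = 1504 bytes for the Buffer-2 frame area; in 3-buffer
+ * mode Buffer-1 gets l3l4hdr_size + 4 bytes of L3/L4 headers and
+ * Buffer-2 gets the 1500-byte L4 payload.
+ */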
+
+static int rxd_owner_bit_reset(nic_t *sp)
+{
+ int i, j, k, blk_cnt = 0, size;
+ mac_info_t *mac_control = &sp->mac_control;
+ struct config_param *config = &sp->config;
+ struct net_device *dev = sp->dev;
+ RxD_t *rxdp = NULL;
+ struct sk_buff *skb = NULL;
+ buffAdd_t *ba = NULL;
+ u64 temp0_64 = 0, temp1_64 = 0, temp2_64 = 0;
+
+ /* Calculate the size based on ring mode */
+ size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
+ HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
+ if (sp->rxd_mode == RXD_MODE_1)
+ size += NET_IP_ALIGN;
+ else if (sp->rxd_mode == RXD_MODE_3B)
+ size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;
+ else
+ size = l3l4hdr_size + ALIGN_SIZE + BUF0_LEN + 4;
+
+ for (i = 0; i < config->rx_ring_num; i++) {
+ blk_cnt = config->rx_cfg[i].num_rxd /
+ (rxd_count[sp->rxd_mode] + 1);
+
+ for (j = 0; j < blk_cnt; j++) {
+ for (k = 0; k < rxd_count[sp->rxd_mode]; k++) {
+ rxdp = mac_control->rings[i].
+ rx_blocks[j].rxds[k].virt_addr;
+ if (sp->rxd_mode >= RXD_MODE_3A)
+ ba = &mac_control->rings[i].ba[j][k];
+ if (set_rxd_buffer_pointer(sp, rxdp, ba,
+ &skb, &temp0_64,
+ &temp1_64,
+ &temp2_64, size) == -ENOMEM)
+ return -ENOMEM;
+
+ set_rxd_buffer_size(sp, rxdp, size);
+ wmb();
+ /* flip the Ownership bit to Hardware */
+ rxdp->Control_1 |= RXD_OWN_XENA;
+ }
+ }
+ }
+ return 0;
+}
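+
+/*
+ * Ordering note on the loop above: the buffer pointers and sizes
+ * must be visible to the device before the ownership bit hands the
+ * descriptor over, hence the write barrier in between:
+ *
+ *	set_rxd_buffer_pointer(sp, rxdp, ba, &skb, ...);
+ *	set_rxd_buffer_size(sp, rxdp, size);
+ *	wmb();
+ *	rxdp->Control_1 |= RXD_OWN_XENA;
+ */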
+
+static int s2io_add_isr(nic_t *sp)
+{
+ int ret = 0;
+ struct net_device *dev = sp->dev;
+ int err = 0;
+
+ if (sp->intr_type == MSI)
+ ret = s2io_enable_msi(sp);
+ else if (sp->intr_type == MSI_X)
+ ret = s2io_enable_msi_x(sp);
+ if (ret) {
+ DBG_PRINT(ERR_DBG, "%s: Defaulting to INTA\n", dev->name);
+ sp->intr_type = INTA;
+ }
+
+ /* Store the values of the MSIX table in the nic_t structure */
+ store_xmsi_data(sp);
+
+ /* After proper initialization of H/W, register ISR */
+ if (sp->intr_type == MSI) {
+ err = request_irq(sp->pdev->irq, s2io_msi_handle,
+ IRQF_SHARED, sp->name, dev);
+ if (err) {
+ pci_disable_msi(sp->pdev);
+ DBG_PRINT(ERR_DBG, "%s: MSI registration failed\n",
+ dev->name);
+ return -1;
+ }
+ }
+ if (sp->intr_type == MSI_X) {
+ int i;
+
+ for (i = 1; sp->s2io_entries[i].in_use == MSIX_FLG; i++) {
+ if (sp->s2io_entries[i].type == MSIX_FIFO_TYPE) {
+ sprintf(sp->desc[i], "%s:MSI-X-%d-TX",
+ dev->name, i);
+ err = request_irq(sp->entries[i].vector,
+ s2io_msix_fifo_handle, 0, sp->desc[i],
+ sp->s2io_entries[i].arg);
+ DBG_PRINT(ERR_DBG, "%s @ 0x%llx\n", sp->desc[i],
+ (unsigned long long)sp->msix_info[i].addr);
+ } else {
+ sprintf(sp->desc[i], "%s:MSI-X-%d-RX",
+ dev->name, i);
+ err = request_irq(sp->entries[i].vector,
+ s2io_msix_ring_handle, 0, sp->desc[i],
+ sp->s2io_entries[i].arg);
+ DBG_PRINT(ERR_DBG, "%s @ 0x%llx\n", sp->desc[i],
+ (unsigned long long)sp->msix_info[i].addr);
+ }
+ if (err) {
+ DBG_PRINT(ERR_DBG,"%s:MSI-X-%d registration "
+ "failed\n", dev->name, i);
+ DBG_PRINT(ERR_DBG, "Returned: %d\n", err);
+ return -1;
+ }
+ sp->s2io_entries[i].in_use = MSIX_REGISTERED_SUCCESS;
+ }
+ }
+ if (sp->intr_type == INTA) {
+ err = request_irq(sp->pdev->irq, s2io_isr, IRQF_SHARED,
+ sp->name, dev);
+ if (err) {
+ DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
+ dev->name);
+ return -1;
+ }
+ }
+ return 0;
+}
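+
+/*
+ * Each MSI-X vector registered above gets a descriptive name that
+ * appears in /proc/interrupts, built by the sprintf() calls, e.g.
+ * (hypothetical interface name) "eth0:MSI-X-1-TX" for a fifo vector
+ * or "eth0:MSI-X-2-RX" for a ring vector.
+ */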
+
+static void s2io_rem_isr(nic_t *sp)
+{
+ int cnt = 0;
+ struct net_device *dev = sp->dev;
+
+ if (sp->intr_type == MSI_X) {
+ int i;
+ u16 msi_control;
+
+ for (i = 1; sp->s2io_entries[i].in_use ==
+ MSIX_REGISTERED_SUCCESS; i++) {
+ int vector = sp->entries[i].vector;
+ void *arg = sp->s2io_entries[i].arg;
+
+ free_irq(vector, arg);
+ }
+ pci_read_config_word(sp->pdev, 0x42, &msi_control);
+ msi_control &= 0xFFFE; /* Disable MSI */
+ pci_write_config_word(sp->pdev, 0x42, msi_control);
+
+ pci_disable_msix(sp->pdev);
+ } else {
+ free_irq(sp->pdev->irq, dev);
+ if (sp->intr_type == MSI) {
+ u16 val;
+
+ pci_disable_msi(sp->pdev);
+ pci_read_config_word(sp->pdev, 0x4c, &val);
+ val ^= 0x1; /* toggle off the MSI enable bit */
+ pci_write_config_word(sp->pdev, 0x4c, val);
+ }
+ }
+ /* Wait till all interrupt handlers have completed */
+ cnt = 0;
+ do {
+ msleep(10);
+ if (!atomic_read(&sp->isr_cnt))
+ break;
+ cnt++;
+ } while (cnt < 5);
+}
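+
+/*
+ * The drain loop above assumes each interrupt handler brackets its
+ * body with the isr_cnt counter, roughly (a sketch, not part of
+ * this patch):
+ *
+ *	atomic_inc(&sp->isr_cnt);
+ *	... service the interrupt ...
+ *	atomic_dec(&sp->isr_cnt);
+ */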
+
+static void s2io_card_down(nic_t *sp)
+{
+ int cnt = 0;
+ XENA_dev_config_t __iomem *bar0 = sp->bar0;
+ unsigned long flags;
+ register u64 val64 = 0;
+
+ del_timer_sync(&sp->alarm_timer);
+ /* If s2io_set_link task is executing, wait till it completes. */
+ while (test_and_set_bit(0, &(sp->link_state))) {
+ msleep(50);
+ }
+ atomic_set(&sp->card_state, CARD_DOWN);
+
+ /* disable Tx and Rx traffic on the NIC */
+ stop_nic(sp);
+
+ s2io_rem_isr(sp);
+
+ /* Kill tasklet. */
+ tasklet_kill(&sp->task);
+
+ /* Check if the device is Quiescent and then Reset the NIC */
+ do {
+ /* As per the HW requirement we need to replenish the
+ * receive buffers to avoid ring bumps. Since there is
+ * no intention of processing the Rx frames at this
+ * point, we just set the ownership bit of the RxDs in
+ * each Rx ring to the hardware and set the appropriate
+ * buffer sizes based on the ring mode.
+ */
+ rxd_owner_bit_reset(sp);
+
+ val64 = readq(&bar0->adapter_status);
+ if (verify_xena_quiescence(sp, val64, sp->device_enabled_once)) {
+ break;
+ }
+
+ msleep(50);
+ cnt++;
+ if (cnt == 10) {
+ DBG_PRINT(ERR_DBG,
+ "s2io_close:Device not Quiescent ");
+ DBG_PRINT(ERR_DBG, "adaper status reads 0x%llx\n",
+ (unsigned long long) val64);
+ break;
+ }
+ } while (1);
+ s2io_reset(sp);
+
+ spin_lock_irqsave(&sp->tx_lock, flags);
+ /* Free all Tx buffers */
+ free_tx_buffers(sp);
+ spin_unlock_irqrestore(&sp->tx_lock, flags);
+
+ /* Free all Rx buffers */
+ spin_lock_irqsave(&sp->rx_lock, flags);
+ free_rx_buffers(sp);
+ spin_unlock_irqrestore(&sp->rx_lock, flags);
+
+ clear_bit(0, &(sp->link_state));
+}
+
+static int s2io_card_up(nic_t *sp)
+{
+ int i, ret = 0;
+ mac_info_t *mac_control;
+ struct config_param *config;
+ struct net_device *dev = sp->dev;
+ u16 interruptible;
+
+ /* Initialize the H/W I/O registers */
+ if (init_nic(sp) != 0) {
+ DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
+ dev->name);
+ s2io_reset(sp);
+ return -ENODEV;
+ }
+
+ /*
+ * Initialize the Rx buffers of every configured Rx ring.
+ */
+ mac_control = &sp->mac_control;
+ config = &sp->config;
+
+ for (i = 0; i < config->rx_ring_num; i++) {
+ if ((ret = fill_rx_buffers(sp, i))) {
+ DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
+ dev->name);
+ s2io_reset(sp);
+ free_rx_buffers(sp);
+ return -ENOMEM;
+ }
+ DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i,
+ atomic_read(&sp->rx_bufs_left[i]));
+ }
+
+ /* Set the NIC's receive mode */
+ s2io_set_multicast(dev);
+
+ if (sp->lro) {
+ /* Initialize max aggregatable pkts per session based on MTU */
+ sp->lro_max_aggr_per_sess = ((1<<16) - 1) / dev->mtu;
+ /* Use the user-provided value if it is smaller */
+ if (lro_max_pkts < sp->lro_max_aggr_per_sess)
+ sp->lro_max_aggr_per_sess = lro_max_pkts;
+ }
+
+ /* Enable Rx Traffic and interrupts on the NIC */
+ if (start_nic(sp)) {
+ DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name);
+ s2io_reset(sp);
+ free_rx_buffers(sp);
+ return -ENODEV;
+ }
+
+ /* Add interrupt service routine */
+ if (s2io_add_isr(sp) != 0) {
+ if (sp->intr_type == MSI_X)
+ s2io_rem_isr(sp);
+ s2io_reset(sp);
+ free_rx_buffers(sp);
+ return -ENODEV;
+ }
+
+ S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle, sp, (HZ/2));
+
+ /* Enable tasklet for the device */
+ tasklet_init(&sp->task, s2io_tasklet, (unsigned long) dev);
+
+ /* Enable select interrupts */
+ if (sp->intr_type != INTA)
+ en_dis_able_nic_intrs(sp, ENA_ALL_INTRS, DISABLE_INTRS);
+ else {
+ interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
+ interruptible |= TX_PIC_INTR | RX_PIC_INTR;
+ interruptible |= TX_MAC_INTR | RX_MAC_INTR;
+ en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS);
+ }
+
+ atomic_set(&sp->card_state, CARD_UP);
+ return 0;
+}
+
+/**
+ * s2io_restart_nic - Resets the NIC.
+ * @data : long pointer to the device private structure