+ struct list_head *ent;
+ struct sk_buff *skb;
+ netif_t *netif;
+ netif_tx_request_t txreq;
+ struct netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
+ u16 pending_idx;
+ RING_IDX i;
+ gnttab_map_grant_ref_t *mop;
+ unsigned int data_len;
+ int ret, work_to_do;
+
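+ /* Release the grant mappings of transmissions that completed since the last pass. */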
+ if (dealloc_cons != dealloc_prod)
+ net_tx_action_dealloc();
+
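+ /* Build up a batch of grant-map operations; they are submitted in a single hypercall once the loop below finishes. */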
+ mop = tx_map_ops;
+ while (((NR_PENDING_REQS + MAX_SKB_FRAGS) < MAX_PENDING_REQS) &&
+ !list_empty(&net_schedule_list)) {
+ /* Get a netif from the list with work to do. */
+ ent = net_schedule_list.next;
+ netif = list_entry(ent, netif_t, list);
+ netif_get(netif);
+ remove_from_net_schedule_list(netif);
+
+ RING_FINAL_CHECK_FOR_REQUESTS(&netif->tx, work_to_do);
+ if (!work_to_do) {
+ netif_put(netif);
+ continue;
+ }
+
+ i = netif->tx.req_cons;
+ rmb(); /* Ensure that we see the request before we copy it. */
+ memcpy(&txreq, RING_GET_REQUEST(&netif->tx, i), sizeof(txreq));
+ /* Credit-based scheduling. */
+ if (txreq.size > netif->remaining_credit) {
+ unsigned long now = jiffies;
+ unsigned long next_credit =
+ netif->credit_timeout.expires +
+ msecs_to_jiffies(netif->credit_usec / 1000);
+
+ /* Timer could already be pending in rare cases. */
+ if (timer_pending(&netif->credit_timeout))
+ break;
+
+ /* Passed the point where we can replenish credit? */
+ if (time_after_eq(now, next_credit)) {
+ netif->credit_timeout.expires = now;
+ netif->remaining_credit = netif->credit_bytes;
+ }
+
+ /* Still too big to send right now? Set a callback. */
+ if (txreq.size > netif->remaining_credit) {
+ netif->remaining_credit = 0;
+ netif->credit_timeout.data =
+ (unsigned long)netif;
+ netif->credit_timeout.function =
+ tx_credit_callback;
+ __mod_timer(&netif->credit_timeout,
+ next_credit);
+ break;
+ }
+ }
+ netif->remaining_credit -= txreq.size;
+
+ work_to_do--;
+ netif->tx.req_cons = ++i;
+
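+ /* Read any extra-info slots (e.g. GSO metadata) that follow the first request on the ring. */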
+ memset(extras, 0, sizeof(extras));
+ if (txreq.flags & NETTXF_extra_info) {
+ work_to_do = netbk_get_extras(netif, extras,
+ work_to_do);
+ i = netif->tx.req_cons;
+ if (unlikely(work_to_do < 0)) {
+ netbk_tx_err(netif, &txreq, i);
+ continue;
+ }
+ }
+
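+ /* Count and sanity-check the chained requests describing this packet's fragments. */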
+ ret = netbk_count_requests(netif, &txreq, work_to_do);
+ if (unlikely(ret < 0)) {
+ netbk_tx_err(netif, &txreq, i - ret);
+ continue;
+ }
+ i += ret;
+
+ if (unlikely(ret > MAX_SKB_FRAGS)) {
+ DPRINTK("Too many frags\n");
+ netbk_tx_err(netif, &txreq, i);
+ continue;
+ }
+
+ if (unlikely(txreq.size < ETH_HLEN)) {
+ DPRINTK("Bad packet size: %d\n", txreq.size);
+ netbk_tx_err(netif, &txreq, i);
+ continue;
+ }
+
+ /* The payload must not cross a page boundary, since each request maps a single granted page. */
+ if (unlikely((txreq.offset + txreq.size) > PAGE_SIZE)) {
+ DPRINTK("txreq.offset: %x, size: %u, end: %lu\n",
+ txreq.offset, txreq.size,
+ (txreq.offset & ~PAGE_MASK) + txreq.size);
+ netbk_tx_err(netif, &txreq, i);
+ continue;
+ }
+
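+ /* Take the next free slot from the pending ring for this packet's header. */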
+ pending_idx = pending_ring[MASK_PEND_IDX(pending_cons)];
+
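+ /* Copy only the protocol headers (PKT_PROT_LEN bytes) into the linear area when a spare fragment slot can hold the remainder; otherwise copy the whole first chunk. */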
+ data_len = (txreq.size > PKT_PROT_LEN &&
+ ret < MAX_SKB_FRAGS) ?
+ PKT_PROT_LEN : txreq.size;
+
+ skb = alloc_skb(data_len+16, GFP_ATOMIC);
+ if (unlikely(skb == NULL)) {
+ DPRINTK("Can't allocate a skb in start_xmit.\n");
+ netbk_tx_err(netif, &txreq, i);
+ break;
+ }
+
+ /* Packets passed to netif_rx() must have some headroom. */
+ skb_reserve(skb, 16);
+
+ if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
+ struct netif_extra_info *gso;
+ gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
+
+ if (netbk_set_skb_gso(skb, gso)) {
+ kfree_skb(skb);
+ netbk_tx_err(netif, &txreq, i);
+ continue;
+ }
+ }
+
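+ /* Queue a read-only mapping of the granted page that holds the packet header. */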
+ gnttab_set_map_op(mop, MMAP_VADDR(pending_idx),
+ GNTMAP_host_map | GNTMAP_readonly,
+ txreq.gref, netif->domid);
+ mop++;
+
+ memcpy(&pending_tx_info[pending_idx].req,
+ &txreq, sizeof(txreq));
+ pending_tx_info[pending_idx].netif = netif;
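+ /* Stash the pending index at the head of the skb data until the grant-map result is checked in the second pass. */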
+ *((u16 *)skb->data) = pending_idx;
+
+ __skb_put(skb, data_len);
+
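+ /* The frag entries temporarily store pending indexes rather than page pointers; netbk_fill_frags() substitutes the real pages once the grants are mapped. */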
+ skb_shinfo(skb)->nr_frags = ret;
+ if (data_len < txreq.size) {
+ skb_shinfo(skb)->nr_frags++;
+ skb_shinfo(skb)->frags[0].page =
+ (void *)(unsigned long)pending_idx;
+ }
+
+ __skb_queue_tail(&tx_queue, skb);
+
+ pending_cons++;
+
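+ /* Queue grant-map operations for the remaining fragments of this packet. */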
+ mop = netbk_get_requests(netif, skb, mop);
+
+ netif->tx.req_cons = i;
+ netif_schedule_work(netif);
+
+ if ((mop - tx_map_ops) >= ARRAY_SIZE(tx_map_ops))
+ break;
+ }
+
+ if (mop == tx_map_ops)
+ return;
+
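+ /* Map all of the queued grants with one batched hypercall. */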
+ ret = HYPERVISOR_grant_table_op(
+ GNTTABOP_map_grant_ref, tx_map_ops, mop - tx_map_ops);
+ BUG_ON(ret);
+
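+ /* Second pass: finish building each queued skb now that its grant mappings are in place. */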
+ mop = tx_map_ops;
+ while ((skb = __skb_dequeue(&tx_queue)) != NULL) {
+ netif_tx_request_t *txp;
+
+ pending_idx = *((u16 *)skb->data);
+ netif = pending_tx_info[pending_idx].netif;
+ txp = &pending_tx_info[pending_idx].req;
+
+ /* Check the grant-map status; drop the packet if the mapping failed. */
+ if (unlikely(netbk_tx_check_mop(skb, &mop))) {
+ printk(KERN_ALERT "#### netback grant failed\n");
+ skb_shinfo(skb)->nr_frags = 0;
+ kfree_skb(skb);
+ continue;
+ }
+
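+ /* Copy the header bytes from the grant-mapped page into the skb's linear area. */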
+ data_len = skb->len;
+ memcpy(skb->data,
+ (void *)(MMAP_VADDR(pending_idx)|txp->offset),
+ data_len);
+ if (data_len < txp->size) {
+ /* Append the packet payload as a fragment. */
+ txp->offset += data_len;
+ txp->size -= data_len;
+ } else {
+ /* Schedule a response immediately. */
+ netif_idx_release(pending_idx);
+ }
+
+ /*
+ * Old frontends do not assert data_validated but we
+ * can infer it from csum_blank so test both flags.
+ */
+ if (txp->flags & (NETTXF_data_validated|NETTXF_csum_blank)) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ skb->proto_data_valid = 1;
+ } else {
+ skb->ip_summed = CHECKSUM_NONE;
+ skb->proto_data_valid = 0;
+ }
+ skb->proto_csum_blank = !!(txp->flags & NETTXF_csum_blank);
+
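+ /* Convert the pending indexes stored in the frag array into real page pointers. */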
+ netbk_fill_frags(skb);
+
+ skb->dev = netif->dev;
+ skb->protocol = eth_type_trans(skb, skb->dev);
+
+ netif->stats.rx_bytes += skb->len;
+ netif->stats.rx_packets++;
+
+ netif_rx(skb);
+ netif->dev->last_rx = jiffies;
+ }
+}