/******************************************************************************
 * arch/xen/drivers/blkif/backend/main.c
 *
 * Back-end of the driver for virtual block devices. This portion of the
 * driver exports a 'unified' block-device interface that can be accessed
 * by any operating system that implements a compatible front end. A
 * reference front-end implementation can be found in:
 *  arch/xen/drivers/blkif/frontend
 *
 * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
 * Copyright (c) 2005, Christopher Clark
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <xen/balloon.h>
#include <asm/hypervisor.h>
#include "common.h"	/* blkif_t, DPRINTK, backend helpers used below */
/*
 * These are rather arbitrary. They are fairly large because adjacent requests
 * pulled from a communication ring are quite likely to end up being part of
 * the same scatter/gather request at the disc.
 *
 * ** TRY INCREASING 'blkif_reqs' IF WRITE SPEEDS SEEM TOO LOW **
 *
 * This will increase the chances of being able to write whole tracks.
 * 64 should be enough to keep us competitive with Linux.
 */
static int blkif_reqs = 64;
module_param_named(reqs, blkif_reqs, int, 0);
MODULE_PARM_DESC(reqs, "Number of blkback requests to allocate");
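/*
 * Example (illustrative only; it assumes the module is loaded as "blkback",
 * as the sysfs path below suggests): the pool size can only be set at load
 * time, e.g.
 *
 *     modprobe blkback reqs=128
 *
 * Each extra request reserves BLKIF_MAX_SEGMENTS_PER_REQUEST mapping pages
 * (see blkif_init() below).
 */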
static int mmap_pages;

/* Run-time switchable: /sys/module/blkback/parameters/ */
static unsigned int log_stats = 0;
static unsigned int debug_lvl = 0;
module_param(log_stats, int, 0644);
module_param(debug_lvl, int, 0644);
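/*
 * Example (illustrative): both knobs can be flipped on a running system, e.g.
 *
 *     echo 1 > /sys/module/blkback/parameters/log_stats
 *     echo 1 > /sys/module/blkback/parameters/debug_lvl
 *
 * log_stats makes the per-interface thread print request counters
 * periodically; debug_lvl enables its start/exit messages.
 */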
/*
 * Each outstanding request that we've passed to the lower device layers has a
 * 'pending_req' allocated to it. Each buffer_head that completes decrements
 * the pendcnt towards zero. When it hits zero, the specified domain has a
 * response queued for it, with the saved 'id' passed back.
 */
typedef struct {
        blkif_t          *blkif;
        unsigned long     id;
        int               nr_pages;
        atomic_t          pendcnt;
        unsigned short    operation;
        int               status;
        struct list_head  free_list;
} pending_req_t;

static pending_req_t *pending_reqs;
static struct list_head pending_free;
static DEFINE_SPINLOCK(pending_free_lock);
static DECLARE_WAIT_QUEUE_HEAD(pending_free_wq);
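/*
 * Free-pool protocol: pending_free holds the idle pending_req_t entries,
 * guarded by pending_free_lock. alloc_req() pops an entry (or returns NULL),
 * free_req() pushes one back and wakes pending_free_wq, on which
 * blkif_schedule() sleeps whenever the pool is exhausted.
 */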
#define BLKBACK_INVALID_HANDLE (~0)

static unsigned long mmap_vstart;
static unsigned long *pending_vaddrs;
static grant_handle_t *pending_grant_handles;
static inline int vaddr_pagenr(pending_req_t *req, int seg)
{
        return (req - pending_reqs) * BLKIF_MAX_SEGMENTS_PER_REQUEST + seg;
}

static inline unsigned long vaddr(pending_req_t *req, int seg)
{
        return pending_vaddrs[vaddr_pagenr(req, seg)];
}

#define pending_handle(_req, _seg) \
        (pending_grant_handles[vaddr_pagenr(_req, _seg)])
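/*
 * Each pending_req owns a fixed window of BLKIF_MAX_SEGMENTS_PER_REQUEST
 * pages inside the mmap area. Worked example (assuming the value 11 used by
 * this interface version): segment 3 of the second request (index 1) lands
 * on page 1*11 + 3 = 14, i.e. pending_vaddrs[14] / pending_grant_handles[14].
 */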
static int do_block_io_op(blkif_t *blkif);
static void dispatch_rw_block_io(blkif_t *blkif,
                                 blkif_request_t *req,
                                 pending_req_t *pending_req);
static void make_response(blkif_t *blkif, unsigned long id,
                          unsigned short op, int st);
/******************************************************************
 * misc small helpers
 */

static pending_req_t* alloc_req(void)
{
        pending_req_t *req = NULL;
        unsigned long flags;

        spin_lock_irqsave(&pending_free_lock, flags);
        if (!list_empty(&pending_free)) {
                req = list_entry(pending_free.next, pending_req_t, free_list);
                list_del(&req->free_list);
        }
        spin_unlock_irqrestore(&pending_free_lock, flags);
        return req;
}
static void free_req(pending_req_t *req)
{
        unsigned long flags;
        int was_empty;

        spin_lock_irqsave(&pending_free_lock, flags);
        was_empty = list_empty(&pending_free);
        list_add(&req->free_list, &pending_free);
        spin_unlock_irqrestore(&pending_free_lock, flags);
        if (was_empty)
                wake_up(&pending_free_wq);
}
static void unplug_queue(blkif_t *blkif)
{
        if (blkif->plug == NULL)
                return;
        if (blkif->plug->unplug_fn)
                blkif->plug->unplug_fn(blkif->plug);
        blk_put_queue(blkif->plug);
        blkif->plug = NULL;
}
static void plug_queue(blkif_t *blkif, struct bio *bio)
{
        request_queue_t *q = bdev_get_queue(bio->bi_bdev);

        if (q == blkif->plug)
                return;
        unplug_queue(blkif);
        blk_get_queue(q);
        blkif->plug = q;
}
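/*
 * Tear down the grant mappings of a completed (or failed) request so the
 * frames can be returned to the guest; slots already marked
 * BLKBACK_INVALID_HANDLE are skipped.
 */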
static void fast_flush_area(pending_req_t *req)
{
        struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        unsigned int i, invcount = 0;
        grant_handle_t handle;
        int ret;

        for (i = 0; i < req->nr_pages; i++) {
                handle = pending_handle(req, i);
                if (handle == BLKBACK_INVALID_HANDLE)
                        continue;
                /* Pack unmap ops densely so exactly 'invcount' entries are valid. */
                gnttab_set_unmap_op(&unmap[invcount], vaddr(req, i),
                                    GNTMAP_host_map, handle);
                pending_handle(req, i) = BLKBACK_INVALID_HANDLE;
                invcount++;
        }

        ret = HYPERVISOR_grant_table_op(
                GNTTABOP_unmap_grant_ref, unmap, invcount);
        BUG_ON(ret);
}
/******************************************************************
 * SCHEDULER FUNCTIONS
 */

static void print_stats(blkif_t *blkif)
{
        printk(KERN_DEBUG "%s: oo %3d | rd %4d | wr %4d\n",
               current->comm, blkif->st_oo_req,
               blkif->st_rd_req, blkif->st_wr_req);
        blkif->st_print = jiffies + msecs_to_jiffies(10 * 1000);
        blkif->st_rd_req = 0;
        blkif->st_wr_req = 0;
        blkif->st_oo_req = 0;
}
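/*
 * Per-interface kernel thread: sleeps until the frontend posts requests and
 * a free pending_req is available, then drains the ring via do_block_io_op()
 * and unplugs the underlying queue so the batched bios are issued.
 */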
int blkif_schedule(void *arg)
{
        blkif_t *blkif = arg;

        blkif_get(blkif);

        if (debug_lvl)
                printk(KERN_DEBUG "%s: started\n", current->comm);

        while (!kthread_should_stop()) {
                /* Wait for the frontend to post work... */
                wait_event_interruptible(
                        blkif->wq,
                        blkif->waiting_reqs || kthread_should_stop());
                /* ...and for a pending_req to become available. */
                wait_event_interruptible(
                        pending_free_wq,
                        !list_empty(&pending_free) || kthread_should_stop());

                blkif->waiting_reqs = 0;
                smp_mb(); /* clear flag *before* checking for work */

                if (do_block_io_op(blkif))
                        blkif->waiting_reqs = 1;
                unplug_queue(blkif);

                if (log_stats && time_after(jiffies, blkif->st_print))
                        print_stats(blkif);
        }

        if (log_stats)
                print_stats(blkif);
        if (debug_lvl)
                printk(KERN_DEBUG "%s: exiting\n", current->comm);

        blkif->xenblkd = NULL;
        blkif_put(blkif);

        return 0;
}
/******************************************************************
 * COMPLETION CALLBACK -- Called as bh->b_end_io()
 */
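/*
 * Each bio carries its owning pending_req_t in bi_private. Any error marks
 * the whole request failed; the final completion unmaps the granted pages
 * and queues the response back to the frontend.
 */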
static void __end_block_io_op(pending_req_t *pending_req, int uptodate)
{
        /* An error fails the entire request. */
        if (!uptodate) {
                DPRINTK("Buffer not up-to-date at end of operation\n");
                pending_req->status = BLKIF_RSP_ERROR;
        }

        if (atomic_dec_and_test(&pending_req->pendcnt)) {
                fast_flush_area(pending_req);
                make_response(pending_req->blkif, pending_req->id,
                              pending_req->operation, pending_req->status);
                blkif_put(pending_req->blkif);
                free_req(pending_req);
        }
}
static int end_block_io_op(struct bio *bio, unsigned int done, int error)
{
        /* Wait until the bio is fully complete before finishing the request. */
        if (bio->bi_size != 0)
                return 1;
        __end_block_io_op(bio->bi_private, !error);
        bio_put(bio);
        return error;
}
/******************************************************************************
 * NOTIFICATION FROM GUEST OS.
 */

static void blkif_notify_work(blkif_t *blkif)
{
        blkif->waiting_reqs = 1;
        wake_up(&blkif->wq);
}

irqreturn_t blkif_be_int(int irq, void *dev_id, struct pt_regs *regs)
{
        blkif_notify_work(dev_id);
        return IRQ_HANDLED;
}
/******************************************************************
 * DOWNWARD CALLS -- These interface with the block-device layer proper.
 */
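/*
 * Consume requests from the shared ring until it is empty or the pending_req
 * pool runs dry. Returns nonzero when work remains so that blkif_schedule()
 * re-runs instead of going back to sleep.
 */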
static int do_block_io_op(blkif_t *blkif)
{
        blkif_back_ring_t *blk_ring = &blkif->blk_ring;
        blkif_request_t *req;
        pending_req_t *pending_req;
        RING_IDX rc, rp;
        int more_to_do = 0;

        rc = blk_ring->req_cons;
        rp = blk_ring->sring->req_prod;
        rmb(); /* Ensure we see queued requests up to 'rp'. */

        while ((rc != rp) && !RING_REQUEST_CONS_OVERFLOW(blk_ring, rc)) {

                pending_req = alloc_req();
                if (NULL == pending_req) {
                        blkif->st_oo_req++;
                        more_to_do = 1;
                        break;
                }

                req = RING_GET_REQUEST(blk_ring, rc);
                blk_ring->req_cons = ++rc; /* before make_response() */

                switch (req->operation) {
                case BLKIF_OP_READ:
                        blkif->st_rd_req++;
                        dispatch_rw_block_io(blkif, req, pending_req);
                        break;
                case BLKIF_OP_WRITE:
                        blkif->st_wr_req++;
                        dispatch_rw_block_io(blkif, req, pending_req);
                        break;
                default:
                        DPRINTK("error: unknown block io operation [%d]\n",
                                req->operation);
                        make_response(blkif, req->id, req->operation,
                                      BLKIF_RSP_ERROR);
                        free_req(pending_req);
                        break;
                }
        }
        return more_to_do;
}
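/*
 * Turn one blkif request into bios: map the guest's granted frames into the
 * pending_req's reserved pages, translate the virtual-device extent via
 * vbd_translate(), then build as few bios as bio_add_page() allows and
 * submit them against the real device.
 */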
static void dispatch_rw_block_io(blkif_t *blkif,
                                 blkif_request_t *req,
                                 pending_req_t *pending_req)
{
        extern void ll_rw_block(int rw, int nr, struct buffer_head * bhs[]);
        int operation = (req->operation == BLKIF_OP_WRITE) ? WRITE : READ;
        struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        struct phys_req preq;
        struct {
                unsigned long buf; unsigned int nsec;
        } seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        unsigned int nseg;
        struct bio *bio = NULL, *biolist[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        int ret, i, nbio = 0;

        /* Check that number of segments is sane. */
        nseg = req->nr_segments;
        if (unlikely(nseg == 0) ||
            unlikely(nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) {
                DPRINTK("Bad number of segments in request (%d)\n", nseg);
                goto fail_response;
        }

        preq.dev           = req->handle;
        preq.sector_number = req->sector_number;
        preq.nr_sects      = 0;

        pending_req->blkif     = blkif;
        pending_req->id        = req->id;
        pending_req->operation = operation;
        pending_req->status    = BLKIF_RSP_OKAY;
        pending_req->nr_pages  = nseg;

        /* Set up a grant-map operation for every segment in the request. */
        for (i = 0; i < nseg; i++) {
                uint32_t flags;

                seg[i].nsec = req->seg[i].last_sect -
                        req->seg[i].first_sect + 1;

                if ((req->seg[i].last_sect >= (PAGE_SIZE >> 9)) ||
                    (seg[i].nsec <= 0))
                        goto fail_response;
                preq.nr_sects += seg[i].nsec;

                flags = GNTMAP_host_map;
                if (operation == WRITE)
                        flags |= GNTMAP_readonly;
                gnttab_set_map_op(&map[i], vaddr(pending_req, i), flags,
                                  req->seg[i].gref, blkif->domid);
        }

        ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map, nseg);
        BUG_ON(ret);

        for (i = 0; i < nseg; i++) {
                if (unlikely(map[i].status != 0)) {
                        DPRINTK("invalid buffer -- could not remap it\n");
                        goto fail_flush;
                }

                pending_handle(pending_req, i) = map[i].handle;
                set_phys_to_machine(__pa(vaddr(
                        pending_req, i)) >> PAGE_SHIFT,
                        FOREIGN_FRAME(map[i].dev_bus_addr >> PAGE_SHIFT));
                seg[i].buf = map[i].dev_bus_addr |
                        (req->seg[i].first_sect << 9);
        }

        if (vbd_translate(&preq, blkif, operation) != 0) {
                DPRINTK("access denied: %s of [%llu,%llu] on dev=%04x\n",
                        operation == READ ? "read" : "write",
                        preq.sector_number,
                        preq.sector_number + preq.nr_sects, preq.dev);
                goto fail_flush;
        }

        for (i = 0; i < nseg; i++) {
                if (((int)preq.sector_number|(int)seg[i].nsec) &
                    ((bdev_hardsect_size(preq.bdev) >> 9) - 1)) {
                        DPRINTK("Misaligned I/O request from domain %d",
                                blkif->domid);
                        goto fail_put_bio;
                }

                while ((bio == NULL) ||
                       (bio_add_page(bio,
                                     virt_to_page(vaddr(pending_req, i)),
                                     seg[i].nsec << 9,
                                     seg[i].buf & ~PAGE_MASK) == 0)) {
                        bio = biolist[nbio++] = bio_alloc(GFP_KERNEL, nseg-i);
                        if (unlikely(bio == NULL))
                                goto fail_put_bio;

                        bio->bi_bdev    = preq.bdev;
                        bio->bi_private = pending_req;
                        bio->bi_end_io  = end_block_io_op;
                        bio->bi_sector  = preq.sector_number;
                }

                preq.sector_number += seg[i].nsec;
        }

        plug_queue(blkif, bio);
        atomic_set(&pending_req->pendcnt, nbio);
        blkif_get(blkif);

        for (i = 0; i < nbio; i++)
                submit_bio(operation, biolist[i]);

        return;

 fail_put_bio:
        /* Drop every bio allocated so far; the last slot may be NULL if
         * bio_alloc() failed. */
        for (i = 0; i < nbio; i++)
                if (biolist[i] != NULL)
                        bio_put(biolist[i]);
 fail_flush:
        fast_flush_area(pending_req);
 fail_response:
        make_response(blkif, req->id, req->operation, BLKIF_RSP_ERROR);
        free_req(pending_req);
}
/******************************************************************
 * MISCELLANEOUS SETUP / TEARDOWN / DEBUGGING
 */
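/*
 * Queue a response on the shared ring (under blk_ring_lock), notify the
 * frontend if the ring macros say it is needed, and re-kick the worker
 * thread if more requests are already waiting.
 */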
static void make_response(blkif_t *blkif, unsigned long id,
                          unsigned short op, int st)
{
        blkif_response_t *resp;
        unsigned long flags;
        blkif_back_ring_t *blk_ring = &blkif->blk_ring;
        int more_to_do = 0;
        int notify;

        spin_lock_irqsave(&blkif->blk_ring_lock, flags);

        /* Place on the response ring for the relevant domain. */
        resp = RING_GET_RESPONSE(blk_ring, blk_ring->rsp_prod_pvt);
        resp->id        = id;
        resp->operation = op;
        resp->status    = st;
        blk_ring->rsp_prod_pvt++;
        RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(blk_ring, notify);

        if (blk_ring->rsp_prod_pvt == blk_ring->req_cons) {
                /*
                 * Tail check for pending requests. Allows frontend to avoid
                 * notifications if requests are already in flight (lower
                 * overheads and promotes batching).
                 */
                RING_FINAL_CHECK_FOR_REQUESTS(blk_ring, more_to_do);

        } else if (RING_HAS_UNCONSUMED_REQUESTS(blk_ring)) {
                more_to_do = 1;
        }
        spin_unlock_irqrestore(&blkif->blk_ring_lock, flags);

        if (more_to_do)
                blkif_notify_work(blkif);
        if (notify)
                notify_remote_via_irq(blkif->irq);
}
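/*
 * Module initialisation: reserve an empty virtual address range for grant
 * mappings, allocate the pending_req pool and its per-page bookkeeping
 * arrays, then hook the backend up to the interface and xenbus layers.
 */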
static int __init blkif_init(void)
{
        struct page *page;
        int i;

        if (!is_running_on_xen())
                return -ENODEV;

        mmap_pages = blkif_reqs * BLKIF_MAX_SEGMENTS_PER_REQUEST;

#ifdef CONFIG_XEN_IA64_DOM0_NON_VP
        extern unsigned long alloc_empty_foreign_map_page_range(
                unsigned long pages);
        mmap_vstart = (unsigned long)
                alloc_empty_foreign_map_page_range(mmap_pages);
#else /* ! ia64 */
        page = balloon_alloc_empty_page_range(mmap_pages);
        BUG_ON(page == NULL);
        mmap_vstart = (unsigned long)pfn_to_kaddr(page_to_pfn(page));
#endif

        pending_reqs          = kmalloc(sizeof(pending_reqs[0]) *
                                        blkif_reqs, GFP_KERNEL);
        pending_grant_handles = kmalloc(sizeof(pending_grant_handles[0]) *
                                        mmap_pages, GFP_KERNEL);
        pending_vaddrs        = kmalloc(sizeof(pending_vaddrs[0]) *
                                        mmap_pages, GFP_KERNEL);
        if (!pending_reqs || !pending_grant_handles || !pending_vaddrs) {
                kfree(pending_reqs);
                kfree(pending_grant_handles);
                kfree(pending_vaddrs);
                printk("%s: out of memory\n", __FUNCTION__);
                return -ENOMEM;
        }

        blkif_interface_init();

        printk("%s: reqs=%d, pages=%d, mmap_vstart=0x%lx\n",
               __FUNCTION__, blkif_reqs, mmap_pages, mmap_vstart);
        BUG_ON(mmap_vstart == 0);
        for (i = 0; i < mmap_pages; i++) {
                pending_vaddrs[i] = mmap_vstart + (i << PAGE_SHIFT);
                pending_grant_handles[i] = BLKBACK_INVALID_HANDLE;
        }

        /* Zero the whole array, not just sizeof(pointer) bytes. */
        memset(pending_reqs, 0, sizeof(pending_reqs[0]) * blkif_reqs);
        INIT_LIST_HEAD(&pending_free);

        for (i = 0; i < blkif_reqs; i++)
                list_add_tail(&pending_reqs[i].free_list, &pending_free);

        blkif_xenbus_init();

        return 0;
}
module_init(blkif_init);

MODULE_LICENSE("Dual BSD/GPL");