linux-2.6.git: drivers/xen/blkback/blkback.c
/******************************************************************************
 * arch/xen/drivers/blkif/backend/main.c
 *
 * Back-end of the driver for virtual block devices. This portion of the
 * driver exports a 'unified' block-device interface that can be accessed
 * by any operating system that implements a compatible front end. A
 * reference front-end implementation can be found in:
 *  arch/xen/drivers/blkif/frontend
 *
 * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
 * Copyright (c) 2005, Christopher Clark
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <xen/balloon.h>
#include <asm/hypervisor.h>
#include "common.h"

/*
 * These are rather arbitrary. They are fairly large because adjacent requests
 * pulled from a communication ring are quite likely to end up being part of
 * the same scatter/gather request at the disc.
 *
 * ** TRY INCREASING 'blkif_reqs' IF WRITE SPEEDS SEEM TOO LOW **
 *
 * This will increase the chances of being able to write whole tracks.
 * 64 should be enough to keep us competitive with Linux.
 */
static int blkif_reqs = 64;
module_param_named(reqs, blkif_reqs, int, 0);
MODULE_PARM_DESC(reqs, "Number of blkback requests to allocate");

static int mmap_pages;
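/*
 * Note: mmap_pages is set in blkif_init() to
 * blkif_reqs * BLKIF_MAX_SEGMENTS_PER_REQUEST.  Assuming the usual blkif
 * limit of 11 segments per request, the default of 64 requests reserves
 * 64 * 11 = 704 empty pages for grant mappings.
 */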

/* Run-time switchable: /sys/module/blkback/parameters/ */
static unsigned int log_stats = 0;
static unsigned int debug_lvl = 0;
module_param(log_stats, int, 0644);
module_param(debug_lvl, int, 0644);

/*
 * Each outstanding request that we've passed to the lower device layers has a
 * 'pending_req' allocated to it. Each bio that completes decrements
 * the pendcnt towards zero. When it hits zero, the specified domain has a
 * response queued for it, with the saved 'id' passed back.
 */
typedef struct {
        blkif_t       *blkif;
        unsigned long  id;
        int            nr_pages;
        atomic_t       pendcnt;
        unsigned short operation;
        int            status;
        struct list_head free_list;
} pending_req_t;
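/*
 * Example of the pendcnt lifecycle: if dispatch_rw_block_io() packs a
 * request into three bios, it sets pendcnt to 3 before submitting them.
 * Each completing bio ends up in __end_block_io_op(), which decrements
 * pendcnt; the final completion unmaps the grants, queues the response
 * and returns the pending_req to the free pool.
 */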

static pending_req_t *pending_reqs;
static struct list_head pending_free;
static DEFINE_SPINLOCK(pending_free_lock);
static DECLARE_WAIT_QUEUE_HEAD(pending_free_wq);

#define BLKBACK_INVALID_HANDLE (~0)

static unsigned long mmap_vstart;
static unsigned long *pending_vaddrs;
static grant_handle_t *pending_grant_handles;

static inline int vaddr_pagenr(pending_req_t *req, int seg)
{
        return (req - pending_reqs) * BLKIF_MAX_SEGMENTS_PER_REQUEST + seg;
}

static inline unsigned long vaddr(pending_req_t *req, int seg)
{
        return pending_vaddrs[vaddr_pagenr(req, seg)];
}

#define pending_handle(_req, _seg) \
        (pending_grant_handles[vaddr_pagenr(_req, _seg)])


static int do_block_io_op(blkif_t *blkif);
static void dispatch_rw_block_io(blkif_t *blkif,
                                 blkif_request_t *req,
                                 pending_req_t *pending_req);
static void make_response(blkif_t *blkif, unsigned long id,
                          unsigned short op, int st);

/******************************************************************
 * misc small helpers
 */
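/*
 * alloc_req()/free_req() manage the fixed pool of pending_req_t structures
 * set up in blkif_init().  alloc_req() returns NULL when the pool is
 * exhausted; free_req() wakes pending_free_wq only on the empty->non-empty
 * transition, so a scheduler thread sleeping for a free request is woken
 * no more often than necessary.
 */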
static pending_req_t* alloc_req(void)
{
        pending_req_t *req = NULL;
        unsigned long flags;

        spin_lock_irqsave(&pending_free_lock, flags);
        if (!list_empty(&pending_free)) {
                req = list_entry(pending_free.next, pending_req_t, free_list);
                list_del(&req->free_list);
        }
        spin_unlock_irqrestore(&pending_free_lock, flags);
        return req;
}

static void free_req(pending_req_t *req)
{
        unsigned long flags;
        int was_empty;

        spin_lock_irqsave(&pending_free_lock, flags);
        was_empty = list_empty(&pending_free);
        list_add(&req->free_list, &pending_free);
        spin_unlock_irqrestore(&pending_free_lock, flags);
        if (was_empty)
                wake_up(&pending_free_wq);
}

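/*
 * At most one request queue is kept plugged per interface.  plug_queue()
 * takes a reference on the queue backing the current bio; switching to a
 * different queue, or finishing a batch in blkif_schedule(), unplugs and
 * releases the previous one so its queued requests are issued to the disk.
 */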
static void unplug_queue(blkif_t *blkif)
{
        if (blkif->plug == NULL)
                return;
        if (blkif->plug->unplug_fn)
                blkif->plug->unplug_fn(blkif->plug);
        blk_put_queue(blkif->plug);
        blkif->plug = NULL;
}

static void plug_queue(blkif_t *blkif, struct bio *bio)
{
        request_queue_t *q = bdev_get_queue(bio->bi_bdev);

        if (q == blkif->plug)
                return;
        unplug_queue(blkif);
        blk_get_queue(q);
        blkif->plug = q;
}

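/*
 * Tear down the grant mappings of a request: every still-valid handle is
 * batched into a single GNTTABOP_unmap_grant_ref hypercall and then reset
 * to BLKBACK_INVALID_HANDLE.
 */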
static void fast_flush_area(pending_req_t *req)
{
        struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        unsigned int i, invcount = 0;
        grant_handle_t handle;
        int ret;

        for (i = 0; i < req->nr_pages; i++) {
                handle = pending_handle(req, i);
                if (handle == BLKBACK_INVALID_HANDLE)
                        continue;
                gnttab_set_unmap_op(&unmap[i], vaddr(req, i), GNTMAP_host_map,
                                    handle);
                pending_handle(req, i) = BLKBACK_INVALID_HANDLE;
                invcount++;
        }

        ret = HYPERVISOR_grant_table_op(
                GNTTABOP_unmap_grant_ref, unmap, invcount);
        BUG_ON(ret);
}

/******************************************************************
 * SCHEDULER FUNCTIONS
 */

static void print_stats(blkif_t *blkif)
{
        printk(KERN_DEBUG "%s: oo %3d  |  rd %4d  |  wr %4d\n",
               current->comm, blkif->st_oo_req,
               blkif->st_rd_req, blkif->st_wr_req);
        blkif->st_print = jiffies + msecs_to_jiffies(10 * 1000);
        blkif->st_rd_req = 0;
        blkif->st_wr_req = 0;
        blkif->st_oo_req = 0;
}

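/*
 * Main loop of the per-interface xenblkd kernel thread.  It sleeps until
 * the frontend has signalled work *and* a free pending_req is available,
 * drains the ring via do_block_io_op(), then unplugs the queue so the
 * batched bios are dispatched.
 */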
int blkif_schedule(void *arg)
{
        blkif_t *blkif = arg;

        blkif_get(blkif);

        if (debug_lvl)
                printk(KERN_DEBUG "%s: started\n", current->comm);

        while (!kthread_should_stop()) {
                wait_event_interruptible(
                        blkif->wq,
                        blkif->waiting_reqs || kthread_should_stop());
                wait_event_interruptible(
                        pending_free_wq,
                        !list_empty(&pending_free) || kthread_should_stop());

                blkif->waiting_reqs = 0;
                smp_mb(); /* clear flag *before* checking for work */

                if (do_block_io_op(blkif))
                        blkif->waiting_reqs = 1;
                unplug_queue(blkif);

                if (log_stats && time_after(jiffies, blkif->st_print))
                        print_stats(blkif);
        }

        if (log_stats)
                print_stats(blkif);
        if (debug_lvl)
                printk(KERN_DEBUG "%s: exiting\n", current->comm);

        blkif->xenblkd = NULL;
        blkif_put(blkif);

        return 0;
}

/******************************************************************
 * COMPLETION CALLBACK -- Called as bio->bi_end_io()
 */

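/*
 * Under the bio completion convention of this kernel generation, bi_end_io
 * may be invoked for partial completions; end_block_io_op() therefore
 * ignores calls while bio->bi_size is non-zero and only completes the
 * request on the final call.
 */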
static void __end_block_io_op(pending_req_t *pending_req, int uptodate)
{
        /* An error fails the entire request. */
        if (!uptodate) {
                DPRINTK("Buffer not up-to-date at end of operation\n");
                pending_req->status = BLKIF_RSP_ERROR;
        }

        if (atomic_dec_and_test(&pending_req->pendcnt)) {
                fast_flush_area(pending_req);
                make_response(pending_req->blkif, pending_req->id,
                              pending_req->operation, pending_req->status);
                blkif_put(pending_req->blkif);
                free_req(pending_req);
        }
}

static int end_block_io_op(struct bio *bio, unsigned int done, int error)
{
        if (bio->bi_size != 0)
                return 1;
        __end_block_io_op(bio->bi_private, !error);
        bio_put(bio);
        return error;
}


/******************************************************************************
 * NOTIFICATION FROM GUEST OS.
 */

static void blkif_notify_work(blkif_t *blkif)
{
        blkif->waiting_reqs = 1;
        wake_up(&blkif->wq);
}

irqreturn_t blkif_be_int(int irq, void *dev_id, struct pt_regs *regs)
{
        blkif_notify_work(dev_id);
        return IRQ_HANDLED;
}



/******************************************************************
 * DOWNWARD CALLS -- These interface with the block-device layer proper.
 */

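/*
 * Pull requests off the shared ring and dispatch them.  Returns non-zero
 * if unprocessed requests remain (e.g. the pending_req pool ran dry, which
 * is accounted in st_oo_req); blkif_schedule() then re-arms waiting_reqs
 * and retries on its next pass.
 */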
static int do_block_io_op(blkif_t *blkif)
{
        blkif_back_ring_t *blk_ring = &blkif->blk_ring;
        blkif_request_t *req;
        pending_req_t *pending_req;
        RING_IDX rc, rp;
        int more_to_do = 0;

        rc = blk_ring->req_cons;
        rp = blk_ring->sring->req_prod;
        rmb(); /* Ensure we see queued requests up to 'rp'. */

        while ((rc != rp) && !RING_REQUEST_CONS_OVERFLOW(blk_ring, rc)) {

                pending_req = alloc_req();
                if (NULL == pending_req) {
                        blkif->st_oo_req++;
                        more_to_do = 1;
                        break;
                }

                req = RING_GET_REQUEST(blk_ring, rc);
                blk_ring->req_cons = ++rc; /* before make_response() */

                switch (req->operation) {
                case BLKIF_OP_READ:
                        blkif->st_rd_req++;
                        dispatch_rw_block_io(blkif, req, pending_req);
                        break;
                case BLKIF_OP_WRITE:
                        blkif->st_wr_req++;
                        dispatch_rw_block_io(blkif, req, pending_req);
                        break;
                default:
                        DPRINTK("error: unknown block io operation [%d]\n",
                                req->operation);
                        make_response(blkif, req->id, req->operation,
                                      BLKIF_RSP_ERROR);
                        free_req(pending_req);
                        break;
                }
        }
        return more_to_do;
}

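/*
 * Grant-map the request's segments, translate the virtual device and
 * sector through vbd_translate(), pack consecutive segments into as few
 * bios as bio_add_page() allows, and submit them.  For a guest WRITE the
 * grants are mapped read-only, since the backend only needs to read the
 * data it is about to put on disk.
 */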
static void dispatch_rw_block_io(blkif_t *blkif,
                                 blkif_request_t *req,
                                 pending_req_t *pending_req)
{
        extern void ll_rw_block(int rw, int nr, struct buffer_head * bhs[]);
        int operation = (req->operation == BLKIF_OP_WRITE) ? WRITE : READ;
        struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        struct phys_req preq;
        struct {
                unsigned long buf; unsigned int nsec;
        } seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        unsigned int nseg;
        struct bio *bio = NULL, *biolist[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        int ret, i, nbio = 0;

        /* Check that number of segments is sane. */
        nseg = req->nr_segments;
        if (unlikely(nseg == 0) ||
            unlikely(nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) {
                DPRINTK("Bad number of segments in request (%d)\n", nseg);
                goto fail_response;
        }

        preq.dev           = req->handle;
        preq.sector_number = req->sector_number;
        preq.nr_sects      = 0;

        pending_req->blkif     = blkif;
        pending_req->id        = req->id;
        pending_req->operation = operation;
        pending_req->status    = BLKIF_RSP_OKAY;
        pending_req->nr_pages  = nseg;

        for (i = 0; i < nseg; i++) {
                uint32_t flags;

                seg[i].nsec = req->seg[i].last_sect -
                        req->seg[i].first_sect + 1;

                if ((req->seg[i].last_sect >= (PAGE_SIZE >> 9)) ||
                    (seg[i].nsec <= 0))
                        goto fail_response;
                preq.nr_sects += seg[i].nsec;

                flags = GNTMAP_host_map;
                if (operation == WRITE)
                        flags |= GNTMAP_readonly;
                gnttab_set_map_op(&map[i], vaddr(pending_req, i), flags,
                                  req->seg[i].gref, blkif->domid);
        }

        ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map, nseg);
        BUG_ON(ret);

        for (i = 0; i < nseg; i++) {
                if (unlikely(map[i].status != 0)) {
                        DPRINTK("invalid buffer -- could not remap it\n");
                        goto fail_flush;
                }

                pending_handle(pending_req, i) = map[i].handle;
                set_phys_to_machine(__pa(vaddr(
                        pending_req, i)) >> PAGE_SHIFT,
                        FOREIGN_FRAME(map[i].dev_bus_addr >> PAGE_SHIFT));
                seg[i].buf  = map[i].dev_bus_addr |
                        (req->seg[i].first_sect << 9);
        }

        if (vbd_translate(&preq, blkif, operation) != 0) {
                DPRINTK("access denied: %s of [%llu,%llu] on dev=%04x\n",
                        operation == READ ? "read" : "write",
                        preq.sector_number,
                        preq.sector_number + preq.nr_sects, preq.dev);
                goto fail_flush;
        }

        for (i = 0; i < nseg; i++) {
                if (((int)preq.sector_number|(int)seg[i].nsec) &
                    ((bdev_hardsect_size(preq.bdev) >> 9) - 1)) {
                        DPRINTK("Misaligned I/O request from domain %d",
                                blkif->domid);
                        goto fail_put_bio;
                }

                while ((bio == NULL) ||
                       (bio_add_page(bio,
                                     virt_to_page(vaddr(pending_req, i)),
                                     seg[i].nsec << 9,
                                     seg[i].buf & ~PAGE_MASK) == 0)) {
                        bio = biolist[nbio++] = bio_alloc(GFP_KERNEL, nseg-i);
                        if (unlikely(bio == NULL))
                                goto fail_put_bio;

                        bio->bi_bdev    = preq.bdev;
                        bio->bi_private = pending_req;
                        bio->bi_end_io  = end_block_io_op;
                        bio->bi_sector  = preq.sector_number;
                }

                preq.sector_number += seg[i].nsec;
        }

        plug_queue(blkif, bio);
        atomic_set(&pending_req->pendcnt, nbio);
        blkif_get(blkif);

        for (i = 0; i < nbio; i++)
                submit_bio(operation, biolist[i]);

        return;

 fail_put_bio:
        for (i = 0; i < (nbio-1); i++)
                bio_put(biolist[i]);
 fail_flush:
        fast_flush_area(pending_req);
 fail_response:
        make_response(blkif, req->id, req->operation, BLKIF_RSP_ERROR);
        free_req(pending_req);
}

/******************************************************************
 * MISCELLANEOUS SETUP / TEARDOWN / DEBUGGING
 */

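/*
 * Queue a response on the shared ring under blk_ring_lock and, if
 * RING_PUSH_RESPONSES_AND_CHECK_NOTIFY says the frontend is waiting,
 * notify it over the event channel.  The tail check also decides whether
 * the backend still has requests to consume and, if so, kicks
 * blkif_notify_work().
 */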
static void make_response(blkif_t *blkif, unsigned long id,
                          unsigned short op, int st)
{
        blkif_response_t *resp;
        unsigned long     flags;
        blkif_back_ring_t *blk_ring = &blkif->blk_ring;
        int more_to_do = 0;
        int notify;

        spin_lock_irqsave(&blkif->blk_ring_lock, flags);

        /* Place on the response ring for the relevant domain. */
        resp = RING_GET_RESPONSE(blk_ring, blk_ring->rsp_prod_pvt);
        resp->id        = id;
        resp->operation = op;
        resp->status    = st;
        blk_ring->rsp_prod_pvt++;
        RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(blk_ring, notify);

        if (blk_ring->rsp_prod_pvt == blk_ring->req_cons) {
                /*
                 * Tail check for pending requests. Allows frontend to avoid
                 * notifications if requests are already in flight (lower
                 * overheads and promotes batching).
                 */
                RING_FINAL_CHECK_FOR_REQUESTS(blk_ring, more_to_do);

        } else if (RING_HAS_UNCONSUMED_REQUESTS(blk_ring)) {
                more_to_do = 1;

        }
        spin_unlock_irqrestore(&blkif->blk_ring_lock, flags);

        if (more_to_do)
                blkif_notify_work(blkif);
        if (notify)
                notify_remote_via_irq(blkif->irq);
}

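/*
 * Module initialisation: reserve an empty page range for grant mappings
 * (through the balloon driver, or the IA64-specific helper under
 * CONFIG_XEN_IA64_DOM0_NON_VP), allocate the pending_req pool together
 * with its per-segment vaddr and grant-handle arrays, then bring up the
 * interface and xenbus layers.
 */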
static int __init blkif_init(void)
{
        struct page *page;
        int i;

        if (!is_running_on_xen())
                return -ENODEV;

        mmap_pages            = blkif_reqs * BLKIF_MAX_SEGMENTS_PER_REQUEST;

#ifdef CONFIG_XEN_IA64_DOM0_NON_VP
        extern unsigned long alloc_empty_foreign_map_page_range(
                unsigned long pages);
        mmap_vstart = (unsigned long)
                alloc_empty_foreign_map_page_range(mmap_pages);
#else /* ! ia64 */
        page = balloon_alloc_empty_page_range(mmap_pages);
        if (page == NULL)
                return -ENOMEM;
        mmap_vstart = (unsigned long)pfn_to_kaddr(page_to_pfn(page));
#endif

        pending_reqs          = kmalloc(sizeof(pending_reqs[0]) *
                                        blkif_reqs, GFP_KERNEL);
        pending_grant_handles = kmalloc(sizeof(pending_grant_handles[0]) *
                                        mmap_pages, GFP_KERNEL);
        pending_vaddrs        = kmalloc(sizeof(pending_vaddrs[0]) *
                                        mmap_pages, GFP_KERNEL);
        if (!pending_reqs || !pending_grant_handles || !pending_vaddrs) {
                kfree(pending_reqs);
                kfree(pending_grant_handles);
                kfree(pending_vaddrs);
                printk("%s: out of memory\n", __FUNCTION__);
                return -ENOMEM;
        }

        blkif_interface_init();

        printk("%s: reqs=%d, pages=%d, mmap_vstart=0x%lx\n",
               __FUNCTION__, blkif_reqs, mmap_pages, mmap_vstart);
        BUG_ON(mmap_vstart == 0);
        for (i = 0; i < mmap_pages; i++) {
                pending_vaddrs[i] = mmap_vstart + (i << PAGE_SHIFT);
                pending_grant_handles[i] = BLKBACK_INVALID_HANDLE;
        }

        memset(pending_reqs, 0, blkif_reqs * sizeof(pending_reqs[0]));
        INIT_LIST_HEAD(&pending_free);

        for (i = 0; i < blkif_reqs; i++)
                list_add_tail(&pending_reqs[i].free_list, &pending_free);

        blkif_xenbus_init();

        return 0;
}

module_init(blkif_init);

MODULE_LICENSE("Dual BSD/GPL");