/******************************************************************************
 * drivers/xen/blktap/blktap.c
 *
 * Back-end driver for user level virtual block devices. This portion of the
 * driver exports a 'unified' block-device interface that can be accessed
 * by any operating system that implements a compatible front end. Requests
 * are remapped to a user-space memory region.
 *
 * Based on the blkback driver code.
 *
 * Copyright (c) 2004-2005, Andrew Warfield and Julian Chesterfield
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <asm/hypervisor.h>
#include "common.h"		/* local blktap/blkback definitions */
#include <xen/balloon.h>
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/major.h>
#include <linux/gfp.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <asm/tlbflush.h>
#define MAX_TAP_DEV 100     /* the maximum number of tapdisk ring devices    */
#define MAX_DEV_NAME 100    /* the max tapdisk ring device name e.g. blktap0 */
struct class *xen_class;
EXPORT_SYMBOL_GPL(xen_class);

/*
 * Set up the xen class. This should probably go in another file, but
 * since blktap is the only user of it so far, it gets to keep it.
 */
int setup_xen_class(void)
{
	if (xen_class)
		return 0;

	xen_class = class_create(THIS_MODULE, "xen");
	if (IS_ERR(xen_class)) {
		int ret = PTR_ERR(xen_class);
		xen_class = NULL;
		return ret;
	}

	return 0;
}
/*
 * The maximum number of requests that can be outstanding at any time
 * is mmap_alloc * MAX_PENDING_REQS; the number of user-space pages
 * needed to map their data is
 *
 *   [mmap_alloc * MAX_PENDING_REQS * BLKIF_MAX_SEGMENTS_PER_REQUEST]
 *
 * where mmap_alloc < MAX_DYNAMIC_MEM.
 *
 * TODO:
 * mmap_alloc is initialised to 2 and should be adjustable on the fly via
 * sysfs.
 */
#define MAX_DYNAMIC_MEM 64
#define MAX_PENDING_REQS 64
#define MMAP_PAGES (MAX_PENDING_REQS * BLKIF_MAX_SEGMENTS_PER_REQUEST)
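/*
 * Sizing note (illustrative): with MAX_PENDING_REQS = 64 and the usual
 * BLKIF_MAX_SEGMENTS_PER_REQUEST of 11 from the Xen blkif interface,
 * MMAP_PAGES is 64 * 11 = 704 pages per allocation batch -- 2.75MB of
 * address space on 4KB pages.
 */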
#define MMAP_VADDR(_start, _req, _seg)                                  \
	((_start) +                                                     \
	 ((_req) * BLKIF_MAX_SEGMENTS_PER_REQUEST * PAGE_SIZE) +        \
	 ((_seg) * PAGE_SIZE))
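/*
 * Example: with 11 segments per request, segment 2 of request slot 3
 * lives at user_vstart + (3 * 11 + 2) * PAGE_SIZE, i.e. page index 35
 * of the per-process data area.
 */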
static int blkif_reqs = MAX_PENDING_REQS;
module_param(blkif_reqs, int, 0);

static int mmap_pages = MMAP_PAGES;
#define RING_PAGES 1	/* BLKTAP - immediately before the mmap area, we
			 * have a bunch of pages reserved for shared memory
			 * rings. */
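/*
 * Resulting layout of each per-tapdisk mmap region (illustrative):
 *
 *   rings_vstart: [ shared ring page            ]  RING_PAGES
 *   user_vstart:  [ req 0: seg 0 .. seg 10      ]  \
 *                 [ req 1: seg 0 .. seg 10      ]   } MMAP_PAGES
 *                 [ ...                         ]  /
 */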
/* Data struct associated with each of the tapdisk devices */
typedef struct tap_blkif {
	struct vm_area_struct *vma;   /* Shared memory area              */
	unsigned long rings_vstart;   /* Kernel memory mapping           */
	unsigned long user_vstart;    /* User memory mapping             */
	unsigned long dev_inuse;      /* One process opens device at a time. */
	unsigned long dev_pending;    /* In process of being opened      */
	unsigned long ring_ok;        /* make this ring->state           */
	blkif_front_ring_t ufe_ring;  /* Rings up to user space.         */
	wait_queue_head_t wait;       /* for poll                        */
	unsigned long mode;           /* current switching mode          */
	int minor;                    /* Minor number for tapdisk device */
	pid_t pid;                    /* tapdisk process id              */
	enum { RUNNING, CLEANSHUTDOWN } status; /* Detect a clean userspace
						 * shutdown */
	unsigned long *idx_map;       /* Record the user ring id to kern
					 [req id, idx] tuple             */
	blkif_t *blkif;               /* Associate blkif with tapdev     */
	int sysfs_set;                /* Set if it has a class device.   */
} tap_blkif_t;
/* Data struct handed back to userspace for tapdisk device to VBD mapping */
typedef struct domid_translate {
	unsigned short domid;
	unsigned short busid;
} domid_translate_t;
static domid_translate_t translate_domid[MAX_TAP_DEV];
static tap_blkif_t *tapfds[MAX_TAP_DEV];
/* Run-time switchable: /sys/module/blktap/parameters/ */
static int log_stats = 0;
static int debug_lvl = 0;
module_param(log_stats, int, 0644);
module_param(debug_lvl, int, 0644);
/*
 * Each outstanding request that we've passed to the lower device layers has a
 * 'pending_req' allocated to it. Each buffer_head that completes decrements
 * the pendcnt towards zero. When it hits zero, the specified domain has a
 * response queued for it, with the saved 'id' passed back.
 */
typedef struct {
	blkif_t       *blkif;
	unsigned long  id;
	unsigned short mem_idx;
	int            nr_pages;
	unsigned short operation;
	int            status;
	struct list_head free_list;
} pending_req_t;
static pending_req_t *pending_reqs[MAX_PENDING_REQS];
static struct list_head pending_free;
static DEFINE_SPINLOCK(pending_free_lock);
static DECLARE_WAIT_QUEUE_HEAD(pending_free_wq);
static int alloc_pending_reqs;
typedef unsigned int PEND_RING_IDX;

static inline int MASK_PEND_IDX(int i)
{
	return (i & (MAX_PENDING_REQS-1));
}

static inline unsigned int RTN_PEND_IDX(pending_req_t *req, int idx)
{
	return (req - pending_reqs[idx]);
}

#define NR_PENDING_REQS (MAX_PENDING_REQS - pending_prod + pending_cons)
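/*
 * Note: MASK_PEND_IDX relies on MAX_PENDING_REQS being a power of two
 * (64 gives a mask of 0x3f), and RTN_PEND_IDX recovers a request's slot
 * by pointer arithmetic within its allocation batch.
 */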
#define BLKBACK_INVALID_HANDLE (~0)
static struct page **foreign_pages[MAX_DYNAMIC_MEM];
static inline unsigned long idx_to_kaddr(
	unsigned int mmap_idx, unsigned int req_idx, unsigned int sg_idx)
{
	unsigned int arr_idx = req_idx*BLKIF_MAX_SEGMENTS_PER_REQUEST + sg_idx;
	unsigned long pfn = page_to_pfn(foreign_pages[mmap_idx][arr_idx]);
	return (unsigned long)pfn_to_kaddr(pfn);
}
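/*
 * foreign_pages[] is indexed first by allocation batch (mmap_idx) and
 * then by request slot and segment, mirroring the MMAP_VADDR layout of
 * the user-space region.
 */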
static unsigned short mmap_alloc = 0;
static unsigned short mmap_lock = 0;
static unsigned short mmap_inuse = 0;
/******************************************************************
 * GRANT HANDLES
 */

/* When using grant tables to map a frame for device access then the
 * handle returned must be used to unmap the frame. This is needed to
 * drop the ref count on the frame.
 */
struct grant_handle_pair
{
	grant_handle_t kernel;
	grant_handle_t user;
};

static struct grant_handle_pair
    pending_grant_handles[MAX_DYNAMIC_MEM][MMAP_PAGES];
#define pending_handle(_id, _idx, _i) \
	(pending_grant_handles[_id][((_idx) * BLKIF_MAX_SEGMENTS_PER_REQUEST) \
	 + (_i)])
static int blktap_read_ufe_ring(tap_blkif_t *info); /* local prototypes */

#define BLKTAP_MINOR 0	/* /dev/xen/blktap has a dynamic major */
#define BLKTAP_DEV_DIR "/dev/xen"

static int blktap_major;
#define BLKTAP_IOCTL_KICK_FE		1
#define BLKTAP_IOCTL_KICK_BE		2 /* currently unused */
#define BLKTAP_IOCTL_SETMODE		3
#define BLKTAP_IOCTL_SENDPID		4
#define BLKTAP_IOCTL_NEWINTF		5
#define BLKTAP_IOCTL_MINOR		6
#define BLKTAP_IOCTL_MAJOR		7
#define BLKTAP_QUERY_ALLOC_REQS		8
#define BLKTAP_IOCTL_FREEINTF		9
#define BLKTAP_IOCTL_PRINT_IDXS		100
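/*
 * Illustrative user-space flow (a sketch only, not shipped code; the
 * open/SETMODE/mmap/poll/KICK_FE sequence follows the definitions in
 * this file, error handling omitted):
 *
 *	int fd = open("/dev/xen/blktap0", O_RDWR);
 *	ioctl(fd, BLKTAP_IOCTL_SETMODE, BLKTAP_MODE_INTERCEPT_FE);
 *	ring = mmap(NULL, (mmap_pages + RING_PAGES) * PAGE_SIZE,
 *		    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *	for (;;) {
 *		struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *		poll(&pfd, 1, -1);
 *		... consume requests from the ring, perform the I/O,
 *		    queue responses ...
 *		ioctl(fd, BLKTAP_IOCTL_KICK_FE, 0);
 *	}
 */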
/* blktap switching modes: (Set with BLKTAP_IOCTL_SETMODE)             */
#define BLKTAP_MODE_PASSTHROUGH  0x00000000  /* default */
#define BLKTAP_MODE_INTERCEPT_FE 0x00000001
#define BLKTAP_MODE_INTERCEPT_BE 0x00000002  /* unimp.  */

#define BLKTAP_MODE_INTERPOSE \
	(BLKTAP_MODE_INTERCEPT_FE | BLKTAP_MODE_INTERCEPT_BE)
static inline int BLKTAP_MODE_VALID(unsigned long arg)
{
	return ((arg == BLKTAP_MODE_PASSTHROUGH) ||
		(arg == BLKTAP_MODE_INTERCEPT_FE) ||
		(arg == BLKTAP_MODE_INTERPOSE));
}
/* Requests passing through the tap to userspace are re-assigned an ID.
 * We must record a mapping between the BE [IDX,ID] tuple and the userspace
 * ring ID.
 */

static inline unsigned long MAKE_ID(domid_t fe_dom, PEND_RING_IDX idx)
{
	return ((fe_dom << 16) | MASK_PEND_IDX(idx));
}

static inline PEND_RING_IDX ID_TO_IDX(unsigned long id)
{
	return (PEND_RING_IDX)(id & 0x0000ffff);
}

static inline int ID_TO_MIDX(unsigned long id)
{
	return (int)(id >> 16);
}
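/*
 * Encoding example: MAKE_ID(2, 5) yields 0x00020005; ID_TO_MIDX()
 * recovers 2 and ID_TO_IDX() recovers 5. Note that despite the fe_dom
 * parameter name, the caller in dispatch_rw_block_io() packs the
 * memory-batch index (mmap_idx) into the top 16 bits.
 */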
#define INVALID_REQ 0xdead0000
/* TODO: Convert to a free list */
static inline int GET_NEXT_REQ(unsigned long *idx_map)
{
	int i;

	for (i = 0; i < MAX_PENDING_REQS; i++)
		if (idx_map[i] == INVALID_REQ)
			return i;

	return INVALID_REQ;
}
#define BLKTAP_INVALID_HANDLE(_g) \
	(((_g->kernel) == 0xFFFF) && ((_g->user) == 0xFFFF))

#define BLKTAP_INVALIDATE_HANDLE(_g) do { \
	(_g)->kernel = 0xFFFF; (_g)->user = 0xFFFF; \
	} while (0)
/******************************************************************
 * BLKTAP VM OPS
 */

static struct page *blktap_nopage(struct vm_area_struct *vma,
				  unsigned long address,
				  int *type)
{
	/*
	 * if the page has not been mapped in by the driver then return
	 * NOPAGE_SIGBUS to the domain.
	 */
	return NOPAGE_SIGBUS;
}

struct vm_operations_struct blktap_vm_ops = {
	.nopage = blktap_nopage,
};
/******************************************************************
 * BLKTAP FILE OPS
 */

/* Function Declarations */
static int get_next_free_dev(void);
static int blktap_open(struct inode *inode, struct file *filp);
static int blktap_release(struct inode *inode, struct file *filp);
static int blktap_mmap(struct file *filp, struct vm_area_struct *vma);
static int blktap_ioctl(struct inode *inode, struct file *filp,
			unsigned int cmd, unsigned long arg);
static unsigned int blktap_poll(struct file *file, poll_table *wait);
static struct file_operations blktap_fops = {
	.owner   = THIS_MODULE,
	.poll    = blktap_poll,
	.ioctl   = blktap_ioctl,
	.open    = blktap_open,
	.release = blktap_release,
	.mmap    = blktap_mmap,
};
static int get_next_free_dev(void)
{
	tap_blkif_t *info;
	int i = 0, ret = -1;
	unsigned long flags;

	spin_lock_irqsave(&pending_free_lock, flags);

	while (i < MAX_TAP_DEV) {
		info = tapfds[i];
		if ( (tapfds[i] != NULL) && (info->dev_inuse == 0)
		     && (info->dev_pending == 0) ) {
			info->dev_pending = 1;
			ret = i;
			break;
		}
		i++;
	}

	spin_unlock_irqrestore(&pending_free_lock, flags);

	/*
	 * We are protected by having the dev_pending set.
	 * ret == -1 means no free device; don't index tapfds[] then.
	 */
	if ((ret != -1) && !tapfds[i]->sysfs_set && xen_class) {
		class_device_create(xen_class, NULL,
				    MKDEV(blktap_major, ret), NULL,
				    "blktap%d", ret);
		tapfds[i]->sysfs_set = 1;
	}

	return ret;
}
int dom_to_devid(domid_t domid, int xenbus_id, blkif_t *blkif)
{
	int i;

	for (i = 0; i < MAX_TAP_DEV; i++)
		if ( (translate_domid[i].domid == domid)
		     && (translate_domid[i].busid == xenbus_id) ) {
			tapfds[i]->blkif = blkif;
			tapfds[i]->status = RUNNING;
			return i;
		}

	return -1;
}
void signal_tapdisk(int idx)
{
	tap_blkif_t *info;
	struct task_struct *ptask;

	if ((idx <= 0) || (idx >= MAX_TAP_DEV))
		return;

	info = tapfds[idx];
	if (info->pid > 0) {
		ptask = find_task_by_pid(info->pid);
		if (ptask)
			info->status = CLEANSHUTDOWN;
	}
	info->blkif = NULL;

	return;
}
static int blktap_open(struct inode *inode, struct file *filp)
{
	blkif_sring_t *sring;
	int idx = iminor(inode) - BLKTAP_MINOR;
	tap_blkif_t *info;
	int i;

	if (tapfds[idx] == NULL) {
		WPRINTK("Unable to open device /dev/xen/blktap%d\n", idx);
		return -ENOMEM;
	}
	DPRINTK("Opening device /dev/xen/blktap%d\n", idx);

	info = tapfds[idx];

	/* Only one process can access a device at a time */
	if (test_and_set_bit(0, &info->dev_inuse))
		return -EBUSY;

	info->dev_pending = 0;

	/* Allocate the fe ring. */
	sring = (blkif_sring_t *)get_zeroed_page(GFP_KERNEL);
	if (sring == NULL)
		goto fail_nomem;

	SetPageReserved(virt_to_page(sring));

	SHARED_RING_INIT(sring);
	FRONT_RING_INIT(&info->ufe_ring, sring, PAGE_SIZE);

	filp->private_data = info;
	info->vma = NULL;

	info->idx_map = kmalloc(sizeof(unsigned long) * MAX_PENDING_REQS,
				GFP_KERNEL);

	init_waitqueue_head(&info->wait);
	for (i = 0; i < MAX_PENDING_REQS; i++)
		info->idx_map[i] = INVALID_REQ;

	DPRINTK("Tap open: device /dev/xen/blktap%d\n", idx);
	return 0;

 fail_nomem:
	return -ENOMEM;
}
static int blktap_release(struct inode *inode, struct file *filp)
{
	tap_blkif_t *info = filp->private_data;

	/* can this ever happen? - sdr */
	if (!info) {
		WPRINTK("Trying to free device that doesn't exist "
			"[/dev/xen/blktap%d]\n", iminor(inode) - BLKTAP_MINOR);
		return -EBADF;
	}
	info->dev_inuse = 0;
	DPRINTK("Freeing device [/dev/xen/blktap%d]\n", info->minor);

	/* Free the ring page. */
	ClearPageReserved(virt_to_page(info->ufe_ring.sring));
	free_page((unsigned long) info->ufe_ring.sring);

	/* Clear any active mappings and free foreign map table */
	if (info->vma) {
		zap_page_range(
			info->vma, info->vma->vm_start,
			info->vma->vm_end - info->vma->vm_start, NULL);
		kfree(info->vma->vm_private_data);
		info->vma = NULL;
	}

	if ( (info->status != CLEANSHUTDOWN) && (info->blkif != NULL) ) {
		kthread_stop(info->blkif->xenblkd);
		info->blkif->xenblkd = NULL;
		info->status = CLEANSHUTDOWN;
	}

	return 0;
}
/*
 * We need to map pages to user space in a way that will allow the block
 * subsystem set up direct IO to them. This couldn't be done before, because
 * there isn't really a sane way to translate a user virtual address down to a
 * physical address when the page belongs to another domain.
 *
 * My first approach was to map the page in to kernel memory, add an entry
 * for it in the physical frame list (using alloc_lomem_region as in blkback)
 * and then attempt to map that page up to user space. This is disallowed
 * by xen though, which realizes that we don't really own the machine frame
 * underlying the physical page.
 *
 * The new approach is to provide explicit support for this in xen linux.
 * The VMA now has a flag, VM_FOREIGN, to indicate that it contains pages
 * mapped from other vms. vma->vm_private_data is set up as a mapping
 * from pages to actual page structs. There is a new clause in get_user_pages
 * that does the right thing for this sort of mapping.
 */
static int blktap_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int size;
	struct page **map;
	int i;
	tap_blkif_t *info = filp->private_data;

	if (info == NULL) {
		WPRINTK("blktap: mmap, retrieving idx failed\n");
		return -ENOMEM;
	}

	vma->vm_flags |= VM_RESERVED;
	vma->vm_ops = &blktap_vm_ops;

	size = vma->vm_end - vma->vm_start;
	if (size != ((mmap_pages + RING_PAGES) << PAGE_SHIFT)) {
		WPRINTK("you _must_ map exactly %d pages!\n",
			mmap_pages + RING_PAGES);
		return -EAGAIN;
	}

	size >>= PAGE_SHIFT;
	info->rings_vstart = vma->vm_start;
	info->user_vstart  = info->rings_vstart + (RING_PAGES << PAGE_SHIFT);

	/* Map the ring pages to the start of the region and reserve it. */
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	if (remap_pfn_range(vma, vma->vm_start,
			    __pa(info->ufe_ring.sring) >> PAGE_SHIFT,
			    PAGE_SIZE, vma->vm_page_prot)) {
		WPRINTK("Mapping user ring failed!\n");
		goto fail;
	}

	/* Mark this VM as containing foreign pages, and set up mappings. */
	map = kzalloc(((vma->vm_end - vma->vm_start) >> PAGE_SHIFT)
		      * sizeof(struct page *),
		      GFP_KERNEL);
	if (map == NULL) {
		WPRINTK("Couldn't alloc VM_FOREIGN map.\n");
		goto fail;
	}

	for (i = 0; i < ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT); i++)
		map[i] = NULL;

	vma->vm_private_data = map;
	vma->vm_flags |= VM_FOREIGN;

	info->vma = vma;
	info->ring_ok = 1;
	return 0;

 fail:
	/* Clear any active mappings. */
	zap_page_range(vma, vma->vm_start,
		       vma->vm_end - vma->vm_start, NULL);

	return -ENOMEM;
}
static int blktap_ioctl(struct inode *inode, struct file *filp,
			unsigned int cmd, unsigned long arg)
{
	tap_blkif_t *info = filp->private_data;

	switch (cmd) {
	case BLKTAP_IOCTL_KICK_FE:
	{
		/* There are fe messages to process. */
		return blktap_read_ufe_ring(info);
	}
	case BLKTAP_IOCTL_SETMODE:
	{
		if (info) {
			if (BLKTAP_MODE_VALID(arg)) {
				info->mode = arg;
				/* XXX: may need to flush rings here. */
				DPRINTK("blktap: set mode to %lx\n", arg);
				return 0;
			}
		}
		return 0;
	}
	case BLKTAP_IOCTL_PRINT_IDXS:
	{
		if (info) {
			printk("User Rings: \n-----------\n");
			printk("UF: rsp_cons: %2d, req_prod_prv: %2d "
			       "| req_prod: %2d, rsp_prod: %2d\n",
			       info->ufe_ring.rsp_cons,
			       info->ufe_ring.req_prod_pvt,
			       info->ufe_ring.sring->req_prod,
			       info->ufe_ring.sring->rsp_prod);
		}
		return 0;
	}
	case BLKTAP_IOCTL_SENDPID:
	{
		if (info) {
			info->pid = (pid_t)arg;
			DPRINTK("blktap: pid received %d\n", info->pid);
		}
		return 0;
	}
	case BLKTAP_IOCTL_NEWINTF:
	{
		uint64_t val = (uint64_t)arg;
		domid_translate_t *tr = (domid_translate_t *)&val;
		int newdev;

		DPRINTK("NEWINTF Req for domid %d and bus id %d\n",
			tr->domid, tr->busid);
		newdev = get_next_free_dev();
		if (newdev < 1) {
			WPRINTK("Error initialising /dev/xen/blktap - "
				"No more devices\n");
			return -1;
		}
		translate_domid[newdev].domid = tr->domid;
		translate_domid[newdev].busid = tr->busid;
		return newdev;
	}
	case BLKTAP_IOCTL_FREEINTF:
	{
		unsigned long dev = arg;
		unsigned long flags;

		/* Looking at another device */
		info = NULL;

		if ( (dev > 0) && (dev < MAX_TAP_DEV) )
			info = tapfds[dev];

		spin_lock_irqsave(&pending_free_lock, flags);
		if ( (info != NULL) && (info->dev_pending) )
			info->dev_pending = 0;
		spin_unlock_irqrestore(&pending_free_lock, flags);

		return 0;
	}
	case BLKTAP_IOCTL_MINOR:
	{
		unsigned long dev = arg;
		int ret = -EINVAL;

		/* Looking at another device */
		info = NULL;

		if ( (dev > 0) && (dev < MAX_TAP_DEV) )
			info = tapfds[dev];
		if (info != NULL)
			ret = info->minor;

		return ret;
	}
	case BLKTAP_IOCTL_MAJOR:
		return blktap_major;

	case BLKTAP_QUERY_ALLOC_REQS:
	{
		WPRINTK("BLKTAP_QUERY_ALLOC_REQS ioctl: %d/%d\n",
			alloc_pending_reqs, blkif_reqs);
		/* Multiply before dividing; integer division would
		 * truncate any ratio under 100% to zero. */
		return (alloc_pending_reqs * 100) / blkif_reqs;
	}
	}
	return -ENOIOCTLCMD;
}
static unsigned int blktap_poll(struct file *filp, poll_table *wait)
{
	tap_blkif_t *info = filp->private_data;

	if (!info) {
		WPRINTK(" poll, retrieving idx failed\n");
		return 0;
	}

	/* do not work on the control device */
	if (!info->minor)
		return 0;

	poll_wait(filp, &info->wait, wait);
	if (info->ufe_ring.req_prod_pvt != info->ufe_ring.sring->req_prod) {
		RING_PUSH_REQUESTS(&info->ufe_ring);
		return POLLIN | POLLRDNORM;
	}

	return 0;
}
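/*
 * The request/response round trip, in brief: do_block_io_op() copies
 * frontend requests onto info->ufe_ring; a poll() on the tap device
 * publishes them (RING_PUSH_REQUESTS) and reports POLLIN; userspace
 * services the I/O and queues responses; its BLKTAP_IOCTL_KICK_FE then
 * drives blktap_read_ufe_ring() to unmap pages and complete requests.
 */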
void blktap_kick_user(int idx)
{
	tap_blkif_t *info;

	if ((idx < 0) || (idx >= MAX_TAP_DEV))
		return;

	info = tapfds[idx];
	if (info != NULL)
		wake_up_interruptible(&info->wait);

	return;
}
static int do_block_io_op(blkif_t *blkif);
static void dispatch_rw_block_io(blkif_t *blkif,
				 blkif_request_t *req,
				 pending_req_t *pending_req);
static void make_response(blkif_t *blkif, unsigned long id,
			  unsigned short op, int st);
/******************************************************************
 * misc small helpers
 */
static int req_increase(void)
{
	int i, j;

	if (mmap_alloc >= MAX_PENDING_REQS || mmap_lock)
		return -EINVAL;

	pending_reqs[mmap_alloc]  = kzalloc(sizeof(pending_req_t)
					    * blkif_reqs, GFP_KERNEL);
	foreign_pages[mmap_alloc] = alloc_empty_pages_and_pagevec(mmap_pages);

	if (!pending_reqs[mmap_alloc] || !foreign_pages[mmap_alloc])
		goto out_of_memory;

	DPRINTK("%s: reqs=%d, pages=%d\n",
		__FUNCTION__, blkif_reqs, mmap_pages);

	for (i = 0; i < MAX_PENDING_REQS; i++) {
		list_add_tail(&pending_reqs[mmap_alloc][i].free_list,
			      &pending_free);
		pending_reqs[mmap_alloc][i].mem_idx = mmap_alloc;
		for (j = 0; j < BLKIF_MAX_SEGMENTS_PER_REQUEST; j++)
			BLKTAP_INVALIDATE_HANDLE(&pending_handle(mmap_alloc,
								 i, j));
	}

	mmap_alloc++;
	DPRINTK("# MMAPs increased to %d\n", mmap_alloc);
	return 0;

 out_of_memory:
	free_empty_pages_and_pagevec(foreign_pages[mmap_alloc], mmap_pages);
	kfree(pending_reqs[mmap_alloc]);
	WPRINTK("%s: out of memory\n", __FUNCTION__);
	return -ENOMEM;
}
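/*
 * Each successful req_increase() batch therefore pins blkif_reqs
 * (64 by default) pending_req_t structs plus MMAP_PAGES (704) empty
 * pages from the balloon for later grant mappings; blkif_init() below
 * requests two such batches up front.
 */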
static void mmap_req_del(int mmap)
{
	BUG_ON(!spin_is_locked(&pending_free_lock));

	kfree(pending_reqs[mmap]);
	pending_reqs[mmap] = NULL;

	free_empty_pages_and_pagevec(foreign_pages[mmap], mmap_pages);
	foreign_pages[mmap] = NULL;

	mmap_lock = 0;
	DPRINTK("# MMAPs decreased to %d\n", mmap_alloc);
	mmap_alloc--;
}
static pending_req_t *alloc_req(void)
{
	pending_req_t *req = NULL;
	unsigned long flags;

	spin_lock_irqsave(&pending_free_lock, flags);

	if (!list_empty(&pending_free)) {
		req = list_entry(pending_free.next, pending_req_t, free_list);
		list_del(&req->free_list);
	}

	if (req)
		alloc_pending_reqs++;

	spin_unlock_irqrestore(&pending_free_lock, flags);

	return req;
}
static void free_req(pending_req_t *req)
{
	unsigned long flags;
	int was_empty;

	spin_lock_irqsave(&pending_free_lock, flags);

	alloc_pending_reqs--;

	if (mmap_lock && (req->mem_idx == mmap_alloc-1)) {
		mmap_inuse--;
		if (mmap_inuse == 0)
			mmap_req_del(mmap_alloc-1);
		spin_unlock_irqrestore(&pending_free_lock, flags);
		return;
	}

	was_empty = list_empty(&pending_free);
	list_add(&req->free_list, &pending_free);

	spin_unlock_irqrestore(&pending_free_lock, flags);

	if (was_empty)
		wake_up(&pending_free_wq);
}
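/*
 * Shrink protocol: while mmap_lock is set the top allocation batch is
 * being retired, so its requests are not returned to the free list;
 * the final free_req() for that batch (mmap_inuse reaching 0) calls
 * mmap_req_del() to release the whole batch under the same lock.
 */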
static void fast_flush_area(pending_req_t *req, int k_idx, int u_idx,
			    int tapidx)
{
	struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST*2];
	unsigned int i, invcount = 0;
	struct grant_handle_pair *khandle;
	uint64_t ptep;
	int ret, mmap_idx;
	unsigned long kvaddr, uvaddr;
	tap_blkif_t *info = tapfds[tapidx];

	if (info == NULL) {
		WPRINTK("fast_flush: Couldn't get info!\n");
		return;
	}
	mmap_idx = req->mem_idx;

	for (i = 0; i < req->nr_pages; i++) {
		kvaddr = idx_to_kaddr(mmap_idx, k_idx, i);
		uvaddr = MMAP_VADDR(info->user_vstart, u_idx, i);

		khandle = &pending_handle(mmap_idx, k_idx, i);

		if (khandle->kernel != 0xFFFF) {
			gnttab_set_unmap_op(&unmap[invcount],
					    idx_to_kaddr(mmap_idx, k_idx, i),
					    GNTMAP_host_map, khandle->kernel);
			invcount++;
		}

		if (khandle->user != 0xFFFF) {
			if (create_lookup_pte_addr(
				info->vma->vm_mm,
				MMAP_VADDR(info->user_vstart, u_idx, i),
				&ptep) != 0) {
				WPRINTK("Couldn't get a pte addr!\n");
				return;
			}

			gnttab_set_unmap_op(&unmap[invcount], ptep,
					    GNTMAP_host_map |
					    GNTMAP_application_map |
					    GNTMAP_contains_pte,
					    khandle->user);
			invcount++;
		}

		BLKTAP_INVALIDATE_HANDLE(khandle);
	}

	ret = HYPERVISOR_grant_table_op(
		GNTTABOP_unmap_grant_ref, unmap, invcount);
	BUG_ON(ret);

	if (info->vma != NULL)
		zap_page_range(info->vma,
			       MMAP_VADDR(info->user_vstart, u_idx, 0),
			       req->nr_pages << PAGE_SHIFT, NULL);
}
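/*
 * Note that each segment can hold up to two grant handles -- one for
 * the kernel-side mapping and one written into the user PTE -- which is
 * why the unmap array above is sized BLKIF_MAX_SEGMENTS_PER_REQUEST*2
 * and why zap_page_range() must still scrub the user address range.
 */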
/******************************************************************
 * SCHEDULER FUNCTIONS
 */

static void print_stats(blkif_t *blkif)
{
	printk(KERN_DEBUG "%s: oo %3d  |  rd %4d  |  wr %4d\n",
	       current->comm, blkif->st_oo_req,
	       blkif->st_rd_req, blkif->st_wr_req);
	blkif->st_print = jiffies + msecs_to_jiffies(10 * 1000);
	blkif->st_rd_req = 0;
	blkif->st_wr_req = 0;
	blkif->st_oo_req = 0;
}
int tap_blkif_schedule(void *arg)
{
	blkif_t *blkif = arg;

	blkif_get(blkif);

	if (debug_lvl)
		printk(KERN_DEBUG "%s: started\n", current->comm);

	while (!kthread_should_stop()) {
		wait_event_interruptible(
			blkif->wq,
			blkif->waiting_reqs || kthread_should_stop());
		wait_event_interruptible(
			pending_free_wq,
			!list_empty(&pending_free) || kthread_should_stop());

		blkif->waiting_reqs = 0;
		smp_mb(); /* clear flag *before* checking for work */

		if (do_block_io_op(blkif))
			blkif->waiting_reqs = 1;

		if (log_stats && time_after(jiffies, blkif->st_print))
			print_stats(blkif);
	}

	if (log_stats)
		print_stats(blkif);
	if (debug_lvl)
		printk(KERN_DEBUG "%s: exiting\n", current->comm);

	blkif->xenblkd = NULL;
	blkif_put(blkif);

	return 0;
}
/******************************************************************
 * COMPLETION CALLBACK -- Called by user level ioctl()
 */

static int blktap_read_ufe_ring(tap_blkif_t *info)
{
	/* This is called to read responses from the UFE ring. */
	RING_IDX i, j, rp;
	blkif_response_t *resp;
	blkif_t *blkif = NULL;
	int pending_idx, usr_idx, mmap_idx;
	pending_req_t *pending_req;

	if (!info)
		return 0;

	/* We currently only forward packets in INTERCEPT_FE mode. */
	if (!(info->mode & BLKTAP_MODE_INTERCEPT_FE))
		return 0;

	/* for each outstanding message on the UFE ring */
	rp = info->ufe_ring.sring->rsp_prod;
	rmb(); /* ensure we see all responses up to 'rp' */

	for (i = info->ufe_ring.rsp_cons; i != rp; i++) {
		resp = RING_GET_RESPONSE(&info->ufe_ring, i);
		++info->ufe_ring.rsp_cons;

		/* retrieve [usr_idx] to [mmap_idx, pending_idx] mapping */
		usr_idx = (int)resp->id;
		pending_idx = MASK_PEND_IDX(ID_TO_IDX(info->idx_map[usr_idx]));
		mmap_idx = ID_TO_MIDX(info->idx_map[usr_idx]);

		if ( (mmap_idx >= mmap_alloc) ||
		     (ID_TO_IDX(info->idx_map[usr_idx]) >= MAX_PENDING_REQS) )
			WPRINTK("Incorrect req map "
				"[%d], internal map [%d,%d (%d)]\n",
				usr_idx, mmap_idx,
				ID_TO_IDX(info->idx_map[usr_idx]),
				MASK_PEND_IDX(
					ID_TO_IDX(info->idx_map[usr_idx])));

		pending_req = &pending_reqs[mmap_idx][pending_idx];
		blkif = pending_req->blkif;

		for (j = 0; j < pending_req->nr_pages; j++) {
			unsigned long kvaddr, uvaddr;
			struct page **map = info->vma->vm_private_data;
			struct page *pg;
			int offset;

			uvaddr = MMAP_VADDR(info->user_vstart, usr_idx, j);
			kvaddr = idx_to_kaddr(mmap_idx, pending_idx, j);

			pg = pfn_to_page(__pa(kvaddr) >> PAGE_SHIFT);
			ClearPageReserved(pg);
			offset = (uvaddr - info->vma->vm_start)
				>> PAGE_SHIFT;
			map[offset] = NULL;
		}
		fast_flush_area(pending_req, pending_idx, usr_idx,
				info->minor);
		make_response(blkif, pending_req->id, resp->operation,
			      resp->status);
		info->idx_map[usr_idx] = INVALID_REQ;
		blkif_put(pending_req->blkif);
		free_req(pending_req);
	}

	return 0;
}
/******************************************************************************
 * NOTIFICATION FROM GUEST OS.
 */

static void blkif_notify_work(blkif_t *blkif)
{
	blkif->waiting_reqs = 1;
	wake_up(&blkif->wq);
}

irqreturn_t tap_blkif_be_int(int irq, void *dev_id)
{
	blkif_notify_work(dev_id);
	return IRQ_HANDLED;
}
/******************************************************************
 * DOWNWARD CALLS -- These interface with the block-device layer proper.
 */
static int print_dbug = 1;
static int do_block_io_op(blkif_t *blkif)
{
	blkif_back_ring_t *blk_ring = &blkif->blk_ring;
	blkif_request_t req;
	pending_req_t *pending_req;
	RING_IDX rc, rp;
	int more_to_do = 0;
	tap_blkif_t *info;

	rc = blk_ring->req_cons;
	rp = blk_ring->sring->req_prod;
	rmb(); /* Ensure we see queued requests up to 'rp'. */

	/* Check blkif has a corresponding UE ring */
	if (blkif->dev_num == -1) {
		if (print_dbug) {
			WPRINTK("Corresponding UE "
				"ring does not exist!\n");
			print_dbug = 0; /* We only print this message once */
		}
		return 0;
	}

	info = tapfds[blkif->dev_num];
	if (info == NULL || !info->dev_inuse) {
		if (print_dbug) {
			WPRINTK("Can't get UE info!\n");
			print_dbug = 0;
		}
		return 0;
	}

	while (rc != rp) {
		if (RING_FULL(&info->ufe_ring)) {
			WPRINTK("RING_FULL! More to do\n");
			more_to_do = 1;
			break;
		}

		if (RING_REQUEST_CONS_OVERFLOW(blk_ring, rc)) {
			WPRINTK("RING_REQUEST_CONS_OVERFLOW!"
				" More to do\n");
			more_to_do = 1;
			break;
		}

		pending_req = alloc_req();
		if (NULL == pending_req) {
			blkif->st_oo_req++;
			more_to_do = 1;
			break;
		}

		memcpy(&req, RING_GET_REQUEST(blk_ring, rc), sizeof(req));
		blk_ring->req_cons = ++rc; /* before make_response() */

		switch (req.operation) {
		case BLKIF_OP_READ:
			blkif->st_rd_req++;
			dispatch_rw_block_io(blkif, &req, pending_req);
			break;

		case BLKIF_OP_WRITE:
			blkif->st_wr_req++;
			dispatch_rw_block_io(blkif, &req, pending_req);
			break;

		default:
			WPRINTK("unknown operation [%d]\n",
				req.operation);
			make_response(blkif, req.id, req.operation,
				      BLKIF_RSP_ERROR);
			free_req(pending_req);
			break;
		}
	}

	blktap_kick_user(blkif->dev_num);

	return more_to_do;
}
static void dispatch_rw_block_io(blkif_t *blkif,
				 blkif_request_t *req,
				 pending_req_t *pending_req)
{
	int op, operation = (req->operation == BLKIF_OP_WRITE) ? WRITE : READ;
	struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST*2];
	unsigned int nseg;
	int ret, i;
	tap_blkif_t *info = tapfds[blkif->dev_num];
	uint64_t sector;

	blkif_request_t *target;
	int pending_idx = RTN_PEND_IDX(pending_req, pending_req->mem_idx);
	int usr_idx = GET_NEXT_REQ(info->idx_map);
	uint16_t mmap_idx = pending_req->mem_idx;

	/* Check we have space on user ring - should never fail. */
	if (usr_idx == INVALID_REQ)
		goto fail_flush;

	/* Check that number of segments is sane. */
	nseg = req->nr_segments;
	if ( unlikely(nseg == 0) ||
	     unlikely(nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST) ) {
		WPRINTK("Bad number of segments in request (%d)\n", nseg);
		goto fail_response;
	}

	/* Make sure userspace is ready. */
	if (!info->ring_ok) {
		WPRINTK("blktap: ring not ready for requests!\n");
		goto fail_response;
	}

	if (RING_FULL(&info->ufe_ring)) {
		WPRINTK("blktap: fe_ring is full, can't add "
			"IO Request will be dropped. %d %d\n",
			RING_SIZE(&info->ufe_ring),
			RING_SIZE(&blkif->blk_ring));
		goto fail_response;
	}

	pending_req->blkif     = blkif;
	pending_req->id        = req->id;
	pending_req->operation = operation;
	pending_req->status    = BLKIF_RSP_OKAY;
	pending_req->nr_pages  = nseg;

	op = 0;
	for (i = 0; i < nseg; i++) {
		unsigned long uvaddr;
		unsigned long kvaddr;
		uint64_t ptep;
		uint32_t flags;
		struct page *page;

		uvaddr = MMAP_VADDR(info->user_vstart, usr_idx, i);
		kvaddr = idx_to_kaddr(mmap_idx, pending_idx, i);
		page = virt_to_page(kvaddr);

		sector = req->sector_number + (8 * i); /* 8 sectors per 4K page */
		if ( (blkif->sectors > 0) && (sector >= blkif->sectors) ) {
			WPRINTK("BLKTAP: Sector request greater "
				"than size\n");
			WPRINTK("BLKTAP: %s request sector "
				"[%llu,%llu], Total [%llu]\n",
				(req->operation ==
				 BLKIF_OP_WRITE ? "WRITE" : "READ"),
				(long long unsigned) sector,
				(long long unsigned) sector >> 9,
				(long long unsigned) blkif->sectors);
		}

		flags = GNTMAP_host_map;
		if (operation == WRITE)
			flags |= GNTMAP_readonly;
		gnttab_set_map_op(&map[op], kvaddr, flags,
				  req->seg[i].gref, blkif->domid);
		op++;

		/* Now map it to user. */
		ret = create_lookup_pte_addr(info->vma->vm_mm,
					     uvaddr, &ptep);
		if (ret) {
			WPRINTK("Couldn't get a pte addr!\n");
			goto fail_flush;
		}

		flags = GNTMAP_host_map | GNTMAP_application_map
			| GNTMAP_contains_pte;
		if (operation == WRITE)
			flags |= GNTMAP_readonly;
		gnttab_set_map_op(&map[op], ptep, flags,
				  req->seg[i].gref, blkif->domid);
		op++;
	}

	ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map, op);
	BUG_ON(ret);

	for (i = 0; i < (nseg*2); i += 2) {
		unsigned long uvaddr;
		unsigned long kvaddr;
		unsigned long offset;
		struct page *pg;

		uvaddr = MMAP_VADDR(info->user_vstart, usr_idx, i/2);
		kvaddr = idx_to_kaddr(mmap_idx, pending_idx, i/2);

		if (unlikely(map[i].status != 0)) {
			WPRINTK("invalid kernel buffer -- "
				"could not remap it\n");
			ret |= 1;
			map[i].handle = 0xFFFF;
		}

		if (unlikely(map[i+1].status != 0)) {
			WPRINTK("invalid user buffer -- "
				"could not remap it\n");
			ret |= 1;
			map[i+1].handle = 0xFFFF;
		}

		pending_handle(mmap_idx, pending_idx, i/2).kernel
			= map[i].handle;
		pending_handle(mmap_idx, pending_idx, i/2).user
			= map[i+1].handle;

		if (ret)
			continue;

		set_phys_to_machine(__pa(kvaddr) >> PAGE_SHIFT,
				    FOREIGN_FRAME(map[i].dev_bus_addr
						  >> PAGE_SHIFT));
		offset = (uvaddr - info->vma->vm_start) >> PAGE_SHIFT;
		pg = pfn_to_page(__pa(kvaddr) >> PAGE_SHIFT);
		((struct page **)info->vma->vm_private_data)[offset] =
			pg;
	}

	if (ret)
		goto fail_flush;

	/* Mark mapped pages as reserved: */
	for (i = 0; i < req->nr_segments; i++) {
		unsigned long kvaddr;
		struct page *pg;

		kvaddr = idx_to_kaddr(mmap_idx, pending_idx, i);
		pg = pfn_to_page(__pa(kvaddr) >> PAGE_SHIFT);
		SetPageReserved(pg);
	}

	/* record [mmap_idx,pending_idx] to [usr_idx] mapping */
	info->idx_map[usr_idx] = MAKE_ID(mmap_idx, pending_idx);

	blkif_get(blkif); /* matched by blkif_put() in blktap_read_ufe_ring() */
	/* Finally, write the request message to the user ring. */
	target = RING_GET_REQUEST(&info->ufe_ring,
				  info->ufe_ring.req_prod_pvt);
	memcpy(target, req, sizeof(*req));
	target->id = usr_idx;
	info->ufe_ring.req_prod_pvt++;
	return;

 fail_flush:
	WPRINTK("Reached Fail_flush\n");
	fast_flush_area(pending_req, pending_idx, usr_idx, blkif->dev_num);
 fail_response:
	make_response(blkif, req->id, req->operation, BLKIF_RSP_ERROR);
	free_req(pending_req);
}
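/*
 * Each segment is mapped twice above: map[2*i] grants the frame into
 * the kernel address space (so the driver can manipulate it) and
 * map[2*i+1] installs it directly into the tapdisk process's PTE via
 * GNTMAP_contains_pte, giving userspace zero-copy access to the
 * frontend's pages.
 */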
/******************************************************************
 * MISCELLANEOUS SETUP / TEARDOWN / DEBUGGING
 */

static void make_response(blkif_t *blkif, unsigned long id,
			  unsigned short op, int st)
{
	blkif_response_t *resp;
	unsigned long flags;
	blkif_back_ring_t *blk_ring = &blkif->blk_ring;
	int more_to_do = 0;
	int notify;

	spin_lock_irqsave(&blkif->blk_ring_lock, flags);
	/* Place on the response ring for the relevant domain. */
	resp = RING_GET_RESPONSE(blk_ring, blk_ring->rsp_prod_pvt);
	resp->id        = id;
	resp->operation = op;
	resp->status    = st;
	blk_ring->rsp_prod_pvt++;
	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(blk_ring, notify);

	if (blk_ring->rsp_prod_pvt == blk_ring->req_cons) {
		/*
		 * Tail check for pending requests. Allows frontend to avoid
		 * notifications if requests are already in flight (lower
		 * overheads and promotes batching).
		 */
		RING_FINAL_CHECK_FOR_REQUESTS(blk_ring, more_to_do);
	} else if (RING_HAS_UNCONSUMED_REQUESTS(blk_ring)) {
		more_to_do = 1;
	}

	spin_unlock_irqrestore(&blkif->blk_ring_lock, flags);
	if (more_to_do)
		blkif_notify_work(blkif);
	if (notify)
		notify_remote_via_irq(blkif->irq);
}
static int __init blkif_init(void)
{
	int i, ret;
	tap_blkif_t *info;

	if (!is_running_on_xen())
		return -ENODEV;

	INIT_LIST_HEAD(&pending_free);
	for (i = 0; i < 2; i++) {
		ret = req_increase();
		if (ret)
			break;
	}
	if (i == 0)
		return ret;

	tap_blkif_interface_init();

	alloc_pending_reqs = 0;

	tap_blkif_xenbus_init();

	/* Create the blktap devices, but do not map memory or waitqueue */
	for (i = 0; i < MAX_TAP_DEV; i++)
		translate_domid[i].domid = 0xFFFF;

	/* Dynamically allocate a major for this device */
	ret = register_chrdev(0, "blktap", &blktap_fops);
	if (ret < 0) {
		WPRINTK("Couldn't register /dev/xen/blktap\n");
		return -ENOMEM;
	}
	blktap_major = ret;

	for (i = 0; i < MAX_TAP_DEV; i++) {
		info = tapfds[i] = kzalloc(sizeof(tap_blkif_t), GFP_KERNEL);
		if (tapfds[i] == NULL)
			return -ENOMEM;
		info->minor = i;
		info->pid = 0;
		info->blkif = NULL;

		info->dev_pending = info->dev_inuse = 0;

		DPRINTK("Created misc_dev [/dev/xen/blktap%d]\n", i);
	}

	/* Make sure the xen class exists */
	if (!setup_xen_class()) {
		/*
		 * This will allow udev to create the blktap ctrl device.
		 * We only want to create blktap0 first. We don't want
		 * to flood the sysfs system with needless blktap devices.
		 * We only create the device when a request of a new device is
		 * made.
		 */
		class_device_create(xen_class, NULL,
				    MKDEV(blktap_major, 0), NULL,
				    "blktap0");
		tapfds[0]->sysfs_set = 1;
	} else {
		/* this is bad, but not fatal */
		WPRINTK("blktap: sysfs xen_class not created\n");
	}

	DPRINTK("Blktap device successfully created\n");

	return 0;
}

module_init(blkif_init);

MODULE_LICENSE("Dual BSD/GPL");