+ pending_reqs[mmap_alloc] = kzalloc(sizeof(pending_req_t) *
+ blkif_reqs, GFP_KERNEL);
+ pending_addrs[mmap_alloc] = kzalloc(sizeof(unsigned long) *
+ mmap_pages, GFP_KERNEL);
+
+ ret = -ENOMEM;
+ if (!pending_reqs[mmap_alloc] || !pending_addrs[mmap_alloc]) {
+ kfree(pending_reqs[mmap_alloc]);
+ kfree(pending_addrs[mmap_alloc]);
+ WPRINTK("%s: out of memory\n", __FUNCTION__);
+ goto done;
+ }
+
+ ret = 0;
+
+ DPRINTK("%s: reqs=%d, pages=%d, mmap_vstart=0x%lx\n",
+ __FUNCTION__, blkif_reqs, mmap_pages,
+ mmap_start[mmap_alloc].start);
+
+ BUG_ON(mmap_start[mmap_alloc].start == 0);
+
+ for (i = 0; i < mmap_pages; i++)
+ pending_addrs[mmap_alloc][i] =
+ mmap_start[mmap_alloc].start + (i << PAGE_SHIFT);
+
+ for (i = 0; i < MAX_PENDING_REQS ; i++) {
+ list_add_tail(&pending_reqs[mmap_alloc][i].free_list,
+ &pending_free);
+ pending_reqs[mmap_alloc][i].mem_idx = mmap_alloc;
+ for (j = 0; j < BLKIF_MAX_SEGMENTS_PER_REQUEST; j++)
+ BLKTAP_INVALIDATE_HANDLE(&pending_handle(mmap_alloc,
+ i, j));
+ }
+
+ mmap_alloc++;
+ DPRINTK("# MMAPs increased to %d\n",mmap_alloc);
+ done:
+ spin_unlock_irqrestore(&pending_free_lock, flags);
+ return ret;
+}
+
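+/*
+ * Tear down one batch of pending-request state: free the request and
+ * address arrays, unpin the reserved pages and hand them back to the
+ * balloon driver, then clear the shrink lock and drop the batch count.
+ * The caller must already hold pending_free_lock.
+ */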
+static void mmap_req_del(int mmap)
+{
+ int i;
+ struct page *page;
+
+ /*Spinlock already acquired*/
+ kfree(pending_reqs[mmap]);
+ kfree(pending_addrs[mmap]);
+
+#ifdef __ia64__
+ /*Not sure what goes here yet!*/
+#else
+
+ /* Unpin all of the pages. */
+ page = mmap_start[mmap].mpage;
+ for (i=0; i<mmap_pages; i++)
+ put_page(&page[i]);
+
+ balloon_dealloc_empty_page_range(mmap_start[mmap].mpage, mmap_pages);
+#endif
+
+ mmap_lock = 0;
+ mmap_alloc--;
+ DPRINTK("# MMAPs decreased to %d\n",mmap_alloc);
+}
+
+/*N.B. Currently unused - will be accessed via sysfs*/
+static void req_decrease(void)
+{
+ pending_req_t *req;
+ int i;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pending_free_lock, flags);
+
+ DPRINTK("Req decrease called.\n");
+ if (mmap_lock || mmap_alloc == 1)
+ goto done;
+
+ mmap_lock = 1;
+ mmap_inuse = MAX_PENDING_REQS;
+
+ /*Go through reqs and remove any that aren't in use*/
+ for (i = 0; i < MAX_PENDING_REQS ; i++) {
+ req = &pending_reqs[mmap_alloc-1][i];
+ if (req->inuse == 0) {
+ list_del(&req->free_list);
+ mmap_inuse--;
+ }
+ }
+ if (mmap_inuse == 0) mmap_req_del(mmap_alloc-1);
+ done:
+ spin_unlock_irqrestore(&pending_free_lock, flags);
+ return;
+}
+
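+/*
+ * Take a pending_req_t off the global free list, marking it in use.
+ * Returns NULL if no request is currently available.
+ */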
+static pending_req_t* alloc_req(void)
+{
+ pending_req_t *req = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pending_free_lock, flags);
+
+ if (!list_empty(&pending_free)) {
+ req = list_entry(pending_free.next, pending_req_t, free_list);
+ list_del(&req->free_list);
+ }
+
+ if (req) {
+ req->inuse = 1;
+ alloc_pending_reqs++;
+ }
+ spin_unlock_irqrestore(&pending_free_lock, flags);
+
+ return req;
+}
+
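+/*
+ * Return a request to the free list.  If a shrink is in progress and
+ * this request belongs to the batch being retired, tear the batch down
+ * once its last in-flight request completes; otherwise wake any
+ * waiters if the free list was previously empty.
+ */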
+static void free_req(pending_req_t *req)
+{
+ unsigned long flags;
+ int was_empty;
+
+ spin_lock_irqsave(&pending_free_lock, flags);
+
+ alloc_pending_reqs--;
+ req->inuse = 0;
+ if (mmap_lock && (req->mem_idx == mmap_alloc-1)) {
+ mmap_inuse--;
+ if (mmap_inuse == 0) mmap_req_del(mmap_alloc-1);
+ spin_unlock_irqrestore(&pending_free_lock, flags);
+ return;
+ }
+ was_empty = list_empty(&pending_free);
+ list_add(&req->free_list, &pending_free);
+
+ spin_unlock_irqrestore(&pending_free_lock, flags);
+
+ if (was_empty)
+ wake_up(&pending_free_wq);
+}
+
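+/*
+ * Unmap all pages of a request: for each page, one unmap op for the
+ * kernel grant mapping and one for the user-space PTE, issued as a
+ * single batched grant-table call, then zap the corresponding range
+ * of the user VMA.
+ */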
+static void fast_flush_area(pending_req_t *req, int k_idx, int u_idx,
+ int tapidx)
+{
+ struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST*2];
+ unsigned int i, invcount = 0;
+ struct grant_handle_pair *khandle;
+ uint64_t ptep;
+ int ret, mmap_idx;
+ unsigned long kvaddr, uvaddr;
+
+ tap_blkif_t *info = tapfds[tapidx];
+
+ if (info == NULL) {
+ WPRINTK("fast_flush: Couldn't get info!\n");
+ return;
+ }
+ mmap_idx = req->mem_idx;
+
+ for (i = 0; i < req->nr_pages; i++) {
+ kvaddr = MMAP_VADDR(mmap_start[mmap_idx].start, k_idx, i);
+ uvaddr = MMAP_VADDR(info->user_vstart, u_idx, i);
+
+ khandle = &pending_handle(mmap_idx, k_idx, i);
+ if (BLKTAP_INVALID_HANDLE(khandle)) {
+ WPRINTK("BLKTAP_INVALID_HANDLE\n");
+ continue;
+ }
+ gnttab_set_unmap_op(&unmap[invcount],
+ MMAP_VADDR(mmap_start[mmap_idx].start, k_idx, i),
+ GNTMAP_host_map, khandle->kernel);
+ invcount++;
+
+ if (create_lookup_pte_addr(
+ info->vma->vm_mm,
+ MMAP_VADDR(info->user_vstart, u_idx, i),
+ &ptep) !=0) {
+ WPRINTK("Couldn't get a pte addr!\n");
+ return;
+ }
+
+ gnttab_set_unmap_op(&unmap[invcount],
+ ptep, GNTMAP_host_map,
+ khandle->user);
+ invcount++;
+
+ BLKTAP_INVALIDATE_HANDLE(khandle);
+ }
+ ret = HYPERVISOR_grant_table_op(
+ GNTTABOP_unmap_grant_ref, unmap, invcount);
+ BUG_ON(ret);
+
+ if (info->vma != NULL)
+ zap_page_range(info->vma,
+ MMAP_VADDR(info->user_vstart, u_idx, 0),
+ req->nr_pages << PAGE_SHIFT, NULL);
+}
+
+/******************************************************************
+ * SCHEDULER FUNCTIONS
+ */
+
+static void print_stats(blkif_t *blkif)
+{
+ printk(KERN_DEBUG "%s: oo %3d | rd %4d | wr %4d\n",
+ current->comm, blkif->st_oo_req,
+ blkif->st_rd_req, blkif->st_wr_req);
+ blkif->st_print = jiffies + msecs_to_jiffies(10 * 1000);
+ blkif->st_rd_req = 0;
+ blkif->st_wr_req = 0;
+ blkif->st_oo_req = 0;
+}
+
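+/*
+ * Per-blkif scheduler thread.  Sleeps until the frontend has queued
+ * requests and a free pending_req is available, then forwards work to
+ * the userspace tap process via do_block_io_op(), printing stats
+ * periodically if enabled.
+ */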
+int tap_blkif_schedule(void *arg)
+{
+ blkif_t *blkif = arg;
+
+ blkif_get(blkif);
+
+ if (debug_lvl)
+ printk(KERN_DEBUG "%s: started\n", current->comm);
+
+ while (!kthread_should_stop()) {
+ wait_event_interruptible(
+ blkif->wq,
+ blkif->waiting_reqs || kthread_should_stop());
+ wait_event_interruptible(
+ pending_free_wq,
+ !list_empty(&pending_free) || kthread_should_stop());
+
+ blkif->waiting_reqs = 0;
+ smp_mb(); /* clear flag *before* checking for work */
+
+ if (do_block_io_op(blkif))
+ blkif->waiting_reqs = 1;
+
+ if (log_stats && time_after(jiffies, blkif->st_print))
+ print_stats(blkif);
+ }
+
+ if (log_stats)
+ print_stats(blkif);
+ if (debug_lvl)
+ printk(KERN_DEBUG "%s: exiting\n", current->comm);
+
+ blkif->xenblkd = NULL;
+ blkif_put(blkif);
+
+ return 0;
+}
+
+/******************************************************************
+ * COMPLETION CALLBACK -- Called by user level ioctl()
+ */
+
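+/*
+ * For each response the tap process has placed on the UFE ring,
+ * recover the internal [mmap_idx, pending_idx] pairing from idx_map,
+ * drop the user and kernel page mappings, and forward the response to
+ * the guest via make_response().
+ */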
+static int blktap_read_ufe_ring(int idx)
+{
+ /* This is called to read responses from the UFE ring. */
+ RING_IDX i, j, rp;
+ blkif_response_t *resp;
+ blkif_t *blkif=NULL;
+ int pending_idx, usr_idx, mmap_idx;
+ pending_req_t *pending_req;
+ tap_blkif_t *info;
+
+ info = tapfds[idx];
+ if (info == NULL) {
+ return 0;
+ }
+
+ /* We currently only forward packets in INTERCEPT_FE mode. */
+ if (!(info->mode & BLKTAP_MODE_INTERCEPT_FE))
+ return 0;
+
+ /* for each outstanding message on the UFEring */
+ rp = info->ufe_ring.sring->rsp_prod;
+ rmb();
+
+ for (i = info->ufe_ring.rsp_cons; i != rp; i++) {
+ resp = RING_GET_RESPONSE(&info->ufe_ring, i);
+ ++info->ufe_ring.rsp_cons;
+
+ /*retrieve [usr_idx] to [mmap_idx,pending_idx] mapping*/
+ usr_idx = (int)resp->id;
+ pending_idx = MASK_PEND_IDX(ID_TO_IDX(info->idx_map[usr_idx]));
+ mmap_idx = ID_TO_MIDX(info->idx_map[usr_idx]);
+
+ if ( (mmap_idx >= mmap_alloc) ||
+ (ID_TO_IDX(info->idx_map[usr_idx]) >= MAX_PENDING_REQS) )
+ WPRINTK("Incorrect req map"
+ "[%d], internal map [%d,%d (%d)]\n",
+ usr_idx, mmap_idx,
+ ID_TO_IDX(info->idx_map[usr_idx]),
+ MASK_PEND_IDX(
+ ID_TO_IDX(info->idx_map[usr_idx])));
+
+ pending_req = &pending_reqs[mmap_idx][pending_idx];
+ blkif = pending_req->blkif;
+
+ for (j = 0; j < pending_req->nr_pages; j++) {
+
+ unsigned long kvaddr, uvaddr;
+ struct page **map = info->vma->vm_private_data;
+ struct page *pg;
+ int offset;
+
+ uvaddr = MMAP_VADDR(info->user_vstart, usr_idx, j);
+ kvaddr = MMAP_VADDR(mmap_start[mmap_idx].start,
+ pending_idx, j);
+
+ pg = pfn_to_page(__pa(kvaddr) >> PAGE_SHIFT);
+ ClearPageReserved(pg);
+ offset = (uvaddr - info->vma->vm_start)
+ >> PAGE_SHIFT;
+ map[offset] = NULL;
+ }
+ fast_flush_area(pending_req, pending_idx, usr_idx, idx);
+ make_response(blkif, pending_req->id, resp->operation,
+ resp->status);
+ info->idx_map[usr_idx] = INVALID_REQ;
+ blkif_put(pending_req->blkif);
+ free_req(pending_req);
+ }
+
+ return 0;
+}
+
+
+/******************************************************************************
+ * NOTIFICATION FROM GUEST OS.
+ */
+
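+/*
+ * Flag the blkif as having work pending and wake its scheduler thread.
+ * Called from the event-channel interrupt below and from
+ * make_response() when further requests remain on the ring.
+ */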
+static void blkif_notify_work(blkif_t *blkif)
+{
+ blkif->waiting_reqs = 1;
+ wake_up(&blkif->wq);
+}
+
+irqreturn_t tap_blkif_be_int(int irq, void *dev_id, struct pt_regs *regs)
+{
+ blkif_notify_work(dev_id);
+ return IRQ_HANDLED;
+}
+
+
+
+/******************************************************************
+ * DOWNWARD CALLS -- These interface with the block-device layer proper.
+ */
+static int print_dbug = 1;
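+/*
+ * Consume requests from the guest's shared ring and hand them to the
+ * userspace tap process.  Returns non-zero if requests remain
+ * unconsumed (no matching user ring, user ring full, or no free
+ * pending_req).
+ */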
+static int do_block_io_op(blkif_t *blkif)
+{
+ blkif_back_ring_t *blk_ring = &blkif->blk_ring;
+ blkif_request_t *req;
+ pending_req_t *pending_req;
+ RING_IDX rc, rp;
+ int more_to_do = 0;
+ tap_blkif_t *info;
+
+ rc = blk_ring->req_cons;
+ rp = blk_ring->sring->req_prod;
+ rmb(); /* Ensure we see queued requests up to 'rp'. */
+
+ /*Check blkif has corresponding UE ring*/
+ if (blkif->dev_num == -1) {
+ /*oops*/
+ if (print_dbug) {
+ WPRINTK("Corresponding UE "
+ "ring does not exist!\n");
+ print_dbug = 0; /*We only print this message once*/
+ }
+ return 1;
+ }
+
+ info = tapfds[blkif->dev_num];
+ if (info == NULL || !info->dev_inuse) {
+ if (print_dbug) {
+ WPRINTK("Can't get UE info!\n");
+ print_dbug = 0;
+ }
+ return 1;
+ }
+
+ while (rc != rp) {
+
+ if (RING_FULL(&info->ufe_ring)) {
+ WPRINTK("RING_FULL! More to do\n");
+ more_to_do = 1;
+ break;
+ }
+
+ if (RING_REQUEST_CONS_OVERFLOW(blk_ring, rc)) {
+ WPRINTK("RING_REQUEST_CONS_OVERFLOW!"
+ " More to do\n");
+ more_to_do = 1;
+ break;
+ }
+
+ pending_req = alloc_req();
+ if (NULL == pending_req) {
+ blkif->st_oo_req++;
+ more_to_do = 1;
+ break;
+ }
+
+ req = RING_GET_REQUEST(blk_ring, rc);
+ blk_ring->req_cons = ++rc; /* before make_response() */
+
+ switch (req->operation) {
+ case BLKIF_OP_READ:
+ blkif->st_rd_req++;
+ dispatch_rw_block_io(blkif, req, pending_req);
+ break;
+
+ case BLKIF_OP_WRITE:
+ blkif->st_wr_req++;
+ dispatch_rw_block_io(blkif, req, pending_req);
+ break;
+
+ default:
+ WPRINTK("unknown operation [%d]\n",
+ req->operation);
+ make_response(blkif, req->id, req->operation,
+ BLKIF_RSP_ERROR);
+ free_req(pending_req);
+ break;
+ }
+ }
+
+ blktap_kick_user(blkif->dev_num);
+
+ return more_to_do;
+}
+
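+/*
+ * Map one guest request into both kernel and user space.  Each segment
+ * needs two grant-map operations: one mapping the granted frame at the
+ * kernel kvaddr and one installing it into the tap process's VMA via
+ * its PTE.  On success the request is copied onto the user ring under
+ * a fresh usr_idx; on failure the mappings are flushed and an error
+ * response is returned to the guest.
+ */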
+static void dispatch_rw_block_io(blkif_t *blkif,
+ blkif_request_t *req,
+ pending_req_t *pending_req)
+{
+ extern void ll_rw_block(int rw, int nr, struct buffer_head * bhs[]);
+ int op, operation = (req->operation == BLKIF_OP_WRITE) ? WRITE : READ;
+ struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST*2];
+ unsigned int nseg;
+ int ret, i;
+ tap_blkif_t *info = tapfds[blkif->dev_num];
+ uint64_t sector;
+
+ blkif_request_t *target;
+ int pending_idx = RTN_PEND_IDX(pending_req,pending_req->mem_idx);
+ int usr_idx = GET_NEXT_REQ(info->idx_map);
+ uint16_t mmap_idx = pending_req->mem_idx;
+
+ /*Check we have space on user ring - should never fail*/
+ if(usr_idx == INVALID_REQ) goto fail_flush;
+
+ /* Check that number of segments is sane. */
+ nseg = req->nr_segments;
+ if ( unlikely(nseg == 0) ||
+ unlikely(nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST) ) {
+ WPRINTK("Bad number of segments in request (%d)\n", nseg);
+ goto fail_response;
+ }
+
+ /* Make sure userspace is ready. */
+ if (!info->ring_ok) {
+ WPRINTK("blktap: ring not ready for requests!\n");
+ goto fail_response;
+ }
+
+ if (RING_FULL(&info->ufe_ring)) {
+ WPRINTK("blktap: fe_ring is full, can't add "
+ "IO Request will be dropped. %d %d\n",
+ RING_SIZE(&info->ufe_ring),
+ RING_SIZE(&blkif->blk_ring));
+ goto fail_response;
+ }
+
+ pending_req->blkif = blkif;
+ pending_req->id = req->id;
+ pending_req->operation = operation;
+ pending_req->status = BLKIF_RSP_OKAY;
+ pending_req->nr_pages = nseg;
+ op = 0;
+ for (i = 0; i < nseg; i++) {
+ unsigned long uvaddr;
+ unsigned long kvaddr;
+ uint64_t ptep;
+ struct page *page;
+ uint32_t flags;
+
+ uvaddr = MMAP_VADDR(info->user_vstart, usr_idx, i);
+ kvaddr = MMAP_VADDR(mmap_start[mmap_idx].start,
+ pending_idx, i);
+ page = virt_to_page(kvaddr);
+
+ sector = req->sector_number + (8*i);
+ if( (blkif->sectors > 0) && (sector >= blkif->sectors) ) {
+ WPRINTK("BLKTAP: Sector request greater"
+ "than size\n");
+ WPRINTK("BLKTAP: %s request sector"
+ "[%llu,%llu], Total [%llu]\n",
+ (req->operation ==
+ BLKIF_OP_WRITE ? "WRITE" : "READ"),
+ (long long unsigned) sector,
+ (long long unsigned) sector>>9,
+ blkif->sectors);
+ }
+
+ flags = GNTMAP_host_map;
+ if (operation == WRITE)
+ flags |= GNTMAP_readonly;
+ gnttab_set_map_op(&map[op], kvaddr, flags,
+ req->seg[i].gref, blkif->domid);
+ op++;
+
+ /* Now map it to user. */
+ ret = create_lookup_pte_addr(info->vma->vm_mm,
+ uvaddr, &ptep);
+ if (ret) {
+ WPRINTK("Couldn't get a pte addr!\n");
+ fast_flush_area(pending_req, pending_idx, usr_idx,
+ blkif->dev_num);
+ goto fail_flush;
+ }
+
+ flags = GNTMAP_host_map | GNTMAP_application_map
+ | GNTMAP_contains_pte;
+ if (operation == WRITE)
+ flags |= GNTMAP_readonly;
+ gnttab_set_map_op(&map[op], ptep, flags,
+ req->seg[i].gref, blkif->domid);
+ op++;
+ }
+
+ ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map, op);
+ BUG_ON(ret);
+
+ for (i = 0; i < (nseg*2); i+=2) {
+ unsigned long uvaddr;
+ unsigned long kvaddr;
+ unsigned long offset;
+ struct page *pg;
+
+ uvaddr = MMAP_VADDR(info->user_vstart, usr_idx, i/2);
+ kvaddr = MMAP_VADDR(mmap_start[mmap_idx].start,
+ pending_idx, i/2);
+
+ if (unlikely(map[i].status != 0)) {
+ WPRINTK("invalid kernel buffer -- "
+ "could not remap it\n");
+ goto fail_flush;
+ }
+
+ if (unlikely(map[i+1].status != 0)) {
+ WPRINTK("invalid user buffer -- "
+ "could not remap it\n");
+ goto fail_flush;
+ }
+
+ pending_handle(mmap_idx, pending_idx, i/2).kernel
+ = map[i].handle;
+ pending_handle(mmap_idx, pending_idx, i/2).user
+ = map[i+1].handle;
+ set_phys_to_machine(__pa(kvaddr) >> PAGE_SHIFT,
+ FOREIGN_FRAME(map[i].dev_bus_addr >> PAGE_SHIFT));
+ offset = (uvaddr - info->vma->vm_start) >> PAGE_SHIFT;
+ pg = pfn_to_page(__pa(kvaddr) >> PAGE_SHIFT);
+ ((struct page **)info->vma->vm_private_data)[offset] =
+ pg;
+ }
+ /* Mark mapped pages as reserved: */
+ for (i = 0; i < req->nr_segments; i++) {
+ unsigned long kvaddr;
+ struct page *pg;
+
+ kvaddr = MMAP_VADDR(mmap_start[mmap_idx].start,
+ pending_idx, i);
+ pg = pfn_to_page(__pa(kvaddr) >> PAGE_SHIFT);
+ SetPageReserved(pg);
+ }
+
+ /*record [mmap_idx,pending_idx] to [usr_idx] mapping*/
+ info->idx_map[usr_idx] = MAKE_ID(mmap_idx, pending_idx);
+
+ blkif_get(blkif);
+ /* Finally, write the request message to the user ring. */
+ target = RING_GET_REQUEST(&info->ufe_ring,
+ info->ufe_ring.req_prod_pvt);
+ memcpy(target, req, sizeof(*req));
+ target->id = usr_idx;
+ info->ufe_ring.req_prod_pvt++;
+ return;
+
+ fail_flush:
+ WPRINTK("Reached Fail_flush\n");
+ fast_flush_area(pending_req, pending_idx, usr_idx, blkif->dev_num);
+ fail_response:
+ make_response(blkif, req->id, req->operation, BLKIF_RSP_ERROR);
+ free_req(pending_req);
+}
+
+
+
+/******************************************************************
+ * MISCELLANEOUS SETUP / TEARDOWN / DEBUGGING
+ */
+
+
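+/*
+ * Queue a response on the guest's ring, notifying it over the event
+ * channel if required.  If further requests are already waiting on the
+ * ring, re-arm the scheduler thread.
+ */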
+static void make_response(blkif_t *blkif, unsigned long id,
+ unsigned short op, int st)
+{
+ blkif_response_t *resp;
+ unsigned long flags;
+ blkif_back_ring_t *blk_ring = &blkif->blk_ring;
+ int more_to_do = 0;
+ int notify;
+
+ spin_lock_irqsave(&blkif->blk_ring_lock, flags);
+ /* Place on the response ring for the relevant domain. */
+ resp = RING_GET_RESPONSE(blk_ring, blk_ring->rsp_prod_pvt);
+ resp->id = id;
+ resp->operation = op;
+ resp->status = st;
+ blk_ring->rsp_prod_pvt++;
+ RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(blk_ring, notify);
+
+ if (blk_ring->rsp_prod_pvt == blk_ring->req_cons) {
+ /*
+ * Tail check for pending requests. Allows frontend to avoid
+ * notifications if requests are already in flight (lower
+ * overheads and promotes batching).
+ */
+ RING_FINAL_CHECK_FOR_REQUESTS(blk_ring, more_to_do);
+ } else if (RING_HAS_UNCONSUMED_REQUESTS(blk_ring)) {
+ more_to_do = 1;
+
+ }
+ spin_unlock_irqrestore(&blkif->blk_ring_lock, flags);
+ if (more_to_do)
+ blkif_notify_work(blkif);
+ if (notify)
+ notify_remote_via_irq(blkif->irq);
+}
+
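+/*
+ * Module initialisation: pre-allocate two batches of pending requests,
+ * set up the backend interface and xenbus handling, then register the
+ * blktap character device and create the /dev/xen/blktap%d nodes.
+ */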
+static int __init blkif_init(void)
+{
+ int i,ret,blktap_dir;
+ tap_blkif_t *info;
+
+ if (!is_running_on_xen())
+ return -ENODEV;
+
+ INIT_LIST_HEAD(&pending_free);
+ for(i = 0; i < 2; i++) {
+ ret = req_increase();
+ if (ret)
+ break;
+ }
+ if (i == 0)
+ return ret;
+
+ tap_blkif_interface_init();
+
+ alloc_pending_reqs = 0;
+
+ tap_blkif_xenbus_init();
+
+ /*Create the blktap devices, but do not map memory or waitqueue*/
+ for(i = 0; i < MAX_TAP_DEV; i++) translate_domid[i].domid = 0xFFFF;
+
+ ret = register_chrdev(BLKTAP_DEV_MAJOR,"blktap",&blktap_fops);
+ blktap_dir = devfs_mk_dir(NULL, "xen", 0, NULL);
+
+ if ( (ret < 0)||(blktap_dir < 0) ) {
+ WPRINTK("Couldn't register /dev/xen/blktap\n");
+ return -ENOMEM;
+ }
+
+ for(i = 0; i < MAX_TAP_DEV; i++ ) {
+ info = tapfds[i] = kzalloc(sizeof(tap_blkif_t),GFP_KERNEL);
+ if(tapfds[i] == NULL) return -ENOMEM;
+ info->minor = i;
+ info->pid = 0;
+ info->blkif = NULL;
+
+ ret = devfs_mk_cdev(MKDEV(BLKTAP_DEV_MAJOR, i),
+ S_IFCHR|S_IRUGO|S_IWUSR, "xen/blktap%d", i);
+
+ if(ret != 0) return -ENOMEM;
+ info->dev_pending = info->dev_inuse = 0;
+
+ DPRINTK("Created misc_dev [/dev/xen/blktap%d]\n",i);
+ }
+
+ DPRINTK("Blktap device successfully created\n");
+
+ return 0;
+}
+
+module_init(blkif_init);
+
+MODULE_LICENSE("Dual BSD/GPL");