This commit was manufactured by cvs2svn to create branch 'vserver'.
[linux-2.6.git] / drivers / xen / fbfront / xenfb.c
1 /*
2  * linux/drivers/video/xenfb.c -- Xen para-virtual frame buffer device
3  *
4  * Copyright (C) 2005-2006 Anthony Liguori <aliguori@us.ibm.com>
5  * Copyright (C) 2006 Red Hat, Inc., Markus Armbruster <armbru@redhat.com>
6  *
7  *  Based on linux/drivers/video/q40fb.c
8  *
9  *  This file is subject to the terms and conditions of the GNU General Public
10  *  License. See the file COPYING in the main directory of this archive for
11  *  more details.
12  */
13
14 /*
15  * TODO:
16  *
17  * Switch to grant tables when they become capable of dealing with the
18  * frame buffer.
19  */
20
21 #include <linux/kernel.h>
22 #include <linux/errno.h>
23 #include <linux/fb.h>
24 #include <linux/module.h>
25 #include <linux/vmalloc.h>
26 #include <linux/mm.h>
27 #include <linux/freezer.h>
28 #include <asm/hypervisor.h>
29 #include <xen/evtchn.h>
30 #include <xen/interface/io/fbif.h>
31 #include <xen/xenbus.h>
32 #include <linux/kthread.h>
33
/*
 * One userspace mmap() of the framebuffer.  Kept on xenfb_info.mappings
 * so the update thread can zap the mapping's PTEs after each screen
 * update; the resulting page faults serve as a cheap "userspace wrote
 * to the framebuffer" signal.
 */
struct xenfb_mapping
{
	struct list_head	link;		/* entry in xenfb_info.mappings */
	struct vm_area_struct	*vma;		/* the VMA backing this mapping */
	atomic_t		map_refs;	/* VMA open count; freed when it drops to 0 */
	int			faults;		/* faults taken since the last zap */
	struct xenfb_info	*info;		/* owning device instance */
};
42
/* Per-device state for one Xen virtual framebuffer frontend. */
struct xenfb_info
{
	struct task_struct	*kthread;	/* refresh thread (xenfb_thread) */
	wait_queue_head_t	wq;		/* wakes the refresh thread */

	unsigned char		*fb;		/* vmalloc()ed framebuffer memory */
	struct fb_info		*fb_info;	/* registered fbdev instance */
	struct timer_list	refresh;	/* rate-limits updates to xenfb_fps */
	int			dirty;		/* refresh thread has work to do */
	int			x1, y1, x2, y2; /* dirty rectangle,
						   protected by dirty_lock */
	spinlock_t		dirty_lock;
	struct mutex		mm_lock;	/* protects mappings list (below) */
	int			nr_pages;	/* pages backing the framebuffer */
	struct page		**pages;	/* struct page per framebuffer page */
	struct list_head	mappings; /* protected by mm_lock */

	unsigned		evtchn;		/* event channel to the backend */
	int			irq;		/* bound irq, or -1 if disconnected */
	struct xenfb_page	*page;		/* shared page (rings + mode info) */
	unsigned long		*mfns;		/* machine frames of the framebuffer */
	int			update_wanted; /* XENFB_TYPE_UPDATE wanted */

	struct xenbus_device	*xbdev;		/* our xenbus device */
};
68
/* Maximum screen-update rate pushed to the backend, frames per second. */
static int xenfb_fps = 20;
/* Framebuffer size for the compile-time default mode. */
static unsigned long xenfb_mem_len = XENFB_WIDTH * XENFB_HEIGHT * XENFB_DEPTH / 8;

static int xenfb_remove(struct xenbus_device *);
static void xenfb_init_shared_page(struct xenfb_info *);
static int xenfb_connect_backend(struct xenbus_device *, struct xenfb_info *);
static void xenfb_disconnect_backend(struct xenfb_info *);
76
/*
 * Queue one XENFB_TYPE_UPDATE event for the rectangle (x, y, w, h) on
 * the shared-page out ring and kick the backend's event channel.
 * The caller must have checked !xenfb_queue_full() beforehand; the
 * barriers order the ring-slot write against the producer-index update.
 */
static void xenfb_do_update(struct xenfb_info *info,
			    int x, int y, int w, int h)
{
	union xenfb_out_event event;
	__u32 prod;

	event.type = XENFB_TYPE_UPDATE;
	event.update.x = x;
	event.update.y = y;
	event.update.width = w;
	event.update.height = h;

	prod = info->page->out_prod;
	/* caller ensures !xenfb_queue_full() */
	mb();			/* ensure ring space available */
	XENFB_OUT_RING_REF(info->page, prod) = event;
	wmb();			/* ensure ring contents visible */
	info->page->out_prod = prod + 1;

	notify_remote_via_evtchn(info->evtchn);
}
98
99 static int xenfb_queue_full(struct xenfb_info *info)
100 {
101         __u32 cons, prod;
102
103         prod = info->page->out_prod;
104         cons = info->page->out_cons;
105         return prod - cons == XENFB_OUT_RING_LEN;
106 }
107
/*
 * Send the accumulated dirty rectangle to the backend (if it asked for
 * updates and the ring has room), then zap the PTEs of every userspace
 * mapping that faulted since the last pass, so the next write faults
 * again and re-dirties the screen.
 *
 * Lock order: mm_lock (mappings list) outside dirty_lock (rectangle).
 * The rectangle is snapshotted and reset to "empty" (x1/y1 = INT_MAX,
 * x2/y2 = 0) under dirty_lock.
 */
static void xenfb_update_screen(struct xenfb_info *info)
{
	unsigned long flags;
	int y1, y2, x1, x2;
	struct xenfb_mapping *map;

	if (!info->update_wanted)
		return;
	if (xenfb_queue_full(info))
		return;

	mutex_lock(&info->mm_lock);

	spin_lock_irqsave(&info->dirty_lock, flags);
	y1 = info->y1;
	y2 = info->y2;
	x1 = info->x1;
	x2 = info->x2;
	info->x1 = info->y1 = INT_MAX;
	info->x2 = info->y2 = 0;
	spin_unlock_irqrestore(&info->dirty_lock, flags);

	/* Invalidate only mappings that actually faulted since last time */
	list_for_each_entry(map, &info->mappings, link) {
		if (!map->faults)
			continue;
		zap_page_range(map->vma, map->vma->vm_start,
			       map->vma->vm_end - map->vma->vm_start, NULL);
		map->faults = 0;
	}

	mutex_unlock(&info->mm_lock);

	xenfb_do_update(info, x1, y1, x2 - x1, y2 - y1);
}
142
143 static int xenfb_thread(void *data)
144 {
145         struct xenfb_info *info = data;
146
147         while (!kthread_should_stop()) {
148                 if (info->dirty) {
149                         info->dirty = 0;
150                         xenfb_update_screen(info);
151                 }
152                 wait_event_interruptible(info->wq,
153                         kthread_should_stop() || info->dirty);
154                 try_to_freeze();
155         }
156         return 0;
157 }
158
159 static int xenfb_setcolreg(unsigned regno, unsigned red, unsigned green,
160                            unsigned blue, unsigned transp,
161                            struct fb_info *info)
162 {
163         u32 v;
164
165         if (regno > info->cmap.len)
166                 return 1;
167
168         red   >>= (16 - info->var.red.length);
169         green >>= (16 - info->var.green.length);
170         blue  >>= (16 - info->var.blue.length);
171
172         v = (red << info->var.red.offset) |
173             (green << info->var.green.offset) |
174             (blue << info->var.blue.offset);
175
176         /* FIXME is this sane?  check against xxxfb_setcolreg()!  */
177         switch (info->var.bits_per_pixel) {
178         case 16:
179         case 24:
180         case 32:
181                 ((u32 *)info->pseudo_palette)[regno] = v;
182                 break;
183         }
184         
185         return 0;
186 }
187
188 static void xenfb_timer(unsigned long data)
189 {
190         struct xenfb_info *info = (struct xenfb_info *)data;
191         info->dirty = 1;
192         wake_up(&info->wq);
193 }
194
195 static void __xenfb_refresh(struct xenfb_info *info,
196                             int x1, int y1, int w, int h)
197 {
198         int y2, x2;
199
200         y2 = y1 + h;
201         x2 = x1 + w;
202
203         if (info->y1 > y1)
204                 info->y1 = y1;
205         if (info->y2 < y2)
206                 info->y2 = y2;
207         if (info->x1 > x1)
208                 info->x1 = x1;
209         if (info->x2 < x2)
210                 info->x2 = x2;
211
212         if (timer_pending(&info->refresh))
213                 return;
214
215         mod_timer(&info->refresh, jiffies + HZ/xenfb_fps);
216 }
217
218 static void xenfb_refresh(struct xenfb_info *info,
219                           int x1, int y1, int w, int h)
220 {
221         unsigned long flags;
222
223         spin_lock_irqsave(&info->dirty_lock, flags);
224         __xenfb_refresh(info, x1, y1, w, h);
225         spin_unlock_irqrestore(&info->dirty_lock, flags);
226 }
227
228 static void xenfb_fillrect(struct fb_info *p, const struct fb_fillrect *rect)
229 {
230         struct xenfb_info *info = p->par;
231
232         cfb_fillrect(p, rect);
233         xenfb_refresh(info, rect->dx, rect->dy, rect->width, rect->height);
234 }
235
236 static void xenfb_imageblit(struct fb_info *p, const struct fb_image *image)
237 {
238         struct xenfb_info *info = p->par;
239
240         cfb_imageblit(p, image);
241         xenfb_refresh(info, image->dx, image->dy, image->width, image->height);
242 }
243
244 static void xenfb_copyarea(struct fb_info *p, const struct fb_copyarea *area)
245 {
246         struct xenfb_info *info = p->par;
247
248         cfb_copyarea(p, area);
249         xenfb_refresh(info, area->dx, area->dy, area->width, area->height);
250 }
251
252 static void xenfb_vm_open(struct vm_area_struct *vma)
253 {
254         struct xenfb_mapping *map = vma->vm_private_data;
255         atomic_inc(&map->map_refs);
256 }
257
258 static void xenfb_vm_close(struct vm_area_struct *vma)
259 {
260         struct xenfb_mapping *map = vma->vm_private_data;
261         struct xenfb_info *info = map->info;
262
263         mutex_lock(&info->mm_lock);
264         if (atomic_dec_and_test(&map->map_refs)) {
265                 list_del(&map->link);
266                 kfree(map);
267         }
268         mutex_unlock(&info->mm_lock);
269 }
270
/*
 * Fault handler for framebuffer mappings.  Hands out the backing page,
 * counts the fault (so xenfb_update_screen() knows to zap this mapping
 * again), and dirties the scanlines covered by the faulting page so
 * the backend repaints them.
 *
 * Lock order matches xenfb_update_screen(): mm_lock outside dirty_lock.
 */
static struct page *xenfb_vm_nopage(struct vm_area_struct *vma,
				    unsigned long vaddr, int *type)
{
	struct xenfb_mapping *map = vma->vm_private_data;
	struct xenfb_info *info = map->info;
	int pgnr = (vaddr - vma->vm_start) >> PAGE_SHIFT;
	unsigned long flags;
	struct page *page;
	int y1, y2;

	if (pgnr >= info->nr_pages)
		return NOPAGE_SIGBUS;

	mutex_lock(&info->mm_lock);
	spin_lock_irqsave(&info->dirty_lock, flags);
	page = info->pages[pgnr];
	get_page(page);
	map->faults++;

	/* Scanline range touched by this page, clamped to the screen */
	y1 = pgnr * PAGE_SIZE / info->fb_info->fix.line_length;
	y2 = (pgnr * PAGE_SIZE + PAGE_SIZE - 1) / info->fb_info->fix.line_length;
	if (y2 > info->fb_info->var.yres)
		y2 = info->fb_info->var.yres;
	__xenfb_refresh(info, 0, y1, info->fb_info->var.xres, y2 - y1);
	spin_unlock_irqrestore(&info->dirty_lock, flags);
	mutex_unlock(&info->mm_lock);

	if (type)
		*type = VM_FAULT_MINOR;

	return page;
}
303
/* VM callbacks for userspace framebuffer mappings (see xenfb_mmap). */
static struct vm_operations_struct xenfb_vm_ops = {
	.open	= xenfb_vm_open,
	.close	= xenfb_vm_close,
	.nopage	= xenfb_vm_nopage,
};
309
310 static int xenfb_mmap(struct fb_info *fb_info, struct vm_area_struct *vma)
311 {
312         struct xenfb_info *info = fb_info->par;
313         struct xenfb_mapping *map;
314         int map_pages;
315
316         if (!(vma->vm_flags & VM_WRITE))
317                 return -EINVAL;
318         if (!(vma->vm_flags & VM_SHARED))
319                 return -EINVAL;
320         if (vma->vm_pgoff != 0)
321                 return -EINVAL;
322
323         map_pages = (vma->vm_end - vma->vm_start + PAGE_SIZE-1) >> PAGE_SHIFT;
324         if (map_pages > info->nr_pages)
325                 return -EINVAL;
326
327         map = kzalloc(sizeof(*map), GFP_KERNEL);
328         if (map == NULL)
329                 return -ENOMEM;
330
331         map->vma = vma;
332         map->faults = 0;
333         map->info = info;
334         atomic_set(&map->map_refs, 1);
335
336         mutex_lock(&info->mm_lock);
337         list_add(&map->link, &info->mappings);
338         mutex_unlock(&info->mm_lock);
339
340         vma->vm_ops = &xenfb_vm_ops;
341         vma->vm_flags |= (VM_DONTEXPAND | VM_RESERVED);
342         vma->vm_private_data = map;
343
344         return 0;
345 }
346
/* fbdev operations; unaccelerated cfb_* blits wrapped with dirty tracking. */
static struct fb_ops xenfb_fb_ops = {
	.owner		= THIS_MODULE,
	.fb_setcolreg	= xenfb_setcolreg,
	.fb_fillrect	= xenfb_fillrect,
	.fb_copyarea	= xenfb_copyarea,
	.fb_imageblit	= xenfb_imageblit,
	.fb_mmap	= xenfb_mmap,
};
355
356 static irqreturn_t xenfb_event_handler(int rq, void *dev_id,
357                                        struct pt_regs *regs)
358 {
359         /*
360          * No in events recognized, simply ignore them all.
361          * If you need to recognize some, see xenbkd's input_handler()
362          * for how to do that.
363          */
364         struct xenfb_info *info = dev_id;
365         struct xenfb_page *page = info->page;
366
367         if (page->in_cons != page->in_prod) {
368                 info->page->in_cons = info->page->in_prod;
369                 notify_remote_via_evtchn(info->evtchn);
370         }
371         return IRQ_HANDLED;
372 }
373
/* Translate a vmalloc()ed kernel virtual address to a machine frame number. */
static unsigned long vmalloc_to_mfn(void *address)
{
	return pfn_to_mfn(vmalloc_to_pfn(address));
}
378
379 static int __devinit xenfb_probe(struct xenbus_device *dev,
380                                  const struct xenbus_device_id *id)
381 {
382         struct xenfb_info *info;
383         struct fb_info *fb_info;
384         int ret;
385
386         info = kzalloc(sizeof(*info), GFP_KERNEL);
387         if (info == NULL) {
388                 xenbus_dev_fatal(dev, -ENOMEM, "allocating info structure");
389                 return -ENOMEM;
390         }
391         dev->dev.driver_data = info;
392         info->xbdev = dev;
393         info->irq = -1;
394         info->x1 = info->y1 = INT_MAX;
395         spin_lock_init(&info->dirty_lock);
396         mutex_init(&info->mm_lock);
397         init_waitqueue_head(&info->wq);
398         init_timer(&info->refresh);
399         info->refresh.function = xenfb_timer;
400         info->refresh.data = (unsigned long)info;
401         INIT_LIST_HEAD(&info->mappings);
402
403         info->fb = vmalloc(xenfb_mem_len);
404         if (info->fb == NULL)
405                 goto error_nomem;
406         memset(info->fb, 0, xenfb_mem_len);
407
408         info->nr_pages = (xenfb_mem_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
409
410         info->pages = kmalloc(sizeof(struct page *) * info->nr_pages,
411                               GFP_KERNEL);
412         if (info->pages == NULL)
413                 goto error_nomem;
414
415         info->mfns = vmalloc(sizeof(unsigned long) * info->nr_pages);
416         if (!info->mfns)
417                 goto error_nomem;
418
419         /* set up shared page */
420         info->page = (void *)__get_free_page(GFP_KERNEL);
421         if (!info->page)
422                 goto error_nomem;
423
424         xenfb_init_shared_page(info);
425
426         fb_info = framebuffer_alloc(sizeof(u32) * 256, NULL);
427                                 /* see fishy hackery below */
428         if (fb_info == NULL)
429                 goto error_nomem;
430
431         /* FIXME fishy hackery */
432         fb_info->pseudo_palette = fb_info->par;
433         fb_info->par = info;
434         /* /FIXME */
435         fb_info->screen_base = info->fb;
436
437         fb_info->fbops = &xenfb_fb_ops;
438         fb_info->var.xres_virtual = fb_info->var.xres = info->page->width;
439         fb_info->var.yres_virtual = fb_info->var.yres = info->page->height;
440         fb_info->var.bits_per_pixel = info->page->depth;
441
442         fb_info->var.red = (struct fb_bitfield){16, 8, 0};
443         fb_info->var.green = (struct fb_bitfield){8, 8, 0};
444         fb_info->var.blue = (struct fb_bitfield){0, 8, 0};
445
446         fb_info->var.activate = FB_ACTIVATE_NOW;
447         fb_info->var.height = -1;
448         fb_info->var.width = -1;
449         fb_info->var.vmode = FB_VMODE_NONINTERLACED;
450
451         fb_info->fix.visual = FB_VISUAL_TRUECOLOR;
452         fb_info->fix.line_length = info->page->line_length;
453         fb_info->fix.smem_start = 0;
454         fb_info->fix.smem_len = xenfb_mem_len;
455         strcpy(fb_info->fix.id, "xen");
456         fb_info->fix.type = FB_TYPE_PACKED_PIXELS;
457         fb_info->fix.accel = FB_ACCEL_NONE;
458
459         fb_info->flags = FBINFO_FLAG_DEFAULT;
460
461         ret = fb_alloc_cmap(&fb_info->cmap, 256, 0);
462         if (ret < 0) {
463                 framebuffer_release(fb_info);
464                 xenbus_dev_fatal(dev, ret, "fb_alloc_cmap");
465                 goto error;
466         }
467
468         ret = register_framebuffer(fb_info);
469         if (ret) {
470                 fb_dealloc_cmap(&info->fb_info->cmap);
471                 framebuffer_release(fb_info);
472                 xenbus_dev_fatal(dev, ret, "register_framebuffer");
473                 goto error;
474         }
475         info->fb_info = fb_info;
476
477         /* FIXME should this be delayed until backend XenbusStateConnected? */
478         info->kthread = kthread_run(xenfb_thread, info, "xenfb thread");
479         if (IS_ERR(info->kthread)) {
480                 ret = PTR_ERR(info->kthread);
481                 info->kthread = NULL;
482                 xenbus_dev_fatal(dev, ret, "register_framebuffer");
483                 goto error;
484         }
485
486         ret = xenfb_connect_backend(dev, info);
487         if (ret < 0)
488                 goto error;
489
490         return 0;
491
492  error_nomem:
493         ret = -ENOMEM;
494         xenbus_dev_fatal(dev, ret, "allocating device memory");
495  error:
496         xenfb_remove(dev);
497         return ret;
498 }
499
/*
 * xenbus resume hook: after save/restore or migration the event channel
 * is stale, so disconnect, re-initialize the shared page (page-directory
 * mfns and ring indexes), and reconnect to the backend.
 */
static int xenfb_resume(struct xenbus_device *dev)
{
	struct xenfb_info *info = dev->dev.driver_data;

	xenfb_disconnect_backend(info);
	xenfb_init_shared_page(info);
	return xenfb_connect_backend(dev, info);
}
508
/*
 * Tear down the device.  Also used as the error path from xenfb_probe(),
 * so members may be NULL/unset: the kthread and fb_info cases are
 * guarded explicitly, and vfree/kfree/free_page accept NULL/0.
 * Order matters: stop the timer and thread (the update sources) and
 * unbind the irq before freeing what they reference.
 */
static int xenfb_remove(struct xenbus_device *dev)
{
	struct xenfb_info *info = dev->dev.driver_data;

	del_timer(&info->refresh);
	if (info->kthread)
		kthread_stop(info->kthread);
	xenfb_disconnect_backend(info);
	if (info->fb_info) {
		unregister_framebuffer(info->fb_info);
		fb_dealloc_cmap(&info->fb_info->cmap);
		framebuffer_release(info->fb_info);
	}
	free_page((unsigned long)info->page);
	vfree(info->mfns);
	kfree(info->pages);
	vfree(info->fb);
	kfree(info);

	return 0;
}
530
531 static void xenfb_init_shared_page(struct xenfb_info *info)
532 {
533         int i;
534
535         for (i = 0; i < info->nr_pages; i++)
536                 info->pages[i] = vmalloc_to_page(info->fb + i * PAGE_SIZE);
537
538         for (i = 0; i < info->nr_pages; i++)
539                 info->mfns[i] = vmalloc_to_mfn(info->fb + i * PAGE_SIZE);
540
541         info->page->pd[0] = vmalloc_to_mfn(info->mfns);
542         info->page->pd[1] = 0;
543         info->page->width = XENFB_WIDTH;
544         info->page->height = XENFB_HEIGHT;
545         info->page->depth = XENFB_DEPTH;
546         info->page->line_length = (info->page->depth / 8) * info->page->width;
547         info->page->mem_length = xenfb_mem_len;
548         info->page->in_cons = info->page->in_prod = 0;
549         info->page->out_cons = info->page->out_prod = 0;
550 }
551
/*
 * Allocate an event channel, bind its irq handler, and advertise the
 * shared page, event channel, and feature-update support to the backend
 * via xenstore.  The xenstore writes are done in a transaction that is
 * retried on -EAGAIN.  On success, move to XenbusStateInitialised.
 *
 * NOTE(review): on transaction failure the bound irq is not unbound
 * here; teardown relies on xenfb_disconnect_backend() being called
 * later (xenfb_remove via the probe error path).
 */
static int xenfb_connect_backend(struct xenbus_device *dev,
				 struct xenfb_info *info)
{
	int ret;
	struct xenbus_transaction xbt;

	ret = xenbus_alloc_evtchn(dev, &info->evtchn);
	if (ret)
		return ret;
	ret = bind_evtchn_to_irqhandler(info->evtchn, xenfb_event_handler,
					0, "xenfb", info);
	if (ret < 0) {
		xenbus_free_evtchn(dev, info->evtchn);
		xenbus_dev_fatal(dev, ret, "bind_evtchn_to_irqhandler");
		return ret;
	}
	info->irq = ret;

 again:
	ret = xenbus_transaction_start(&xbt);
	if (ret) {
		xenbus_dev_fatal(dev, ret, "starting transaction");
		return ret;
	}
	ret = xenbus_printf(xbt, dev->nodename, "page-ref", "%lu",
			    virt_to_mfn(info->page));
	if (ret)
		goto error_xenbus;
	ret = xenbus_printf(xbt, dev->nodename, "event-channel", "%u",
			    info->evtchn);
	if (ret)
		goto error_xenbus;
	ret = xenbus_printf(xbt, dev->nodename, "feature-update", "1");
	if (ret)
		goto error_xenbus;
	ret = xenbus_transaction_end(xbt, 0);
	if (ret) {
		if (ret == -EAGAIN)
			goto again;
		xenbus_dev_fatal(dev, ret, "completing transaction");
		return ret;
	}

	xenbus_switch_state(dev, XenbusStateInitialised);
	return 0;

 error_xenbus:
	xenbus_transaction_end(xbt, 1);
	xenbus_dev_fatal(dev, ret, "writing xenstore");
	return ret;
}
603
604 static void xenfb_disconnect_backend(struct xenfb_info *info)
605 {
606         if (info->irq >= 0)
607                 unbind_from_irqhandler(info->irq, info);
608         info->irq = -1;
609 }
610
/*
 * Track backend state transitions.  On InitWait (or a Connected that
 * arrives before we ever saw InitWait) we switch ourselves to
 * Connected; once both ends are Connected we read whether the backend
 * wants explicit XENFB_TYPE_UPDATE events ("request-update").
 */
static void xenfb_backend_changed(struct xenbus_device *dev,
				  enum xenbus_state backend_state)
{
	struct xenfb_info *info = dev->dev.driver_data;
	int val;

	switch (backend_state) {
	case XenbusStateInitialising:
	case XenbusStateInitialised:
	case XenbusStateUnknown:
	case XenbusStateClosed:
		break;

	case XenbusStateInitWait:
	InitWait:
		xenbus_switch_state(dev, XenbusStateConnected);
		break;

	case XenbusStateConnected:
		/*
		 * Work around xenbus race condition: If backend goes
		 * through InitWait to Connected fast enough, we can
		 * get Connected twice here.
		 */
		if (dev->state != XenbusStateConnected)
			goto InitWait; /* no InitWait seen yet, fudge it */

		/* missing/unparsable key is treated as "no updates wanted" */
		if (xenbus_scanf(XBT_NIL, info->xbdev->otherend,
				 "request-update", "%d", &val) < 0)
			val = 0;
		if (val)
			info->update_wanted = 1;
		break;

	case XenbusStateClosing:
		/* FIXME is this safe in any dev->state? */
		xenbus_frontend_closed(dev);
		break;
	}
}
651
/* xenstore device type this frontend binds to ("vfb"). */
static struct xenbus_device_id xenfb_ids[] = {
	{ "vfb" },
	{ "" }
};

/* xenbus frontend driver glue. */
static struct xenbus_driver xenfb = {
	.name = "vfb",
	.owner = THIS_MODULE,
	.ids = xenfb_ids,
	.probe = xenfb_probe,
	.remove = xenfb_remove,
	.resume = xenfb_resume,
	.otherend_changed = xenfb_backend_changed,
};
666
667 static int __init xenfb_init(void)
668 {
669         if (!is_running_on_xen())
670                 return -ENODEV;
671
672         /* Nothing to do if running in dom0. */
673         if (is_initial_xendomain())
674                 return -ENODEV;
675
676         return xenbus_register_frontend(&xenfb);
677 }
678
679 static void __exit xenfb_cleanup(void)
680 {
681         return xenbus_unregister_driver(&xenfb);
682 }
683
module_init(xenfb_init);
module_exit(xenfb_cleanup);

MODULE_LICENSE("GPL");