X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=drivers%2Fchar%2Fdrm%2Fdrm_irq.c;h=9d00c51fe2c44790000f59551bf05947a2d829f2;hb=refs%2Fheads%2Fvserver;hp=611a1173091de92116cb4a018cbd0d473bdadee3;hpb=76828883507a47dae78837ab5dec5a5b4513c667;p=linux-2.6.git diff --git a/drivers/char/drm/drm_irq.c b/drivers/char/drm/drm_irq.c index 611a11730..9d00c51fe 100644 --- a/drivers/char/drm/drm_irq.c +++ b/drivers/char/drm/drm_irq.c @@ -64,9 +64,9 @@ int drm_irq_by_busid(struct inode *inode, struct file *filp, if (copy_from_user(&p, argp, sizeof(p))) return -EFAULT; - if ((p.busnum >> 8) != dev->pci_domain || - (p.busnum & 0xff) != dev->pci_bus || - p.devnum != dev->pci_slot || p.funcnum != dev->pci_func) + if ((p.busnum >> 8) != drm_get_pci_domain(dev) || + (p.busnum & 0xff) != dev->pdev->bus->number || + p.devnum != PCI_SLOT(dev->pdev->devfn) || p.funcnum != PCI_FUNC(dev->pdev->devfn)) return -EINVAL; p.irq = dev->irq; @@ -121,6 +121,7 @@ static int drm_irq_install(drm_device_t * dev) spin_lock_init(&dev->vbl_lock); INIT_LIST_HEAD(&dev->vbl_sigs.head); + INIT_LIST_HEAD(&dev->vbl_sigs2.head); dev->vbl_pending = 0; } @@ -130,7 +131,7 @@ static int drm_irq_install(drm_device_t * dev) /* Install handler */ if (drm_core_check_feature(dev, DRIVER_IRQ_SHARED)) - sh_flags = SA_SHIRQ; + sh_flags = IRQF_SHARED; ret = request_irq(dev->irq, dev->driver->irq_handler, sh_flags, dev->devname, dev); @@ -175,6 +176,8 @@ int drm_irq_uninstall(drm_device_t * dev) free_irq(dev->irq, dev); + dev->locked_tasklet_func = NULL; + return 0; } @@ -247,19 +250,34 @@ int drm_wait_vblank(DRM_IOCTL_ARGS) drm_wait_vblank_t vblwait; struct timeval now; int ret = 0; - unsigned int flags; + unsigned int flags, seq; - if (!drm_core_check_feature(dev, DRIVER_IRQ_VBL)) + if (!dev->irq) return -EINVAL; - if (!dev->irq) + if (copy_from_user(&vblwait, argp, sizeof(vblwait))) + return -EFAULT; + + if (vblwait.request.type & + ~(_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK)) { + DRM_ERROR("Unsupported type value 0x%x, supported mask 0x%x\n", + vblwait.request.type, + (_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK)); return -EINVAL; + } - DRM_COPY_FROM_USER_IOCTL(vblwait, argp, sizeof(vblwait)); + flags = vblwait.request.type & _DRM_VBLANK_FLAGS_MASK; - switch (vblwait.request.type & ~_DRM_VBLANK_FLAGS_MASK) { + if (!drm_core_check_feature(dev, (flags & _DRM_VBLANK_SECONDARY) ? + DRIVER_IRQ_VBL2 : DRIVER_IRQ_VBL)) + return -EINVAL; + + seq = atomic_read((flags & _DRM_VBLANK_SECONDARY) ? &dev->vbl_received2 + : &dev->vbl_received); + + switch (vblwait.request.type & _DRM_VBLANK_TYPES_MASK) { case _DRM_VBLANK_RELATIVE: - vblwait.request.sequence += atomic_read(&dev->vbl_received); + vblwait.request.sequence += seq; vblwait.request.type &= ~_DRM_VBLANK_RELATIVE; case _DRM_VBLANK_ABSOLUTE: break; @@ -267,26 +285,30 @@ int drm_wait_vblank(DRM_IOCTL_ARGS) return -EINVAL; } - flags = vblwait.request.type & _DRM_VBLANK_FLAGS_MASK; + if ((flags & _DRM_VBLANK_NEXTONMISS) && + (seq - vblwait.request.sequence) <= (1<<23)) { + vblwait.request.sequence = seq + 1; + } if (flags & _DRM_VBLANK_SIGNAL) { unsigned long irqflags; + drm_vbl_sig_t *vbl_sigs = (flags & _DRM_VBLANK_SECONDARY) + ? 
&dev->vbl_sigs2 : &dev->vbl_sigs; drm_vbl_sig_t *vbl_sig; - vblwait.reply.sequence = atomic_read(&dev->vbl_received); - spin_lock_irqsave(&dev->vbl_lock, irqflags); /* Check if this task has already scheduled the same signal * for the same vblank sequence number; nothing to be done in * that case */ - list_for_each_entry(vbl_sig, &dev->vbl_sigs.head, head) { + list_for_each_entry(vbl_sig, &vbl_sigs->head, head) { if (vbl_sig->sequence == vblwait.request.sequence && vbl_sig->info.si_signo == vblwait.request.signal && vbl_sig->task == current) { spin_unlock_irqrestore(&dev->vbl_lock, irqflags); + vblwait.reply.sequence = seq; goto done; } } @@ -314,11 +336,16 @@ int drm_wait_vblank(DRM_IOCTL_ARGS) spin_lock_irqsave(&dev->vbl_lock, irqflags); - list_add_tail((struct list_head *)vbl_sig, &dev->vbl_sigs.head); + list_add_tail((struct list_head *)vbl_sig, &vbl_sigs->head); spin_unlock_irqrestore(&dev->vbl_lock, irqflags); + + vblwait.reply.sequence = seq; } else { - if (dev->driver->vblank_wait) + if (flags & _DRM_VBLANK_SECONDARY) { + if (dev->driver->vblank_wait2) + ret = dev->driver->vblank_wait2(dev, &vblwait.request.sequence); + } else if (dev->driver->vblank_wait) ret = dev->driver->vblank_wait(dev, &vblwait.request.sequence); @@ -329,7 +356,8 @@ int drm_wait_vblank(DRM_IOCTL_ARGS) } done: - DRM_COPY_TO_USER_IOCTL(argp, vblwait, sizeof(vblwait)); + if (copy_to_user(argp, &vblwait, sizeof(vblwait))) + return -EFAULT; return ret; } @@ -345,25 +373,32 @@ int drm_wait_vblank(DRM_IOCTL_ARGS) */ void drm_vbl_send_signals(drm_device_t * dev) { - struct list_head *list, *tmp; - drm_vbl_sig_t *vbl_sig; - unsigned int vbl_seq = atomic_read(&dev->vbl_received); unsigned long flags; + int i; spin_lock_irqsave(&dev->vbl_lock, flags); - list_for_each_safe(list, tmp, &dev->vbl_sigs.head) { - vbl_sig = list_entry(list, drm_vbl_sig_t, head); - if ((vbl_seq - vbl_sig->sequence) <= (1 << 23)) { - vbl_sig->info.si_code = vbl_seq; - send_sig_info(vbl_sig->info.si_signo, &vbl_sig->info, - vbl_sig->task); + for (i = 0; i < 2; i++) { + struct list_head *list, *tmp; + drm_vbl_sig_t *vbl_sig; + drm_vbl_sig_t *vbl_sigs = i ? &dev->vbl_sigs2 : &dev->vbl_sigs; + unsigned int vbl_seq = atomic_read(i ? &dev->vbl_received2 : + &dev->vbl_received); + + list_for_each_safe(list, tmp, &vbl_sigs->head) { + vbl_sig = list_entry(list, drm_vbl_sig_t, head); + if ((vbl_seq - vbl_sig->sequence) <= (1 << 23)) { + vbl_sig->info.si_code = vbl_seq; + send_sig_info(vbl_sig->info.si_signo, + &vbl_sig->info, vbl_sig->task); - list_del(list); + list_del(list); - drm_free(vbl_sig, sizeof(*vbl_sig), DRM_MEM_DRIVER); + drm_free(vbl_sig, sizeof(*vbl_sig), + DRM_MEM_DRIVER); - dev->vbl_pending--; + dev->vbl_pending--; + } } } @@ -371,3 +406,77 @@ void drm_vbl_send_signals(drm_device_t * dev) } EXPORT_SYMBOL(drm_vbl_send_signals); + +/** + * Tasklet wrapper function. + * + * \param data DRM device in disguise. + * + * Attempts to grab the HW lock and calls the driver callback on success. On + * failure, leave the lock marked as contended so the callback can be called + * from drm_unlock(). 
+ */ +static void drm_locked_tasklet_func(unsigned long data) +{ + drm_device_t *dev = (drm_device_t*)data; + unsigned long irqflags; + + spin_lock_irqsave(&dev->tasklet_lock, irqflags); + + if (!dev->locked_tasklet_func || + !drm_lock_take(&dev->lock.hw_lock->lock, + DRM_KERNEL_CONTEXT)) { + spin_unlock_irqrestore(&dev->tasklet_lock, irqflags); + return; + } + + dev->lock.lock_time = jiffies; + atomic_inc(&dev->counts[_DRM_STAT_LOCKS]); + + dev->locked_tasklet_func(dev); + + drm_lock_free(dev, &dev->lock.hw_lock->lock, + DRM_KERNEL_CONTEXT); + + dev->locked_tasklet_func = NULL; + + spin_unlock_irqrestore(&dev->tasklet_lock, irqflags); +} + +/** + * Schedule a tasklet to call back a driver hook with the HW lock held. + * + * \param dev DRM device. + * \param func Driver callback. + * + * This is intended for triggering actions that require the HW lock from an + * interrupt handler. The lock will be grabbed ASAP after the interrupt handler + * completes. Note that the callback may be called from interrupt or process + * context, it must not make any assumptions about this. Also, the HW lock will + * be held with the kernel context or any client context. + */ +void drm_locked_tasklet(drm_device_t *dev, void (*func)(drm_device_t*)) +{ + unsigned long irqflags; + static DECLARE_TASKLET(drm_tasklet, drm_locked_tasklet_func, 0); + + if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ) || + test_bit(TASKLET_STATE_SCHED, &drm_tasklet.state)) + return; + + spin_lock_irqsave(&dev->tasklet_lock, irqflags); + + if (dev->locked_tasklet_func) { + spin_unlock_irqrestore(&dev->tasklet_lock, irqflags); + return; + } + + dev->locked_tasklet_func = func; + + spin_unlock_irqrestore(&dev->tasklet_lock, irqflags); + + drm_tasklet.data = (unsigned long)dev; + + tasklet_hi_schedule(&drm_tasklet); +} +EXPORT_SYMBOL(drm_locked_tasklet);
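
The drm_locked_tasklet() interface introduced above is intended to be called
from a driver's interrupt handler. Below is a minimal sketch of that usage,
not part of the patch; the handler and callback names (example_irq_handler,
example_finish_flip) are hypothetical and only illustrate the calling
convention described in the comment block above.

/*
 * Hypothetical driver code, for illustration only.  DRM_IRQ_ARGS comes from
 * drmP.h (via drm_os_linux.h) and expands to the request_irq() handler
 * prototype used by this kernel generation.
 */
#include <linux/interrupt.h>
#include "drmP.h"

/* Runs some time after the interrupt, with the HW lock held; may execute in
 * interrupt or process context, as noted above drm_locked_tasklet(). */
static void example_finish_flip(drm_device_t *dev)
{
	/* ... complete the work that needs the hardware lock ... */
}

irqreturn_t example_irq_handler(DRM_IRQ_ARGS)
{
	drm_device_t *dev = (drm_device_t *) arg;

	/* ... read and acknowledge the interrupt source ... */

	/*
	 * Defer the lock-protected part.  If a client currently holds the
	 * lock, the callback is invoked later from drm_unlock() instead.
	 */
	drm_locked_tasklet(dev, example_finish_flip);

	return IRQ_HANDLED;
}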
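
The drm_wait_vblank() changes earlier in the patch add handling for the
_DRM_VBLANK_SECONDARY flag, so userspace can wait on the second pipe's
vertical blank counter. A minimal userspace sketch of exercising this through
DRM_IOCTL_WAIT_VBLANK follows; it assumes a drm.h matching this kernel (one
that defines _DRM_VBLANK_SECONDARY) is on the include path, and
wait_secondary_vblank() is a hypothetical helper name.

/* Hypothetical userspace snippet, for illustration only. */
#include <errno.h>
#include <string.h>
#include <sys/ioctl.h>
#include "drm.h"

/* Block until the next vertical blank on the secondary pipe. */
static int wait_secondary_vblank(int fd)
{
	drm_wait_vblank_t vbl;

	memset(&vbl, 0, sizeof(vbl));
	vbl.request.type = _DRM_VBLANK_RELATIVE | _DRM_VBLANK_SECONDARY;
	vbl.request.sequence = 1;	/* one vblank from now */

	if (ioctl(fd, DRM_IOCTL_WAIT_VBLANK, &vbl) != 0)
		return -errno;

	/*
	 * On success the kernel filled in vbl.reply.sequence and the
	 * tval_sec/tval_usec timestamp of the vblank that was reached.
	 */
	return 0;
}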