W: http://www.weinigel.se
S: Supported
+SECURITY CONTACT
+P: Security Officers
+M: security@kernel.org
+S: Supported
+
SELINUX SECURITY MODULE
P: Stephen Smalley
M: sds@epoch.ncsc.mil
VERSION = 2
PATCHLEVEL = 6
SUBLEVEL = 11
-EXTRAVERSION = .7-vs2.0-pre4
-NAME=Woozy Numbat
+EXTRAVERSION = .11-vs2.0-rc4
+NAME=Woozy Beaver
# *DOCUMENTATION*
# To see a list of typical targets execute "make help"
describe how to recreate it. That is worth even more than the oops itself.
The list of maintainers is in the MAINTAINERS file in this directory.
+ If it is a security bug, please copy the Security Contact listed
+in the MAINTAINERS file. They can help coordinate bugfix and disclosure.
+See Documentation/SecurityBugs for more information.
+
If you are totally stumped as to whom to send the report, send it to
linux-kernel@vger.kernel.org. (For more information on the linux-kernel
mailing list see http://www.tux.org/lkml/).
{
pte_t entry;
- // mm->rss += (HPAGE_SIZE / PAGE_SIZE);
vx_rsspages_add(mm, HPAGE_SIZE / PAGE_SIZE);
if (write_access) {
entry =
ptepage = pte_page(entry);
get_page(ptepage);
set_pte(dst_pte, entry);
- // dst->rss += (HPAGE_SIZE / PAGE_SIZE);
vx_rsspages_add(dst, HPAGE_SIZE / PAGE_SIZE);
addr += HPAGE_SIZE;
}
page = pte_page(pte);
put_page(page);
}
- // mm->rss -= (end - start) >> PAGE_SHIFT;
vx_rsspages_sub(mm, (end - start) >> PAGE_SHIFT);
flush_tlb_range(vma, start, end);
}
kmem_cache_free(vm_area_cachep, mpnt);
return ret;
}
- // current->mm->stack_vm = current->mm->total_vm = vma_pages(mpnt);
vx_vmpages_sub(current->mm, current->mm->total_vm - vma_pages(mpnt));
current->mm->stack_vm = current->mm->total_vm;
}
*/
insert_vm_struct(mm, vma);
- // mm->total_vm += size >> PAGE_SHIFT;
vx_vmpages_add(mm, size >> PAGE_SHIFT);
vm_stat_account(vma);
up_write(&task->mm->mmap_sem);
!vx_vmlocked_avail(vma->vm_mm, grow)))
return -ENOMEM;
vma->vm_end += PAGE_SIZE;
- // vma->vm_mm->total_vm += grow;
vx_vmpages_add(vma->vm_mm, grow);
if (vma->vm_flags & VM_LOCKED)
- // vma->vm_mm->locked_vm += grow;
vx_vmlocked_add(vma->vm_mm, grow);
__vm_stat_account(vma->vm_mm, vma->vm_flags, vma->vm_file, grow);
return 0;
{
pte_t entry;
- // mm->rss += (HPAGE_SIZE / PAGE_SIZE);
vx_rsspages_add(mm, HPAGE_SIZE / PAGE_SIZE);
if (write_access) {
entry =
ptepage = pte_page(entry);
get_page(ptepage);
set_pte(dst_pte, entry);
- // dst->rss += (HPAGE_SIZE / PAGE_SIZE);
vx_rsspages_add(dst, HPAGE_SIZE / PAGE_SIZE);
addr += HPAGE_SIZE;
}
put_page(page);
pte_clear(pte);
}
- // mm->rss -= (end - start) >> PAGE_SHIFT;
vx_rsspages_sub(mm, (end - start) >> PAGE_SHIFT);
flush_tlb_range(vma, start, end);
}
set_pte(dir, pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
swap_free(entry);
get_page(page);
- // ++vma->vm_mm->rss;
vx_rsspages_inc(vma->vm_mm);
}
/* Do this so that we can load the interpreter, if need be. We will
* change some of these later.
*/
- // current->mm->rss = 0;
vx_rsspages_sub(current->mm, current->mm->rss);
setup_arg_pages(bprm, STACK_TOP, EXSTACK_DEFAULT);
current->mm->start_stack = bprm->p;
ENTRY_COMP(mbind) /* 260 */
ENTRY_COMP(get_mempolicy)
ENTRY_COMP(set_mempolicy)
+ ENTRY_SAME(ni_syscall)
+ ENTRY_SAME(ni_syscall)
+ ENTRY_SAME(ni_syscall) /* 265 */
+ ENTRY_SAME(ni_syscall)
+ ENTRY_SAME(ni_syscall)
+ ENTRY_SAME(ni_syscall)
+ ENTRY_SAME(ni_syscall)
+ ENTRY_SAME(ni_syscall) /* 270 */
+ ENTRY_SAME(ni_syscall)
+ ENTRY_SAME(ni_syscall)
+ ENTRY_DIFF(vserver)
/* Nothing yet */
struct device_node *dn, *pdn;
unsigned int *dma_window = NULL;
+ DBG("iommu_bus_setup_pSeriesLP, bus %p, bus->self %p\n", bus, bus->self);
+
dn = pci_bus_to_OF_node(bus);
/* Find nearest ibm,dma-window, walking up the device tree */
}
}
+/*
+ * Per-device IOMMU setup for pSeries LPAR: find the device-tree node
+ * carrying an "ibm,dma-window" property (or an already-built table) and
+ * attach the resulting iommu_table to this device's node.
+ */
+static void iommu_dev_setup_pSeriesLP(struct pci_dev *dev)
+{
+	struct device_node *pdn, *dn;
+	struct iommu_table *tbl;
+	/* NOTE(review): declared int * but assigned an unsigned int * below —
+	 * types should agree; confirm against get_property()'s return type. */
+	int *dma_window = NULL;
+
+	DBG("iommu_dev_setup_pSeriesLP, dev %p (%s)\n", dev, dev->pretty_name);
+
+	/* dev setup for LPAR is a little tricky, since the device tree might
+	 * contain the dma-window properties per-device and not necessarily
+	 * for the bus. So we need to search upwards in the tree until we
+	 * either hit a dma-window property, OR find a parent with a table
+	 * already allocated.
+	 */
+	dn = pci_device_to_OF_node(dev);
+
+	for (pdn = dn; pdn && !pdn->iommu_table; pdn = pdn->parent) {
+		dma_window = (unsigned int *)get_property(pdn, "ibm,dma-window", NULL);
+		if (dma_window)
+			break;
+	}
+
+	/* Check for parent == NULL so we don't try to setup the empty EADS
+	 * slots on POWER4 machines.
+	 */
+	if (dma_window == NULL || pdn->parent == NULL) {
+		/* Fall back to regular (non-LPAR) dev setup */
+		DBG("No dma window for device, falling back to regular setup\n");
+		iommu_dev_setup_pSeries(dev);
+		return;
+	} else {
+		DBG("Found DMA window, allocating table\n");
+	}
+
+	if (!pdn->iommu_table) {
+		/* iommu_table_setparms_lpar needs bussubno. */
+		pdn->bussubno = pdn->phb->bus->number;
+
+		/* NOTE(review): kmalloc result is used unchecked — verify a
+		 * NULL return cannot happen here or add a check upstream. */
+		tbl = (struct iommu_table *)kmalloc(sizeof(struct iommu_table),
+						    GFP_KERNEL);
+
+		iommu_table_setparms_lpar(pdn->phb, pdn, tbl, dma_window);
+
+		pdn->iommu_table = iommu_init_table(tbl);
+	}
+
+	/* Propagate the table from the window-owning parent to the device. */
+	if (pdn != dn)
+		dn->iommu_table = pdn->iommu_table;
+}
+
static void iommu_bus_setup_null(struct pci_bus *b) { }	/* intentional no-op setup hook */
static void iommu_dev_setup_null(struct pci_dev *d) { }	/* intentional no-op setup hook */
ppc_md.tce_free = tce_free_pSeriesLP;
}
ppc_md.iommu_bus_setup = iommu_bus_setup_pSeriesLP;
+ ppc_md.iommu_dev_setup = iommu_dev_setup_pSeriesLP;
} else {
ppc_md.tce_build = tce_build_pSeries;
ppc_md.tce_free = tce_free_pSeries;
ppc_md.iommu_bus_setup = iommu_bus_setup_pSeries;
+ ppc_md.iommu_dev_setup = iommu_dev_setup_pSeries;
}
- ppc_md.iommu_dev_setup = iommu_dev_setup_pSeries;
pci_iommu_init();
}
{
pte_t entry;
- // mm->rss += (HPAGE_SIZE / PAGE_SIZE);
vx_rsspages_add(mm, HPAGE_SIZE / PAGE_SIZE);
if (write_access) {
entry =
ptepage = pte_page(entry);
get_page(ptepage);
- // dst->rss += (HPAGE_SIZE / PAGE_SIZE);
vx_rsspages_add(dst, HPAGE_SIZE / PAGE_SIZE);
set_pte(dst_pte, entry);
put_page(page);
}
- // mm->rss -= (end - start) >> PAGE_SHIFT;
vx_rsspages_sub(mm, (end - start) >> PAGE_SHIFT);
flush_tlb_pending();
}
unsigned long i;
pte_t entry;
- // mm->rss += (HPAGE_SIZE / PAGE_SIZE);
vx_rsspages_add(mm, HPAGE_SIZE / PAGE_SIZE);
if (write_access)
pte_val(entry) += PAGE_SIZE;
dst_pte++;
}
- // dst->rss += (HPAGE_SIZE / PAGE_SIZE);
vx_rsspages_add(dst, HPAGE_SIZE / PAGE_SIZE);
addr += HPAGE_SIZE;
}
pte++;
}
}
- // mm->rss -= (end - start) >> PAGE_SHIFT;
vx_rsspages_sub(mm, (end - start) >> PAGE_SHIFT);
flush_tlb_range(vma, start, end);
}
unsigned long i;
pte_t entry;
- // mm->rss += (HPAGE_SIZE / PAGE_SIZE);
vx_rsspages_add(mm, HPAGE_SIZE / PAGE_SIZE);
if (write_access)
entry = pte_mkwrite(pte_mkdirty(mk_pte(page,
pte_val(entry) += PAGE_SIZE;
dst_pte++;
}
- // dst->rss += (HPAGE_SIZE / PAGE_SIZE);
vx_rsspages_add(dst, HPAGE_SIZE / PAGE_SIZE);
addr += HPAGE_SIZE;
}
pte++;
}
}
- // mm->rss -= (end - start) >> PAGE_SHIFT;
vx_rsspages_sub(mm, (end - start) >> PAGE_SHIFT);
flush_tlb_range(vma, start, end);
}
pt_error_return(regs, EIO);
goto out_tsk;
}
- if (addr != 1) {
- if (addr & 3) {
- pt_error_return(regs, EINVAL);
- goto out_tsk;
- }
-#ifdef DEBUG_PTRACE
- printk ("Original: %08lx %08lx\n", child->thread.kregs->pc, child->thread.kregs->npc);
- printk ("Continuing with %08lx %08lx\n", addr, addr+4);
-#endif
- child->thread.kregs->pc = addr;
- child->thread.kregs->npc = addr + 4;
- }
if (request == PTRACE_SYSCALL)
set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
current->mm->brk = ex.a_bss +
(current->mm->start_brk = N_BSSADDR(ex));
- // current->mm->rss = 0;
vx_rsspages_sub(current->mm, current->mm->rss);
current->mm->mmap = NULL;
compute_creds(bprm);
pt_error_return(regs, EIO);
goto out_tsk;
}
- if (addr != 1) {
- unsigned long pc_mask = ~0UL;
-
- if ((child->thread_info->flags & _TIF_32BIT) != 0)
- pc_mask = 0xffffffff;
-
- if (addr & 3) {
- pt_error_return(regs, EINVAL);
- goto out_tsk;
- }
-#ifdef DEBUG_PTRACE
- printk ("Original: %016lx %016lx\n",
- child->thread_info->kregs->tpc,
- child->thread_info->kregs->tnpc);
- printk ("Continuing with %016lx %016lx\n", addr, addr+4);
-#endif
- child->thread_info->kregs->tpc = (addr & pc_mask);
- child->thread_info->kregs->tnpc = ((addr + 4) & pc_mask);
- }
if (request == PTRACE_SYSCALL) {
set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
err |= __put_user(from->si_uid, &to->si_uid);
break;
case __SI_FAULT >> 16:
- case __SI_POLL >> 16:
err |= __put_user(from->si_trapno, &to->si_trapno);
err |= __put_user((unsigned long)from->si_addr, &to->si_addr);
break;
+ case __SI_POLL >> 16:
+ err |= __put_user(from->si_band, &to->si_band);
+ err |= __put_user(from->si_fd, &to->si_fd);
+ break;
case __SI_RT >> 16: /* This is not generated by the kernel as of now. */
case __SI_MESGQ >> 16:
err |= __put_user(from->si_pid, &to->si_pid);
/*260*/ .word compat_sys_sched_getaffinity, compat_sys_sched_setaffinity, sys32_timer_settime, compat_sys_timer_gettime, sys_timer_getoverrun
.word sys_timer_delete, sys32_timer_create, sys32_vserver, compat_sys_io_setup, sys_io_destroy
/*270*/ .word sys32_io_submit, sys_io_cancel, compat_sys_io_getevents, sys32_mq_open, sys_mq_unlink
- .word sys_mq_timedsend, sys_mq_timedreceive, compat_sys_mq_notify, compat_sys_mq_getsetattr, compat_sys_waitid
+ .word compat_sys_mq_timedsend, compat_sys_mq_timedreceive, compat_sys_mq_notify, compat_sys_mq_getsetattr, compat_sys_waitid
/*280*/ .word sys_ni_syscall, sys_add_key, sys_request_key, sys_keyctl
#endif /* CONFIG_COMPAT */
unsigned long i;
pte_t entry;
- // mm->rss += (HPAGE_SIZE / PAGE_SIZE);
vx_rsspages_add(mm, HPAGE_SIZE / PAGE_SIZE);
if (write_access)
pte_val(entry) += PAGE_SIZE;
dst_pte++;
}
- // dst->rss += (HPAGE_SIZE / PAGE_SIZE);
vx_rsspages_add(dst, HPAGE_SIZE / PAGE_SIZE);
addr += HPAGE_SIZE;
}
pte++;
}
}
- // mm->rss -= (end - start) >> PAGE_SHIFT;
vx_rsspages_sub(mm, (end - start) >> PAGE_SHIFT);
flush_tlb_range(vma, start, end);
}
unsigned long prot, unsigned long flags,
unsigned long fd, unsigned long pgoff);
+/* On i386 they choose a meaningless naming.*/
+#define __NR_kexec_load __NR_sys_kexec_load
+
#define ARCH_SYSCALLS \
[ __NR_waitpid ] = (syscall_handler_t *) sys_waitpid, \
[ __NR_break ] = (syscall_handler_t *) sys_ni_syscall, \
[ 223 ] = (syscall_handler_t *) sys_ni_syscall, \
[ __NR_set_thread_area ] = (syscall_handler_t *) sys_ni_syscall, \
[ __NR_get_thread_area ] = (syscall_handler_t *) sys_ni_syscall, \
- [ __NR_fadvise64 ] = (syscall_handler_t *) sys_fadvise64, \
[ 251 ] = (syscall_handler_t *) sys_ni_syscall, \
- [ __NR_remap_file_pages ] = (syscall_handler_t *) sys_remap_file_pages, \
- [ __NR_utimes ] = (syscall_handler_t *) sys_utimes, \
- [ __NR_vserver ] = (syscall_handler_t *) sys_ni_syscall,
-
+ [ 285 ] = (syscall_handler_t *) sys_ni_syscall,
+
/* 222 doesn't yet have a name in include/asm-i386/unistd.h */
-#define LAST_ARCH_SYSCALL __NR_vserver
+#define LAST_ARCH_SYSCALL 285
/*
* Overrides for Emacs so that we follow Linus's tabbing style.
[ __NR_iopl ] = (syscall_handler_t *) sys_ni_syscall, \
[ __NR_set_thread_area ] = (syscall_handler_t *) sys_ni_syscall, \
[ __NR_get_thread_area ] = (syscall_handler_t *) sys_ni_syscall, \
- [ __NR_remap_file_pages ] = (syscall_handler_t *) sys_remap_file_pages, \
[ __NR_semtimedop ] = (syscall_handler_t *) sys_semtimedop, \
- [ __NR_fadvise64 ] = (syscall_handler_t *) sys_fadvise64, \
- [ 223 ] = (syscall_handler_t *) sys_ni_syscall, \
- [ __NR_utimes ] = (syscall_handler_t *) sys_utimes, \
- [ __NR_vserver ] = (syscall_handler_t *) sys_ni_syscall, \
[ 251 ] = (syscall_handler_t *) sys_ni_syscall,
#define LAST_ARCH_SYSCALL 251
extern syscall_handler_t old_select;
extern syscall_handler_t sys_modify_ldt;
extern syscall_handler_t sys_rt_sigsuspend;
-extern syscall_handler_t sys_vserver;
extern syscall_handler_t sys_mbind;
extern syscall_handler_t sys_get_mempolicy;
extern syscall_handler_t sys_set_mempolicy;
[ __NR_epoll_create ] = (syscall_handler_t *) sys_epoll_create,
[ __NR_epoll_ctl ] = (syscall_handler_t *) sys_epoll_ctl,
[ __NR_epoll_wait ] = (syscall_handler_t *) sys_epoll_wait,
+ [ __NR_remap_file_pages ] = (syscall_handler_t *) sys_remap_file_pages,
[ __NR_set_tid_address ] = (syscall_handler_t *) sys_set_tid_address,
[ __NR_timer_create ] = (syscall_handler_t *) sys_timer_create,
[ __NR_timer_settime ] = (syscall_handler_t *) sys_timer_settime,
[ __NR_clock_gettime ] = (syscall_handler_t *) sys_clock_gettime,
[ __NR_clock_getres ] = (syscall_handler_t *) sys_clock_getres,
[ __NR_clock_nanosleep ] = (syscall_handler_t *) sys_clock_nanosleep,
- [ __NR_statfs64 ] = (syscall_handler_t *) sys_statfs64,
- [ __NR_fstatfs64 ] = (syscall_handler_t *) sys_fstatfs64,
[ __NR_tgkill ] = (syscall_handler_t *) sys_tgkill,
[ __NR_utimes ] = (syscall_handler_t *) sys_utimes,
- [ __NR_fadvise64_64 ] = (syscall_handler_t *) sys_fadvise64_64,
- [ __NR_vserver ] = (syscall_handler_t *) sys_vserver,
+ [ __NR_fadvise64 ] = (syscall_handler_t *) sys_fadvise64,
+ [ __NR_vserver ] = (syscall_handler_t *) sys_ni_syscall,
[ __NR_mbind ] = (syscall_handler_t *) sys_mbind,
[ __NR_get_mempolicy ] = (syscall_handler_t *) sys_get_mempolicy,
[ __NR_set_mempolicy ] = (syscall_handler_t *) sys_set_mempolicy,
[ __NR_mq_timedreceive ] = (syscall_handler_t *) sys_mq_timedreceive,
[ __NR_mq_notify ] = (syscall_handler_t *) sys_mq_notify,
[ __NR_mq_getsetattr ] = (syscall_handler_t *) sys_mq_getsetattr,
- [ __NR_sys_kexec_load ] = (syscall_handler_t *) sys_ni_syscall,
+ [ __NR_kexec_load ] = (syscall_handler_t *) sys_ni_syscall,
[ __NR_waitid ] = (syscall_handler_t *) sys_waitid,
- [ 285 ] = (syscall_handler_t *) sys_ni_syscall,
[ __NR_add_key ] = (syscall_handler_t *) sys_add_key,
[ __NR_request_key ] = (syscall_handler_t *) sys_request_key,
[ __NR_keyctl ] = (syscall_handler_t *) sys_keyctl,
(current->mm->start_brk = N_BSSADDR(ex));
current->mm->free_area_cache = TASK_UNMAPPED_BASE;
- // current->mm->rss = 0;
vx_rsspages_sub(current->mm, current->mm->rss);
current->mm->mmap = NULL;
compute_creds(bprm);
kmem_cache_free(vm_area_cachep, mpnt);
return ret;
}
- // mm->stack_vm = mm->total_vm = vma_pages(mpnt);
vx_vmpages_sub(mm, mm->total_vm - vma_pages(mpnt));
mm->stack_vm = mm->total_vm;
}
value &= 0xffff;
return 0;
case offsetof(struct user_regs_struct,fs_base):
- if (!((value >> 48) == 0 || (value >> 48) == 0xffff))
- return -EIO;
+ if (value >= TASK_SIZE)
+ return -EIO;
child->thread.fs = value;
return 0;
case offsetof(struct user_regs_struct,gs_base):
- if (!((value >> 48) == 0 || (value >> 48) == 0xffff))
- return -EIO;
+ if (value >= TASK_SIZE)
+ return -EIO;
child->thread.gs = value;
return 0;
case offsetof(struct user_regs_struct, eflags):
return -EIO;
value &= 0xffff;
break;
+ case offsetof(struct user_regs_struct, rip):
+ /* Check if the new RIP address is canonical */
+ if (value >= TASK_SIZE)
+ return -EIO;
+ break;
}
put_stack_long(child, regno - sizeof(struct pt_regs), value);
return 0;
/*
* Handle a fault on the vmalloc or module mapping area
+ *
+ * This assumes no large pages in there.
*/
static int vmalloc_fault(unsigned long address)
{
if (!pte_present(*pte_ref))
return -1;
pte = pte_offset_kernel(pmd, address);
- if (!pte_present(*pte) || pte_page(*pte) != pte_page(*pte_ref))
+ /* Don't use pte_page here, because the mappings can point
+ outside mem_map, and the NUMA hash lookup cannot handle
+ that. */
+ if (!pte_present(*pte) || pte_pfn(*pte) != pte_pfn(*pte_ref))
BUG();
__flush_tlb_all();
return 0;
* protection error (error_code & 1) == 0.
*/
if (unlikely(address >= TASK_SIZE)) {
- if (!(error_code & 5)) {
+ if (!(error_code & 5) &&
+ ((address >= VMALLOC_START && address < VMALLOC_END) ||
+ (address >= MODULES_VADDR && address < MODULES_END))) {
if (vmalloc_fault(address) < 0)
goto bad_area_nosemaphore;
return;
if ((p->flags >> 20) &&
p->phys_addr + p->size - 1 < virt_to_phys(high_memory)) {
/* p->size includes the guard page, but cpa doesn't like that */
- change_page_attr(virt_to_page(__va(p->phys_addr)),
+ change_page_attr_addr((unsigned long)(__va(p->phys_addr)),
(p->size - PAGE_SIZE) >> PAGE_SHIFT,
PAGE_KERNEL);
global_flush_tlb();
}
return ret;
}
+
+EXPORT_SYMBOL_GPL(blkdev_ioctl);
case CDROM_LAST_WRITTEN:
case CDROM_SEND_PACKET:
case SCSI_IOCTL_SEND_COMMAND:
- return ioctl_by_bdev(pd->bdev, cmd, arg);
+ return blkdev_ioctl(pd->bdev->bd_inode, file, cmd, arg);
case CDROMEJECT:
/*
* have to unlock it or else the eject command fails.
*/
pkt_lock_door(pd, 0);
- return ioctl_by_bdev(pd->bdev, cmd, arg);
+ return blkdev_ioctl(pd->bdev->bd_inode, file, cmd, arg);
default:
printk("pktcdvd: Unknown ioctl for %s (%x)\n", pd->name, cmd);
{
struct block_device *bdev = filp->private_data;
- return ioctl_by_bdev(bdev, command, arg);
+ return blkdev_ioctl(bdev->bd_inode, filp, command, arg);
}
static void bind_device(struct raw_config_request *rq)
struct it87_data *data = it87_update_device(dev);
return sprintf(buf,"%d\n", ALARMS_FROM_REG(data->alarms));
}
-static DEVICE_ATTR(alarms, S_IRUGO | S_IWUSR, show_alarms, NULL);
+static DEVICE_ATTR(alarms, S_IRUGO, show_alarms, NULL);
static ssize_t
show_vrm_reg(struct device *dev, char *buf)
struct via686a_data *data = via686a_update_device(dev);
return sprintf(buf,"%d\n", ALARMS_FROM_REG(data->alarms));
}
-static DEVICE_ATTR(alarms, S_IRUGO | S_IWUSR, show_alarms, NULL);
+static DEVICE_ATTR(alarms, S_IRUGO, show_alarms, NULL);
/* The driver. I choose to use type i2c_driver, as at is identical to both
smbus_driver and isa_driver, and clients could be of either kind */
if (hwif->no_lba48_dma && lba48 && dma) {
if (block + rq->nr_sectors > 1ULL << 28)
dma = 0;
+ else
+ lba48 = 0;
}
if (!dma) {
/* FIXME: SELECT_MASK(drive, 0) ? */
if (drive->select.b.lba) {
- if (drive->addressing == 1) {
+ if (lba48) {
task_ioreg_t tasklets[10];
pr_debug("%s: LBA=0x%012llx\n", drive->name, block);
}
btv->pll.pll_current = -1;
- bttv_reset_audio(btv);
-
/* tuner configuration (from card list / autodetect / insmod option) */
if (UNSET != bttv_tvcards[btv->c.type].tuner_type)
if(UNSET == btv->tuner_type)
if (VORTEX_PCI(vp)) {
pci_set_power_state(VORTEX_PCI(vp), PCI_D0); /* Go active */
- pci_restore_state(VORTEX_PCI(vp));
+ if (vp->pm_state_valid)
+ pci_restore_state(VORTEX_PCI(vp));
pci_enable_device(VORTEX_PCI(vp));
}
outl(0, ioaddr + DownListPtr);
if (final_down && VORTEX_PCI(vp)) {
+ vp->pm_state_valid = 1;
pci_save_state(VORTEX_PCI(vp));
acpi_set_WOL(dev);
}
outw(RxEnable, ioaddr + EL3_CMD);
pci_enable_wake(VORTEX_PCI(vp), 0, 1);
+
+ /* Change the power state to D3; RxEnable doesn't take effect. */
+ pci_set_power_state(VORTEX_PCI(vp), PCI_D3hot);
}
- /* Change the power state to D3; RxEnable doesn't take effect. */
- pci_set_power_state(VORTEX_PCI(vp), PCI_D3hot);
}
int bytes_in;
int bytes_out;
int outstanding_urbs;
+ int throttled;
};
/* number of outstanding urbs to prevent userspace DoS from happening */
priv->bytes_in = 0;
priv->bytes_out = 0;
priv->outstanding_urbs = 0;
+ priv->throttled = 0;
spin_unlock_irqrestore(&priv->lock, flags);
/*
struct tty_struct *tty;
unsigned long flags;
int i;
+ int throttled;
int result;
dbg("%s - port %d", __FUNCTION__, port->number);
}
spin_lock_irqsave(&priv->lock, flags);
priv->bytes_in += urb->actual_length;
+ throttled = priv->throttled;
spin_unlock_irqrestore(&priv->lock, flags);
- /* Continue trying to always read */
- usb_fill_bulk_urb (port->read_urb, port->serial->dev,
- usb_rcvbulkpipe(port->serial->dev,
- port->bulk_in_endpointAddress),
- port->read_urb->transfer_buffer,
- port->read_urb->transfer_buffer_length,
- visor_read_bulk_callback, port);
- result = usb_submit_urb(port->read_urb, GFP_ATOMIC);
- if (result)
- dev_err(&port->dev, "%s - failed resubmitting read urb, error %d\n", __FUNCTION__, result);
+ /* Continue trying to always read if we should */
+ if (!throttled) {
+ usb_fill_bulk_urb (port->read_urb, port->serial->dev,
+ usb_rcvbulkpipe(port->serial->dev,
+ port->bulk_in_endpointAddress),
+ port->read_urb->transfer_buffer,
+ port->read_urb->transfer_buffer_length,
+ visor_read_bulk_callback, port);
+ result = usb_submit_urb(port->read_urb, GFP_ATOMIC);
+ if (result)
+ dev_err(&port->dev, "%s - failed resubmitting read urb, error %d\n", __FUNCTION__, result);
+ }
return;
}
static void visor_throttle (struct usb_serial_port *port)
{
+	struct visor_private *priv = usb_get_serial_port_data(port);
+	unsigned long flags;
+
	dbg("%s - port %d", __FUNCTION__, port->number);
-	usb_kill_urb(port->read_urb);
+	/* Set the flag under the lock instead of killing the urb here; the
+	 * read completion callback checks priv->throttled and simply stops
+	 * resubmitting the read urb while it is set. */
+	spin_lock_irqsave(&priv->lock, flags);
+	priv->throttled = 1;
+	spin_unlock_irqrestore(&priv->lock, flags);
}
static void visor_unthrottle (struct usb_serial_port *port)
{
+ struct visor_private *priv = usb_get_serial_port_data(port);
+ unsigned long flags;
int result;
dbg("%s - port %d", __FUNCTION__, port->number);
+ spin_lock_irqsave(&priv->lock, flags);
+ priv->throttled = 0;
+ spin_unlock_irqrestore(&priv->lock, flags);
port->read_urb->dev = port->serial->dev;
result = usb_submit_urb(port->read_urb, GFP_ATOMIC);
} else if (step == 1) {
/* Special case for 1..8bit widths */
while (height--) {
- mga_writel(mmio, 0, *chardata);
+#if defined(__BIG_ENDIAN)
+ fb_writel((*chardata) << 24, mmio.vaddr);
+#else
+ fb_writel(*chardata, mmio.vaddr);
+#endif
chardata++;
}
} else if (step == 2) {
/* Special case for 9..15bit widths */
while (height--) {
- mga_writel(mmio, 0, *(u_int16_t*)chardata);
+#if defined(__BIG_ENDIAN)
+ fb_writel((*(u_int16_t*)chardata) << 16, mmio.vaddr);
+#else
+ fb_writel(*(u_int16_t*)chardata, mmio.vaddr);
+#endif
chardata += 2;
}
} else {
for (i = 0; i < step; i += 4) {
/* Hope that there are at least three readable bytes beyond the end of bitmap */
- mga_writel(mmio, 0, get_unaligned((u_int32_t*)(chardata + i)));
+ fb_writel(get_unaligned((u_int32_t*)(chardata + i)),mmio.vaddr);
}
chardata += step;
}
if ((unsigned long)src & 3) {
while (len >= 4) {
- writel(get_unaligned((u32 *)src), addr);
+ fb_writel(get_unaligned((u32 *)src), addr);
addr++;
len -= 4;
src += 4;
}
} else {
while (len >= 4) {
- writel(*(u32 *)src, addr);
+ fb_writel(*(u32 *)src, addr);
addr++;
len -= 4;
src += 4;
(current->mm->start_brk = N_BSSADDR(ex));
current->mm->free_area_cache = current->mm->mmap_base;
- // current->mm->rss = 0;
vx_rsspages_sub(current->mm, current->mm->rss);
current->mm->mmap = NULL;
compute_creds(bprm);
}
/* Populate argv and envp */
- p = current->mm->arg_start;
+ p = current->mm->arg_end = current->mm->arg_start;
while (argc-- > 0) {
size_t len;
__put_user((elf_addr_t)p, argv++);
/* Do this so that we can load the interpreter, if need be. We will
change some of these later */
- // current->mm->rss = 0;
vx_rsspages_sub(current->mm, current->mm->rss);
current->mm->free_area_cache = current->mm->mmap_base;
retval = setup_arg_pages(bprm, STACK_TOP, executable_stack);
static int fill_psinfo(struct elf_prpsinfo *psinfo, struct task_struct *p,
struct mm_struct *mm)
{
- int i, len;
+ unsigned int i, len;
/* first copy the parameters from user space */
memset(psinfo, 0, sizeof(struct elf_prpsinfo));
/* do this so that we can load the interpreter, if need be
* - we will change some of these later
*/
- // current->mm->rss = 0;
vx_rsspages_sub(current->mm, current->mm->rss);
#ifdef CONFIG_MMU
current->mm->start_brk = datapos + data_len + bss_len;
current->mm->brk = (current->mm->start_brk + 3) & ~3;
current->mm->context.end_brk = memp + ksize((void *) memp) - stack_len;
- // current->mm->rss = 0;
vx_rsspages_sub(current->mm, current->mm->rss);
}
create_som_tables(bprm);
current->mm->start_stack = bprm->p;
- // current->mm->rss = 0;
vx_rsspages_sub(current->mm, current->mm->rss);
#if 0
pte_unmap(pte);
goto out;
}
- // mm->rss++;
vx_rsspages_inc(mm);
lru_cache_add_active(page);
set_pte(pte, pte_mkdirty(pte_mkwrite(mk_pte(
kmem_cache_free(vm_area_cachep, mpnt);
return ret;
}
- // mm->stack_vm = mm->total_vm = vma_pages(mpnt);
vx_vmpages_sub(mm, mm->total_vm - vma_pages(mpnt));
mm->stack_vm = mm->total_vm;
}
if (!rsv_is_empty(&rsv->rsv_window)) {
spin_lock(rsv_lock);
- rsv_window_remove(inode->i_sb, rsv);
+ if (!rsv_is_empty(&rsv->rsv_window))
+ rsv_window_remove(inode->i_sb, rsv);
spin_unlock(rsv_lock);
}
}
rwlock_init(&f->f_owner.lock);
/* f->f_version: 0 */
INIT_LIST_HEAD(&f->f_list);
- // set_vx_info(&f->f_vx_info, current->vx_info);
f->f_xid = vx_current_xid();
vx_files_inc(f);
f->f_maxcount = INT_MAX;
struct address_space * const mapping = &inode->i_data;
inode->i_sb = sb;
- // inode->i_dqh = dqhget(sb->s_dqh);
/* essential because of inode slab reuse */
inode->i_xid = 0;
{ MNT_NOEXEC, ",noexec" },
{ 0, NULL }
};
+
struct proc_fs_info *fs_infop;
+ unsigned long s_flags = mnt->mnt_sb->s_flags;
+ int mnt_flags = mnt->mnt_flags;
if (vx_flags(VXF_HIDE_MOUNT, 0))
return 0;
struct inode *inode = dentry->d_inode;
struct task_struct *task = proc_task(inode);
- if (!vx_check(vx_task_xid(task), VX_WATCH|VX_IDENT))
+ if (!vx_check(vx_task_xid(task), VX_IDENT))
goto out_drop;
/* discard wrong fakeinit */
STATIC ctl_table xfs_table[] = {
{XFS_RESTRICT_CHOWN, "restrict_chown", &xfs_params.restrict_chown.val,
sizeof(int), 0644, NULL, &proc_dointvec_minmax,
- &sysctl_intvec, NULL,
+ NULL, &sysctl_intvec, NULL,
&xfs_params.restrict_chown.min, &xfs_params.restrict_chown.max},
{XFS_SGID_INHERIT, "irix_sgid_inherit", &xfs_params.sgid_inherit.val,
sizeof(int), 0644, NULL, &proc_dointvec_minmax,
- &sysctl_intvec, NULL,
+ NULL, &sysctl_intvec, NULL,
&xfs_params.sgid_inherit.min, &xfs_params.sgid_inherit.max},
{XFS_SYMLINK_MODE, "irix_symlink_mode", &xfs_params.symlink_mode.val,
sizeof(int), 0644, NULL, &proc_dointvec_minmax,
- &sysctl_intvec, NULL,
+ NULL, &sysctl_intvec, NULL,
&xfs_params.symlink_mode.min, &xfs_params.symlink_mode.max},
{XFS_PANIC_MASK, "panic_mask", &xfs_params.panic_mask.val,
sizeof(int), 0644, NULL, &proc_dointvec_minmax,
- &sysctl_intvec, NULL,
+ NULL, &sysctl_intvec, NULL,
&xfs_params.panic_mask.min, &xfs_params.panic_mask.max},
{XFS_ERRLEVEL, "error_level", &xfs_params.error_level.val,
sizeof(int), 0644, NULL, &proc_dointvec_minmax,
- &sysctl_intvec, NULL,
+ NULL, &sysctl_intvec, NULL,
&xfs_params.error_level.min, &xfs_params.error_level.max},
{XFS_SYNCD_TIMER, "xfssyncd_centisecs", &xfs_params.syncd_timer.val,
sizeof(int), 0644, NULL, &proc_dointvec_minmax,
- &sysctl_intvec, NULL,
+ NULL, &sysctl_intvec, NULL,
&xfs_params.syncd_timer.min, &xfs_params.syncd_timer.max},
{XFS_INHERIT_SYNC, "inherit_sync", &xfs_params.inherit_sync.val,
sizeof(int), 0644, NULL, &proc_dointvec_minmax,
- &sysctl_intvec, NULL,
+ NULL, &sysctl_intvec, NULL,
&xfs_params.inherit_sync.min, &xfs_params.inherit_sync.max},
{XFS_INHERIT_NODUMP, "inherit_nodump", &xfs_params.inherit_nodump.val,
sizeof(int), 0644, NULL, &proc_dointvec_minmax,
- &sysctl_intvec, NULL,
+ NULL, &sysctl_intvec, NULL,
&xfs_params.inherit_nodump.min, &xfs_params.inherit_nodump.max},
{XFS_INHERIT_NOATIME, "inherit_noatime", &xfs_params.inherit_noatim.val,
sizeof(int), 0644, NULL, &proc_dointvec_minmax,
- &sysctl_intvec, NULL,
+ NULL, &sysctl_intvec, NULL,
&xfs_params.inherit_noatim.min, &xfs_params.inherit_noatim.max},
{XFS_BUF_TIMER, "xfsbufd_centisecs", &xfs_params.xfs_buf_timer.val,
sizeof(int), 0644, NULL, &proc_dointvec_minmax,
- &sysctl_intvec, NULL,
+ NULL, &sysctl_intvec, NULL,
&xfs_params.xfs_buf_timer.min, &xfs_params.xfs_buf_timer.max},
{XFS_BUF_AGE, "age_buffer_centisecs", &xfs_params.xfs_buf_age.val,
sizeof(int), 0644, NULL, &proc_dointvec_minmax,
- &sysctl_intvec, NULL,
+ NULL, &sysctl_intvec, NULL,
&xfs_params.xfs_buf_age.min, &xfs_params.xfs_buf_age.max},
{XFS_INHERIT_NOSYM, "inherit_nosymlinks", &xfs_params.inherit_nosym.val,
sizeof(int), 0644, NULL, &proc_dointvec_minmax,
- &sysctl_intvec, NULL,
+ NULL, &sysctl_intvec, NULL,
&xfs_params.inherit_nosym.min, &xfs_params.inherit_nosym.max},
{XFS_ROTORSTEP, "rotorstep", &xfs_params.rotorstep.val,
sizeof(int), 0644, NULL, &proc_dointvec_minmax,
- &sysctl_intvec, NULL,
+ NULL, &sysctl_intvec, NULL,
&xfs_params.rotorstep.min, &xfs_params.rotorstep.max},
/* please keep this the last entry */
#ifdef CONFIG_PROC_FS
{XFS_STATS_CLEAR, "stats_clear", &xfs_params.stats_clear.val,
sizeof(int), 0644, NULL, &xfs_stats_clear_proc_handler,
- &sysctl_intvec, NULL,
+ NULL, &sysctl_intvec, NULL,
&xfs_params.stats_clear.min, &xfs_params.stats_clear.max},
#endif /* CONFIG_PROC_FS */
#define XFSMNT_IDELETE 0x08000000 /* inode cluster delete */
#define XFSMNT_SWALLOC 0x10000000 /* turn on stripe width
* allocation */
+#define XFSMNT_TAGXID 0x40000000 /* context xid tagging */
#endif /* __XFS_CLNT_H__ */
#define XFS_MOUNT_IDELETE 0x00040000 /* delete empty inode clusters*/
#define XFS_MOUNT_SWALLOC 0x00080000 /* turn on stripe width
* allocation */
+#define XFS_MOUNT_TAGXID 0x40000000 /* context xid tagging */
/*
* Default minimum read and write sizes.
if (ap->flags & XFSMNT_NOUUID)
mp->m_flags |= XFS_MOUNT_NOUUID;
+ if (ap->flags & XFSMNT_TAGXID)
+ mp->m_flags |= XFS_MOUNT_TAGXID;
if (ap->flags & XFSMNT_NOLOGFLUSH)
mp->m_flags |= XFS_MOUNT_NOLOGFLUSH;
return XFS_ERROR(EINVAL);
}
+ if (ap->flags & XFSMNT_TAGXID)
+ vfs->vfs_super->s_flags |= MS_TAGXID;
return 0;
}
#define MNTOPT_64BITINODE "inode64" /* inodes can be allocated anywhere */
#define MNTOPT_IKEEP "ikeep" /* do not free empty inode clusters */
#define MNTOPT_NOIKEEP "noikeep" /* free empty inode clusters */
+#define MNTOPT_TAGXID "tagxid" /* context xid tagging for inodes */
int
args->flags &= ~XFSMNT_IDELETE;
} else if (!strcmp(this_char, MNTOPT_NOIKEEP)) {
args->flags |= XFSMNT_IDELETE;
+ } else if (!strcmp(this_char, MNTOPT_TAGXID)) {
+ args->flags |= XFSMNT_TAGXID;
} else if (!strcmp(this_char, "osyncisdsync")) {
/* no-op, this is now the default */
printk("XFS: osyncisdsync is now the default, option is deprecated.\n");
if (rss < freed)
freed = rss;
- // mm->rss = rss - freed;
vx_rsspages_sub(mm, freed);
if (freed) {
if (rss < freed)
freed = rss;
- // mm->rss = rss - freed;
vx_rsspages_sub(mm, freed);
if (freed) {
if (rss < freed)
freed = rss;
- // mm->rss = rss - freed;
vx_rsspages_sub(mm, freed);
tlb_flush_mmu(tlb, start, end);
if (rss < freed)
freed = rss;
- // mm->rss = rss - freed;
vx_rsspages_sub(mm, freed);
/*
* Note: tlb->nr may be 0 at this point, so we can't rely on tlb->start_addr and
if (rss < freed)
freed = rss;
- // mm->rss = rss - freed;
vx_rsspages_sub(mm, freed);
tlb_flush_mmu(mp);
/*
- * User space process size. 47bits.
+ * User space process size. 47bits minus one guard page.
*/
-#define TASK_SIZE (0x800000000000UL)
+#define TASK_SIZE (0x800000000000UL - 4096)
/* This decides where the kernel will search for a free chunk of vm
* space during mmap's.
* This should be a per-architecture thing, to allow different
* error and pointer decisions.
*/
+#define IS_ERR_VALUE(x) unlikely((x) > (unsigned long)-1000L)
+
static inline void *ERR_PTR(long error)
{
return (void *) error;
static inline long IS_ERR(const void *ptr)
{
-	return unlikely((unsigned long)ptr > (unsigned long)-1000L);
+	/* Delegate to IS_ERR_VALUE() so the pointer and plain-value forms
+	 * of the error-range test cannot drift apart. */
+	return IS_ERR_VALUE((unsigned long)ptr);
}
#endif /* _LINUX_ERR_H */
#define put_vx_info(i) __put_vx_info(i,__FILE__,__LINE__)
-static inline void __put_vx_info(struct vx_info *vxi, const char *_file, int _line)
+static inline void __put_vx_info(struct vx_info *vxi,
+ const char *_file, int _line)
{
if (!vxi)
return;
+
vxlprintk(VXD_CBIT(xid, 2), "put_vx_info(%p[#%d.%d])",
vxi, vxi?vxi->vx_id:0, vxi?atomic_read(&vxi->vx_usecnt):0,
_file, _line);
#define init_vx_info(p,i) __init_vx_info(p,i,__FILE__,__LINE__)
static inline void __init_vx_info(struct vx_info **vxp, struct vx_info *vxi,
- const char *_file, int _line)
+ const char *_file, int _line)
{
if (vxi) {
vxlprintk(VXD_CBIT(xid, 3),
_file, _line);
vxh_set_vx_info(vxi, vxp);
- // vxo = xchg(vxp, __get_vx_info(vxi, _file, _line));
atomic_inc(&vxi->vx_usecnt);
vxo = xchg(vxp, vxi);
BUG_ON(vxo);
_file, _line);
vxh_clr_vx_info(vxo, vxp);
- // __put_vx_info(vxo, _file, _line);
if (atomic_dec_and_test(&vxo->vx_usecnt))
free_vx_info(vxo);
}
{
if (!nxi)
return NULL;
+
vxlprintk(VXD_CBIT(nid, 2), "get_nx_info(%p[#%d.%d])",
nxi, nxi?nxi->nx_id:0, nxi?atomic_read(&nxi->nx_usecnt):0,
_file, _line);
+
atomic_inc(&nxi->nx_usecnt);
return nxi;
}
{
if (!nxi)
return;
+
vxlprintk(VXD_CBIT(nid, 2), "put_nx_info(%p[#%d.%d])",
nxi, nxi?nxi->nx_id:0, nxi?atomic_read(&nxi->nx_usecnt):0,
_file, _line);
+
if (atomic_dec_and_test(&nxi->nx_usecnt))
free_nx_info(nxi);
}
+
+#define init_nx_info(p,i) __init_nx_info(p,i,__FILE__,__LINE__)
+
+static inline void __init_nx_info(struct nx_info **nxp, struct nx_info *nxi,
+ const char *_file, int _line)
+{
+ if (nxi) {
+ vxlprintk(VXD_CBIT(nid, 3),
+ "init_nx_info(%p[#%d.%d])",
+ nxi, nxi?nxi->nx_id:0,
+ nxi?atomic_read(&nxi->nx_usecnt):0,
+ _file, _line);
+
+ atomic_inc(&nxi->nx_usecnt);
+ }
+ *nxp = nxi;
+}
+
+
#define set_nx_info(p,i) __set_nx_info(p,i,__FILE__,__LINE__)
static inline void __set_nx_info(struct nx_info **nxp, struct nx_info *nxi,
if (!nxi)
return;
- vxlprintk(VXD_CBIT(nid, 3), "set_nx_info(%p[#%d.%d.%d])",
+ vxlprintk(VXD_CBIT(nid, 3), "set_nx_info(%p[#%d.%d])",
nxi, nxi?nxi->nx_id:0,
nxi?atomic_read(&nxi->nx_usecnt):0,
- nxi?atomic_read(&nxi->nx_refcnt):0,
_file, _line);
- atomic_inc(&nxi->nx_refcnt);
- // nxo = xchg(nxp, __get_nx_info(nxi, _file, _line));
+ atomic_inc(&nxi->nx_usecnt);
nxo = xchg(nxp, nxi);
BUG_ON(nxo);
}
if (!nxo)
return;
- vxlprintk(VXD_CBIT(nid, 3), "clr_nx_info(%p[#%d.%d.%d])",
+ vxlprintk(VXD_CBIT(nid, 3), "clr_nx_info(%p[#%d.%d])",
nxo, nxo?nxo->nx_id:0,
nxo?atomic_read(&nxo->nx_usecnt):0,
- nxo?atomic_read(&nxo->nx_refcnt):0,
_file, _line);
- if (atomic_dec_and_test(&nxo->nx_refcnt))
- unhash_nx_info(nxo);
- // __put_nx_info(nxo, _file, _line);
+ if (atomic_dec_and_test(&nxo->nx_usecnt))
+ free_nx_info(nxo);
+}
+
+
+#define claim_nx_info(v,p) __claim_nx_info(v,p,__FILE__,__LINE__)
+
+static inline void __claim_nx_info(struct nx_info *nxi,
+ struct task_struct *task, const char *_file, int _line)
+{
+ vxlprintk(VXD_CBIT(nid, 3), "claim_nx_info(%p[#%d.%d.%d]) %p",
+ nxi, nxi?nxi->nx_id:0,
+ nxi?atomic_read(&nxi->nx_usecnt):0,
+ nxi?atomic_read(&nxi->nx_tasks):0,
+ task, _file, _line);
+
+ atomic_inc(&nxi->nx_tasks);
+}
+
+
+extern void unhash_nx_info(struct nx_info *);
+
+#define release_nx_info(v,p) __release_nx_info(v,p,__FILE__,__LINE__)
+
+static inline void __release_nx_info(struct nx_info *nxi,
+ struct task_struct *task, const char *_file, int _line)
+{
+ vxlprintk(VXD_CBIT(nid, 3), "release_nx_info(%p[#%d.%d.%d]) %p",
+ nxi, nxi?nxi->nx_id:0,
+ nxi?atomic_read(&nxi->nx_usecnt):0,
+ nxi?atomic_read(&nxi->nx_tasks):0,
+ task, _file, _line);
+
+ might_sleep();
+
+ if (atomic_dec_and_test(&nxi->nx_tasks))
+ unhash_nx_info(nxi);
}
struct nx_info *nxi;
task_lock(p);
- nxi = __get_nx_info(p->nx_info, _file, _line);
vxlprintk(VXD_CBIT(nid, 5), "task_get_nx_info(%p)",
p, _file, _line);
+ nxi = __get_nx_info(p->nx_info, _file, _line);
task_unlock(p);
return nxi;
}
#define nx_weak_check(c,m) ((m) ? nx_check(c,m) : 1)
-#define __nx_flags(v,m,f) (((v) & (m)) ^ (f))
+#define __nx_state(v) ((v) ? ((v)->nx_state) : 0)
+
+#define nx_info_state(v,m) (__nx_state(v) & (m))
+
+
+#define __nx_flags(v) ((v) ? (v)->nx_flags : 0)
+
+#define nx_current_flags() __nx_flags(current->nx_info)
+
+#define nx_info_flags(v,m,f) \
+ vx_check_flags(__nx_flags(v),(m),(f))
+
+#define task_nx_flags(t,m,f) \
+ ((t) && nx_info_flags((t)->nx_info, (m), (f)))
+
+#define nx_flags(m,f) nx_info_flags(current->nx_info,(m),(f))
-#define __nx_task_flags(t,m,f) \
- (((t) && ((t)->nx_info)) ? \
- __nx_flags((t)->nx_info->nx_flags,(m),(f)) : 0)
-#define nx_current_flags() \
- ((current->nx_info) ? current->nx_info->nx_flags : 0)
+/* context caps */
-#define nx_flags(m,f) __nx_flags(nx_current_flags(),(m),(f))
+#define __nx_ncaps(v) ((v) ? (v)->nx_ncaps : 0)
+#define nx_current_ncaps() __nx_ncaps(current->nx_info)
-#define nx_current_ncaps() \
- ((current->nx_info) ? current->nx_info->nx_ncaps : 0)
+#define nx_info_ncaps(v,c) (__nx_ncaps(v) & (c))
-#define nx_ncaps(c) (nx_current_ncaps() & (c))
+#define nx_ncaps(c) nx_info_ncaps(current->nx_info,(c))
static inline int addr_in_nx_info(struct nx_info *nxi, uint32_t addr)
__vx_onhold_update(vxi);
}
+static inline void vx_account_user(struct vx_info *vxi,
+ cputime_t cputime, int nice)
+{
+ int cpu = smp_processor_id();
+
+ if (!vxi)
+ return;
+ vxi->sched.cpu[cpu].user_ticks += cputime;
+}
+
+static inline void vx_account_system(struct vx_info *vxi,
+ cputime_t cputime, int idle)
+{
+ int cpu = smp_processor_id();
+
+ if (!vxi)
+ return;
+ vxi->sched.cpu[cpu].sys_ticks += cputime;
+}
+
#else
#warning duplicate inclusion
#endif
#define VXF_STATE_SETUP (1ULL<<32)
#define VXF_STATE_INIT (1ULL<<33)
+#define VXF_STATE_HELPER (1ULL<<36)
+
#define VXF_FORK_RSS (1ULL<<48)
#define VXF_PROLIFIC (1ULL<<49)
#define VXF_ONE_TIME (0x0003ULL<<32)
+#define VXF_INIT_SET (VXF_STATE_SETUP|VXF_STATE_INIT)
+
/* context caps */
enum {
VSC_STARTUP = 1,
VSC_SHUTDOWN,
+
+ VSC_NETUP,
+ VSC_NETDOWN,
};
struct vx_info {
struct hlist_node vx_hlist; /* linked list of contexts */
- struct rcu_head vx_rcu; /* the rcu head */
xid_t vx_id; /* context id */
atomic_t vx_usecnt; /* usage count */
atomic_t vx_tasks; /* tasks count */
pid_t vx_initpid; /* PID of fake init process */
- spinlock_t vx_lock;
wait_queue_head_t vx_wait; /* context exit waitqueue */
struct _vx_limit limit; /* vserver limits */
#define VXS_PAUSED 0x0010
#define VXS_ONHOLD 0x0020
#define VXS_SHUTDOWN 0x0100
-#define VXS_DEFUNCT 0x1000
#define VXS_RELEASED 0x8000
/* check conditions */
extern long vs_state_change(struct vx_info *, unsigned int);
-extern void free_vx_info(struct vx_info *);
-
#endif /* __KERNEL__ */
#else /* _VX_CONTEXT_H */
/* context commands */
-#define VCMD_ctx_create VC_CMD(VPROC, 1, 0)
+#define VCMD_ctx_create_v0 VC_CMD(VPROC, 1, 0)
+#define VCMD_ctx_create VC_CMD(VPROC, 1, 1)
+
+struct vcmd_ctx_create {
+ uint64_t flagword;
+};
+
#define VCMD_ctx_migrate VC_CMD(PROCMIG, 1, 0)
#ifdef __KERNEL__
struct _vx_hist_entry *vxh_advance(void *loc);
+#if (__GNUC__ > 3)
#define VXH_HERE() \
({ __label__ here; \
here:; \
&&here; })
-
-
+#else
+#define VXH_HERE() \
+ ({ __vxh_here:; \
+ &&__vxh_here; })
+#endif
static inline void __vxh_copy_vxi(struct _vx_hist_entry *entry, struct vx_info *vxi)
{
}
}
-static inline void vxh_throw_oops(void)
-{
- struct _vx_hist_entry *entry;
-
- preempt_disable();
- entry = vxh_advance(VXH_HERE());
- entry->type = VXH_THROW_OOPS;
- preempt_enable();
-
- /* prevent further acquisition */
- vxh_active = 0;
-}
-
-static inline void vxh_get_vx_info(struct vx_info *vxi)
-{
- struct _vx_hist_entry *entry;
-
- preempt_disable();
- entry = vxh_advance(VXH_HERE());
- __vxh_copy_vxi(entry, vxi);
- entry->type = VXH_GET_VX_INFO;
- preempt_enable();
-}
-
-static inline void vxh_put_vx_info(struct vx_info *vxi)
-{
- struct _vx_hist_entry *entry;
- preempt_disable();
- entry = vxh_advance(VXH_HERE());
- __vxh_copy_vxi(entry, vxi);
- entry->type = VXH_PUT_VX_INFO;
+#define __VXH_BODY(__type, __data) \
+ struct _vx_hist_entry *entry; \
+ \
+ preempt_disable(); \
+ entry = vxh_advance(VXH_HERE()); \
+ __data; \
+ entry->type = __type; \
preempt_enable();
-}
-static inline void vxh_init_vx_info(struct vx_info *vxi, void *data)
-{
- struct _vx_hist_entry *entry;
- preempt_disable();
- entry = vxh_advance(VXH_HERE());
- __vxh_copy_vxi(entry, vxi);
- entry->sc.data = data;
- entry->type = VXH_INIT_VX_INFO;
- preempt_enable();
-}
-
-static inline void vxh_set_vx_info(struct vx_info *vxi, void *data)
-{
- struct _vx_hist_entry *entry;
+ /* pass vxi only */
+#define __VXH_SIMPLE \
+ __vxh_copy_vxi(entry, vxi)
- preempt_disable();
- entry = vxh_advance(VXH_HERE());
- __vxh_copy_vxi(entry, vxi);
- entry->sc.data = data;
- entry->type = VXH_SET_VX_INFO;
- preempt_enable();
+#define VXH_SIMPLE(__name, __type) \
+static inline void __name(struct vx_info *vxi) \
+{ \
+ __VXH_BODY(__type, __VXH_SIMPLE) \
}
-static inline void vxh_clr_vx_info(struct vx_info *vxi, void *data)
-{
- struct _vx_hist_entry *entry;
+ /* pass vxi and data (void *) */
+#define __VXH_DATA \
+ __vxh_copy_vxi(entry, vxi); \
+ entry->sc.data = data
- preempt_disable();
- entry = vxh_advance(VXH_HERE());
- __vxh_copy_vxi(entry, vxi);
- entry->sc.data = data;
- entry->type = VXH_CLR_VX_INFO;
- preempt_enable();
+#define VXH_DATA(__name, __type) \
+static inline \
+void __name(struct vx_info *vxi, void *data) \
+{ \
+ __VXH_BODY(__type, __VXH_DATA) \
}
-static inline void vxh_claim_vx_info(struct vx_info *vxi, void *data)
-{
- struct _vx_hist_entry *entry;
+ /* pass vxi and arg (long) */
+#define __VXH_LARG \
+ __vxh_copy_vxi(entry, vxi); \
+ entry->ll.arg = arg
- preempt_disable();
- entry = vxh_advance(VXH_HERE());
- __vxh_copy_vxi(entry, vxi);
- entry->sc.data = data;
- entry->type = VXH_CLAIM_VX_INFO;
- preempt_enable();
+#define VXH_LARG(__name, __type) \
+static inline \
+void __name(struct vx_info *vxi, long arg) \
+{ \
+ __VXH_BODY(__type, __VXH_LARG) \
}
-static inline void vxh_release_vx_info(struct vx_info *vxi, void *data)
-{
- struct _vx_hist_entry *entry;
- preempt_disable();
- entry = vxh_advance(VXH_HERE());
- __vxh_copy_vxi(entry, vxi);
- entry->sc.data = data;
- entry->type = VXH_RELEASE_VX_INFO;
- preempt_enable();
-}
-
-static inline void vxh_alloc_vx_info(struct vx_info *vxi)
-{
- struct _vx_hist_entry *entry;
-
- preempt_disable();
- entry = vxh_advance(VXH_HERE());
- __vxh_copy_vxi(entry, vxi);
- entry->type = VXH_ALLOC_VX_INFO;
- preempt_enable();
-}
-
-static inline void vxh_dealloc_vx_info(struct vx_info *vxi)
-{
- struct _vx_hist_entry *entry;
-
- preempt_disable();
- entry = vxh_advance(VXH_HERE());
- __vxh_copy_vxi(entry, vxi);
- entry->type = VXH_DEALLOC_VX_INFO;
- preempt_enable();
-}
-
-static inline void vxh_hash_vx_info(struct vx_info *vxi)
-{
- struct _vx_hist_entry *entry;
-
- preempt_disable();
- entry = vxh_advance(VXH_HERE());
- __vxh_copy_vxi(entry, vxi);
- entry->type = VXH_HASH_VX_INFO;
- preempt_enable();
-}
-
-static inline void vxh_unhash_vx_info(struct vx_info *vxi)
+static inline void vxh_throw_oops(void)
{
- struct _vx_hist_entry *entry;
-
- preempt_disable();
- entry = vxh_advance(VXH_HERE());
- __vxh_copy_vxi(entry, vxi);
- entry->type = VXH_UNHASH_VX_INFO;
- preempt_enable();
+ __VXH_BODY(VXH_THROW_OOPS, {});
+ /* prevent further acquisition */
+ vxh_active = 0;
}
-static inline void vxh_loc_vx_info(unsigned arg, struct vx_info *vxi)
-{
- struct _vx_hist_entry *entry;
-
- preempt_disable();
- entry = vxh_advance(VXH_HERE());
- __vxh_copy_vxi(entry, vxi);
- entry->ll.arg = arg;
- entry->type = VXH_LOC_VX_INFO;
- preempt_enable();
-}
+VXH_SIMPLE(vxh_get_vx_info, VXH_GET_VX_INFO);
+VXH_SIMPLE(vxh_put_vx_info, VXH_PUT_VX_INFO);
-static inline void vxh_lookup_vx_info(unsigned arg, struct vx_info *vxi)
-{
- struct _vx_hist_entry *entry;
+VXH_DATA(vxh_init_vx_info, VXH_INIT_VX_INFO);
+VXH_DATA(vxh_set_vx_info, VXH_SET_VX_INFO);
+VXH_DATA(vxh_clr_vx_info, VXH_CLR_VX_INFO);
- preempt_disable();
- entry = vxh_advance(VXH_HERE());
- __vxh_copy_vxi(entry, vxi);
- entry->ll.arg = arg;
- entry->type = VXH_LOOKUP_VX_INFO;
- preempt_enable();
-}
+VXH_DATA(vxh_claim_vx_info, VXH_CLAIM_VX_INFO);
+VXH_DATA(vxh_release_vx_info, VXH_RELEASE_VX_INFO);
-static inline void vxh_create_vx_info(unsigned arg, struct vx_info *vxi)
-{
- struct _vx_hist_entry *entry;
+VXH_SIMPLE(vxh_alloc_vx_info, VXH_ALLOC_VX_INFO);
+VXH_SIMPLE(vxh_dealloc_vx_info, VXH_DEALLOC_VX_INFO);
- preempt_disable();
- entry = vxh_advance(VXH_HERE());
- __vxh_copy_vxi(entry, vxi);
- entry->ll.arg = arg;
- entry->type = VXH_CREATE_VX_INFO;
- preempt_enable();
-}
+VXH_SIMPLE(vxh_hash_vx_info, VXH_HASH_VX_INFO);
+VXH_SIMPLE(vxh_unhash_vx_info, VXH_UNHASH_VX_INFO);
+VXH_LARG(vxh_loc_vx_info, VXH_LOC_VX_INFO);
+VXH_LARG(vxh_lookup_vx_info, VXH_LOOKUP_VX_INFO);
+VXH_LARG(vxh_create_vx_info, VXH_CREATE_VX_INFO);
extern void vxh_dump_history(void);
+
#else /* CONFIG_VSERVER_HISTORY */
+
#define vxh_throw_oops() do { } while (0)
#define vxh_get_vx_info(v) do { } while (0)
#define NB_IPV4ROOT 16
+/* context flags */
+
+#define NXF_STATE_SETUP (1ULL<<32)
+
+#define NXF_STATE_HELPER (1ULL<<36)
+
+#define NXF_INIT_SET (0)
+
+
#ifdef __KERNEL__
#include <linux/list.h>
struct nx_info {
struct hlist_node nx_hlist; /* linked list of nxinfos */
- struct rcu_head nx_rcu; /* the rcu head */
nid_t nx_id; /* vnet id */
atomic_t nx_usecnt; /* usage count */
- atomic_t nx_refcnt; /* reference count */
+ atomic_t nx_tasks; /* tasks count */
+ int nx_state; /* context state */
uint64_t nx_flags; /* network flag word */
uint64_t nx_ncaps; /* network capabilities */
};
-struct rcu_head;
+/* status flags */
-extern void unhash_nx_info(struct nx_info *);
+#define NXS_HASHED 0x0001
+#define NXS_SHUTDOWN 0x0100
+#define NXS_RELEASED 0x8000
extern struct nx_info *locate_nx_info(int);
extern struct nx_info *locate_or_create_nx_info(int);
extern int nx_migrate_task(struct task_struct *, struct nx_info *);
+extern long vs_net_change(struct nx_info *, unsigned int);
+
struct in_ifaddr;
struct net_device;
#endif /* __KERNEL__ */
-#define VCMD_net_create VC_CMD(VNET, 1, 0)
+#define VCMD_net_create_v0 VC_CMD(VNET, 1, 0)
+#define VCMD_net_create VC_CMD(VNET, 1, 1)
+
+struct vcmd_net_create {
+ uint64_t flagword;
+};
+
#define VCMD_net_migrate VC_CMD(NETMIG, 1, 0)
#define VCMD_net_add VC_CMD(NETALT, 1, 0)
/* more to come */
};
-// IPN_TYPE_IPV4
-
#ifdef __KERNEL__
extern int vc_net_create(uint32_t, void __user *);
#define IPF_STATE_SETUP (1ULL<<32)
-
#define IPF_ONE_TIME (0x0001ULL<<32)
#define VCMD_get_ncaps VC_CMD(FLAGS, 7, 0)
/* interface version */
-#define VCI_VERSION 0x00010025
+#define VCI_VERSION 0x00020001
/* query version */
#include <linux/mempolicy.h>
#include <linux/syscalls.h>
#include <linux/vs_limit.h>
+#include <linux/vs_network.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
release_thread(p);
if (p->vx_info)
release_vx_info(p->vx_info, p);
+ if (p->nx_info)
+ release_nx_info(p->nx_info, p);
put_task_struct(p);
p = leader;
*/
BUG_ON(p == reaper || reaper->exit_state >= EXIT_ZOMBIE);
p->real_parent = reaper;
- if (p->parent == p->real_parent)
- BUG();
}
static inline void reparent_thread(task_t *p, task_t *father, int traced)
int retval;
struct task_struct *p = NULL;
struct vx_info *vxi;
+ struct nx_info *nxi;
if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS))
return ERR_PTR(-EINVAL);
goto fork_out;
init_vx_info(&p->vx_info, current->vx_info);
- p->nx_info = NULL;
- set_nx_info(&p->nx_info, current->nx_info);
+ init_nx_info(&p->nx_info, current->nx_info);
/* check vserver memory */
if (p->mm && !(clone_flags & CLONE_VM)) {
atomic_inc(&vxi->cvirt.total_forks);
vx_nproc_inc(p);
}
+ nxi = p->nx_info;
+ if (nxi)
+ claim_nx_info(nxi, p);
write_unlock_irq(&tasklist_lock);
retval = 0;
{
__deactivate_task(p, rq);
p->state |= TASK_ONHOLD;
- // recalc_task_prio(p, now);
- // a new one on hold
+ /* a new one on hold */
vx_onhold_inc(vxi);
list_add_tail(&p->run_list, &rq->hold_queue);
-
- //printk("··· %8lu hold %p [%d]\n", jiffies, p, p->prio);
}
/*
struct task_struct *p, runqueue_t *rq)
{
list_del(&p->run_list);
- // one less waiting
+ /* one less waiting */
vx_onhold_dec(vxi);
- // p->prio = MAX_PRIO-1;
- // p->activated = 1;
- // recalc_task_prio(p, now);
p->state &= ~TASK_ONHOLD;
enqueue_task(p, rq->expired);
rq->nr_running++;
if (p->static_prio < rq->best_expired_prio)
rq->best_expired_prio = p->static_prio;
-
- // printk("··· %8lu unhold %p [%d]\n", jiffies, p, p->prio);
}
#else
static inline
void account_user_time(struct task_struct *p, cputime_t cputime)
{
struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
+ struct vx_info *vxi = p->vx_info; /* p is _always_ current */
cputime64_t tmp;
+ int nice = (TASK_NICE(p) > 0);
p->utime = cputime_add(p->utime, cputime);
+ vx_account_user(vxi, cputime, nice);
/* Check for signals (SIGVTALRM, SIGPROF, SIGXCPU & SIGKILL). */
check_rlimit(p, cputime);
/* Add user time to cpustat. */
tmp = cputime_to_cputime64(cputime);
- if (TASK_NICE(p) > 0)
+ if (nice)
cpustat->nice = cputime64_add(cpustat->nice, tmp);
else
cpustat->user = cputime64_add(cpustat->user, tmp);
cputime_t cputime)
{
struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
+ struct vx_info *vxi = p->vx_info; /* p is _always_ current */
runqueue_t *rq = this_rq();
cputime64_t tmp;
p->stime = cputime_add(p->stime, cputime);
+ vx_account_system(vxi, cputime, (p == rq->idle));
/* Check for signals (SIGPROF, SIGXCPU & SIGKILL). */
if (likely(p->signal && p->exit_state < EXIT_ZOMBIE)) {
vxi = next->vx_info;
ret = vx_tokens_recalc(vxi);
- // tokens = vx_tokens_avail(next);
if (ret > 0) {
vx_unhold_task(vxi, next, rq);
val.procs = nr_threads;
} while (read_seqretry(&xtime_lock, seq));
-/* if (vx_flags(VXF_VIRT_CPU, 0))
- vx_vsi_cpu(val);
-*/
si_meminfo(&val);
si_swapinfo(&val);
default y
select SECURITY_CAPABILITIES
+config VSERVER_LEGACYNET
+ bool
+ depends on !VSERVER_NGNET
+ default y
+
menu "Linux VServer"
config VSERVER_LEGACY
This enables the legacy API used in vs1.xx, which allows
to use older tools (for migration purposes).
-config VSERVER_LEGACYNET
- bool "Enable Legacy Networking Kernel API"
- default y
+config VSERVER_NGNET
+ bool "Disable Legacy Networking Kernel API"
+ depends on EXPERIMENTAL
+ default n
help
- This enables the legacy networking API, which allows
- to configure per vserver IPs as we know it.
- For now, even recent tools use this interface of the
- legacy API, so unless you know what you are doing,
- leave that option enabled.
+	  This disables the legacy networking API which is required
+	  by the chbind tool. Do not disable it unless you know
+	  exactly what you are doing.
config VSERVER_PROC_SECURE
bool "Enable Proc Security"
memset (new, 0, sizeof(struct vx_info));
new->vx_id = xid;
- // INIT_RCU_HEAD(&new->vx_rcu);
INIT_HLIST_NODE(&new->vx_hlist);
atomic_set(&new->vx_usecnt, 0);
atomic_set(&new->vx_tasks, 0);
new->vx_parent = NULL;
new->vx_state = 0;
- new->vx_lock = SPIN_LOCK_UNLOCKED;
init_waitqueue_head(&new->vx_wait);
/* rest of init goes here */
vx_info_init_cvirt(&new->cvirt);
vx_info_init_cacct(&new->cacct);
-
- new->vx_flags = VXF_STATE_SETUP|VXF_STATE_INIT;
+ new->vx_flags = VXF_INIT_SET;
new->vx_bcaps = CAP_INIT_EFF_SET;
new->vx_ccaps = 0;
kfree(vxi);
}
-void __shutdown_vx_info(struct vx_info *vxi)
+static void __shutdown_vx_info(struct vx_info *vxi)
{
struct namespace *namespace;
struct fs_struct *fs;
void free_vx_info(struct vx_info *vxi)
{
/* context shutdown is mandatory */
- // BUG_ON(vxi->vx_state != VXS_SHUTDOWN);
+ BUG_ON(!vx_info_state(vxi, VXS_SHUTDOWN));
BUG_ON(atomic_read(&vxi->vx_usecnt));
BUG_ON(atomic_read(&vxi->vx_tasks));
BUG_ON(vx_info_state(vxi, VXS_HASHED));
- // BUG_ON(!vx_state(vxi, VXS_DEFUNCT));
BUG_ON(vxi->vx_namespace);
BUG_ON(vxi->vx_fs);
vxh_hash_vx_info(vxi);
/* context must not be hashed */
- BUG_ON(vxi->vx_state & VXS_HASHED);
+ BUG_ON(vx_info_state(vxi, VXS_HASHED));
vxi->vx_state |= VXS_HASHED;
head = &vx_info_hash[__hashval(vxi->vx_id)];
"__unhash_vx_info: %p[#%d]", vxi, vxi->vx_id);
vxh_unhash_vx_info(vxi);
- /* maybe warn on that? */
- if (!(vxi->vx_state & VXS_HASHED))
- return;
+ /* context must be hashed */
+ BUG_ON(!vx_info_state(vxi, VXS_HASHED));
vxi->vx_state &= ~VXS_HASHED;
hlist_del(&vxi->vx_hlist);
vxdprintk(VXD_CBIT(xid, 0),
"__lookup_vx_info(#%u): %p[#%u]",
xid, vxi, vxi?vxi->vx_id:0);
- vxh_lookup_vx_info(xid, vxi);
+ vxh_lookup_vx_info(vxi, xid);
return vxi;
}
out_unlock:
spin_unlock(&vx_info_hash_lock);
- vxh_loc_vx_info(id, vxi);
+ vxh_loc_vx_info(vxi, id);
if (new)
__dealloc_vx_info(new);
return vxi;
/* __create_vx_info()
* create the requested context
- * get() it and hash it */
+ * get() and hash it */
static struct vx_info * __create_vx_info(int id)
{
vxdprintk(VXD_CBIT(xid, 1), "create_vx_info(%d)*", id);
- if (!(new = __alloc_vx_info(id))) {
+ if (!(new = __alloc_vx_info(id)))
return ERR_PTR(-ENOMEM);
- }
/* required to make dynamic xids unique */
spin_lock(&vx_info_hash_lock);
}
new->vx_id = id;
}
- /* existing context requested */
+ /* static context requested */
else if ((vxi = __lookup_vx_info(id))) {
vxdprintk(VXD_CBIT(xid, 0),
"create_vx_info(%d) = %p (already there)", id, vxi);
goto out_unlock;
}
- /* new context requested */
+ /* new context */
vxdprintk(VXD_CBIT(xid, 0),
"create_vx_info(%d) = %p (new)", id, new);
__hash_vx_info(get_vx_info(new));
out_unlock:
spin_unlock(&vx_info_hash_lock);
- vxh_create_vx_info(id, IS_ERR(vxi)?NULL:vxi);
+ vxh_create_vx_info(IS_ERR(vxi)?NULL:vxi, id);
if (new)
__dealloc_vx_info(new);
return vxi;
}
#endif
+
int vx_migrate_user(struct task_struct *p, struct vx_info *vxi)
{
struct user_struct *new_user, *old_user;
int vc_ctx_create(uint32_t xid, void __user *data)
{
+ struct vcmd_ctx_create vc_data = { .flagword = VXF_INIT_SET };
struct vx_info *new_vxi;
int ret;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
+ if (data && copy_from_user (&vc_data, data, sizeof(vc_data)))
+ return -EFAULT;
if ((xid > MAX_S_CONTEXT) && (xid != VX_DYNAMIC_ID))
return -EINVAL;
-
if (xid < 2)
return -EINVAL;
if (IS_ERR(new_vxi))
return PTR_ERR(new_vxi);
+ /* initial flags */
+ new_vxi->vx_flags = vc_data.flagword;
+
vs_state_change(new_vxi, VSC_STARTUP);
ret = new_vxi->vx_id;
vx_migrate_task(current, new_vxi);
break;
case 4: /* Read/clear last kernel messages */
do_clear = 1;
- /* FALL THRU */
+ /* fall through */
case 3: /* Read last kernel messages */
- // if (count > log_buf_len)
- // count = log_buf_len;
- spin_lock_irq(&log->logbuf_lock);
- // if (count > logged_chars)
- // count = logged_chars;
- // if (do_clear)
- // logged_chars = 0;
- spin_unlock_irq(&log->logbuf_lock);
- if (error)
- break;
return 0;
case 5: /* Clear ring buffer */
- // logged_chars = 0;
return 0;
case 6: /* Disable logging to console */
#include <linux/kmod.h>
#include <linux/sched.h>
#include <linux/vs_context.h>
+#include <linux/vs_network.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
{
char id_buf[8], cmd_buf[16];
char uid_buf[16], pid_buf[16];
+ int ret;
char *argv[] = {vshelper_path, NULL, id_buf, 0};
char *envp[] = {"HOME=/", "TERM=linux",
return 0;
}
- if (do_vshelper(vshelper_path, argv, envp, 1))
- return -EPERM;
- return 0;
+#ifndef CONFIG_VSERVER_LEGACY
+ ret = do_vshelper(vshelper_path, argv, envp, 1);
+#else
+ ret = do_vshelper(vshelper_path, argv, envp, 0);
+#endif
+ return (ret) ? -EPERM : 0;
}
char *envp[] = {"HOME=/", "TERM=linux",
"PATH=/sbin:/usr/sbin:/bin:/usr/bin", cmd_buf, 0};
+ if (!vx_info_flags(vxi, VXF_STATE_HELPER, 0))
+ return 0;
+
snprintf(id_buf, sizeof(id_buf)-1, "%d", vxi->vx_id);
snprintf(cmd_buf, sizeof(cmd_buf)-1, "VS_CMD=%08x", cmd);
return 0;
}
+
+/*
+ * argv [0] = vshelper_path;
+ * argv [1] = action: "netup", "netdown"
+ * argv [2] = context identifier
+ *
+ * envp [*] = type-specific parameters
+ */
+
+long vs_net_change(struct nx_info *nxi, unsigned int cmd)
+{
+ char id_buf[8], cmd_buf[16];
+ char *argv[] = {vshelper_path, NULL, id_buf, 0};
+ char *envp[] = {"HOME=/", "TERM=linux",
+ "PATH=/sbin:/usr/sbin:/bin:/usr/bin", cmd_buf, 0};
+
+ if (!nx_info_flags(nxi, NXF_STATE_HELPER, 0))
+ return 0;
+
+ snprintf(id_buf, sizeof(id_buf)-1, "%d", nxi->nx_id);
+ snprintf(cmd_buf, sizeof(cmd_buf)-1, "VS_CMD=%08x", cmd);
+
+ switch (cmd) {
+ case VSC_NETUP:
+ argv[1] = "netup";
+ break;
+ case VSC_NETDOWN:
+ argv[1] = "netdown";
+ break;
+ default:
+ return 0;
+ }
+
+ do_vshelper(vshelper_path, argv, envp, 1);
+ return 0;
+}
+
}
-#define VXH_LOC_FMTS "(#%04x,*%d):%p"
+#define VXH_LOC_FMTS "(#%04x,*%d): %p"
#define VXH_LOC_ARGS(e) (e)->seq, cpu, (e)->loc
case PROC_SUPER_MAGIC:
entry = PROC_I(in)->pde;
- // check for specific inodes ?
+ /* check for specific inodes? */
if (entry)
*mask |= IATTR_FLAGS;
if (entry)
if (ret == 0) {
/* We keep the same vx_id, but lower the capabilities */
current->vx_info->vx_bcaps &= (~vc_data.remove_cap);
- // current->cap_bset &= (~vc_data.remove_cap);
ret = vx_current_xid();
current->vx_info->vx_flags |= vc_data.flags;
}
ret = vx_migrate_task(current, new_vxi);
if (ret == 0) {
current->vx_info->vx_bcaps &= (~vc_data.remove_cap);
- // current->cap_bset &= (~vc_data.remove_cap);
new_vxi->vx_flags |= vc_data.flags;
if (vc_data.flags & VX_INFO_INIT)
vx_set_initpid(new_vxi, current->tgid);
#include <linux/vserver/legacy.h>
#include <linux/vserver/namespace.h>
#include <linux/namespace.h>
+#include <linux/err.h>
#include <asm/errno.h>
#include <asm/uaccess.h>
return -EFAULT;
if (!nxi || nxi->ipv4[0] == 0 || capable(CAP_NET_ADMIN))
- // We are allowed to change everything
+ /* We are allowed to change everything */
err = 0;
else if (nxi) {
int found = 0;
- // We are allowed to select a subset of the currently
- // installed IP numbers. No new one allowed
- // We can't change the broadcast address though
+	/* We are allowed to select a subset of the currently
+	   installed IP numbers. No new ones are allowed.
+	   We can't change the broadcast address though. */
for (i=0; i<nbip; i++) {
int j;
__u32 nxip = vc_data.nx_mask_pair[i].ip;
return err;
new_nxi = create_nx_info();
- if (!new_nxi)
+ if (IS_ERR(new_nxi))
return -EINVAL;
new_nxi->nbipv4 = nbip;
new_nxi->mask[i] = vc_data.nx_mask_pair[i].mask;
}
new_nxi->v4_bcast = vc_data.broadcast;
- // current->nx_info = new_nxi;
- if (nxi) {
+ if (nxi)
printk("!!! switching nx_info %p->%p\n", nxi, new_nxi);
- clr_nx_info(¤t->nx_info);
- }
+
nx_migrate_task(current, new_nxi);
- // set_nx_info(¤t->nx_info, new_nxi);
- // current->nid = new_nxi->nx_id;
put_nx_info(new_nxi);
return 0;
}
* V0.02 cleaned up implementation
* V0.03 added equiv nx commands
* V0.04 switch to RCU based hash
+ * V0.05 and back to locking again
*
*/
memset (new, 0, sizeof(struct nx_info));
new->nx_id = nid;
- INIT_RCU_HEAD(&new->nx_rcu);
INIT_HLIST_NODE(&new->nx_hlist);
- atomic_set(&new->nx_refcnt, 0);
atomic_set(&new->nx_usecnt, 0);
+ atomic_set(&new->nx_tasks, 0);
+ new->nx_state = 0;
+
+ new->nx_flags = NXF_INIT_SET;
/* rest of init goes here */
vxdprintk(VXD_CBIT(nid, 0),
- "alloc_nx_info() = %p", new);
+ "alloc_nx_info(%d) = %p", nid, new);
return new;
}
nxi->nx_id = -1;
BUG_ON(atomic_read(&nxi->nx_usecnt));
- BUG_ON(atomic_read(&nxi->nx_refcnt));
+ BUG_ON(atomic_read(&nxi->nx_tasks));
+ nxi->nx_state |= NXS_RELEASED;
kfree(nxi);
}
-static inline int __free_nx_info(struct nx_info *nxi)
+static void __shutdown_nx_info(struct nx_info *nxi)
{
- int usecnt, refcnt;
-
- BUG_ON(!nxi);
-
- usecnt = atomic_read(&nxi->nx_usecnt);
- BUG_ON(usecnt < 0);
-
- refcnt = atomic_read(&nxi->nx_refcnt);
- BUG_ON(refcnt < 0);
-
- if (!usecnt)
- __dealloc_nx_info(nxi);
- return usecnt;
+ nxi->nx_state |= NXS_SHUTDOWN;
+ vs_net_change(nxi, VSC_NETDOWN);
}
/* exported stuff */
void free_nx_info(struct nx_info *nxi)
{
/* context shutdown is mandatory */
- // BUG_ON(nxi->nx_state != NXS_SHUTDOWN);
+ BUG_ON(nxi->nx_state != NXS_SHUTDOWN);
- // BUG_ON(nxi->nx_state & NXS_HASHED);
+ /* context must not be hashed */
+ BUG_ON(nxi->nx_state & NXS_HASHED);
- BUG_ON(__free_nx_info(nxi));
+ BUG_ON(atomic_read(&nxi->nx_usecnt));
+ BUG_ON(atomic_read(&nxi->nx_tasks));
+
+ __dealloc_nx_info(nxi);
}
{
struct hlist_head *head;
+ vxd_assert_lock(&nx_info_hash_lock);
vxdprintk(VXD_CBIT(nid, 4),
"__hash_nx_info: %p[#%d]", nxi, nxi->nx_id);
- get_nx_info(nxi);
+
+ /* context must not be hashed */
+ BUG_ON(nx_info_state(nxi, NXS_HASHED));
+
+ nxi->nx_state |= NXS_HASHED;
head = &nx_info_hash[__hashval(nxi->nx_id)];
hlist_add_head(&nxi->nx_hlist, head);
}
vxd_assert_lock(&nx_info_hash_lock);
vxdprintk(VXD_CBIT(nid, 4),
"__unhash_nx_info: %p[#%d]", nxi, nxi->nx_id);
+
+ /* context must be hashed */
+ BUG_ON(!nx_info_state(nxi, NXS_HASHED));
+
+ nxi->nx_state &= ~NXS_HASHED;
hlist_del(&nxi->nx_hlist);
- put_nx_info(nxi);
}
{
struct hlist_head *head = &nx_info_hash[__hashval(nid)];
struct hlist_node *pos;
+ struct nx_info *nxi;
vxd_assert_lock(&nx_info_hash_lock);
hlist_for_each(pos, head) {
- struct nx_info *nxi =
- hlist_entry(pos, struct nx_info, nx_hlist);
+ nxi = hlist_entry(pos, struct nx_info, nx_hlist);
- if (nxi->nx_id == nid) {
- return nxi;
- }
+ if (nxi->nx_id == nid)
+ goto found;
}
- return NULL;
+ nxi = NULL;
+found:
+ vxdprintk(VXD_CBIT(nid, 0),
+ "__lookup_nx_info(#%u): %p[#%u]",
+ nid, nxi, nxi?nxi->nx_id:0);
+ return nxi;
}
return 0;
}
-/* __loc_nx_info()
+/* __create_nx_info()
- * locate or create the requested context
- * get() it and if new hash it */
+ * create the requested context
+ * get() and hash it */
-static struct nx_info * __loc_nx_info(int id, int *err)
+static struct nx_info * __create_nx_info(int id)
{
struct nx_info *new, *nxi = NULL;
- vxdprintk(VXD_CBIT(nid, 1), "loc_nx_info(%d)*", id);
+ vxdprintk(VXD_CBIT(nid, 1), "create_nx_info(%d)*", id);
- if (!(new = __alloc_nx_info(id))) {
- *err = -ENOMEM;
- return NULL;
- }
+ if (!(new = __alloc_nx_info(id)))
+ return ERR_PTR(-ENOMEM);
/* required to make dynamic xids unique */
spin_lock(&nx_info_hash_lock);
id = __nx_dynamic_id();
if (!id) {
printk(KERN_ERR "no dynamic context available.\n");
+ nxi = ERR_PTR(-EAGAIN);
goto out_unlock;
}
new->nx_id = id;
}
- /* existing context requested */
+ /* static context requested */
else if ((nxi = __lookup_nx_info(id))) {
- /* context in setup is not available */
- if (nxi->nx_flags & VXF_STATE_SETUP) {
- vxdprintk(VXD_CBIT(nid, 0),
- "loc_nx_info(%d) = %p (not available)", id, nxi);
- nxi = NULL;
- *err = -EBUSY;
- } else {
- vxdprintk(VXD_CBIT(nid, 0),
- "loc_nx_info(%d) = %p (found)", id, nxi);
- get_nx_info(nxi);
- *err = 0;
- }
+ vxdprintk(VXD_CBIT(nid, 0),
+ "create_nx_info(%d) = %p (already there)", id, nxi);
+ if (nx_info_flags(nxi, NXF_STATE_SETUP, 0))
+ nxi = ERR_PTR(-EBUSY);
+ else
+ nxi = ERR_PTR(-EEXIST);
+ goto out_unlock;
+ }
+ /* dynamic nid creation blocker */
+ else if (id >= MIN_D_CONTEXT) {
+ vxdprintk(VXD_CBIT(nid, 0),
+ "create_nx_info(%d) (dynamic rejected)", id);
+ nxi = ERR_PTR(-EINVAL);
goto out_unlock;
}
- /* new context requested */
+ /* new context */
vxdprintk(VXD_CBIT(nid, 0),
- "loc_nx_info(%d) = %p (new)", id, new);
+ "create_nx_info(%d) = %p (new)", id, new);
__hash_nx_info(get_nx_info(new));
nxi = new, new = NULL;
- *err = 1;
out_unlock:
spin_unlock(&nx_info_hash_lock);
void unhash_nx_info(struct nx_info *nxi)
{
+ __shutdown_nx_info(nxi);
spin_lock(&nx_info_hash_lock);
__unhash_nx_info(nxi);
spin_unlock(&nx_info_hash_lock);
}
+#ifdef CONFIG_VSERVER_LEGACYNET
+
+struct nx_info *create_nx_info(void)
+{
+ return __create_nx_info(NX_DYNAMIC_ID);
+}
+
+#endif
+
/* locate_nx_info()
* search for a nx_info and get() it
struct nx_info *locate_nx_info(int id)
{
- struct nx_info *nxi;
+ struct nx_info *nxi = NULL;
if (id < 0) {
nxi = get_nx_info(current->nx_info);
- } else {
+ } else if (id > 1) {
spin_lock(&nx_info_hash_lock);
nxi = get_nx_info(__lookup_nx_info(id));
spin_unlock(&nx_info_hash_lock);
return hashed;
}
-#ifdef CONFIG_VSERVER_LEGACYNET
-
-struct nx_info *locate_or_create_nx_info(int id)
-{
- int err;
-
- return __loc_nx_info(id, &err);
-}
-
-struct nx_info *create_nx_info(void)
-{
- struct nx_info *new;
- int err;
-
- vxdprintk(VXD_CBIT(nid, 5), "create_nx_info(%s)", "void");
- if (!(new = __loc_nx_info(NX_DYNAMIC_ID, &err)))
- return NULL;
- return new;
-}
-
-
-#endif
#ifdef CONFIG_PROC_FS
/*
* migrate task to new network
+ * gets nxi, puts old_nxi on change
*/
int nx_migrate_task(struct task_struct *p, struct nx_info *nxi)
"nx_migrate_task(%p,%p[#%d.%d.%d])",
p, nxi, nxi->nx_id,
atomic_read(&nxi->nx_usecnt),
- atomic_read(&nxi->nx_refcnt));
+ atomic_read(&nxi->nx_tasks));
+ /* maybe disallow this completely? */
old_nxi = task_get_nx_info(p);
if (old_nxi == nxi)
goto out;
task_lock(p);
- /* should be handled in set_nx_info !! */
if (old_nxi)
clr_nx_info(&p->nx_info);
+ claim_nx_info(nxi, p);
set_nx_info(&p->nx_info, nxi);
p->nid = nxi->nx_id;
task_unlock(p);
- /* obsoleted by clr/set */
- // put_nx_info(old_nxi);
+ vxdprintk(VXD_CBIT(nid, 5),
+ "moved task %p into nxi:%p[#%d]",
+ p, nxi, nxi->nx_id);
+
+ if (old_nxi)
+ release_nx_info(old_nxi, p);
out:
put_nx_info(old_nxi);
return ret;
int vc_net_create(uint32_t nid, void __user *data)
{
- // int ret = -ENOMEM;
+ struct vcmd_net_create vc_data = { .flagword = NXF_INIT_SET };
struct nx_info *new_nxi;
int ret;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
+ if (data && copy_from_user (&vc_data, data, sizeof(vc_data)))
+ return -EFAULT;
- if ((nid >= MIN_D_CONTEXT) && (nid != VX_DYNAMIC_ID))
+ if ((nid > MAX_S_CONTEXT) && (nid != VX_DYNAMIC_ID))
return -EINVAL;
-
- if (nid < 1)
+ if (nid < 2)
return -EINVAL;
- new_nxi = __loc_nx_info(nid, &ret);
- if (!new_nxi)
- return ret;
- if (!(new_nxi->nx_flags & VXF_STATE_SETUP)) {
- ret = -EEXIST;
- goto out_put;
- }
+ new_nxi = __create_nx_info(nid);
+ if (IS_ERR(new_nxi))
+ return PTR_ERR(new_nxi);
+
+ /* initial flags */
+ new_nxi->nx_flags = vc_data.flagword;
+ vs_net_change(new_nxi, VSC_NETUP);
ret = new_nxi->nx_id;
nx_migrate_task(current, new_nxi);
-out_put:
+ /* if this fails, we might end up with a hashed nx_info */
put_nx_info(new_nxi);
return ret;
}
return 0;
}
-int vc_net_add(uint32_t id, void __user *data)
-{
- struct nx_info *nxi;
- struct vcmd_net_nx_v0 vc_data;
-
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
- if (copy_from_user (&vc_data, data, sizeof(vc_data)))
- return -EFAULT;
-
- nxi = locate_nx_info(id);
- if (!nxi)
- return -ESRCH;
-
- // add ip to net context here
- put_nx_info(nxi);
- return 0;
-}
-
-int vc_net_remove(uint32_t id, void __user *data)
-{
- struct nx_info *nxi;
- struct vcmd_net_nx_v0 vc_data;
-
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
- if (copy_from_user (&vc_data, data, sizeof(vc_data)))
- return -EFAULT;
-
- nxi = locate_nx_info(id);
- if (!nxi)
- return -ESRCH;
-
- // rem ip from net context here
- put_nx_info(nxi);
- return 0;
-}
-
-
int vc_get_nflags(uint32_t id, void __user *data)
{
/* special STATE flag handling */
mask = vx_mask_mask(vc_data.mask, nxi->nx_flags, IPF_ONE_TIME);
trigger = (mask & nxi->nx_flags) ^ (mask & vc_data.flagword);
- // if (trigger & IPF_STATE_SETUP)
nxi->nx_flags = vx_mask_flags(nxi->nx_flags,
vc_data.flagword, mask);
return 0;
length = sprintf(buffer,
"UseCnt:\t%d\n"
- "RefCnt:\t%d\n"
+ "Tasks:\t%d\n"
,atomic_read(&nxi->nx_usecnt)
- ,atomic_read(&nxi->nx_refcnt)
+ ,atomic_read(&nxi->nx_tasks)
);
put_nx_info(nxi);
return length;
inode->i_uid = 0;
inode->i_gid = 0;
- // inode->i_xid = xid;
out:
return inode;
}
return 0;
}
-/*
-static int proc_vid_delete_dentry(struct dentry * dentry)
-{
- return 1;
-}
-*/
-
#define PROC_BLOCK_SIZE (PAGE_SIZE - 1024)
static struct dentry_operations proc_vid_dentry_operations = {
d_revalidate: proc_vid_revalidate,
-// d_delete: proc_vid_delete_dentry,
};
return ERR_PTR(-EINVAL);
}
inode->i_mode = p->mode;
-// inode->i_op = &proc_vid_info_inode_operations;
inode->i_fop = &proc_vid_info_file_operations;
inode->i_nlink = 1;
inode->i_flags|=S_IMMUTABLE;
inode->i_ino = fake_ino(1, PROC_XID_INO);
inode->i_mode = S_IFLNK|S_IRWXUGO;
inode->i_uid = inode->i_gid = 0;
- inode->i_size = 64;
-// inode->i_op = &proc_current_inode_operations;
d_add(dentry, inode);
return NULL;
}
inode->i_fop = &proc_vid_info_file_operations;
PROC_I(inode)->op.proc_vid_read = proc_virtual_info;
inode->i_mode = S_IFREG|S_IRUGO;
-// inode->i_size = 64;
-// inode->i_op = &proc_current_inode_operations;
d_add(dentry, inode);
return NULL;
}
inode->i_ino = fake_ino(1, PROC_NID_INO);
inode->i_mode = S_IFLNK|S_IRWXUGO;
inode->i_uid = inode->i_gid = 0;
- inode->i_size = 64;
-// inode->i_op = &proc_current_inode_operations;
d_add(dentry, inode);
return NULL;
}
inode->i_fop = &proc_vid_info_file_operations;
PROC_I(inode)->op.proc_vid_read = proc_vnet_info;
inode->i_mode = S_IFREG|S_IRUGO;
-// inode->i_size = 64;
-// inode->i_op = &proc_current_inode_operations;
d_add(dentry, inode);
return NULL;
}
max = max * max;
vavavoom = max_prio * VAVAVOOM_RATIO / 100
* (vavavoom*vavavoom - (max >> 2)) / max;
- /* alternative, geometric mapping
- vavavoom = -( MAX_USER_PRIO*VAVAVOOM_RATIO/100 * vavavoom
- / vxi->sched.tokens_max -
- MAX_USER_PRIO*VAVAVOOM_RATIO/100/2); */
} else
vavavoom = 0;
- /* vavavoom = ( MAX_USER_PRIO*VAVAVOOM_RATIO/100*tokens_left(p) -
- MAX_USER_PRIO*VAVAVOOM_RATIO/100/2); */
vxi->sched.vavavoom = vavavoom;
return vavavoom;
{
int retval, count=0;
struct vcmd_ctx_kill_v0 vc_data;
- struct siginfo info;
struct task_struct *p;
struct vx_info *vxi;
+ unsigned long priv = 0;
if (!vx_check(0, VX_ADMIN))
return -ENOSYS;
if (copy_from_user (&vc_data, data, sizeof(vc_data)))
return -EFAULT;
- info.si_signo = vc_data.sig;
- info.si_errno = 0;
- info.si_code = SI_USER;
- info.si_pid = current->pid;
- info.si_uid = current->uid;
-
vxi = locate_vx_info(id);
if (!vxi)
return -ESRCH;
read_lock(&tasklist_lock);
switch (vc_data.pid) {
case 0:
- info.si_code = SI_KERNEL;
+ priv = 1;
case -1:
for_each_process(p) {
int err = 0;
if (vx_task_xid(p) != id || p->pid <= 1 ||
- (vc_data.pid && vxi->vx_initpid == p->pid) ||
- !thread_group_leader(p))
+ (vc_data.pid && vxi->vx_initpid == p->pid))
continue;
- err = send_sig_info(vc_data.sig, &info, p);
+ err = group_send_sig_info(vc_data.sig, (void*)priv, p);
++count;
if (err != -EPERM)
retval = err;
case 1:
if (vxi->vx_initpid) {
vc_data.pid = vxi->vx_initpid;
- info.si_code = SI_KERNEL;
+ priv = 1;
}
/* fallthrough */
default:
p = find_task_by_real_pid(vc_data.pid);
if (p) {
- if (!thread_group_leader(p)) {
- struct task_struct *tg;
-
- tg = find_task_by_real_pid(p->tgid);
- if (tg)
- p = tg;
- }
if ((id == -1) || (vx_task_xid(p) == id))
- retval = send_sig_info(vc_data.sig, &info, p);
+ retval = group_send_sig_info(vc_data.sig,
+ (void*)priv, p);
}
break;
}
int vc_wait_exit(uint32_t id, void __user *data)
{
-// struct vcmd_wait_exit_v0 vc_data;
struct vx_info *vxi;
int ret;
case VCMD_create_context:
#ifdef CONFIG_VSERVER_LEGACY
- return vc_ctx_create(id, data);
+ return vc_ctx_create(id, NULL);
#else
return -ENOSYS;
#endif
case VCMD_enter_namespace:
return vc_enter_namespace(id, data);
- case VCMD_ctx_create:
+ case VCMD_ctx_create_v0:
#ifdef CONFIG_VSERVER_LEGACY
if (id == 1) {
current->xid = 1;
return 1;
}
#endif
+ return vc_ctx_create(id, NULL);
+ case VCMD_ctx_create:
return vc_ctx_create(id, data);
case VCMD_ctx_migrate:
return vc_ctx_migrate(id, data);
+ case VCMD_net_create_v0:
+ return vc_net_create(id, NULL);
case VCMD_net_create:
return vc_net_create(id, data);
case VCMD_net_migrate:
set_page_dirty(page);
page_remove_rmap(page);
page_cache_release(page);
- // mm->rss--;
vx_rsspages_dec(mm);
}
}
zap_pte(mm, vma, addr, pte);
- // mm->rss++;
vx_rsspages_inc(mm);
flush_icache_page(vma, page);
set_pte(pte, mk_pte(page, prot));
pte = pte_mkclean(pte);
pte = pte_mkold(pte);
get_page(page);
- // dst_mm->rss++;
vx_rsspages_inc(dst_mm);
if (PageAnon(page))
- // dst_mm->anon_rss++;
vx_anonpages_inc(dst_mm);
set_pte(dst_pte, pte);
page_dup_rmap(page);
if (pte_dirty(pte))
set_page_dirty(page);
if (PageAnon(page))
- // tlb->mm->anon_rss--;
vx_anonpages_dec(tlb->mm);
else if (pte_young(pte))
mark_page_accessed(page);
page_table = pte_offset_map(pmd, address);
if (likely(pte_same(*page_table, pte))) {
if (PageAnon(old_page))
- // mm->anon_rss--;
vx_anonpages_dec(mm);
if (PageReserved(old_page)) {
- // ++mm->rss;
vx_rsspages_inc(mm);
acct_update_integrals();
update_mem_hiwater();
if (vm_swap_full())
remove_exclusive_swap_page(page);
- // mm->rss++;
vx_rsspages_inc(mm);
acct_update_integrals();
update_mem_hiwater();
spin_unlock(&mm->page_table_lock);
goto out;
}
- // mm->rss++;
vx_rsspages_inc(mm);
acct_update_integrals();
update_mem_hiwater();
/* Only go through if we didn't race with anybody else... */
if (pte_none(*page_table)) {
if (!PageReserved(new_page))
- // ++mm->rss;
vx_rsspages_inc(mm);
acct_update_integrals();
update_mem_hiwater();
ret = make_pages_present(start, end);
}
- // vma->vm_mm->locked_vm -= pages;
vx_vmlocked_sub(vma->vm_mm, pages);
out:
if (ret == -ENOMEM)
kmem_cache_free(vm_area_cachep, vma);
}
out:
- // mm->total_vm += len >> PAGE_SHIFT;
vx_vmpages_add(mm, len >> PAGE_SHIFT);
__vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
if (vm_flags & VM_LOCKED) {
- // mm->locked_vm += len >> PAGE_SHIFT;
vx_vmlocked_add(mm, len >> PAGE_SHIFT);
make_pages_present(addr, addr + len);
}
get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
unsigned long pgoff, unsigned long flags)
{
- if (flags & MAP_FIXED) {
- unsigned long ret;
+ unsigned long ret;
- if (addr > TASK_SIZE - len)
- return -ENOMEM;
- if (addr & ~PAGE_MASK)
- return -EINVAL;
- if (file && is_file_hugepages(file)) {
- /*
- * Check if the given range is hugepage aligned, and
- * can be made suitable for hugepages.
- */
- ret = prepare_hugepage_range(addr, len);
- } else {
- /*
- * Ensure that a normal request is not falling in a
- * reserved hugepage range. For some archs like IA-64,
- * there is a separate region for hugepages.
- */
- ret = is_hugepage_only_range(addr, len);
- }
- if (ret)
- return -EINVAL;
- return addr;
- }
+ if (!(flags & MAP_FIXED)) {
+ unsigned long (*get_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
- if (file && file->f_op && file->f_op->get_unmapped_area)
- return file->f_op->get_unmapped_area(file, addr, len,
- pgoff, flags);
+ get_area = current->mm->get_unmapped_area;
+ if (file && file->f_op && file->f_op->get_unmapped_area)
+ get_area = file->f_op->get_unmapped_area;
+ addr = get_area(file, addr, len, pgoff, flags);
+ if (IS_ERR_VALUE(addr))
+ return addr;
+ }
- return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);
+ if (addr > TASK_SIZE - len)
+ return -ENOMEM;
+ if (addr & ~PAGE_MASK)
+ return -EINVAL;
+ if (file && is_file_hugepages(file)) {
+ /*
+ * Check if the given range is hugepage aligned, and
+ * can be made suitable for hugepages.
+ */
+ ret = prepare_hugepage_range(addr, len);
+ } else {
+ /*
+ * Ensure that a normal request is not falling in a
+ * reserved hugepage range. For some archs like IA-64,
+ * there is a separate region for hugepages.
+ */
+ ret = is_hugepage_only_range(addr, len);
+ }
+ if (ret)
+ return -EINVAL;
+ return addr;
}
EXPORT_SYMBOL(get_unmapped_area);
return -ENOMEM;
/* Ok, everything looks good - let it rip */
- // mm->total_vm += grow;
vx_vmpages_add(mm, grow);
if (vma->vm_flags & VM_LOCKED)
- // mm->locked_vm += grow;
vx_vmlocked_add(mm, grow);
__vm_stat_account(mm, vma->vm_flags, vma->vm_file, grow);
acct_update_integrals();
{
size_t len = area->vm_end - area->vm_start;
- // area->vm_mm->total_vm -= len >> PAGE_SHIFT;
vx_vmpages_sub(area->vm_mm, len >> PAGE_SHIFT);
if (area->vm_flags & VM_LOCKED)
- // area->vm_mm->locked_vm -= len >> PAGE_SHIFT;
vx_vmlocked_sub(area->vm_mm, len >> PAGE_SHIFT);
vm_stat_unaccount(area);
area->vm_mm->unmap_area(area);
vma->vm_page_prot = protection_map[flags & 0x0f];
vma_link(mm, vma, prev, rb_link, rb_parent);
out:
- // mm->total_vm += len >> PAGE_SHIFT;
vx_vmpages_add(mm, len >> PAGE_SHIFT);
if (flags & VM_LOCKED) {
- // mm->locked_vm += len >> PAGE_SHIFT;
vx_vmlocked_add(mm, len >> PAGE_SHIFT);
make_pages_present(addr, addr + len);
}
vma = mm->mmap;
mm->mmap = mm->mmap_cache = NULL;
mm->mm_rb = RB_ROOT;
- // mm->rss = 0;
vx_rsspages_sub(mm, mm->rss);
- // mm->total_vm = 0;
vx_vmpages_sub(mm, mm->total_vm);
- // mm->locked_vm = 0;
vx_vmlocked_sub(mm, mm->locked_vm);
spin_unlock(&mm->page_table_lock);
vma->vm_next->vm_flags |= VM_ACCOUNT;
}
- // mm->total_vm += new_len >> PAGE_SHIFT;
vx_vmpages_add(mm, new_len >> PAGE_SHIFT);
__vm_stat_account(mm, vma->vm_flags, vma->vm_file, new_len>>PAGE_SHIFT);
if (vm_flags & VM_LOCKED) {
- // mm->locked_vm += new_len >> PAGE_SHIFT;
vx_vmlocked_add(mm, new_len >> PAGE_SHIFT);
if (new_len > old_len)
make_pages_present(new_addr + old_len,
vma_adjust(vma, vma->vm_start,
addr + new_len, vma->vm_pgoff, NULL);
- // current->mm->total_vm += pages;
vx_vmpages_add(current->mm, pages);
__vm_stat_account(vma->vm_mm, vma->vm_flags,
vma->vm_file, pages);
if (vma->vm_flags & VM_LOCKED) {
- // current->mm->locked_vm += pages;
vx_vmlocked_add(vma->vm_mm, pages);
make_pages_present(addr + old_len,
addr + new_len);
realalloc += kobjsize(vma);
askedalloc += sizeof(*vma);
- // current->mm->total_vm += len >> PAGE_SHIFT;
vx_vmpages_add(current->mm, len >> PAGE_SHIFT);
add_nommu_vma(vma);
realalloc -= kobjsize(vml);
askedalloc -= sizeof(*vml);
kfree(vml);
- // mm->total_vm -= len >> PAGE_SHIFT;
vx_vmpages_sub(mm, len >> PAGE_SHIFT);
#ifdef DEBUG
printk("Exit_mmap:\n");
#endif
- // mm->total_vm = 0;
vx_vmpages_sub(mm, mm->total_vm);
while ((tmp = mm->context.vmlist)) {
BUG_ON(PageReserved(page));
BUG_ON(!anon_vma);
- // vma->vm_mm->anon_rss++;
vx_anonpages_inc(vma->vm_mm);
anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
}
set_pte(pte, swp_entry_to_pte(entry));
BUG_ON(pte_file(*pte));
- // mm->anon_rss--;
vx_anonpages_dec(mm);
}
- // mm->rss--;
vx_rsspages_dec(mm);
acct_update_integrals();
page_remove_rmap(page);
page_remove_rmap(page);
page_cache_release(page);
acct_update_integrals();
- // mm->rss--;
vx_rsspages_dec(mm);
(*mapcount)--;
}
unuse_pte(struct vm_area_struct *vma, unsigned long address, pte_t *dir,
swp_entry_t entry, struct page *page)
{
- // vma->vm_mm->rss++;
vx_rsspages_inc(vma->vm_mm);
get_page(page);
set_pte(dir, pte_mkold(mk_pte(page, vma->vm_page_prot)));
struct ebt_chainstack *cs;
struct ebt_entries *chaininfo;
char *base;
- struct ebt_table_info *private = table->private;
+ struct ebt_table_info *private;
read_lock_bh(&table->lock);
+ private = table->private;
cb_base = COUNTER_BASE(private->counters, private->nentries,
smp_processor_id());
if (private->chainstack)
security_sk_free(sk);
vx_sock_dec(sk);
- // BUG_ON(sk->sk_vx_info);
clr_vx_info(&sk->sk_vx_info);
sk->sk_xid = -1;
- // BUG_ON(sk->sk_nx_info);
clr_nx_info(&sk->sk_nx_info);
sk->sk_nid = -1;
kmem_cache_free(sk->sk_slab, sk);
}
if (rose_route.mask > 10) /* Mask can't be more than 10 digits */
return -EINVAL;
-
+ if (rose_route.ndigis > 8) /* No more than 8 digipeats */
+ return -EINVAL;
err = rose_add_node(&rose_route, dev);
dev_put(dev);
return err;
{
struct key_user *candidate = NULL, *user;
struct rb_node *parent = NULL;
- struct rb_node **p = &key_user_tree.rb_node;
+ struct rb_node **p;
try_again:
+ p = &key_user_tree.rb_node;
spin_lock(&key_user_lock);
/* search the tree for a user record with a matching UID */
}
usb_chip[chip->index] = NULL;
up(®ister_mutex);
- snd_card_free_in_thread(card);
+ snd_card_free(card);
} else {
up(®ister_mutex);
}
/*
* usbusy2y.c - ALSA USB US-428 Driver
*
+2005-04-14 Karsten Wiese
+ Version 0.8.7.2:
+ Call snd_card_free() instead of snd_card_free_in_thread() to prevent oops with dead keyboard symptom.
+ Tested ok with kernel 2.6.12-rc2.
+
2004-12-14 Karsten Wiese
Version 0.8.7.1:
snd_pcm_open for rawusb pcm-devices now returns -EBUSY if called without rawusb's hwdep device being open.
MODULE_AUTHOR("Karsten Wiese <annabellesgarden@yahoo.de>");
-MODULE_DESCRIPTION("TASCAM "NAME_ALLCAPS" Version 0.8.7.1");
+MODULE_DESCRIPTION("TASCAM "NAME_ALLCAPS" Version 0.8.7.2");
MODULE_LICENSE("GPL");
MODULE_SUPPORTED_DEVICE("{{TASCAM(0x1604), "NAME_ALLCAPS"(0x8001)(0x8005)(0x8007) }}");
if (ptr) {
usX2Ydev_t* usX2Y = usX2Y((snd_card_t*)ptr);
struct list_head* p;
- if (usX2Y->chip_status == USX2Y_STAT_CHIP_HUP) // on 2.6.1 kernel snd_usbmidi_disconnect()
- return; // calls us back. better leave :-) .
usX2Y->chip.shutdown = 1;
usX2Y->chip_status = USX2Y_STAT_CHIP_HUP;
usX2Y_unlinkSeq(&usX2Y->AS04);
}
if (usX2Y->us428ctls_sharedmem)
wake_up(&usX2Y->us428ctls_wait_queue_head);
- snd_card_free_in_thread((snd_card_t*)ptr);
+ snd_card_free((snd_card_t*)ptr);
}
}