--- /dev/null
+diff -Nurb linux-2.6.27-590/arch/Kconfig linux-2.6.27-591/arch/Kconfig
+--- linux-2.6.27-590/arch/Kconfig 2010-01-26 17:49:09.000000000 -0500
++++ linux-2.6.27-591/arch/Kconfig 2010-01-29 15:48:58.000000000 -0500
+@@ -13,9 +13,18 @@
+
+ If unsure, say N.
+
++config CHOPSTIX
++ bool "Chopstix (PlanetLab)"
++ depends on MODULES && OPROFILE
++ help
++ Chopstix allows you to monitor various events by summarizing them
++ in lossy data structures and transferring these data structures
++ into user space. If in doubt, say "N".
++
+ config HAVE_OPROFILE
+ def_bool n
+
++
+ config KPROBES
+ bool "Kprobes"
+ depends on KALLSYMS && MODULES
+diff -Nurb linux-2.6.27-590/arch/Kconfig.orig linux-2.6.27-591/arch/Kconfig.orig
+--- linux-2.6.27-590/arch/Kconfig.orig 1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.27-591/arch/Kconfig.orig 2010-01-26 17:49:09.000000000 -0500
+@@ -0,0 +1,94 @@
++#
++# General architecture dependent options
++#
++
++config OPROFILE
++ tristate "OProfile system profiling (EXPERIMENTAL)"
++ depends on PROFILING
++ depends on HAVE_OPROFILE
++ help
++ OProfile is a profiling system capable of profiling the
++ whole system, including the kernel, kernel modules, libraries,
++ and applications.
++
++ If unsure, say N.
++
++config HAVE_OPROFILE
++ def_bool n
++
++config KPROBES
++ bool "Kprobes"
++ depends on KALLSYMS && MODULES
++ depends on HAVE_KPROBES
++ help
++ Kprobes allows you to trap at almost any kernel address and
++ execute a callback function. register_kprobe() establishes
++ a probepoint and specifies the callback. Kprobes is useful
++ for kernel debugging, non-intrusive instrumentation and testing.
++ If in doubt, say "N".
++
++config HAVE_EFFICIENT_UNALIGNED_ACCESS
++ def_bool n
++ help
++ Some architectures are unable to perform unaligned accesses
++ without the use of get_unaligned/put_unaligned. Others are
++ unable to perform such accesses efficiently (e.g. trap on
++ unaligned access and require fixing it up in the exception
++ handler.)
++
++ This symbol should be selected by an architecture if it can
++ perform unaligned accesses efficiently to allow different
++ code paths to be selected for these cases. Some network
++ drivers, for example, could opt to not fix up alignment
++ problems with received packets if doing so would not help
++ much.
++
++ See Documentation/unaligned-memory-access.txt for more
++ information on the topic of unaligned memory accesses.
++
++config HAVE_SYSCALL_WRAPPERS
++ bool
++
++config KRETPROBES
++ def_bool y
++ depends on KPROBES && HAVE_KRETPROBES
++
++config HAVE_IOREMAP_PROT
++ def_bool n
++
++config HAVE_KPROBES
++ def_bool n
++
++config HAVE_KRETPROBES
++ def_bool n
++
++#
++# An arch should select this if it provides all these things:
++#
++# task_pt_regs() in asm/processor.h or asm/ptrace.h
++# arch_has_single_step() if there is hardware single-step support
++# arch_has_block_step() if there is hardware block-step support
++# arch_ptrace() and not #define __ARCH_SYS_PTRACE
++# compat_arch_ptrace() and #define __ARCH_WANT_COMPAT_SYS_PTRACE
++# asm/syscall.h supplying asm-generic/syscall.h interface
++# linux/regset.h user_regset interfaces
++# CORE_DUMP_USE_REGSET #define'd in linux/elf.h
++# TIF_SYSCALL_TRACE calls tracehook_report_syscall_{entry,exit}
++# TIF_NOTIFY_RESUME calls tracehook_notify_resume()
++# signal delivery calls tracehook_signal_handler()
++#
++config HAVE_ARCH_TRACEHOOK
++ def_bool n
++
++config HAVE_DMA_ATTRS
++ def_bool n
++
++config USE_GENERIC_SMP_HELPERS
++ def_bool n
++
++config HAVE_CLK
++ def_bool n
++ help
++ The <linux/clk.h> calls support software clock gating and
++ thus are a key power management tool on many systems.
++
+diff -Nurb linux-2.6.27-590/arch/x86/Kconfig.orig linux-2.6.27-591/arch/x86/Kconfig.orig
+--- linux-2.6.27-590/arch/x86/Kconfig.orig 1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.27-591/arch/x86/Kconfig.orig 2010-01-26 17:49:20.000000000 -0500
+@@ -0,0 +1,1819 @@
++# x86 configuration
++mainmenu "Linux Kernel Configuration for x86"
++
++# Select 32 or 64 bit
++config 64BIT
++ bool "64-bit kernel" if ARCH = "x86"
++ default ARCH = "x86_64"
++ help
++ Say yes to build a 64-bit kernel - formerly known as x86_64
++ Say no to build a 32-bit kernel - formerly known as i386
++
++config X86_32
++ def_bool !64BIT
++
++config X86_64
++ def_bool 64BIT
++
++### Arch settings
++config X86
++ def_bool y
++ select HAVE_UNSTABLE_SCHED_CLOCK
++ select HAVE_IDE
++ select HAVE_OPROFILE
++ select HAVE_IOREMAP_PROT
++ select HAVE_KPROBES
++ select ARCH_WANT_OPTIONAL_GPIOLIB
++ select HAVE_KRETPROBES
++ select HAVE_DYNAMIC_FTRACE
++ select HAVE_FTRACE
++ select HAVE_KVM if ((X86_32 && !X86_VOYAGER && !X86_VISWS && !X86_NUMAQ) || X86_64)
++ select HAVE_ARCH_KGDB if !X86_VOYAGER
++ select HAVE_GENERIC_DMA_COHERENT if X86_32
++ select HAVE_EFFICIENT_UNALIGNED_ACCESS
++
++config ARCH_DEFCONFIG
++ string
++ default "arch/x86/configs/i386_defconfig" if X86_32
++ default "arch/x86/configs/x86_64_defconfig" if X86_64
++
++
++config GENERIC_LOCKBREAK
++ def_bool n
++
++config GENERIC_TIME
++ def_bool y
++
++config GENERIC_CMOS_UPDATE
++ def_bool y
++
++config CLOCKSOURCE_WATCHDOG
++ def_bool y
++
++config GENERIC_CLOCKEVENTS
++ def_bool y
++
++config GENERIC_CLOCKEVENTS_BROADCAST
++ def_bool y
++ depends on X86_64 || (X86_32 && X86_LOCAL_APIC)
++
++config LOCKDEP_SUPPORT
++ def_bool y
++
++config STACKTRACE_SUPPORT
++ def_bool y
++
++config HAVE_LATENCYTOP_SUPPORT
++ def_bool y
++
++config FAST_CMPXCHG_LOCAL
++ bool
++ default y
++
++config MMU
++ def_bool y
++
++config ZONE_DMA
++ def_bool y
++
++config SBUS
++ bool
++
++config GENERIC_ISA_DMA
++ def_bool y
++
++config GENERIC_IOMAP
++ def_bool y
++
++config GENERIC_BUG
++ def_bool y
++ depends on BUG
++
++config GENERIC_HWEIGHT
++ def_bool y
++
++config GENERIC_GPIO
++ def_bool n
++
++config ARCH_MAY_HAVE_PC_FDC
++ def_bool y
++
++config RWSEM_GENERIC_SPINLOCK
++ def_bool !X86_XADD
++
++config RWSEM_XCHGADD_ALGORITHM
++ def_bool X86_XADD
++
++config ARCH_HAS_ILOG2_U32
++ def_bool n
++
++config ARCH_HAS_ILOG2_U64
++ def_bool n
++
++config ARCH_HAS_CPU_IDLE_WAIT
++ def_bool y
++
++config GENERIC_CALIBRATE_DELAY
++ def_bool y
++
++config GENERIC_TIME_VSYSCALL
++ bool
++ default X86_64
++
++config ARCH_HAS_CPU_RELAX
++ def_bool y
++
++config ARCH_HAS_CACHE_LINE_SIZE
++ def_bool y
++
++config HAVE_SETUP_PER_CPU_AREA
++ def_bool X86_64_SMP || (X86_SMP && !X86_VOYAGER)
++
++config HAVE_CPUMASK_OF_CPU_MAP
++ def_bool X86_64_SMP
++
++config ARCH_HIBERNATION_POSSIBLE
++ def_bool y
++ depends on !SMP || !X86_VOYAGER
++
++config ARCH_SUSPEND_POSSIBLE
++ def_bool y
++ depends on !X86_VOYAGER
++
++config ZONE_DMA32
++ bool
++ default X86_64
++
++config ARCH_POPULATES_NODE_MAP
++ def_bool y
++
++config AUDIT_ARCH
++ bool
++ default X86_64
++
++config ARCH_SUPPORTS_AOUT
++ def_bool y
++
++config ARCH_SUPPORTS_OPTIMIZED_INLINING
++ def_bool y
++
++# Use the generic interrupt handling code in kernel/irq/:
++config GENERIC_HARDIRQS
++ bool
++ default y
++
++config GENERIC_IRQ_PROBE
++ bool
++ default y
++
++config GENERIC_PENDING_IRQ
++ bool
++ depends on GENERIC_HARDIRQS && SMP
++ default y
++
++config X86_SMP
++ bool
++ depends on SMP && ((X86_32 && !X86_VOYAGER) || X86_64)
++ select USE_GENERIC_SMP_HELPERS
++ default y
++
++config X86_32_SMP
++ def_bool y
++ depends on X86_32 && SMP
++
++config X86_64_SMP
++ def_bool y
++ depends on X86_64 && SMP
++
++config X86_HT
++ bool
++ depends on SMP
++ depends on (X86_32 && !X86_VOYAGER) || X86_64
++ default y
++
++config X86_BIOS_REBOOT
++ bool
++ depends on !X86_VOYAGER
++ default y
++
++config X86_TRAMPOLINE
++ bool
++ depends on X86_SMP || (X86_VOYAGER && SMP) || (64BIT && ACPI_SLEEP)
++ default y
++
++config KTIME_SCALAR
++ def_bool X86_32
++source "init/Kconfig"
++
++menu "Processor type and features"
++
++source "kernel/time/Kconfig"
++
++config SMP
++ bool "Symmetric multi-processing support"
++ ---help---
++ This enables support for systems with more than one CPU. If you have
++ a system with only one CPU, like most personal computers, say N. If
++ you have a system with more than one CPU, say Y.
++
++ If you say N here, the kernel will run on single and multiprocessor
++ machines, but will use only one CPU of a multiprocessor machine. If
++ you say Y here, the kernel will run on many, but not all,
++ singleprocessor machines. On a singleprocessor machine, the kernel
++ will run faster if you say N here.
++
++ Note that if you say Y here and choose architecture "586" or
++ "Pentium" under "Processor family", the kernel will not work on 486
++ architectures. Similarly, multiprocessor kernels for the "PPro"
++ architecture may not work on all Pentium based boards.
++
++ People using multiprocessor machines who say Y here should also say
++ Y to "Enhanced Real Time Clock Support", below. The "Advanced Power
++ Management" code will be disabled if you say Y here.
++
++ See also <file:Documentation/i386/IO-APIC.txt>,
++ <file:Documentation/nmi_watchdog.txt> and the SMP-HOWTO available at
++ <http://www.tldp.org/docs.html#howto>.
++
++ If you don't know what to do here, say N.
++
++config X86_FIND_SMP_CONFIG
++ def_bool y
++ depends on X86_MPPARSE || X86_VOYAGER
++
++if ACPI
++config X86_MPPARSE
++ def_bool y
++ bool "Enable MPS table"
++ depends on X86_LOCAL_APIC
++ help
++ For old SMP systems that do not have proper ACPI support. On newer
++ systems (especially with 64-bit CPUs) with ACPI support, the MADT and DSDT will override it
++endif
++
++if !ACPI
++config X86_MPPARSE
++ def_bool y
++ depends on X86_LOCAL_APIC
++endif
++
++choice
++ prompt "Subarchitecture Type"
++ default X86_PC
++
++config X86_PC
++ bool "PC-compatible"
++ help
++ Choose this option if your computer is a standard PC or compatible.
++
++config X86_ELAN
++ bool "AMD Elan"
++ depends on X86_32
++ help
++ Select this for an AMD Elan processor.
++
++ Do not use this option for K6/Athlon/Opteron processors!
++
++ If unsure, choose "PC-compatible" instead.
++
++config X86_VOYAGER
++ bool "Voyager (NCR)"
++ depends on X86_32 && (SMP || BROKEN) && !PCI
++ help
++ Voyager is an MCA-based 32-way capable SMP architecture proprietary
++ to NCR Corp. Machine classes 345x/35xx/4100/51xx are Voyager-based.
++
++ *** WARNING ***
++
++ If you do not specifically know you have a Voyager based machine,
++ say N here, otherwise the kernel you build will not be bootable.
++
++config X86_GENERICARCH
++ bool "Generic architecture"
++ depends on X86_32
++ help
++ This option compiles in the NUMAQ, Summit, bigsmp, ES7000, and default
++ subarchitectures. It is intended for a generic binary kernel.
++ If you select them all, the kernel will probe them one by one and
++ will fall back to the default.
++
++if X86_GENERICARCH
++
++config X86_NUMAQ
++ bool "NUMAQ (IBM/Sequent)"
++ depends on SMP && X86_32 && PCI && X86_MPPARSE
++ select NUMA
++ help
++ This option is used for getting Linux to run on a NUMAQ (IBM/Sequent)
++ NUMA multiquad box. This changes the way that processors are
++ bootstrapped, and uses Clustered Logical APIC addressing mode instead
++ of Flat Logical. You will need a new lynxer.elf file to flash your
++ firmware with - send email to <Martin.Bligh@us.ibm.com>.
++
++config X86_SUMMIT
++ bool "Summit/EXA (IBM x440)"
++ depends on X86_32 && SMP
++ help
++ This option is needed for IBM systems that use the Summit/EXA chipset.
++ In particular, it is needed for the x440.
++
++config X86_ES7000
++ bool "Support for Unisys ES7000 IA32 series"
++ depends on X86_32 && SMP
++ help
++ Support for Unisys ES7000 systems. Say 'Y' here if this kernel is
++ supposed to run on an IA32-based Unisys ES7000 system.
++
++config X86_BIGSMP
++ bool "Support for big SMP systems with more than 8 CPUs"
++ depends on X86_32 && SMP
++ help
++ This option is needed for the systems that have more than 8 CPUs
++ and if the system is not of any sub-arch type above.
++
++endif
++
++config X86_VSMP
++ bool "Support for ScaleMP vSMP"
++ select PARAVIRT
++ depends on X86_64 && PCI
++ help
++ Support for ScaleMP vSMP systems. Say 'Y' here if this kernel is
++ supposed to run on these EM64T-based machines. Only choose this option
++ if you have one of these machines.
++
++endchoice
++
++config X86_VISWS
++ bool "SGI 320/540 (Visual Workstation)"
++ depends on X86_32 && PCI && !X86_VOYAGER && X86_MPPARSE && PCI_GODIRECT
++ help
++ The SGI Visual Workstation series is an IA32-based workstation
++ based on SGI systems chips with some legacy PC hardware attached.
++
++ Say Y here to create a kernel to run on the SGI 320 or 540.
++
++ A kernel compiled for the Visual Workstation will run on general
++ PCs as well. See <file:Documentation/sgi-visws.txt> for details.
++
++config X86_RDC321X
++ bool "RDC R-321x SoC"
++ depends on X86_32
++ select M486
++ select X86_REBOOTFIXUPS
++ help
++ This option is needed for RDC R-321x system-on-chip, also known
++ as R-8610-(G).
++ If you don't have one of these chips, you should say N here.
++
++config SCHED_NO_NO_OMIT_FRAME_POINTER
++ def_bool y
++ prompt "Single-depth WCHAN output"
++ depends on X86_32
++ help
++ Calculate simpler /proc/<PID>/wchan values. If this option
++ is disabled then wchan values will recurse back to the
++ caller function. This provides more accurate wchan values,
++ at the expense of slightly more scheduling overhead.
++
++ If in doubt, say "Y".
++
++menuconfig PARAVIRT_GUEST
++ bool "Paravirtualized guest support"
++ help
++ Say Y here to get to see options related to running Linux under
++ various hypervisors. This option alone does not add any kernel code.
++
++ If you say N, all options in this submenu will be skipped and disabled.
++
++if PARAVIRT_GUEST
++
++source "arch/x86/xen/Kconfig"
++
++config VMI
++ bool "VMI Guest support"
++ select PARAVIRT
++ depends on X86_32
++ depends on !X86_VOYAGER
++ help
++ VMI provides a paravirtualized interface to the VMware ESX server
++ (it could be used by other hypervisors in theory too, but is not
++ at the moment), by linking the kernel to a GPL-ed ROM module
++ provided by the hypervisor.
++
++config KVM_CLOCK
++ bool "KVM paravirtualized clock"
++ select PARAVIRT
++ select PARAVIRT_CLOCK
++ depends on !X86_VOYAGER
++ help
++ Turning on this option will allow you to run a paravirtualized clock
++ when running over the KVM hypervisor. Instead of relying on a PIT
++ (or possibly another) emulation by the underlying device model, the host
++ provides the guest with timing infrastructure such as time of day and
++ system time.
++
++config KVM_GUEST
++ bool "KVM Guest support"
++ select PARAVIRT
++ depends on !X86_VOYAGER
++ help
++ This option enables various optimizations for running under the KVM
++ hypervisor.
++
++source "arch/x86/lguest/Kconfig"
++
++config PARAVIRT
++ bool "Enable paravirtualization code"
++ depends on !X86_VOYAGER
++ help
++ This changes the kernel so it can modify itself when it is run
++ under a hypervisor, potentially improving performance significantly
++ over full virtualization. However, when run without a hypervisor
++ the kernel is theoretically slower and slightly larger.
++
++config PARAVIRT_CLOCK
++ bool
++ default n
++
++endif
++
++config PARAVIRT_DEBUG
++ bool "paravirt-ops debugging"
++ depends on PARAVIRT && DEBUG_KERNEL
++ help
++ Enable to debug paravirt_ops internals. Specifically, BUG if
++ a paravirt_op is missing when it is called.
++
++config MEMTEST
++ bool "Memtest"
++ help
++ This option adds a kernel parameter 'memtest', which allows memtest
++ to be set.
++ memtest=0, means disabled; -- default
++ memtest=1, means do 1 test pattern;
++ ...
++ memtest=4, means do 4 test patterns.
++ If you are unsure how to answer this question, answer N.
++
++config X86_SUMMIT_NUMA
++ def_bool y
++ depends on X86_32 && NUMA && X86_GENERICARCH
++
++config X86_CYCLONE_TIMER
++ def_bool y
++ depends on X86_GENERICARCH
++
++config ES7000_CLUSTERED_APIC
++ def_bool y
++ depends on SMP && X86_ES7000 && MPENTIUMIII
++
++source "arch/x86/Kconfig.cpu"
++
++config HPET_TIMER
++ def_bool X86_64
++ prompt "HPET Timer Support" if X86_32
++ help
++ Use the IA-PC HPET (High Precision Event Timer) to manage
++ time in preference to the PIT and RTC, if a HPET is
++ present.
++ HPET is the next generation timer replacing legacy 8254s.
++ The HPET provides a stable time base on SMP
++ systems, unlike the TSC, but it is more expensive to access,
++ as it is off-chip. You can find the HPET spec at
++ <http://www.intel.com/hardwaredesign/hpetspec.htm>.
++
++ You can safely choose Y here. However, HPET will only be
++ activated if the platform and the BIOS support this feature.
++ Otherwise the 8254 will be used for timing services.
++
++ Choose N to continue using the legacy 8254 timer.
++
++config HPET_EMULATE_RTC
++ def_bool y
++ depends on HPET_TIMER && (RTC=y || RTC=m || RTC_DRV_CMOS=m || RTC_DRV_CMOS=y)
++
++# Mark as embedded because too many people got it wrong.
++# The code disables itself when not needed.
++config DMI
++ default y
++ bool "Enable DMI scanning" if EMBEDDED
++ help
++ Enabled scanning of DMI to identify machine quirks. Say Y
++ here unless you have verified that your setup is not
++ affected by entries in the DMI blacklist. Required by PNP
++ BIOS code.
++
++config GART_IOMMU
++ bool "GART IOMMU support" if EMBEDDED
++ default y
++ select SWIOTLB
++ select AGP
++ depends on X86_64 && PCI
++ help
++ Support for full DMA access of devices with 32bit memory access only
++ on systems with more than 3GB. This is usually needed for USB,
++ sound, many IDE/SATA chipsets and some other devices.
++ Provides a driver for the AMD Athlon64/Opteron/Turion/Sempron GART
++ based hardware IOMMU and a software bounce buffer based IOMMU used
++ on Intel systems and as fallback.
++ The code is only active when needed (enough memory and limited
++ device) unless CONFIG_IOMMU_DEBUG or iommu=force is specified
++ too.
++
++config CALGARY_IOMMU
++ bool "IBM Calgary IOMMU support"
++ select SWIOTLB
++ depends on X86_64 && PCI && EXPERIMENTAL
++ help
++ Support for hardware IOMMUs in IBM's xSeries x366 and x460
++ systems. Needed to run systems with more than 3GB of memory
++ properly with 32-bit PCI devices that do not support DAC
++ (Double Address Cycle). Calgary also supports bus level
++ isolation, where all DMAs pass through the IOMMU. This
++ prevents them from going anywhere except their intended
++ destination. This catches hard-to-find kernel bugs and
++ mis-behaving drivers and devices that do not use the DMA-API
++ properly to set up their DMA buffers. The IOMMU can be
++ turned off at boot time with the iommu=off parameter.
++ Normally the kernel will make the right choice by itself.
++ If unsure, say Y.
++
++config CALGARY_IOMMU_ENABLED_BY_DEFAULT
++ def_bool y
++ prompt "Should Calgary be enabled by default?"
++ depends on CALGARY_IOMMU
++ help
++ Should Calgary be enabled by default? If you choose 'y', Calgary
++ will be used (if it exists). If you choose 'n', Calgary will not be
++ used even if it exists. If you choose 'n' and would like to use
++ Calgary anyway, pass 'iommu=calgary' on the kernel command line.
++ If unsure, say Y.
++
++config AMD_IOMMU
++ bool "AMD IOMMU support"
++ select SWIOTLB
++ depends on X86_64 && PCI && ACPI
++ help
++ With this option you can enable support for AMD IOMMU hardware in
++ your system. An IOMMU is a hardware component which provides
++ remapping of DMA memory accesses from devices. With an AMD IOMMU you
++ can isolate the DMA memory of different devices and protect the
++ system from misbehaving device drivers or hardware.
++
++ You can find out if your system has an AMD IOMMU if you look into
++ your BIOS for an option to enable it or if you have an IVRS ACPI
++ table.
++
++# need this always selected by IOMMU for the VIA workaround
++config SWIOTLB
++ def_bool y if X86_64
++ help
++ Support for software bounce buffers used on x86-64 systems
++ which don't have a hardware IOMMU (e.g. the current generation
++ of Intel's x86-64 CPUs). Using this, PCI devices which can only
++ access 32 bits of memory can be used on systems with more than
++ 3 GB of memory. If unsure, say Y.
++
++config IOMMU_HELPER
++ def_bool (CALGARY_IOMMU || GART_IOMMU || SWIOTLB || AMD_IOMMU)
++
++config MAXSMP
++ bool "Configure Maximum number of SMP Processors and NUMA Nodes"
++ depends on X86_64 && SMP && BROKEN
++ default n
++ help
++ Configure the maximum number of CPUs and NUMA nodes for this architecture.
++ If unsure, say N.
++
++config NR_CPUS
++ int "Maximum number of CPUs (2-512)" if !MAXSMP
++ range 2 512
++ depends on SMP
++ default "4096" if MAXSMP
++ default "32" if X86_NUMAQ || X86_SUMMIT || X86_BIGSMP || X86_ES7000
++ default "8"
++ help
++ This allows you to specify the maximum number of CPUs which this
++ kernel will support. The maximum supported value is 512 and the
++ minimum value which makes sense is 2.
++
++ This is purely to save memory - each supported CPU adds
++ approximately eight kilobytes to the kernel image.
++
++config SCHED_SMT
++ bool "SMT (Hyperthreading) scheduler support"
++ depends on X86_HT
++ help
++ SMT scheduler support improves the CPU scheduler's decision making
++ when dealing with Intel Pentium 4 chips with HyperThreading at a
++ cost of slightly increased overhead in some places. If unsure say
++ N here.
++
++config SCHED_MC
++ def_bool y
++ prompt "Multi-core scheduler support"
++ depends on X86_HT
++ help
++ Multi-core scheduler support improves the CPU scheduler's decision
++ making when dealing with multi-core CPU chips at a cost of slightly
++ increased overhead in some places. If unsure say N here.
++
++source "kernel/Kconfig.preempt"
++
++config X86_UP_APIC
++ bool "Local APIC support on uniprocessors"
++ depends on X86_32 && !SMP && !(X86_VOYAGER || X86_GENERICARCH)
++ help
++ A local APIC (Advanced Programmable Interrupt Controller) is an
++ integrated interrupt controller in the CPU. If you have a single-CPU
++ system which has a processor with a local APIC, you can say Y here to
++ enable and use it. If you say Y here even though your machine doesn't
++ have a local APIC, then the kernel will still run with no slowdown at
++ all. The local APIC supports CPU-generated self-interrupts (timer,
++ performance counters), and the NMI watchdog which detects hard
++ lockups.
++
++config X86_UP_IOAPIC
++ bool "IO-APIC support on uniprocessors"
++ depends on X86_UP_APIC
++ help
++ An IO-APIC (I/O Advanced Programmable Interrupt Controller) is an
++ SMP-capable replacement for PC-style interrupt controllers. Most
++ SMP systems and many recent uniprocessor systems have one.
++
++ If you have a single-CPU system with an IO-APIC, you can say Y here
++ to use it. If you say Y here even though your machine doesn't have
++ an IO-APIC, then the kernel will still run with no slowdown at all.
++
++config X86_LOCAL_APIC
++ def_bool y
++ depends on X86_64 || (X86_32 && (X86_UP_APIC || (SMP && !X86_VOYAGER) || X86_GENERICARCH))
++
++config X86_IO_APIC
++ def_bool y
++ depends on X86_64 || (X86_32 && (X86_UP_IOAPIC || (SMP && !X86_VOYAGER) || X86_GENERICARCH))
++
++config X86_VISWS_APIC
++ def_bool y
++ depends on X86_32 && X86_VISWS
++
++config X86_MCE
++ bool "Machine Check Exception"
++ depends on !X86_VOYAGER
++ ---help---
++ Machine Check Exception support allows the processor to notify the
++ kernel if it detects a problem (e.g. overheating, component failure).
++ The action the kernel takes depends on the severity of the problem,
++ ranging from a warning message on the console, to halting the machine.
++ Your processor must be a Pentium or newer to support this - check the
++ flags in /proc/cpuinfo for mce. Note that some older Pentium systems
++ have a design flaw which leads to false MCE events - hence MCE is
++ disabled on all P5 processors, unless explicitly enabled with "mce"
++ as a boot argument. Similarly, if MCE is built in and creates a
++ problem on some new non-standard machine, you can boot with "nomce"
++ to disable it. MCE support simply ignores non-MCE processors like
++ the 386 and 486, so nearly everyone can say Y here.
++
++config X86_MCE_INTEL
++ def_bool y
++ prompt "Intel MCE features"
++ depends on X86_64 && X86_MCE && X86_LOCAL_APIC
++ help
++ Additional support for Intel-specific MCE features such as
++ the thermal monitor.
++
++config X86_MCE_AMD
++ def_bool y
++ prompt "AMD MCE features"
++ depends on X86_64 && X86_MCE && X86_LOCAL_APIC
++ help
++ Additional support for AMD specific MCE features such as
++ the DRAM Error Threshold.
++
++config X86_MCE_NONFATAL
++ tristate "Check for non-fatal errors on AMD Athlon/Duron / Intel Pentium 4"
++ depends on X86_32 && X86_MCE
++ help
++ Enabling this feature starts a timer that triggers every 5 seconds which
++ will look at the machine check registers to see if anything happened.
++ Non-fatal problems automatically get corrected (but still logged).
++ Disable this if you don't want to see these messages.
++ Seeing the messages this option prints out may be indicative of dying
++ or out-of-spec (ie, overclocked) hardware.
++ This option only does something on certain CPUs.
++ (AMD Athlon/Duron and Intel Pentium 4)
++
++config X86_MCE_P4THERMAL
++ bool "check for P4 thermal throttling interrupt."
++ depends on X86_32 && X86_MCE && (X86_UP_APIC || SMP)
++ help
++ Enabling this feature will cause a message to be printed when the P4
++ enters thermal throttling.
++
++config VM86
++ bool "Enable VM86 support" if EMBEDDED
++ default y
++ depends on X86_32
++ help
++ This option is required by programs like DOSEMU to run 16-bit legacy
++ code on X86 processors. It also may be needed by software like
++ XFree86 to initialize some video cards via BIOS. Disabling this
++ option saves about 6k.
++
++config TOSHIBA
++ tristate "Toshiba Laptop support"
++ depends on X86_32
++ ---help---
++ This adds a driver to safely access the System Management Mode of
++ the CPU on Toshiba portables with a genuine Toshiba BIOS. It does
++ not work on models with a Phoenix BIOS. The System Management Mode
++ is used to set the BIOS and power saving options on Toshiba portables.
++
++ For information on utilities to make use of this driver see the
++ Toshiba Linux utilities web site at:
++ <http://www.buzzard.org.uk/toshiba/>.
++
++ Say Y if you intend to run this kernel on a Toshiba portable.
++ Say N otherwise.
++
++config I8K
++ tristate "Dell laptop support"
++ ---help---
++ This adds a driver to safely access the System Management Mode
++ of the CPU on the Dell Inspiron 8000. The System Management Mode
++ is used to read cpu temperature and cooling fan status and to
++ control the fans on the I8K portables.
++
++ This driver has been tested only on the Inspiron 8000 but it may
++ also work with other Dell laptops. You can force loading on other
++ models by passing the parameter `force=1' to the module. Use at
++ your own risk.
++
++ For information on utilities to make use of this driver see the
++ I8K Linux utilities web site at:
++ <http://people.debian.org/~dz/i8k/>
++
++ Say Y if you intend to run this kernel on a Dell Inspiron 8000.
++ Say N otherwise.
++
++config X86_REBOOTFIXUPS
++ def_bool n
++ prompt "Enable X86 board specific fixups for reboot"
++ depends on X86_32 && X86
++ ---help---
++ This enables chipset and/or board specific fixups to be done
++ in order to get reboot to work correctly. This is only needed on
++ some combinations of hardware and BIOS. The symptom, for which
++ this config is intended, is when reboot ends with a stalled/hung
++ system.
++
++ Currently, the only fixup is for the Geode machines using
++ CS5530A and CS5536 chipsets and the RDC R-321x SoC.
++
++ Say Y if you want to enable the fixup. Currently, it's safe to
++ enable this option even if you don't need it.
++ Say N otherwise.
++
++config MICROCODE
++ tristate "/dev/cpu/microcode - Intel IA32 CPU microcode support"
++ select FW_LOADER
++ ---help---
++ If you say Y here, you will be able to update the microcode on
++ Intel processors in the IA32 family, e.g. Pentium Pro, Pentium II,
++ Pentium III, Pentium 4, Xeon etc. You will obviously need the
++ actual microcode binary data itself which is not shipped with the
++ Linux kernel.
++
++ For latest news and information on obtaining all the required
++ ingredients for this driver, check:
++ <http://www.urbanmyth.org/microcode/>.
++
++ To compile this driver as a module, choose M here: the
++ module will be called microcode.
++
++config MICROCODE_OLD_INTERFACE
++ def_bool y
++ depends on MICROCODE
++
++config X86_MSR
++ tristate "/dev/cpu/*/msr - Model-specific register support"
++ help
++ This device gives privileged processes access to the x86
++ Model-Specific Registers (MSRs). It is a character device with
++ major 202 and minors 0 to 31 for /dev/cpu/0/msr to /dev/cpu/31/msr.
++ MSR accesses are directed to a specific CPU on multi-processor
++ systems.
++
++config X86_CPUID
++ tristate "/dev/cpu/*/cpuid - CPU information support"
++ help
++ This device gives processes access to the x86 CPUID instruction to
++ be executed on a specific processor. It is a character device
++ with major 203 and minors 0 to 31 for /dev/cpu/0/cpuid to
++ /dev/cpu/31/cpuid.
++
++choice
++ prompt "High Memory Support"
++ default HIGHMEM4G if !X86_NUMAQ
++ default HIGHMEM64G if X86_NUMAQ
++ depends on X86_32
++
++config NOHIGHMEM
++ bool "off"
++ depends on !X86_NUMAQ
++ ---help---
++ Linux can use up to 64 Gigabytes of physical memory on x86 systems.
++ However, the address space of 32-bit x86 processors is only 4
++ Gigabytes large. That means that, if you have a large amount of
++ physical memory, not all of it can be "permanently mapped" by the
++ kernel. The physical memory that's not permanently mapped is called
++ "high memory".
++
++ If you are compiling a kernel which will never run on a machine with
++ more than 1 Gigabyte total physical RAM, answer "off" here (default
++ choice and suitable for most users). This will result in a "3GB/1GB"
++ split: 3GB are mapped so that each process sees a 3GB virtual memory
++ space and the remaining part of the 4GB virtual memory space is used
++ by the kernel to permanently map as much physical memory as
++ possible.
++
++ If the machine has between 1 and 4 Gigabytes physical RAM, then
++ answer "4GB" here.
++
++ If more than 4 Gigabytes is used then answer "64GB" here. This
++ selection turns Intel PAE (Physical Address Extension) mode on.
++ PAE implements 3-level paging on IA32 processors. PAE is fully
++ supported by Linux, PAE mode is implemented on all recent Intel
++ processors (Pentium Pro and better). NOTE: If you say "64GB" here,
++ then the kernel will not boot on CPUs that don't support PAE!
++
++ The actual amount of total physical memory will either be
++ auto detected or can be forced by using a kernel command line option
++ such as "mem=256M". (Try "man bootparam" or see the documentation of
++ your boot loader (lilo or loadlin) about how to pass options to the
++ kernel at boot time.)
++
++ If unsure, say "off".
++
++config HIGHMEM4G
++ bool "4GB"
++ depends on !X86_NUMAQ
++ help
++ Select this if you have a 32-bit processor and between 1 and 4
++ gigabytes of physical RAM.
++
++config HIGHMEM64G
++ bool "64GB"
++ depends on !M386 && !M486
++ select X86_PAE
++ help
++ Select this if you have a 32-bit processor and more than 4
++ gigabytes of physical RAM.
++
++endchoice
++
++choice
++ depends on EXPERIMENTAL
++ prompt "Memory split" if EMBEDDED
++ default VMSPLIT_3G
++ depends on X86_32
++ help
++ Select the desired split between kernel and user memory.
++
++ If the address range available to the kernel is less than the
++ physical memory installed, the remaining memory will be available
++ as "high memory". Accessing high memory is a little more costly
++ than low memory, as it needs to be mapped into the kernel first.
++ Note that increasing the kernel address space limits the range
++ available to user programs, making the address space there
++ tighter. Selecting anything other than the default 3G/1G split
++ will also likely make your kernel incompatible with binary-only
++ kernel modules.
++
++ If you are not absolutely sure what you are doing, leave this
++ option alone!
++
++ config VMSPLIT_3G
++ bool "3G/1G user/kernel split"
++ config VMSPLIT_3G_OPT
++ depends on !X86_PAE
++ bool "3G/1G user/kernel split (for full 1G low memory)"
++ config VMSPLIT_2G
++ bool "2G/2G user/kernel split"
++ config VMSPLIT_2G_OPT
++ depends on !X86_PAE
++ bool "2G/2G user/kernel split (for full 2G low memory)"
++ config VMSPLIT_1G
++ bool "1G/3G user/kernel split"
++endchoice
++
++config PAGE_OFFSET
++ hex
++ default 0xB0000000 if VMSPLIT_3G_OPT
++ default 0x80000000 if VMSPLIT_2G
++ default 0x78000000 if VMSPLIT_2G_OPT
++ default 0x40000000 if VMSPLIT_1G
++ default 0xC0000000
++ depends on X86_32
++
++config HIGHMEM
++ def_bool y
++ depends on X86_32 && (HIGHMEM64G || HIGHMEM4G)
++
++config X86_PAE
++ def_bool n
++ prompt "PAE (Physical Address Extension) Support"
++ depends on X86_32 && !HIGHMEM4G
++ select RESOURCES_64BIT
++ help
++ PAE is required for NX support, and furthermore enables
++ larger swapspace support for non-overcommit purposes. It
++ has the cost of more pagetable lookup overhead, and also
++ consumes more pagetable space per process.
++
++# Common NUMA Features
++config NUMA
++ bool "Numa Memory Allocation and Scheduler Support (EXPERIMENTAL)"
++ depends on SMP
++ depends on X86_64 || (X86_32 && HIGHMEM64G && (X86_NUMAQ || X86_BIGSMP || X86_SUMMIT && ACPI) && EXPERIMENTAL)
++ default n if X86_PC
++ default y if (X86_NUMAQ || X86_SUMMIT || X86_BIGSMP)
++ help
++ Enable NUMA (Non Uniform Memory Access) support.
++ The kernel will try to allocate memory used by a CPU on the
++ local memory controller of the CPU and add some more
++ NUMA awareness to the kernel.
++
++ For 32-bit this is currently highly experimental and should be only
++ used for kernel development. It might also cause boot failures.
++ For 64-bit this is recommended on all multiprocessor Opteron systems.
++ If the system is EM64T, you should say N unless your system is
++ EM64T NUMA.
++
++comment "NUMA (Summit) requires SMP, 64GB highmem support, ACPI"
++ depends on X86_32 && X86_SUMMIT && (!HIGHMEM64G || !ACPI)
++
++config K8_NUMA
++ def_bool y
++ prompt "Old style AMD Opteron NUMA detection"
++ depends on X86_64 && NUMA && PCI
++ help
++ Enable K8 NUMA node topology detection. You should say Y here if
++ you have a multi processor AMD K8 system. This uses an old
++ method to read the NUMA configuration directly from the builtin
++ Northbridge of Opteron. It is recommended to use X86_64_ACPI_NUMA
++ instead, which also takes priority if both are compiled in.
++
++config X86_64_ACPI_NUMA
++ def_bool y
++ prompt "ACPI NUMA detection"
++ depends on X86_64 && NUMA && ACPI && PCI
++ select ACPI_NUMA
++ help
++ Enable ACPI SRAT based node topology detection.
++
++# Some NUMA nodes have memory ranges that span
++# other nodes. Even though a pfn is valid and
++# between a node's start and end pfns, it may not
++# reside on that node. See memmap_init_zone()
++# for details.
++config NODES_SPAN_OTHER_NODES
++ def_bool y
++ depends on X86_64_ACPI_NUMA
++
++config NUMA_EMU
++ bool "NUMA emulation"
++ depends on X86_64 && NUMA
++ help
++ Enable NUMA emulation. A flat machine will be split
++ into virtual nodes when booted with "numa=fake=N", where N is the
++ number of nodes. This is only useful for debugging.
++
++config NODES_SHIFT
++ int "Maximum NUMA Nodes (as a power of 2)" if !MAXSMP
++ range 1 9 if X86_64
++ default "9" if MAXSMP
++ default "6" if X86_64
++ default "4" if X86_NUMAQ
++ default "3"
++ depends on NEED_MULTIPLE_NODES
++ help
++ Specify the maximum number of NUMA Nodes available on the target
++ system. Increases memory reserved to accommodate various tables.
++
++config HAVE_ARCH_BOOTMEM_NODE
++ def_bool y
++ depends on X86_32 && NUMA
++
++config ARCH_HAVE_MEMORY_PRESENT
++ def_bool y
++ depends on X86_32 && DISCONTIGMEM
++
++config NEED_NODE_MEMMAP_SIZE
++ def_bool y
++ depends on X86_32 && (DISCONTIGMEM || SPARSEMEM)
++
++config HAVE_ARCH_ALLOC_REMAP
++ def_bool y
++ depends on X86_32 && NUMA
++
++config ARCH_FLATMEM_ENABLE
++ def_bool y
++ depends on X86_32 && ARCH_SELECT_MEMORY_MODEL && X86_PC && !NUMA
++
++config ARCH_DISCONTIGMEM_ENABLE
++ def_bool y
++ depends on NUMA && X86_32
++
++config ARCH_DISCONTIGMEM_DEFAULT
++ def_bool y
++ depends on NUMA && X86_32
++
++config ARCH_SPARSEMEM_DEFAULT
++ def_bool y
++ depends on X86_64
++
++config ARCH_SPARSEMEM_ENABLE
++ def_bool y
++ depends on X86_64 || NUMA || (EXPERIMENTAL && X86_PC)
++ select SPARSEMEM_STATIC if X86_32
++ select SPARSEMEM_VMEMMAP_ENABLE if X86_64
++
++config ARCH_SELECT_MEMORY_MODEL
++ def_bool y
++ depends on ARCH_SPARSEMEM_ENABLE
++
++config ARCH_MEMORY_PROBE
++ def_bool X86_64
++ depends on MEMORY_HOTPLUG
++
++source "mm/Kconfig"
++
++config HIGHPTE
++ bool "Allocate 3rd-level pagetables from highmem"
++ depends on X86_32 && (HIGHMEM4G || HIGHMEM64G)
++ help
++ The VM uses one page table entry for each page of physical memory.
++ For systems with a lot of RAM, this can be wasteful of precious
++ low memory. Setting this option will put user-space page table
++ entries in high memory.
++
++config X86_RESERVE_LOW_64K
++ bool "Reserve low 64K of RAM on AMI/Phoenix BIOSen"
++ default y
++ help
++ Reserve the first 64K of physical RAM on BIOSes that are known
++ to potentially corrupt that memory range. A number of BIOSes are
++ known to utilize this area during suspend/resume, so it must not
++ be used by the kernel.
++
++ Set this to N if you are absolutely sure that you trust the BIOS
++ to get all its memory reservations and usages right.
++
++ If you have doubts about the BIOS (e.g. suspend/resume does not
++ work or there are kernel crashes after certain hardware hotplug
++ events) and it's not AMI or Phoenix, then you might want to enable
++ X86_CHECK_BIOS_CORRUPTION=y to allow the kernel to check typical
++ corruption patterns.
++
++ Say Y if unsure.
++
++config MATH_EMULATION
++ bool
++ prompt "Math emulation" if X86_32
++ ---help---
++ Linux can emulate a math coprocessor (used for floating point
++ operations) if you don't have one. 486DX and Pentium processors have
++ a math coprocessor built in, 486SX and 386 do not, unless you added
++ a 487DX or 387, respectively. (The messages during boot time can
++ give you some hints here ["man dmesg"].) Everyone needs either a
++ coprocessor or this emulation.
++
++ If you don't have a math coprocessor, you need to say Y here; if you
++ say Y here even though you have a coprocessor, the coprocessor will
++ be used nevertheless. (This behavior can be changed with the kernel
++ command line option "no387", which comes handy if your coprocessor
++ is broken. Try "man bootparam" or see the documentation of your boot
++ loader (lilo or loadlin) about how to pass options to the kernel at
++ boot time.) This means that it is a good idea to say Y here if you
++ intend to use this kernel on different machines.
++
++ More information about the internals of the Linux math coprocessor
++ emulation can be found in <file:arch/x86/math-emu/README>.
++
++ If you are not sure, say Y; apart from resulting in a 66 KB bigger
++ kernel, it won't hurt.
++
++config MTRR
++ bool "MTRR (Memory Type Range Register) support"
++ ---help---
++ On Intel P6 family processors (Pentium Pro, Pentium II and later)
++ the Memory Type Range Registers (MTRRs) may be used to control
++ processor access to memory ranges. This is most useful if you have
++ a video (VGA) card on a PCI or AGP bus. Enabling write-combining
++ allows bus write transfers to be combined into a larger transfer
++ before bursting over the PCI/AGP bus. This can increase performance
++ of image write operations 2.5 times or more. Saying Y here creates a
++ /proc/mtrr file which may be used to manipulate your processor's
++ MTRRs. Typically the X server should use this.
++
++ This code has a reasonably generic interface so that similar
++ control registers on other processors can be easily supported
++ as well:
++
++ The Cyrix 6x86, 6x86MX and M II processors have Address Range
++ Registers (ARRs) which provide a similar functionality to MTRRs. For
++ these, the ARRs are used to emulate the MTRRs.
++ The AMD K6-2 (stepping 8 and above) and K6-3 processors have two
++ MTRRs. The Centaur C6 (WinChip) has 8 MCRs, allowing
++ write-combining. All of these processors are supported by this code
++ and it makes sense to say Y here if you have one of them.
++
++ Saying Y here also fixes a problem with buggy SMP BIOSes which only
++ set the MTRRs for the boot CPU and not for the secondary CPUs. This
++ can lead to all sorts of problems, so it's good to say Y here.
++
++ You can safely say Y even if your machine doesn't have MTRRs, you'll
++ just add about 9 KB to your kernel.
++
++ See <file:Documentation/mtrr.txt> for more information.
++
++config MTRR_SANITIZER
++ bool
++ prompt "MTRR cleanup support"
++ depends on MTRR
++ help
++ Convert MTRR layout from continuous to discrete, so X drivers can
++ add writeback entries.
++
++ Can be disabled with disable_mtrr_cleanup on the kernel command line.
++ The largest mtrr entry size for a continuous block can be set with
++ mtrr_chunk_size.
++
++ If unsure, say N.
++
++config MTRR_SANITIZER_ENABLE_DEFAULT
++ int "MTRR cleanup enable value (0-1)"
++ range 0 1
++ default "0"
++ depends on MTRR_SANITIZER
++ help
++ Default value for enabling MTRR cleanup (0 = disabled, 1 = enabled).
++
++config MTRR_SANITIZER_SPARE_REG_NR_DEFAULT
++ int "MTRR cleanup spare reg num (0-7)"
++ range 0 7
++ default "1"
++ depends on MTRR_SANITIZER
++ help
++ Default number of spare MTRR registers kept by the cleanup code;
++ it can be changed via mtrr_spare_reg_nr=N on the kernel command line.
++
++config X86_PAT
++ bool
++ prompt "x86 PAT support"
++ depends on MTRR
++ help
++ Use PAT attributes to set up page-level cache control.
++
++ PATs are the modern equivalents of MTRRs and are much more
++ flexible than MTRRs.
++
++ Say N here if you see bootup problems (boot crash, boot hang,
++ spontaneous reboots) or a non-working video driver.
++
++ If unsure, say Y.
++
++config EFI
++ def_bool n
++ prompt "EFI runtime service support"
++ depends on ACPI
++ ---help---
++ This enables the kernel to use EFI runtime services that are
++ available (such as the EFI variable services).
++
++ This option is only useful on systems that have EFI firmware.
++ In addition, you should use the latest ELILO loader available
++ at <http://elilo.sourceforge.net> in order to take advantage
++ of EFI runtime services. However, even with this option, the
++ resultant kernel should continue to boot on existing non-EFI
++ platforms.
++
++config IRQBALANCE
++ def_bool y
++ prompt "Enable kernel irq balancing"
++ depends on X86_32 && SMP && X86_IO_APIC
++ help
++ The default yes will allow the kernel to do irq load balancing.
++ Saying no will keep the kernel from doing irq load balancing.
++
++config SECCOMP
++ def_bool y
++ prompt "Enable seccomp to safely compute untrusted bytecode"
++ depends on PROC_FS
++ help
++ This kernel feature is useful for number crunching applications
++ that may need to compute untrusted bytecode during their
++ execution. By using pipes or other transports made available to
++ the process as file descriptors supporting the read/write
++ syscalls, it's possible to isolate those applications in
++ their own address space using seccomp. Once seccomp is
++ enabled via /proc/<pid>/seccomp, it cannot be disabled
++ and the task is only allowed to execute a few safe syscalls
++ defined by each seccomp mode.
++
++ If unsure, say Y. Only embedded should say N here.
++
++config CC_STACKPROTECTOR
++ bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
++ depends on X86_64 && EXPERIMENTAL && BROKEN
++ help
++ This option turns on the -fstack-protector GCC feature. This
++ feature puts, at the beginning of critical functions, a canary
++ value on the stack just before the return address, and validates
++ the value just before actually returning. Stack based buffer
++ overflows (that need to overwrite this return address) now also
++ overwrite the canary, which gets detected and the attack is then
++ neutralized via a kernel panic.
++
++ This feature requires gcc version 4.2 or above, or a distribution
++ gcc with the feature backported. Older versions are automatically
++ detected and for those versions, this configuration option is ignored.
++
++config CC_STACKPROTECTOR_ALL
++ bool "Use stack-protector for all functions"
++ depends on CC_STACKPROTECTOR
++ help
++ Normally, GCC only inserts the canary value protection for
++ functions that use large-ish on-stack buffers. By enabling
++ this option, GCC will be asked to do this for ALL functions.
++
++source kernel/Kconfig.hz
++
++config KEXEC
++ bool "kexec system call"
++ depends on X86_BIOS_REBOOT
++ help
++ kexec is a system call that implements the ability to shutdown your
++ current kernel, and to start another kernel. It is like a reboot
++ but it is independent of the system firmware. And like a reboot
++ you can start any kernel with it, not just Linux.
++
++ The name comes from the similarity to the exec system call.
++
++ It is an ongoing process to be certain the hardware in a machine
++ is properly shutdown, so do not be surprised if this code does not
++ initially work for you. It may help to enable device hotplugging
++ support. As of this writing the exact hardware interface is
++ strongly in flux, so no good recommendation can be made.
++
++config CRASH_DUMP
++ bool "kernel crash dumps"
++ depends on X86_64 || (X86_32 && HIGHMEM)
++ help
++ Generate crash dump after being started by kexec.
++ This should be normally only set in special crash dump kernels
++ which are loaded in the main kernel with kexec-tools into
++ a specially reserved region and then later executed after
++ a crash by kdump/kexec. The crash dump kernel must be compiled
++ to a memory address not used by the main kernel or BIOS using
++ PHYSICAL_START, or it must be built as a relocatable image
++ (CONFIG_RELOCATABLE=y).
++ For more details see Documentation/kdump/kdump.txt
++
++config KEXEC_JUMP
++ bool "kexec jump (EXPERIMENTAL)"
++ depends on EXPERIMENTAL
++ depends on KEXEC && HIBERNATION && X86_32
++ help
++ Jump between the original kernel and the kexec'd kernel and invoke
++ code in physical address mode via kexec.
++
++config PHYSICAL_START
++ hex "Physical address where the kernel is loaded" if (EMBEDDED || CRASH_DUMP)
++ default "0x1000000" if X86_NUMAQ
++ default "0x200000" if X86_64
++ default "0x100000"
++ help
++ This gives the physical address where the kernel is loaded.
++
++ If the kernel is not relocatable (CONFIG_RELOCATABLE=n) then
++ bzImage will decompress itself to the above physical address and
++ run from there. Otherwise, bzImage will run from the address where
++ it has been loaded by the boot loader and will ignore the above physical
++ address.
++
++ In normal kdump cases one does not have to set/change this option
++ as now bzImage can be compiled as a completely relocatable image
++ (CONFIG_RELOCATABLE=y) and be used to load and run from a different
++ address. This option is mainly useful for the folks who don't want
++ to use a bzImage for capturing the crash dump and want to use a
++ vmlinux instead. vmlinux is not relocatable hence a kernel needs
++ to be specifically compiled to run from a specific memory area
++ (normally a reserved region) and this option comes handy.
++
++ So if you are using bzImage for capturing the crash dump, leave
++ the value here unchanged to 0x100000 and set CONFIG_RELOCATABLE=y.
++ Otherwise, if you plan to use vmlinux for capturing the crash dump,
++ change this value to the start of the reserved region (typically 16MB,
++ 0x1000000). In other words, it can be set based on the "X" value as
++ specified in the "crashkernel=YM@XM" command line boot parameter
++ passed to the panicked kernel. Typically this parameter is set as
++ crashkernel=64M@16M. Please take a look at
++ Documentation/kdump/kdump.txt for more details about crash dumps.
++
++ Usage of bzImage for capturing the crash dump is recommended as
++ one does not have to build two kernels. The same kernel can be used
++ as both production kernel and capture kernel. The above option should
++ have gone away after relocatable bzImage support was introduced, but it
++ remains because there are users out there who continue to use
++ vmlinux for dump capture. This option should go away down the
++ line.
++
++ Don't change this unless you know what you are doing.
++
++config RELOCATABLE
++ bool "Build a relocatable kernel (EXPERIMENTAL)"
++ depends on EXPERIMENTAL
++ help
++ This builds a kernel image that retains relocation information
++ so it can be loaded someplace besides the default 1MB.
++ The relocations tend to make the kernel binary about 10% larger,
++ but are discarded at runtime.
++
++ One use is for the kexec on panic case where the recovery kernel
++ must live at a different physical address than the primary
++ kernel.
++
++ Note: If CONFIG_RELOCATABLE=y, then the kernel runs from the address
++ it has been loaded at and the compile time physical address
++ (CONFIG_PHYSICAL_START) is ignored.
++
++config PHYSICAL_ALIGN
++ hex
++ prompt "Alignment value to which kernel should be aligned" if X86_32
++ default "0x100000" if X86_32
++ default "0x200000" if X86_64
++ range 0x2000 0x400000
++ help
++ This value puts the alignment restrictions on physical address
++ where kernel is loaded and run from. Kernel is compiled for an
++ address which meets above alignment restriction.
++
++ If bootloader loads the kernel at a non-aligned address and
++ CONFIG_RELOCATABLE is set, kernel will move itself to nearest
++ address aligned to above value and run from there.
++
++ If bootloader loads the kernel at a non-aligned address and
++ CONFIG_RELOCATABLE is not set, kernel will ignore the run time
++ load address and decompress itself to the address it has been
++ compiled for and run from there. The address for which kernel is
++ compiled already meets above alignment restrictions. Hence the
++ end result is that kernel runs from a physical address meeting
++ above alignment restrictions.
++
++ Don't change this unless you know what you are doing.
++
++config HOTPLUG_CPU
++ bool "Support for suspend on SMP and hot-pluggable CPUs (EXPERIMENTAL)"
++ depends on SMP && HOTPLUG && EXPERIMENTAL && !X86_VOYAGER
++ ---help---
++ Say Y here to experiment with turning CPUs off and on, and to
++ enable suspend on SMP systems. CPUs can be controlled through
++ /sys/devices/system/cpu.
++ Say N if you want to disable CPU hotplug and don't need to
++ suspend.
++
++config COMPAT_VDSO
++ def_bool y
++ prompt "Compat VDSO support"
++ depends on X86_32 || IA32_EMULATION
++ help
++ Map the 32-bit VDSO to the predictable old-style address too.
++
++ Say N here if you are running a sufficiently recent glibc
++ version (2.3.3 or later), to remove the high-mapped
++ VDSO mapping and to exclusively use the randomized VDSO.
++
++ If unsure, say Y.
++
++endmenu
++
++config ARCH_ENABLE_MEMORY_HOTPLUG
++ def_bool y
++ depends on X86_64 || (X86_32 && HIGHMEM)
++
++config HAVE_ARCH_EARLY_PFN_TO_NID
++ def_bool X86_64
++ depends on NUMA
++
++menu "Power management options"
++ depends on !X86_VOYAGER
++
++config ARCH_HIBERNATION_HEADER
++ def_bool y
++ depends on X86_64 && HIBERNATION
++
++source "kernel/power/Kconfig"
++
++source "drivers/acpi/Kconfig"
++
++config X86_APM_BOOT
++ bool
++ default y
++ depends on APM || APM_MODULE
++
++menuconfig APM
++ tristate "APM (Advanced Power Management) BIOS support"
++ depends on X86_32 && PM_SLEEP
++ ---help---
++ APM is a BIOS specification for saving power using several different
++ techniques. This is mostly useful for battery powered laptops with
++ APM compliant BIOSes. If you say Y here, the system time will be
++ reset after a RESUME operation, the /proc/apm device will provide
++ battery status information, and user-space programs will receive
++ notification of APM "events" (e.g. battery status change).
++
++ If you select "Y" here, you can disable actual use of the APM
++ BIOS by passing the "apm=off" option to the kernel at boot time.
++
++ Note that the APM support is almost completely disabled for
++ machines with more than one CPU.
++
++ In order to use APM, you will need supporting software. For location
++ and more information, read <file:Documentation/power/pm.txt> and the
++ Battery Powered Linux mini-HOWTO, available from
++ <http://www.tldp.org/docs.html#howto>.
++
++ This driver does not spin down disk drives (see the hdparm(8)
++ manpage ("man 8 hdparm") for that), and it doesn't turn off
++ VESA-compliant "green" monitors.
++
++ This driver does not support the TI 4000M TravelMate and the ACER
++ 486/DX4/75 because they don't have compliant BIOSes. Many "green"
++ desktop machines also don't have compliant BIOSes, and this driver
++ may cause those machines to panic during the boot phase.
++
++ Generally, if you don't have a battery in your machine, there isn't
++ much point in using this driver and you should say N. If you get
++ random kernel OOPSes or reboots that don't seem to be related to
++ anything, try disabling/enabling this option (or disabling/enabling
++ APM in your BIOS).
++
++ Some other things you should try when experiencing seemingly random,
++ "weird" problems:
++
++ 1) make sure that you have enough swap space and that it is
++ enabled.
++ 2) pass the "no-hlt" option to the kernel
++ 3) switch on floating point emulation in the kernel and pass
++ the "no387" option to the kernel
++ 4) pass the "floppy=nodma" option to the kernel
++ 5) pass the "mem=4M" option to the kernel (thereby disabling
++ all but the first 4 MB of RAM)
++ 6) make sure that the CPU is not over clocked.
++ 7) read the sig11 FAQ at <http://www.bitwizard.nl/sig11/>
++ 8) disable the cache from your BIOS settings
++ 9) install a fan for the video card or exchange video RAM
++ 10) install a better fan for the CPU
++ 11) exchange RAM chips
++ 12) exchange the motherboard.
++
++ To compile this driver as a module, choose M here: the
++ module will be called apm.
++
++if APM
++
++config APM_IGNORE_USER_SUSPEND
++ bool "Ignore USER SUSPEND"
++ help
++ This option will ignore USER SUSPEND requests. On machines with a
++ compliant APM BIOS, you want to say N. However, on the NEC Versa M
++ series notebooks, it is necessary to say Y because of a BIOS bug.
++
++config APM_DO_ENABLE
++ bool "Enable PM at boot time"
++ ---help---
++ Enable APM features at boot time. From page 36 of the APM BIOS
++ specification: "When disabled, the APM BIOS does not automatically
++ power manage devices, enter the Standby State, enter the Suspend
++ State, or take power saving steps in response to CPU Idle calls."
++ This driver will make CPU Idle calls when Linux is idle (unless this
++ feature is turned off -- see "Do CPU IDLE calls", below). This
++ should always save battery power, but more complicated APM features
++ will be dependent on your BIOS implementation. You may need to turn
++ this option off if your computer hangs at boot time when using APM
++ support, or if it beeps continuously instead of suspending. Turn
++ this off if you have a NEC UltraLite Versa 33/C or a Toshiba
++ T400CDT. This is off by default since most machines do fine without
++ this feature.
++
++config APM_CPU_IDLE
++ bool "Make CPU Idle calls when idle"
++ help
++ Enable calls to APM CPU Idle/CPU Busy inside the kernel's idle loop.
++ On some machines, this can activate improved power savings, such as
++ a slowed CPU clock rate, when the machine is idle. These idle calls
++ are made after the idle loop has run for some length of time (e.g.,
++ 333 mS). On some machines, this will cause a hang at boot time or
++ whenever the CPU becomes idle. (On machines with more than one CPU,
++ this option does nothing.)
++
++config APM_DISPLAY_BLANK
++ bool "Enable console blanking using APM"
++ help
++ Enable console blanking using the APM. Some laptops can use this to
++ turn off the LCD backlight when the screen blanker of the Linux
++ virtual console blanks the screen. Note that this is only used by
++ the virtual console screen blanker, and won't turn off the backlight
++ when using the X Window system. This also doesn't have anything to
++ do with your VESA-compliant power-saving monitor. Further, this
++ option doesn't work for all laptops -- it might not turn off your
++ backlight at all, or it might print a lot of errors to the console,
++ especially if you are using gpm.
++
++config APM_ALLOW_INTS
++ bool "Allow interrupts during APM BIOS calls"
++ help
++ Normally we disable external interrupts while we are making calls to
++ the APM BIOS as a measure to lessen the effects of a badly behaving
++ BIOS implementation. The BIOS should reenable interrupts if it
++ needs to. Unfortunately, some BIOSes do not -- especially those in
++ many of the newer IBM Thinkpads. If you experience hangs when you
++ suspend, try setting this to Y. Otherwise, say N.
++
++config APM_REAL_MODE_POWER_OFF
++ bool "Use real mode APM BIOS call to power off"
++ help
++ Use real mode APM BIOS calls to switch off the computer. This is
++ a work-around for a number of buggy BIOSes. Switch this option on if
++ your computer crashes instead of powering off properly.
++
++endif # APM
++
++source "arch/x86/kernel/cpu/cpufreq/Kconfig"
++
++source "drivers/cpuidle/Kconfig"
++
++endmenu
++
++
++menu "Bus options (PCI etc.)"
++
++config PCI
++ bool "PCI support"
++ default y
++ select ARCH_SUPPORTS_MSI if (X86_LOCAL_APIC && X86_IO_APIC)
++ help
++ Find out whether you have a PCI motherboard. PCI is the name of a
++ bus system, i.e. the way the CPU talks to the other stuff inside
++ your box. Other bus systems are ISA, EISA, MicroChannel (MCA) or
++ VESA. If you have PCI, say Y, otherwise N.
++
++choice
++ prompt "PCI access mode"
++ depends on X86_32 && PCI
++ default PCI_GOANY
++ ---help---
++ On PCI systems, the BIOS can be used to detect the PCI devices and
++ determine their configuration. However, some old PCI motherboards
++ have BIOS bugs and may crash if this is done. Also, some embedded
++ PCI-based systems don't have any BIOS at all. Linux can also try to
++ detect the PCI hardware directly without using the BIOS.
++
++ With this option, you can specify how Linux should detect the
++ PCI devices. If you choose "BIOS", the BIOS will be used,
++ if you choose "Direct", the BIOS won't be used, and if you
++ choose "MMConfig", then PCI Express MMCONFIG will be used.
++ If you choose "Any", the kernel will try MMCONFIG, then the
++ direct access method, and fall back to the BIOS if that doesn't
++ work. If unsure, go with the default, which is "Any".
++
++config PCI_GOBIOS
++ bool "BIOS"
++
++config PCI_GOMMCONFIG
++ bool "MMConfig"
++
++config PCI_GODIRECT
++ bool "Direct"
++
++config PCI_GOOLPC
++ bool "OLPC"
++ depends on OLPC
++
++config PCI_GOANY
++ bool "Any"
++
++endchoice
++
++config PCI_BIOS
++ def_bool y
++ depends on X86_32 && PCI && (PCI_GOBIOS || PCI_GOANY)
++
++# x86-64 doesn't support PCI BIOS access from long mode so always go direct.
++config PCI_DIRECT
++ def_bool y
++ depends on PCI && (X86_64 || (PCI_GODIRECT || PCI_GOANY || PCI_GOOLPC))
++
++config PCI_MMCONFIG
++ def_bool y
++ depends on X86_32 && PCI && ACPI && (PCI_GOMMCONFIG || PCI_GOANY)
++
++config PCI_OLPC
++ def_bool y
++ depends on PCI && OLPC && (PCI_GOOLPC || PCI_GOANY)
++
++config PCI_DOMAINS
++ def_bool y
++ depends on PCI
++
++config PCI_MMCONFIG
++ bool "Support mmconfig PCI config space access"
++ depends on X86_64 && PCI && ACPI
++
++config DMAR
++ bool "Support for DMA Remapping Devices (EXPERIMENTAL)"
++ depends on X86_64 && PCI_MSI && ACPI && EXPERIMENTAL
++ help
++ Support for DMA remapping (DMAR) devices enables independent address
++ translations for Direct Memory Access (DMA) from devices.
++ These DMA remapping devices are reported via ACPI tables
++ and include PCI device scope covered by these DMA
++ remapping devices.
++
++config DMAR_GFX_WA
++ def_bool y
++ prompt "Support for Graphics workaround"
++ depends on DMAR
++ help
++ Current graphics drivers tend to use physical addresses
++ for DMA and avoid using DMA APIs. Setting this config
++ option permits the IOMMU driver to set a unity map for
++ all the OS-visible memory. Hence the driver can continue
++ to use physical addresses for DMA.
++
++config DMAR_FLOPPY_WA
++ def_bool y
++ depends on DMAR
++ help
++ Floppy disk drivers are known to bypass DMA API calls,
++ thereby failing to work when the IOMMU is enabled. This
++ workaround will set up a 1:1 mapping for the first
++ 16M to make floppy (an ISA device) work.
++
++source "drivers/pci/pcie/Kconfig"
++
++source "drivers/pci/Kconfig"
++
++# x86_64 has no ISA slots, but does have ISA-style DMA.
++config ISA_DMA_API
++ def_bool y
++
++if X86_32
++
++config ISA
++ bool "ISA support"
++ depends on !X86_VOYAGER
++ help
++ Find out whether you have ISA slots on your motherboard. ISA is the
++ name of a bus system, i.e. the way the CPU talks to the other stuff
++ inside your box. Other bus systems are PCI, EISA, MicroChannel
++ (MCA) or VESA. ISA is an older system, now being displaced by PCI;
++ newer boards don't support it. If you have ISA, say Y, otherwise N.
++
++config EISA
++ bool "EISA support"
++ depends on ISA
++ ---help---
++ The Extended Industry Standard Architecture (EISA) bus was
++ developed as an open alternative to the IBM MicroChannel bus.
++
++ The EISA bus provided some of the features of the IBM MicroChannel
++ bus while maintaining backward compatibility with cards made for
++ the older ISA bus. The EISA bus saw limited use between 1988 and
++ 1995 when it was made obsolete by the PCI bus.
++
++ Say Y here if you are building a kernel for an EISA-based machine.
++
++ Otherwise, say N.
++
++source "drivers/eisa/Kconfig"
++
++config MCA
++ bool "MCA support" if !X86_VOYAGER
++ default y if X86_VOYAGER
++ help
++ MicroChannel Architecture is found in some IBM PS/2 machines and
++ laptops. It is a bus system similar to PCI or ISA. See
++ <file:Documentation/mca.txt> (and especially the web page given
++ there) before attempting to build an MCA bus kernel.
++
++source "drivers/mca/Kconfig"
++
++config SCx200
++ tristate "NatSemi SCx200 support"
++ depends on !X86_VOYAGER
++ help
++ This provides basic support for National Semiconductor's
++ (now AMD's) Geode processors. The driver probes for the
++ PCI-IDs of several on-chip devices, so it's a good dependency
++ for other scx200_* drivers.
++
++ If compiled as a module, the driver is named scx200.
++
++config SCx200HR_TIMER
++ tristate "NatSemi SCx200 27MHz High-Resolution Timer Support"
++ depends on SCx200 && GENERIC_TIME
++ default y
++ help
++ This driver provides a clocksource built upon the on-chip
++ 27MHz high-resolution timer. It's also a workaround for
++ NSC Geode SC-1100's buggy TSC, which loses time when the
++ processor goes idle (as is done by the scheduler). The
++ other workaround is the "idle=poll" boot option.
++
++config GEODE_MFGPT_TIMER
++ def_bool y
++ prompt "Geode Multi-Function General Purpose Timer (MFGPT) events"
++ depends on MGEODE_LX && GENERIC_TIME && GENERIC_CLOCKEVENTS
++ help
++ This driver provides a clock event source based on the MFGPT
++ timer(s) in the CS5535 and CS5536 companion chip for the geode.
++ MFGPTs have a better resolution and max interval than the
++ generic PIT, and are suitable for use as high-res timers.
++
++config OLPC
++ bool "One Laptop Per Child support"
++ default n
++ help
++ Add support for detecting the unique features of the OLPC
++ XO hardware.
++
++endif # X86_32
++
++config K8_NB
++ def_bool y
++ depends on AGP_AMD64 || (X86_64 && (GART_IOMMU || (PCI && NUMA)))
++
++source "drivers/pcmcia/Kconfig"
++
++source "drivers/pci/hotplug/Kconfig"
++
++endmenu
++
++
++menu "Executable file formats / Emulations"
++
++source "fs/Kconfig.binfmt"
++
++config IA32_EMULATION
++ bool "IA32 Emulation"
++ depends on X86_64
++ select COMPAT_BINFMT_ELF
++ help
++ Include code to run 32-bit programs under a 64-bit kernel. You should
++ likely turn this on, unless you're 100% sure that you don't have any
++ 32-bit programs left.
++
++config IA32_AOUT
++ tristate "IA32 a.out support"
++ depends on IA32_EMULATION && ARCH_SUPPORTS_AOUT
++ help
++ Support old a.out binaries in the 32bit emulation.
++
++config COMPAT
++ def_bool y
++ depends on IA32_EMULATION
++
++config COMPAT_FOR_U64_ALIGNMENT
++ def_bool COMPAT
++ depends on X86_64
++
++config SYSVIPC_COMPAT
++ def_bool y
++ depends on X86_64 && COMPAT && SYSVIPC
++
++endmenu
++
++
++source "net/Kconfig"
++
++source "drivers/Kconfig"
++
++source "drivers/firmware/Kconfig"
++
++source "fs/Kconfig"
++
++source "arch/x86/Kconfig.debug"
++
++source "kernel/vserver/Kconfig"
++
++source "security/Kconfig"
++
++source "crypto/Kconfig"
++
++source "arch/x86/kvm/Kconfig"
++
++source "lib/Kconfig"
+diff -Nurb linux-2.6.27-590/arch/x86/kernel/asm-offsets.c.orig linux-2.6.27-591/arch/x86/kernel/asm-offsets.c.orig
+--- linux-2.6.27-590/arch/x86/kernel/asm-offsets.c.orig 1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.27-591/arch/x86/kernel/asm-offsets.c.orig 2008-10-09 18:13:53.000000000 -0400
+@@ -0,0 +1,5 @@
++#ifdef CONFIG_X86_32
++# include "asm-offsets_32.c"
++#else
++# include "asm-offsets_64.c"
++#endif
+diff -Nurb linux-2.6.27-590/arch/x86/kernel/asm-offsets_32.c linux-2.6.27-591/arch/x86/kernel/asm-offsets_32.c
+--- linux-2.6.27-590/arch/x86/kernel/asm-offsets_32.c 2008-10-09 18:13:53.000000000 -0400
++++ linux-2.6.27-591/arch/x86/kernel/asm-offsets_32.c 2010-01-29 16:25:34.000000000 -0500
+@@ -9,6 +9,7 @@
+ #include <linux/signal.h>
+ #include <linux/personality.h>
+ #include <linux/suspend.h>
++#include <linux/arrays.h>
+ #include <linux/kbuild.h>
+ #include <asm/ucontext.h>
+ #include "sigframe.h"
+@@ -24,9 +25,20 @@
+ #include <linux/lguest.h>
+ #include "../../../drivers/lguest/lg.h"
+
++
++#define STACKOFFSET(sym, str, mem) \
++ DEFINE(sym, offsetof(struct str, mem)-sizeof(struct str));
++
+ /* workaround for a warning with -Wmissing-prototypes */
+ void foo(void);
+
++struct event_spec {
++ unsigned long pc;
++ unsigned long dcookie;
++ unsigned count;
++ unsigned int number;
++};
++
+ void foo(void)
+ {
+ OFFSET(IA32_SIGCONTEXT_ax, sigcontext, ax);
+@@ -50,6 +62,16 @@
+ OFFSET(CPUINFO_x86_vendor_id, cpuinfo_x86, x86_vendor_id);
+ BLANK();
+
++ STACKOFFSET(TASK_thread, task_struct, thread);
++ STACKOFFSET(THREAD_esp, thread_struct, esp);
++ STACKOFFSET(EVENT_event_data, event, event_data);
++ STACKOFFSET(EVENT_task, event, task);
++ STACKOFFSET(EVENT_event_type, event, event_type);
++ STACKOFFSET(SPEC_number, event_spec, number);
++ DEFINE(EVENT_SIZE, sizeof(struct event));
++ DEFINE(SPEC_SIZE, sizeof(struct event_spec));
++ DEFINE(SPEC_EVENT_SIZE, sizeof(struct event_spec)+sizeof(struct event));
++
+ OFFSET(TI_task, thread_info, task);
+ OFFSET(TI_exec_domain, thread_info, exec_domain);
+ OFFSET(TI_flags, thread_info, flags);
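
The STACKOFFSET() macro added in the hunk above differs from the stock OFFSET() helper: it emits the member offset minus the size of the whole structure, i.e. a negative displacement measured from a pointer that sits just past the end of the structure. That is how the probe in entry_32.S addresses the on-stack event/event_spec pair relative to %ebp. A minimal user-space sketch of the arithmetic (illustrative only; the event_spec layout is copied from the hunk above, everything else is assumed):

	#include <stdio.h>
	#include <stddef.h>

	struct event_spec {
		unsigned long pc;
		unsigned long dcookie;
		unsigned count;
		unsigned int number;
	};

	/* Same arithmetic as the kernel macro, evaluated at run time so it
	 * can simply be printed. */
	#define STACKOFFSET(str, mem) \
		((long)offsetof(struct str, mem) - (long)sizeof(struct str))

	int main(void)
	{
		/* With this layout "number" is the last member, so the macro
		 * yields -4: four bytes below the end of the structure. */
		printf("SPEC_number = %ld\n", STACKOFFSET(event_spec, number));
		printf("SPEC_SIZE   = %zu\n", sizeof(struct event_spec));
		return 0;
	}
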
+diff -Nurb linux-2.6.27-590/arch/x86/kernel/asm-offsets_32.c.orig linux-2.6.27-591/arch/x86/kernel/asm-offsets_32.c.orig
+--- linux-2.6.27-590/arch/x86/kernel/asm-offsets_32.c.orig 1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.27-591/arch/x86/kernel/asm-offsets_32.c.orig 2008-10-09 18:13:53.000000000 -0400
+@@ -0,0 +1,147 @@
++/*
++ * Generate definitions needed by assembly language modules.
++ * This code generates raw asm output which is post-processed
++ * to extract and format the required data.
++ */
++
++#include <linux/crypto.h>
++#include <linux/sched.h>
++#include <linux/signal.h>
++#include <linux/personality.h>
++#include <linux/suspend.h>
++#include <linux/kbuild.h>
++#include <asm/ucontext.h>
++#include "sigframe.h"
++#include <asm/pgtable.h>
++#include <asm/fixmap.h>
++#include <asm/processor.h>
++#include <asm/thread_info.h>
++#include <asm/bootparam.h>
++#include <asm/elf.h>
++
++#include <xen/interface/xen.h>
++
++#include <linux/lguest.h>
++#include "../../../drivers/lguest/lg.h"
++
++/* workaround for a warning with -Wmissing-prototypes */
++void foo(void);
++
++void foo(void)
++{
++ OFFSET(IA32_SIGCONTEXT_ax, sigcontext, ax);
++ OFFSET(IA32_SIGCONTEXT_bx, sigcontext, bx);
++ OFFSET(IA32_SIGCONTEXT_cx, sigcontext, cx);
++ OFFSET(IA32_SIGCONTEXT_dx, sigcontext, dx);
++ OFFSET(IA32_SIGCONTEXT_si, sigcontext, si);
++ OFFSET(IA32_SIGCONTEXT_di, sigcontext, di);
++ OFFSET(IA32_SIGCONTEXT_bp, sigcontext, bp);
++ OFFSET(IA32_SIGCONTEXT_sp, sigcontext, sp);
++ OFFSET(IA32_SIGCONTEXT_ip, sigcontext, ip);
++ BLANK();
++
++ OFFSET(CPUINFO_x86, cpuinfo_x86, x86);
++ OFFSET(CPUINFO_x86_vendor, cpuinfo_x86, x86_vendor);
++ OFFSET(CPUINFO_x86_model, cpuinfo_x86, x86_model);
++ OFFSET(CPUINFO_x86_mask, cpuinfo_x86, x86_mask);
++ OFFSET(CPUINFO_hard_math, cpuinfo_x86, hard_math);
++ OFFSET(CPUINFO_cpuid_level, cpuinfo_x86, cpuid_level);
++ OFFSET(CPUINFO_x86_capability, cpuinfo_x86, x86_capability);
++ OFFSET(CPUINFO_x86_vendor_id, cpuinfo_x86, x86_vendor_id);
++ BLANK();
++
++ OFFSET(TI_task, thread_info, task);
++ OFFSET(TI_exec_domain, thread_info, exec_domain);
++ OFFSET(TI_flags, thread_info, flags);
++ OFFSET(TI_status, thread_info, status);
++ OFFSET(TI_preempt_count, thread_info, preempt_count);
++ OFFSET(TI_addr_limit, thread_info, addr_limit);
++ OFFSET(TI_restart_block, thread_info, restart_block);
++ OFFSET(TI_sysenter_return, thread_info, sysenter_return);
++ OFFSET(TI_cpu, thread_info, cpu);
++ BLANK();
++
++ OFFSET(GDS_size, desc_ptr, size);
++ OFFSET(GDS_address, desc_ptr, address);
++ BLANK();
++
++ OFFSET(PT_EBX, pt_regs, bx);
++ OFFSET(PT_ECX, pt_regs, cx);
++ OFFSET(PT_EDX, pt_regs, dx);
++ OFFSET(PT_ESI, pt_regs, si);
++ OFFSET(PT_EDI, pt_regs, di);
++ OFFSET(PT_EBP, pt_regs, bp);
++ OFFSET(PT_EAX, pt_regs, ax);
++ OFFSET(PT_DS, pt_regs, ds);
++ OFFSET(PT_ES, pt_regs, es);
++ OFFSET(PT_FS, pt_regs, fs);
++ OFFSET(PT_ORIG_EAX, pt_regs, orig_ax);
++ OFFSET(PT_EIP, pt_regs, ip);
++ OFFSET(PT_CS, pt_regs, cs);
++ OFFSET(PT_EFLAGS, pt_regs, flags);
++ OFFSET(PT_OLDESP, pt_regs, sp);
++ OFFSET(PT_OLDSS, pt_regs, ss);
++ BLANK();
++
++ OFFSET(EXEC_DOMAIN_handler, exec_domain, handler);
++ OFFSET(IA32_RT_SIGFRAME_sigcontext, rt_sigframe, uc.uc_mcontext);
++ BLANK();
++
++ OFFSET(pbe_address, pbe, address);
++ OFFSET(pbe_orig_address, pbe, orig_address);
++ OFFSET(pbe_next, pbe, next);
++
++ /* Offset from the sysenter stack to tss.sp0 */
++ DEFINE(TSS_sysenter_sp0, offsetof(struct tss_struct, x86_tss.sp0) -
++ sizeof(struct tss_struct));
++
++ DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
++ DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
++ DEFINE(PTRS_PER_PTE, PTRS_PER_PTE);
++ DEFINE(PTRS_PER_PMD, PTRS_PER_PMD);
++ DEFINE(PTRS_PER_PGD, PTRS_PER_PGD);
++
++ OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
++
++#ifdef CONFIG_PARAVIRT
++ BLANK();
++ OFFSET(PARAVIRT_enabled, pv_info, paravirt_enabled);
++ OFFSET(PARAVIRT_PATCH_pv_cpu_ops, paravirt_patch_template, pv_cpu_ops);
++ OFFSET(PARAVIRT_PATCH_pv_irq_ops, paravirt_patch_template, pv_irq_ops);
++ OFFSET(PV_IRQ_irq_disable, pv_irq_ops, irq_disable);
++ OFFSET(PV_IRQ_irq_enable, pv_irq_ops, irq_enable);
++ OFFSET(PV_CPU_iret, pv_cpu_ops, iret);
++ OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
++ OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
++#endif
++
++#ifdef CONFIG_XEN
++ BLANK();
++ OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
++ OFFSET(XEN_vcpu_info_pending, vcpu_info, evtchn_upcall_pending);
++#endif
++
++#if defined(CONFIG_LGUEST) || defined(CONFIG_LGUEST_GUEST) || defined(CONFIG_LGUEST_MODULE)
++ BLANK();
++ OFFSET(LGUEST_DATA_irq_enabled, lguest_data, irq_enabled);
++ OFFSET(LGUEST_DATA_pgdir, lguest_data, pgdir);
++
++ BLANK();
++ OFFSET(LGUEST_PAGES_host_gdt_desc, lguest_pages, state.host_gdt_desc);
++ OFFSET(LGUEST_PAGES_host_idt_desc, lguest_pages, state.host_idt_desc);
++ OFFSET(LGUEST_PAGES_host_cr3, lguest_pages, state.host_cr3);
++ OFFSET(LGUEST_PAGES_host_sp, lguest_pages, state.host_sp);
++ OFFSET(LGUEST_PAGES_guest_gdt_desc, lguest_pages,state.guest_gdt_desc);
++ OFFSET(LGUEST_PAGES_guest_idt_desc, lguest_pages,state.guest_idt_desc);
++ OFFSET(LGUEST_PAGES_guest_gdt, lguest_pages, state.guest_gdt);
++ OFFSET(LGUEST_PAGES_regs_trapnum, lguest_pages, regs.trapnum);
++ OFFSET(LGUEST_PAGES_regs_errcode, lguest_pages, regs.errcode);
++ OFFSET(LGUEST_PAGES_regs, lguest_pages, regs);
++#endif
++
++ BLANK();
++ OFFSET(BP_scratch, boot_params, scratch);
++ OFFSET(BP_loadflags, boot_params, hdr.loadflags);
++ OFFSET(BP_hardware_subarch, boot_params, hdr.hardware_subarch);
++ OFFSET(BP_version, boot_params, hdr.version);
++}
+diff -Nurb linux-2.6.27-590/arch/x86/kernel/entry_32.S linux-2.6.27-591/arch/x86/kernel/entry_32.S
+--- linux-2.6.27-590/arch/x86/kernel/entry_32.S 2008-10-09 18:13:53.000000000 -0400
++++ linux-2.6.27-591/arch/x86/kernel/entry_32.S 2010-01-29 15:43:33.000000000 -0500
+@@ -426,6 +426,33 @@
+ cmpl $(nr_syscalls), %eax
+ jae syscall_badsys
+ syscall_call:
++ /* Move Chopstix syscall probe here */
++ /* Save and clobber: eax, ecx, ebp */
++ pushl %eax
++ pushl %ecx
++ pushl %ebp
++ movl %esp, %ebp
++ subl $SPEC_EVENT_SIZE, %esp
++ movl rec_event, %ecx
++ testl %ecx, %ecx
++ jz carry_on
++ # struct event is first, just below %ebp
++ movl %eax, (SPEC_number-EVENT_SIZE)(%ebp)
++ leal -SPEC_EVENT_SIZE(%ebp), %eax
++ movl %eax, EVENT_event_data(%ebp)
++ movl $6, EVENT_event_type(%ebp)
++ movl rec_event, %edx
++ movl $1, 4(%esp)
++ leal -EVENT_SIZE(%ebp), %eax
++ movl %eax, (%esp)
++ call rec_event_asm
++carry_on:
++ addl $SPEC_EVENT_SIZE, %esp
++ popl %ebp
++ popl %ecx
++ popl %eax
++ /* End chopstix */
++
+ call *sys_call_table(,%eax,4)
+ movl %eax,PT_EAX(%esp) # store the return value
+ syscall_exit:
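
In C terms, the probe spliced in ahead of the syscall dispatch does roughly the following. This is a sketch, not code from the patch: the layout of struct event (it comes from <linux/arrays.h>, added elsewhere in the patch) and the relationship between rec_event and the rec_event_asm wrapper are assumptions; event_spec, the rec_event hook, and the literal event type 6 are taken from the hunks above, though what 6 denotes is inferred.

	struct event_spec {
		unsigned long pc;
		unsigned long dcookie;
		unsigned count;
		unsigned int number;
	};

	struct event {			/* assumed layout, see <linux/arrays.h> */
		void *event_data;
		void *task;
		unsigned int event_type;
	};

	extern void (*rec_event)(void *event, unsigned int count);

	static inline void chopstix_syscall_probe(unsigned int syscall_nr)
	{
		struct event_spec spec = { 0 };	/* pc/dcookie/count left for the recorder */
		struct event ev = { 0 };	/* task presumably filled by the recorder */

		if (!rec_event)			/* no collector registered: do nothing */
			return;

		spec.number   = syscall_nr;	/* %eax at syscall entry */
		ev.event_data = &spec;
		ev.event_type = 6;		/* value taken verbatim from the assembly */
		rec_event(&ev, 1);		/* the assembly routes this via rec_event_asm */
	}

Writing the probe directly in assembly keeps the hot syscall path free of an extra call when no collector is registered, at the cost of hand-maintaining the structure offsets exported by asm-offsets_32.c above.
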
+diff -Nurb linux-2.6.27-590/arch/x86/kernel/entry_32.S.orig linux-2.6.27-591/arch/x86/kernel/entry_32.S.orig
+--- linux-2.6.27-590/arch/x86/kernel/entry_32.S.orig 1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.27-591/arch/x86/kernel/entry_32.S.orig 2008-10-09 18:13:53.000000000 -0400
+@@ -0,0 +1,1232 @@
++/*
++ *
++ * Copyright (C) 1991, 1992 Linus Torvalds
++ */
++
++/*
++ * entry.S contains the system-call and fault low-level handling routines.
++ * This also contains the timer-interrupt handler, as well as all interrupts
++ * and faults that can result in a task-switch.
++ *
++ * NOTE: This code handles signal-recognition, which happens every time
++ * after a timer-interrupt and after each system call.
++ *
++ * I changed all the .align's to 4 (16 byte alignment), as that's faster
++ * on a 486.
++ *
++ * Stack layout in 'syscall_exit':
++ * ptrace needs to have all regs on the stack.
++ * if the order here is changed, it needs to be
++ * updated in fork.c:copy_process, signal.c:do_signal,
++ * ptrace.c and ptrace.h
++ *
++ * 0(%esp) - %ebx
++ * 4(%esp) - %ecx
++ * 8(%esp) - %edx
++ * C(%esp) - %esi
++ * 10(%esp) - %edi
++ * 14(%esp) - %ebp
++ * 18(%esp) - %eax
++ * 1C(%esp) - %ds
++ * 20(%esp) - %es
++ * 24(%esp) - %fs
++ * 28(%esp) - orig_eax
++ * 2C(%esp) - %eip
++ * 30(%esp) - %cs
++ * 34(%esp) - %eflags
++ * 38(%esp) - %oldesp
++ * 3C(%esp) - %oldss
++ *
++ * "current" is in register %ebx during any slow entries.
++ */
++
++#include <linux/linkage.h>
++#include <asm/thread_info.h>
++#include <asm/irqflags.h>
++#include <asm/errno.h>
++#include <asm/segment.h>
++#include <asm/smp.h>
++#include <asm/page.h>
++#include <asm/desc.h>
++#include <asm/percpu.h>
++#include <asm/dwarf2.h>
++#include <asm/processor-flags.h>
++#include <asm/ftrace.h>
++#include <asm/irq_vectors.h>
++
++/* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
++#include <linux/elf-em.h>
++#define AUDIT_ARCH_I386 (EM_386|__AUDIT_ARCH_LE)
++#define __AUDIT_ARCH_LE 0x40000000
++
++#ifndef CONFIG_AUDITSYSCALL
++#define sysenter_audit syscall_trace_entry
++#define sysexit_audit syscall_exit_work
++#endif
++
++/*
++ * We use macros for low-level operations which need to be overridden
++ * for paravirtualization. The following will never clobber any registers:
++ * INTERRUPT_RETURN (aka. "iret")
++ * GET_CR0_INTO_EAX (aka. "movl %cr0, %eax")
++ * ENABLE_INTERRUPTS_SYSEXIT (aka "sti; sysexit").
++ *
++ * For DISABLE_INTERRUPTS/ENABLE_INTERRUPTS (aka "cli"/"sti"), you must
++ * specify what registers can be overwritten (CLBR_NONE, CLBR_EAX/EDX/ECX/ANY).
++ * Allowing a register to be clobbered can shrink the paravirt replacement
++ * enough to patch inline, increasing performance.
++ */
++
++#define nr_syscalls ((syscall_table_size)/4)
++
++#ifdef CONFIG_PREEMPT
++#define preempt_stop(clobbers) DISABLE_INTERRUPTS(clobbers); TRACE_IRQS_OFF
++#else
++#define preempt_stop(clobbers)
++#define resume_kernel restore_nocheck
++#endif
++
++.macro TRACE_IRQS_IRET
++#ifdef CONFIG_TRACE_IRQFLAGS
++ testl $X86_EFLAGS_IF,PT_EFLAGS(%esp) # interrupts off?
++ jz 1f
++ TRACE_IRQS_ON
++1:
++#endif
++.endm
++
++#ifdef CONFIG_VM86
++#define resume_userspace_sig check_userspace
++#else
++#define resume_userspace_sig resume_userspace
++#endif
++
++#define SAVE_ALL \
++ cld; \
++ pushl %fs; \
++ CFI_ADJUST_CFA_OFFSET 4;\
++ /*CFI_REL_OFFSET fs, 0;*/\
++ pushl %es; \
++ CFI_ADJUST_CFA_OFFSET 4;\
++ /*CFI_REL_OFFSET es, 0;*/\
++ pushl %ds; \
++ CFI_ADJUST_CFA_OFFSET 4;\
++ /*CFI_REL_OFFSET ds, 0;*/\
++ pushl %eax; \
++ CFI_ADJUST_CFA_OFFSET 4;\
++ CFI_REL_OFFSET eax, 0;\
++ pushl %ebp; \
++ CFI_ADJUST_CFA_OFFSET 4;\
++ CFI_REL_OFFSET ebp, 0;\
++ pushl %edi; \
++ CFI_ADJUST_CFA_OFFSET 4;\
++ CFI_REL_OFFSET edi, 0;\
++ pushl %esi; \
++ CFI_ADJUST_CFA_OFFSET 4;\
++ CFI_REL_OFFSET esi, 0;\
++ pushl %edx; \
++ CFI_ADJUST_CFA_OFFSET 4;\
++ CFI_REL_OFFSET edx, 0;\
++ pushl %ecx; \
++ CFI_ADJUST_CFA_OFFSET 4;\
++ CFI_REL_OFFSET ecx, 0;\
++ pushl %ebx; \
++ CFI_ADJUST_CFA_OFFSET 4;\
++ CFI_REL_OFFSET ebx, 0;\
++ movl $(__USER_DS), %edx; \
++ movl %edx, %ds; \
++ movl %edx, %es; \
++ movl $(__KERNEL_PERCPU), %edx; \
++ movl %edx, %fs
++
++#define RESTORE_INT_REGS \
++ popl %ebx; \
++ CFI_ADJUST_CFA_OFFSET -4;\
++ CFI_RESTORE ebx;\
++ popl %ecx; \
++ CFI_ADJUST_CFA_OFFSET -4;\
++ CFI_RESTORE ecx;\
++ popl %edx; \
++ CFI_ADJUST_CFA_OFFSET -4;\
++ CFI_RESTORE edx;\
++ popl %esi; \
++ CFI_ADJUST_CFA_OFFSET -4;\
++ CFI_RESTORE esi;\
++ popl %edi; \
++ CFI_ADJUST_CFA_OFFSET -4;\
++ CFI_RESTORE edi;\
++ popl %ebp; \
++ CFI_ADJUST_CFA_OFFSET -4;\
++ CFI_RESTORE ebp;\
++ popl %eax; \
++ CFI_ADJUST_CFA_OFFSET -4;\
++ CFI_RESTORE eax
++
++#define RESTORE_REGS \
++ RESTORE_INT_REGS; \
++1: popl %ds; \
++ CFI_ADJUST_CFA_OFFSET -4;\
++ /*CFI_RESTORE ds;*/\
++2: popl %es; \
++ CFI_ADJUST_CFA_OFFSET -4;\
++ /*CFI_RESTORE es;*/\
++3: popl %fs; \
++ CFI_ADJUST_CFA_OFFSET -4;\
++ /*CFI_RESTORE fs;*/\
++.pushsection .fixup,"ax"; \
++4: movl $0,(%esp); \
++ jmp 1b; \
++5: movl $0,(%esp); \
++ jmp 2b; \
++6: movl $0,(%esp); \
++ jmp 3b; \
++.section __ex_table,"a";\
++ .align 4; \
++ .long 1b,4b; \
++ .long 2b,5b; \
++ .long 3b,6b; \
++.popsection
++
++#define RING0_INT_FRAME \
++ CFI_STARTPROC simple;\
++ CFI_SIGNAL_FRAME;\
++ CFI_DEF_CFA esp, 3*4;\
++ /*CFI_OFFSET cs, -2*4;*/\
++ CFI_OFFSET eip, -3*4
++
++#define RING0_EC_FRAME \
++ CFI_STARTPROC simple;\
++ CFI_SIGNAL_FRAME;\
++ CFI_DEF_CFA esp, 4*4;\
++ /*CFI_OFFSET cs, -2*4;*/\
++ CFI_OFFSET eip, -3*4
++
++#define RING0_PTREGS_FRAME \
++ CFI_STARTPROC simple;\
++ CFI_SIGNAL_FRAME;\
++ CFI_DEF_CFA esp, PT_OLDESP-PT_EBX;\
++ /*CFI_OFFSET cs, PT_CS-PT_OLDESP;*/\
++ CFI_OFFSET eip, PT_EIP-PT_OLDESP;\
++ /*CFI_OFFSET es, PT_ES-PT_OLDESP;*/\
++ /*CFI_OFFSET ds, PT_DS-PT_OLDESP;*/\
++ CFI_OFFSET eax, PT_EAX-PT_OLDESP;\
++ CFI_OFFSET ebp, PT_EBP-PT_OLDESP;\
++ CFI_OFFSET edi, PT_EDI-PT_OLDESP;\
++ CFI_OFFSET esi, PT_ESI-PT_OLDESP;\
++ CFI_OFFSET edx, PT_EDX-PT_OLDESP;\
++ CFI_OFFSET ecx, PT_ECX-PT_OLDESP;\
++ CFI_OFFSET ebx, PT_EBX-PT_OLDESP
++
++ENTRY(ret_from_fork)
++ CFI_STARTPROC
++ pushl %eax
++ CFI_ADJUST_CFA_OFFSET 4
++ call schedule_tail
++ GET_THREAD_INFO(%ebp)
++ popl %eax
++ CFI_ADJUST_CFA_OFFSET -4
++ pushl $0x0202 # Reset kernel eflags
++ CFI_ADJUST_CFA_OFFSET 4
++ popfl
++ CFI_ADJUST_CFA_OFFSET -4
++ jmp syscall_exit
++ CFI_ENDPROC
++END(ret_from_fork)
++
++/*
++ * Return to user mode is not as complex as all this looks,
++ * but we want the default path for a system call return to
++ * go as quickly as possible which is why some of this is
++ * less clear than it otherwise should be.
++ */
++
++ # userspace resumption stub bypassing syscall exit tracing
++ ALIGN
++ RING0_PTREGS_FRAME
++ret_from_exception:
++ preempt_stop(CLBR_ANY)
++ret_from_intr:
++ GET_THREAD_INFO(%ebp)
++check_userspace:
++ movl PT_EFLAGS(%esp), %eax # mix EFLAGS and CS
++ movb PT_CS(%esp), %al
++ andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
++ cmpl $USER_RPL, %eax
++ jb resume_kernel # not returning to v8086 or userspace
++
++ENTRY(resume_userspace)
++ LOCKDEP_SYS_EXIT
++ DISABLE_INTERRUPTS(CLBR_ANY) # make sure we don't miss an interrupt
++ # setting need_resched or sigpending
++ # between sampling and the iret
++ TRACE_IRQS_OFF
++ movl TI_flags(%ebp), %ecx
++ andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
++ # int/exception return?
++ jne work_pending
++ jmp restore_all
++END(ret_from_exception)
++
++#ifdef CONFIG_PREEMPT
++ENTRY(resume_kernel)
++ DISABLE_INTERRUPTS(CLBR_ANY)
++ cmpl $0,TI_preempt_count(%ebp) # non-zero preempt_count ?
++ jnz restore_nocheck
++need_resched:
++ movl TI_flags(%ebp), %ecx # need_resched set ?
++ testb $_TIF_NEED_RESCHED, %cl
++ jz restore_all
++ testl $X86_EFLAGS_IF,PT_EFLAGS(%esp) # interrupts off (exception path) ?
++ jz restore_all
++ call preempt_schedule_irq
++ jmp need_resched
++END(resume_kernel)
++#endif
++ CFI_ENDPROC
++
++/* SYSENTER_RETURN points to after the "sysenter" instruction in
++ the vsyscall page. See vsyscall-sysentry.S, which defines the symbol. */
++
++ # sysenter call handler stub
++ENTRY(ia32_sysenter_target)
++ CFI_STARTPROC simple
++ CFI_SIGNAL_FRAME
++ CFI_DEF_CFA esp, 0
++ CFI_REGISTER esp, ebp
++ movl TSS_sysenter_sp0(%esp),%esp
++sysenter_past_esp:
++ /*
++ * Interrupts are disabled here, but we can't trace it until
++ * enough kernel state to call TRACE_IRQS_OFF can be called - but
++ * we immediately enable interrupts at that point anyway.
++ */
++ pushl $(__USER_DS)
++ CFI_ADJUST_CFA_OFFSET 4
++ /*CFI_REL_OFFSET ss, 0*/
++ pushl %ebp
++ CFI_ADJUST_CFA_OFFSET 4
++ CFI_REL_OFFSET esp, 0
++ pushfl
++ orl $X86_EFLAGS_IF, (%esp)
++ CFI_ADJUST_CFA_OFFSET 4
++ pushl $(__USER_CS)
++ CFI_ADJUST_CFA_OFFSET 4
++ /*CFI_REL_OFFSET cs, 0*/
++ /*
++ * Push current_thread_info()->sysenter_return to the stack.
++ * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
++ * pushed above; +8 corresponds to copy_thread's esp0 setting.
++ */
++ pushl (TI_sysenter_return-THREAD_SIZE+8+4*4)(%esp)
++ CFI_ADJUST_CFA_OFFSET 4
++ CFI_REL_OFFSET eip, 0
++
++ pushl %eax
++ CFI_ADJUST_CFA_OFFSET 4
++ SAVE_ALL
++ ENABLE_INTERRUPTS(CLBR_NONE)
++
++/*
++ * Load the potential sixth argument from user stack.
++ * Careful about security.
++ */
++ cmpl $__PAGE_OFFSET-3,%ebp
++ jae syscall_fault
++1: movl (%ebp),%ebp
++ movl %ebp,PT_EBP(%esp)
++.section __ex_table,"a"
++ .align 4
++ .long 1b,syscall_fault
++.previous
++
++ GET_THREAD_INFO(%ebp)
++
++ /* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */
++ testw $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
++ jnz sysenter_audit
++sysenter_do_call:
++ cmpl $(nr_syscalls), %eax
++ jae syscall_badsys
++ call *sys_call_table(,%eax,4)
++ movl %eax,PT_EAX(%esp)
++ LOCKDEP_SYS_EXIT
++ DISABLE_INTERRUPTS(CLBR_ANY)
++ TRACE_IRQS_OFF
++ movl TI_flags(%ebp), %ecx
++ testw $_TIF_ALLWORK_MASK, %cx
++ jne sysexit_audit
++sysenter_exit:
++/* if something modifies registers it must also disable sysexit */
++ movl PT_EIP(%esp), %edx
++ movl PT_OLDESP(%esp), %ecx
++ xorl %ebp,%ebp
++ TRACE_IRQS_ON
++1: mov PT_FS(%esp), %fs
++ ENABLE_INTERRUPTS_SYSEXIT
++
++#ifdef CONFIG_AUDITSYSCALL
++sysenter_audit:
++ testw $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
++ jnz syscall_trace_entry
++ addl $4,%esp
++ CFI_ADJUST_CFA_OFFSET -4
++ /* %esi already in 8(%esp) 6th arg: 4th syscall arg */
++ /* %edx already in 4(%esp) 5th arg: 3rd syscall arg */
++ /* %ecx already in 0(%esp) 4th arg: 2nd syscall arg */
++ movl %ebx,%ecx /* 3rd arg: 1st syscall arg */
++ movl %eax,%edx /* 2nd arg: syscall number */
++ movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
++ call audit_syscall_entry
++ pushl %ebx
++ CFI_ADJUST_CFA_OFFSET 4
++ movl PT_EAX(%esp),%eax /* reload syscall number */
++ jmp sysenter_do_call
++
++sysexit_audit:
++ testw $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %cx
++ jne syscall_exit_work
++ TRACE_IRQS_ON
++ ENABLE_INTERRUPTS(CLBR_ANY)
++ movl %eax,%edx /* second arg, syscall return value */
++ cmpl $0,%eax /* is it < 0? */
++ setl %al /* 1 if so, 0 if not */
++ movzbl %al,%eax /* zero-extend that */
++ inc %eax /* first arg, 0->1(AUDITSC_SUCCESS), 1->2(AUDITSC_FAILURE) */
++ call audit_syscall_exit
++ DISABLE_INTERRUPTS(CLBR_ANY)
++ TRACE_IRQS_OFF
++ movl TI_flags(%ebp), %ecx
++ testw $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %cx
++ jne syscall_exit_work
++ movl PT_EAX(%esp),%eax /* reload syscall return value */
++ jmp sysenter_exit
++#endif
++
++ CFI_ENDPROC
++.pushsection .fixup,"ax"
++2: movl $0,PT_FS(%esp)
++ jmp 1b
++.section __ex_table,"a"
++ .align 4
++ .long 1b,2b
++.popsection
++ENDPROC(ia32_sysenter_target)
++
++ # system call handler stub
++ENTRY(system_call)
++ RING0_INT_FRAME # can't unwind into user space anyway
++ pushl %eax # save orig_eax
++ CFI_ADJUST_CFA_OFFSET 4
++ SAVE_ALL
++ GET_THREAD_INFO(%ebp)
++ # system call tracing in operation / emulation
++ /* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */
++ testw $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
++ jnz syscall_trace_entry
++ cmpl $(nr_syscalls), %eax
++ jae syscall_badsys
++syscall_call:
++ call *sys_call_table(,%eax,4)
++ movl %eax,PT_EAX(%esp) # store the return value
++syscall_exit:
++ LOCKDEP_SYS_EXIT
++ DISABLE_INTERRUPTS(CLBR_ANY) # make sure we don't miss an interrupt
++ # setting need_resched or sigpending
++ # between sampling and the iret
++ TRACE_IRQS_OFF
++ movl TI_flags(%ebp), %ecx
++ testw $_TIF_ALLWORK_MASK, %cx # current->work
++ jne syscall_exit_work
++
++restore_all:
++ movl PT_EFLAGS(%esp), %eax # mix EFLAGS, SS and CS
++ # Warning: PT_OLDSS(%esp) contains the wrong/random values if we
++ # are returning to the kernel.
++ # See comments in process.c:copy_thread() for details.
++ movb PT_OLDSS(%esp), %ah
++ movb PT_CS(%esp), %al
++ andl $(X86_EFLAGS_VM | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax
++ cmpl $((SEGMENT_LDT << 8) | USER_RPL), %eax
++ CFI_REMEMBER_STATE
++ je ldt_ss # returning to user-space with LDT SS
++restore_nocheck:
++ TRACE_IRQS_IRET
++restore_nocheck_notrace:
++ RESTORE_REGS
++ addl $4, %esp # skip orig_eax/error_code
++ CFI_ADJUST_CFA_OFFSET -4
++irq_return:
++ INTERRUPT_RETURN
++.section .fixup,"ax"
++ENTRY(iret_exc)
++ pushl $0 # no error code
++ pushl $do_iret_error
++ jmp error_code
++.previous
++.section __ex_table,"a"
++ .align 4
++ .long irq_return,iret_exc
++.previous
++
++ CFI_RESTORE_STATE
++ldt_ss:
++ larl PT_OLDSS(%esp), %eax
++ jnz restore_nocheck
++ testl $0x00400000, %eax # returning to 32bit stack?
++ jnz restore_nocheck # allright, normal return
++
++#ifdef CONFIG_PARAVIRT
++ /*
++ * The kernel can't run on a non-flat stack if paravirt mode
++ * is active. Rather than try to fixup the high bits of
++ * ESP, bypass this code entirely. This may break DOSemu
++ * and/or Wine support in a paravirt VM, although the option
++ * is still available to implement the setting of the high
++ * 16-bits in the INTERRUPT_RETURN paravirt-op.
++ */
++ cmpl $0, pv_info+PARAVIRT_enabled
++ jne restore_nocheck
++#endif
++
++ /* If returning to userspace with 16bit stack,
++ * try to fix the higher word of ESP, as the CPU
++ * won't restore it.
++ * This is an "official" bug of all the x86-compatible
++ * CPUs, which we can try to work around to make
++ * dosemu and wine happy. */
++ movl PT_OLDESP(%esp), %eax
++ movl %esp, %edx
++ call patch_espfix_desc
++ pushl $__ESPFIX_SS
++ CFI_ADJUST_CFA_OFFSET 4
++ pushl %eax
++ CFI_ADJUST_CFA_OFFSET 4
++ DISABLE_INTERRUPTS(CLBR_EAX)
++ TRACE_IRQS_OFF
++ lss (%esp), %esp
++ CFI_ADJUST_CFA_OFFSET -8
++ jmp restore_nocheck
++ CFI_ENDPROC
++ENDPROC(system_call)
++
++ # perform work that needs to be done immediately before resumption
++ ALIGN
++ RING0_PTREGS_FRAME # can't unwind into user space anyway
++work_pending:
++ testb $_TIF_NEED_RESCHED, %cl
++ jz work_notifysig
++work_resched:
++ call schedule
++ LOCKDEP_SYS_EXIT
++ DISABLE_INTERRUPTS(CLBR_ANY) # make sure we don't miss an interrupt
++ # setting need_resched or sigpending
++ # between sampling and the iret
++ TRACE_IRQS_OFF
++ movl TI_flags(%ebp), %ecx
++ andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
++ # than syscall tracing?
++ jz restore_all
++ testb $_TIF_NEED_RESCHED, %cl
++ jnz work_resched
++
++work_notifysig: # deal with pending signals and
++ # notify-resume requests
++#ifdef CONFIG_VM86
++ testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
++ movl %esp, %eax
++ jne work_notifysig_v86 # returning to kernel-space or
++ # vm86-space
++ xorl %edx, %edx
++ call do_notify_resume
++ jmp resume_userspace_sig
++
++ ALIGN
++work_notifysig_v86:
++ pushl %ecx # save ti_flags for do_notify_resume
++ CFI_ADJUST_CFA_OFFSET 4
++ call save_v86_state # %eax contains pt_regs pointer
++ popl %ecx
++ CFI_ADJUST_CFA_OFFSET -4
++ movl %eax, %esp
++#else
++ movl %esp, %eax
++#endif
++ xorl %edx, %edx
++ call do_notify_resume
++ jmp resume_userspace_sig
++END(work_pending)
++
++ # perform syscall exit tracing
++ ALIGN
++syscall_trace_entry:
++ movl $-ENOSYS,PT_EAX(%esp)
++ movl %esp, %eax
++ call syscall_trace_enter
++ /* What it returned is what we'll actually use. */
++ cmpl $(nr_syscalls), %eax
++ jnae syscall_call
++ jmp syscall_exit
++END(syscall_trace_entry)
++
++ # perform syscall exit tracing
++ ALIGN
++syscall_exit_work:
++ testb $_TIF_WORK_SYSCALL_EXIT, %cl
++ jz work_pending
++ TRACE_IRQS_ON
++ ENABLE_INTERRUPTS(CLBR_ANY) # could let syscall_trace_leave() call
++ # schedule() instead
++ movl %esp, %eax
++ call syscall_trace_leave
++ jmp resume_userspace
++END(syscall_exit_work)
++ CFI_ENDPROC
++
++ RING0_INT_FRAME # can't unwind into user space anyway
++syscall_fault:
++ GET_THREAD_INFO(%ebp)
++ movl $-EFAULT,PT_EAX(%esp)
++ jmp resume_userspace
++END(syscall_fault)
++
++syscall_badsys:
++ movl $-ENOSYS,PT_EAX(%esp)
++ jmp resume_userspace
++END(syscall_badsys)
++ CFI_ENDPROC
++
++#define FIXUP_ESPFIX_STACK \
++ /* since we are on a wrong stack, we cant make it a C code :( */ \
++ PER_CPU(gdt_page, %ebx); \
++ GET_DESC_BASE(GDT_ENTRY_ESPFIX_SS, %ebx, %eax, %ax, %al, %ah); \
++ addl %esp, %eax; \
++ pushl $__KERNEL_DS; \
++ CFI_ADJUST_CFA_OFFSET 4; \
++ pushl %eax; \
++ CFI_ADJUST_CFA_OFFSET 4; \
++ lss (%esp), %esp; \
++ CFI_ADJUST_CFA_OFFSET -8;
++#define UNWIND_ESPFIX_STACK \
++ movl %ss, %eax; \
++ /* see if on espfix stack */ \
++ cmpw $__ESPFIX_SS, %ax; \
++ jne 27f; \
++ movl $__KERNEL_DS, %eax; \
++ movl %eax, %ds; \
++ movl %eax, %es; \
++ /* switch to normal stack */ \
++ FIXUP_ESPFIX_STACK; \
++27:;
++
++/*
++ * Build the entry stubs and pointer table with
++ * some assembler magic.
++ */
++.section .rodata,"a"
++ENTRY(interrupt)
++.text
++
++ENTRY(irq_entries_start)
++ RING0_INT_FRAME
++vector=0
++.rept NR_IRQS
++ ALIGN
++ .if vector
++ CFI_ADJUST_CFA_OFFSET -4
++ .endif
++1: pushl $~(vector)
++ CFI_ADJUST_CFA_OFFSET 4
++ jmp common_interrupt
++ .previous
++ .long 1b
++ .text
++vector=vector+1
++.endr
++END(irq_entries_start)
++
++.previous
++END(interrupt)
++.previous
++
++/*
++ * the CPU automatically disables interrupts when executing an IRQ vector,
++ * so IRQ-flags tracing has to follow that:
++ */
++ ALIGN
++common_interrupt:
++ SAVE_ALL
++ TRACE_IRQS_OFF
++ movl %esp,%eax
++ call do_IRQ
++ jmp ret_from_intr
++ENDPROC(common_interrupt)
++ CFI_ENDPROC
++
++#define BUILD_INTERRUPT(name, nr) \
++ENTRY(name) \
++ RING0_INT_FRAME; \
++ pushl $~(nr); \
++ CFI_ADJUST_CFA_OFFSET 4; \
++ SAVE_ALL; \
++ TRACE_IRQS_OFF \
++ movl %esp,%eax; \
++ call smp_##name; \
++ jmp ret_from_intr; \
++ CFI_ENDPROC; \
++ENDPROC(name)
++
++/* The include is where all of the SMP etc. interrupts come from */
++#include "entry_arch.h"
++
++KPROBE_ENTRY(page_fault)
++ RING0_EC_FRAME
++ pushl $do_page_fault
++ CFI_ADJUST_CFA_OFFSET 4
++ ALIGN
++error_code:
++ /* the function address is in %fs's slot on the stack */
++ pushl %es
++ CFI_ADJUST_CFA_OFFSET 4
++ /*CFI_REL_OFFSET es, 0*/
++ pushl %ds
++ CFI_ADJUST_CFA_OFFSET 4
++ /*CFI_REL_OFFSET ds, 0*/
++ pushl %eax
++ CFI_ADJUST_CFA_OFFSET 4
++ CFI_REL_OFFSET eax, 0
++ pushl %ebp
++ CFI_ADJUST_CFA_OFFSET 4
++ CFI_REL_OFFSET ebp, 0
++ pushl %edi
++ CFI_ADJUST_CFA_OFFSET 4
++ CFI_REL_OFFSET edi, 0
++ pushl %esi
++ CFI_ADJUST_CFA_OFFSET 4
++ CFI_REL_OFFSET esi, 0
++ pushl %edx
++ CFI_ADJUST_CFA_OFFSET 4
++ CFI_REL_OFFSET edx, 0
++ pushl %ecx
++ CFI_ADJUST_CFA_OFFSET 4
++ CFI_REL_OFFSET ecx, 0
++ pushl %ebx
++ CFI_ADJUST_CFA_OFFSET 4
++ CFI_REL_OFFSET ebx, 0
++ cld
++ pushl %fs
++ CFI_ADJUST_CFA_OFFSET 4
++ /*CFI_REL_OFFSET fs, 0*/
++ movl $(__KERNEL_PERCPU), %ecx
++ movl %ecx, %fs
++ UNWIND_ESPFIX_STACK
++ popl %ecx
++ CFI_ADJUST_CFA_OFFSET -4
++ /*CFI_REGISTER es, ecx*/
++ movl PT_FS(%esp), %edi # get the function address
++ movl PT_ORIG_EAX(%esp), %edx # get the error code
++ movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
++ mov %ecx, PT_FS(%esp)
++ /*CFI_REL_OFFSET fs, ES*/
++ movl $(__USER_DS), %ecx
++ movl %ecx, %ds
++ movl %ecx, %es
++ movl %esp,%eax # pt_regs pointer
++ call *%edi
++ jmp ret_from_exception
++ CFI_ENDPROC
++KPROBE_END(page_fault)
++
++ENTRY(coprocessor_error)
++ RING0_INT_FRAME
++ pushl $0
++ CFI_ADJUST_CFA_OFFSET 4
++ pushl $do_coprocessor_error
++ CFI_ADJUST_CFA_OFFSET 4
++ jmp error_code
++ CFI_ENDPROC
++END(coprocessor_error)
++
++ENTRY(simd_coprocessor_error)
++ RING0_INT_FRAME
++ pushl $0
++ CFI_ADJUST_CFA_OFFSET 4
++ pushl $do_simd_coprocessor_error
++ CFI_ADJUST_CFA_OFFSET 4
++ jmp error_code
++ CFI_ENDPROC
++END(simd_coprocessor_error)
++
++ENTRY(device_not_available)
++ RING0_INT_FRAME
++ pushl $-1 # mark this as an int
++ CFI_ADJUST_CFA_OFFSET 4
++ SAVE_ALL
++ GET_CR0_INTO_EAX
++ testl $0x4, %eax # EM (math emulation bit)
++ jne device_not_available_emulate
++ preempt_stop(CLBR_ANY)
++ call math_state_restore
++ jmp ret_from_exception
++device_not_available_emulate:
++ pushl $0 # temporary storage for ORIG_EIP
++ CFI_ADJUST_CFA_OFFSET 4
++ call math_emulate
++ addl $4, %esp
++ CFI_ADJUST_CFA_OFFSET -4
++ jmp ret_from_exception
++ CFI_ENDPROC
++END(device_not_available)
++
++/*
++ * Debug traps and NMI can happen at the one SYSENTER instruction
++ * that sets up the real kernel stack. Check here, since we can't
++ * allow the wrong stack to be used.
++ *
++ * "TSS_sysenter_sp0+12" is because the NMI/debug handler will have
++ * already pushed 3 words if it hits on the sysenter instruction:
++ * eflags, cs and eip.
++ *
++ * We just load the right stack, and push the three (known) values
++ * by hand onto the new stack - while updating the return eip past
++ * the instruction that would have done it for sysenter.
++ */
++#define FIX_STACK(offset, ok, label) \
++ cmpw $__KERNEL_CS,4(%esp); \
++ jne ok; \
++label: \
++ movl TSS_sysenter_sp0+offset(%esp),%esp; \
++ CFI_DEF_CFA esp, 0; \
++ CFI_UNDEFINED eip; \
++ pushfl; \
++ CFI_ADJUST_CFA_OFFSET 4; \
++ pushl $__KERNEL_CS; \
++ CFI_ADJUST_CFA_OFFSET 4; \
++ pushl $sysenter_past_esp; \
++ CFI_ADJUST_CFA_OFFSET 4; \
++ CFI_REL_OFFSET eip, 0
++
++KPROBE_ENTRY(debug)
++ RING0_INT_FRAME
++ cmpl $ia32_sysenter_target,(%esp)
++ jne debug_stack_correct
++ FIX_STACK(12, debug_stack_correct, debug_esp_fix_insn)
++debug_stack_correct:
++ pushl $-1 # mark this as an int
++ CFI_ADJUST_CFA_OFFSET 4
++ SAVE_ALL
++ xorl %edx,%edx # error code 0
++ movl %esp,%eax # pt_regs pointer
++ call do_debug
++ jmp ret_from_exception
++ CFI_ENDPROC
++KPROBE_END(debug)
++
++/*
++ * NMI is doubly nasty. It can happen _while_ we're handling
++ * a debug fault, and the debug fault hasn't yet been able to
++ * clear up the stack. So we first check whether we got an
++ * NMI on the sysenter entry path, but after that we need to
++ * check whether we got an NMI on the debug path where the debug
++ * fault happened on the sysenter path.
++ */
++KPROBE_ENTRY(nmi)
++ RING0_INT_FRAME
++ pushl %eax
++ CFI_ADJUST_CFA_OFFSET 4
++ movl %ss, %eax
++ cmpw $__ESPFIX_SS, %ax
++ popl %eax
++ CFI_ADJUST_CFA_OFFSET -4
++ je nmi_espfix_stack
++ cmpl $ia32_sysenter_target,(%esp)
++ je nmi_stack_fixup
++ pushl %eax
++ CFI_ADJUST_CFA_OFFSET 4
++ movl %esp,%eax
++ /* Do not access memory above the end of our stack page,
++ * it might not exist.
++ */
++ andl $(THREAD_SIZE-1),%eax
++ cmpl $(THREAD_SIZE-20),%eax
++ popl %eax
++ CFI_ADJUST_CFA_OFFSET -4
++ jae nmi_stack_correct
++ cmpl $ia32_sysenter_target,12(%esp)
++ je nmi_debug_stack_check
++nmi_stack_correct:
++ /* We have a RING0_INT_FRAME here */
++ pushl %eax
++ CFI_ADJUST_CFA_OFFSET 4
++ SAVE_ALL
++ xorl %edx,%edx # zero error code
++ movl %esp,%eax # pt_regs pointer
++ call do_nmi
++ jmp restore_nocheck_notrace
++ CFI_ENDPROC
++
++nmi_stack_fixup:
++ RING0_INT_FRAME
++ FIX_STACK(12,nmi_stack_correct, 1)
++ jmp nmi_stack_correct
++
++nmi_debug_stack_check:
++ /* We have a RING0_INT_FRAME here */
++ cmpw $__KERNEL_CS,16(%esp)
++ jne nmi_stack_correct
++ cmpl $debug,(%esp)
++ jb nmi_stack_correct
++ cmpl $debug_esp_fix_insn,(%esp)
++ ja nmi_stack_correct
++ FIX_STACK(24,nmi_stack_correct, 1)
++ jmp nmi_stack_correct
++
++nmi_espfix_stack:
++ /* We have a RING0_INT_FRAME here.
++ *
++ * create the pointer to lss back
++ */
++ pushl %ss
++ CFI_ADJUST_CFA_OFFSET 4
++ pushl %esp
++ CFI_ADJUST_CFA_OFFSET 4
++ addw $4, (%esp)
++ /* copy the iret frame of 12 bytes */
++ .rept 3
++ pushl 16(%esp)
++ CFI_ADJUST_CFA_OFFSET 4
++ .endr
++ pushl %eax
++ CFI_ADJUST_CFA_OFFSET 4
++ SAVE_ALL
++ FIXUP_ESPFIX_STACK # %eax == %esp
++ xorl %edx,%edx # zero error code
++ call do_nmi
++ RESTORE_REGS
++ lss 12+4(%esp), %esp # back to espfix stack
++ CFI_ADJUST_CFA_OFFSET -24
++ jmp irq_return
++ CFI_ENDPROC
++KPROBE_END(nmi)
++
++#ifdef CONFIG_PARAVIRT
++ENTRY(native_iret)
++ iret
++.section __ex_table,"a"
++ .align 4
++ .long native_iret, iret_exc
++.previous
++END(native_iret)
++
++ENTRY(native_irq_enable_sysexit)
++ sti
++ sysexit
++END(native_irq_enable_sysexit)
++#endif
++
++KPROBE_ENTRY(int3)
++ RING0_INT_FRAME
++ pushl $-1 # mark this as an int
++ CFI_ADJUST_CFA_OFFSET 4
++ SAVE_ALL
++ xorl %edx,%edx # zero error code
++ movl %esp,%eax # pt_regs pointer
++ call do_int3
++ jmp ret_from_exception
++ CFI_ENDPROC
++KPROBE_END(int3)
++
++ENTRY(overflow)
++ RING0_INT_FRAME
++ pushl $0
++ CFI_ADJUST_CFA_OFFSET 4
++ pushl $do_overflow
++ CFI_ADJUST_CFA_OFFSET 4
++ jmp error_code
++ CFI_ENDPROC
++END(overflow)
++
++ENTRY(bounds)
++ RING0_INT_FRAME
++ pushl $0
++ CFI_ADJUST_CFA_OFFSET 4
++ pushl $do_bounds
++ CFI_ADJUST_CFA_OFFSET 4
++ jmp error_code
++ CFI_ENDPROC
++END(bounds)
++
++ENTRY(invalid_op)
++ RING0_INT_FRAME
++ pushl $0
++ CFI_ADJUST_CFA_OFFSET 4
++ pushl $do_invalid_op
++ CFI_ADJUST_CFA_OFFSET 4
++ jmp error_code
++ CFI_ENDPROC
++END(invalid_op)
++
++ENTRY(coprocessor_segment_overrun)
++ RING0_INT_FRAME
++ pushl $0
++ CFI_ADJUST_CFA_OFFSET 4
++ pushl $do_coprocessor_segment_overrun
++ CFI_ADJUST_CFA_OFFSET 4
++ jmp error_code
++ CFI_ENDPROC
++END(coprocessor_segment_overrun)
++
++ENTRY(invalid_TSS)
++ RING0_EC_FRAME
++ pushl $do_invalid_TSS
++ CFI_ADJUST_CFA_OFFSET 4
++ jmp error_code
++ CFI_ENDPROC
++END(invalid_TSS)
++
++ENTRY(segment_not_present)
++ RING0_EC_FRAME
++ pushl $do_segment_not_present
++ CFI_ADJUST_CFA_OFFSET 4
++ jmp error_code
++ CFI_ENDPROC
++END(segment_not_present)
++
++ENTRY(stack_segment)
++ RING0_EC_FRAME
++ pushl $do_stack_segment
++ CFI_ADJUST_CFA_OFFSET 4
++ jmp error_code
++ CFI_ENDPROC
++END(stack_segment)
++
++KPROBE_ENTRY(general_protection)
++ RING0_EC_FRAME
++ pushl $do_general_protection
++ CFI_ADJUST_CFA_OFFSET 4
++ jmp error_code
++ CFI_ENDPROC
++KPROBE_END(general_protection)
++
++ENTRY(alignment_check)
++ RING0_EC_FRAME
++ pushl $do_alignment_check
++ CFI_ADJUST_CFA_OFFSET 4
++ jmp error_code
++ CFI_ENDPROC
++END(alignment_check)
++
++ENTRY(divide_error)
++ RING0_INT_FRAME
++ pushl $0 # no error code
++ CFI_ADJUST_CFA_OFFSET 4
++ pushl $do_divide_error
++ CFI_ADJUST_CFA_OFFSET 4
++ jmp error_code
++ CFI_ENDPROC
++END(divide_error)
++
++#ifdef CONFIG_X86_MCE
++ENTRY(machine_check)
++ RING0_INT_FRAME
++ pushl $0
++ CFI_ADJUST_CFA_OFFSET 4
++ pushl machine_check_vector
++ CFI_ADJUST_CFA_OFFSET 4
++ jmp error_code
++ CFI_ENDPROC
++END(machine_check)
++#endif
++
++ENTRY(spurious_interrupt_bug)
++ RING0_INT_FRAME
++ pushl $0
++ CFI_ADJUST_CFA_OFFSET 4
++ pushl $do_spurious_interrupt_bug
++ CFI_ADJUST_CFA_OFFSET 4
++ jmp error_code
++ CFI_ENDPROC
++END(spurious_interrupt_bug)
++
++ENTRY(kernel_thread_helper)
++ pushl $0 # fake return address for unwinder
++ CFI_STARTPROC
++ movl %edx,%eax
++ push %edx
++ CFI_ADJUST_CFA_OFFSET 4
++ call *%ebx
++ push %eax
++ CFI_ADJUST_CFA_OFFSET 4
++ call do_exit
++ CFI_ENDPROC
++ENDPROC(kernel_thread_helper)
++
++#ifdef CONFIG_XEN
++/* Xen doesn't set %esp to be precisely what the normal sysenter
++ entrypoint expects, so fix it up before using the normal path. */
++ENTRY(xen_sysenter_target)
++ RING0_INT_FRAME
++ addl $5*4, %esp /* remove xen-provided frame */
++ CFI_ADJUST_CFA_OFFSET -5*4
++ jmp sysenter_past_esp
++ CFI_ENDPROC
++
++ENTRY(xen_hypervisor_callback)
++ CFI_STARTPROC
++ pushl $0
++ CFI_ADJUST_CFA_OFFSET 4
++ SAVE_ALL
++ TRACE_IRQS_OFF
++
++ /* Check to see if we got the event in the critical
++ region in xen_iret_direct, after we've reenabled
++ events and checked for pending events. This simulates
++ iret instruction's behaviour where it delivers a
++ pending interrupt when enabling interrupts. */
++ movl PT_EIP(%esp),%eax
++ cmpl $xen_iret_start_crit,%eax
++ jb 1f
++ cmpl $xen_iret_end_crit,%eax
++ jae 1f
++
++ jmp xen_iret_crit_fixup
++
++ENTRY(xen_do_upcall)
++1: mov %esp, %eax
++ call xen_evtchn_do_upcall
++ jmp ret_from_intr
++ CFI_ENDPROC
++ENDPROC(xen_hypervisor_callback)
++
++# Hypervisor uses this for application faults while it executes.
++# We get here for two reasons:
++# 1. Fault while reloading DS, ES, FS or GS
++# 2. Fault while executing IRET
++# Category 1 we fix up by reattempting the load, and zeroing the segment
++# register if the load fails.
++# Category 2 we fix up by jumping to do_iret_error. We cannot use the
++# normal Linux return path in this case because if we use the IRET hypercall
++# to pop the stack frame we end up in an infinite loop of failsafe callbacks.
++# We distinguish between categories by maintaining a status value in EAX.
++ENTRY(xen_failsafe_callback)
++ CFI_STARTPROC
++ pushl %eax
++ CFI_ADJUST_CFA_OFFSET 4
++ movl $1,%eax
++1: mov 4(%esp),%ds
++2: mov 8(%esp),%es
++3: mov 12(%esp),%fs
++4: mov 16(%esp),%gs
++ testl %eax,%eax
++ popl %eax
++ CFI_ADJUST_CFA_OFFSET -4
++ lea 16(%esp),%esp
++ CFI_ADJUST_CFA_OFFSET -16
++ jz 5f
++ addl $16,%esp
++ jmp iret_exc # EAX != 0 => Category 2 (Bad IRET)
++5: pushl $0 # EAX == 0 => Category 1 (Bad segment)
++ CFI_ADJUST_CFA_OFFSET 4
++ SAVE_ALL
++ jmp ret_from_exception
++ CFI_ENDPROC
++
++.section .fixup,"ax"
++6: xorl %eax,%eax
++ movl %eax,4(%esp)
++ jmp 1b
++7: xorl %eax,%eax
++ movl %eax,8(%esp)
++ jmp 2b
++8: xorl %eax,%eax
++ movl %eax,12(%esp)
++ jmp 3b
++9: xorl %eax,%eax
++ movl %eax,16(%esp)
++ jmp 4b
++.previous
++.section __ex_table,"a"
++ .align 4
++ .long 1b,6b
++ .long 2b,7b
++ .long 3b,8b
++ .long 4b,9b
++.previous
++ENDPROC(xen_failsafe_callback)
++
++#endif /* CONFIG_XEN */
++
++#ifdef CONFIG_FTRACE
++#ifdef CONFIG_DYNAMIC_FTRACE
++
++ENTRY(mcount)
++ pushl %eax
++ pushl %ecx
++ pushl %edx
++ movl 0xc(%esp), %eax
++ subl $MCOUNT_INSN_SIZE, %eax
++
++.globl mcount_call
++mcount_call:
++ call ftrace_stub
++
++ popl %edx
++ popl %ecx
++ popl %eax
++
++ ret
++END(mcount)
++
++ENTRY(ftrace_caller)
++ pushl %eax
++ pushl %ecx
++ pushl %edx
++ movl 0xc(%esp), %eax
++ movl 0x4(%ebp), %edx
++ subl $MCOUNT_INSN_SIZE, %eax
++
++.globl ftrace_call
++ftrace_call:
++ call ftrace_stub
++
++ popl %edx
++ popl %ecx
++ popl %eax
++
++.globl ftrace_stub
++ftrace_stub:
++ ret
++END(ftrace_caller)
++
++#else /* ! CONFIG_DYNAMIC_FTRACE */
++
++ENTRY(mcount)
++ cmpl $ftrace_stub, ftrace_trace_function
++ jnz trace
++.globl ftrace_stub
++ftrace_stub:
++ ret
++
++ /* taken from glibc */
++trace:
++ pushl %eax
++ pushl %ecx
++ pushl %edx
++ movl 0xc(%esp), %eax
++ movl 0x4(%ebp), %edx
++ subl $MCOUNT_INSN_SIZE, %eax
++
++ call *ftrace_trace_function
++
++ popl %edx
++ popl %ecx
++ popl %eax
++
++ jmp ftrace_stub
++END(mcount)
++#endif /* CONFIG_DYNAMIC_FTRACE */
++#endif /* CONFIG_FTRACE */
++
++.section .rodata,"a"
++#include "syscall_table_32.S"
++
++syscall_table_size=(.-sys_call_table)
+diff -Nurb linux-2.6.27-590/arch/x86/mm/fault.c linux-2.6.27-591/arch/x86/mm/fault.c
+--- linux-2.6.27-590/arch/x86/mm/fault.c 2010-01-26 17:49:18.000000000 -0500
++++ linux-2.6.27-591/arch/x86/mm/fault.c 2010-01-29 15:43:46.000000000 -0500
+@@ -79,6 +79,15 @@
+ #endif
+ }
+
++
++extern void (*rec_event)(void *,unsigned int);
++struct event_spec {
++ unsigned long pc;
++ unsigned long dcookie;
++ unsigned count;
++ unsigned char reason;
++};
++
+ /*
+ * X86_32
+ * Sometimes AMD Athlon/Opteron CPUs report invalid exceptions on prefetch.
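
The extern above makes the same rec_event hook visible to the page-fault path; the remaining fault.c hunks of this patch (not shown here) presumably fill an event_spec, using the "reason" field, and fire it. For orientation, a hedged sketch of how a collector module might attach to that hook: the symbol name comes from the declaration above, while the export of the symbol, locking, and the module boilerplate are assumptions.

	#include <linux/module.h>
	#include <linux/kernel.h>

	/* The hook tested by the probes in fault.c and entry_32.S; assumed to be
	 * defined and exported by the Chopstix core elsewhere in this patch. */
	extern void (*rec_event)(void *, unsigned int);

	static void demo_collector(void *ev, unsigned int count)
	{
		/* A real collector would summarize ev here; this stub only counts. */
		static unsigned long seen;

		seen += count;
	}

	static int __init demo_init(void)
	{
		rec_event = demo_collector;	/* probes start firing after this store */
		return 0;
	}

	static void __exit demo_exit(void)
	{
		rec_event = NULL;		/* probes fall back to their NULL check */
	}

	module_init(demo_init);
	module_exit(demo_exit);
	MODULE_LICENSE("GPL");
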
+diff -Nurb linux-2.6.27-590/arch/x86/mm/fault.c.orig linux-2.6.27-591/arch/x86/mm/fault.c.orig
+--- linux-2.6.27-590/arch/x86/mm/fault.c.orig 1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.27-591/arch/x86/mm/fault.c.orig 2010-01-26 17:49:18.000000000 -0500
+@@ -0,0 +1,961 @@
++/*
++ * Copyright (C) 1995 Linus Torvalds
++ * Copyright (C) 2001,2002 Andi Kleen, SuSE Labs.
++ */
++
++#include <linux/signal.h>
++#include <linux/sched.h>
++#include <linux/kernel.h>
++#include <linux/errno.h>
++#include <linux/string.h>
++#include <linux/types.h>
++#include <linux/ptrace.h>
++#include <linux/mmiotrace.h>
++#include <linux/mman.h>
++#include <linux/mm.h>
++#include <linux/smp.h>
++#include <linux/interrupt.h>
++#include <linux/init.h>
++#include <linux/tty.h>
++#include <linux/vt_kern.h> /* For unblank_screen() */
++#include <linux/compiler.h>
++#include <linux/highmem.h>
++#include <linux/bootmem.h> /* for max_low_pfn */
++#include <linux/vmalloc.h>
++#include <linux/module.h>
++#include <linux/kprobes.h>
++#include <linux/uaccess.h>
++#include <linux/kdebug.h>
++
++#include <asm/system.h>
++#include <asm/desc.h>
++#include <asm/segment.h>
++#include <asm/pgalloc.h>
++#include <asm/smp.h>
++#include <asm/tlbflush.h>
++#include <asm/proto.h>
++#include <asm-generic/sections.h>
++
++/*
++ * Page fault error code bits
++ * bit 0 == 0 means no page found, 1 means protection fault
++ * bit 1 == 0 means read, 1 means write
++ * bit 2 == 0 means kernel, 1 means user-mode
++ * bit 3 == 1 means use of reserved bit detected
++ * bit 4 == 1 means fault was an instruction fetch
++ */
++#define PF_PROT (1<<0)
++#define PF_WRITE (1<<1)
++#define PF_USER (1<<2)
++#define PF_RSVD (1<<3)
++#define PF_INSTR (1<<4)
++
++static inline int kmmio_fault(struct pt_regs *regs, unsigned long addr)
++{
++#ifdef CONFIG_MMIOTRACE_HOOKS
++ if (unlikely(is_kmmio_active()))
++ if (kmmio_handler(regs, addr) == 1)
++ return -1;
++#endif
++ return 0;
++}
++
++static inline int notify_page_fault(struct pt_regs *regs)
++{
++#ifdef CONFIG_KPROBES
++ int ret = 0;
++
++ /* kprobe_running() needs smp_processor_id() */
++ if (!user_mode_vm(regs)) {
++ preempt_disable();
++ if (kprobe_running() && kprobe_fault_handler(regs, 14))
++ ret = 1;
++ preempt_enable();
++ }
++
++ return ret;
++#else
++ return 0;
++#endif
++}
++
++/*
++ * X86_32
++ * Sometimes AMD Athlon/Opteron CPUs report invalid exceptions on prefetch.
++ * Check that here and ignore it.
++ *
++ * X86_64
++ * Sometimes the CPU reports invalid exceptions on prefetch.
++ * Check that here and ignore it.
++ *
++ * Opcode checker based on code by Richard Brunner
++ */
++static int is_prefetch(struct pt_regs *regs, unsigned long addr,
++ unsigned long error_code)
++{
++ unsigned char *instr;
++ int scan_more = 1;
++ int prefetch = 0;
++ unsigned char *max_instr;
++
++ /*
++ * If it was a exec (instruction fetch) fault on NX page, then
++ * do not ignore the fault:
++ */
++ if (error_code & PF_INSTR)
++ return 0;
++
++ instr = (unsigned char *)convert_ip_to_linear(current, regs);
++ max_instr = instr + 15;
++
++ if (user_mode(regs) && instr >= (unsigned char *)TASK_SIZE)
++ return 0;
++
++ while (scan_more && instr < max_instr) {
++ unsigned char opcode;
++ unsigned char instr_hi;
++ unsigned char instr_lo;
++
++ if (probe_kernel_address(instr, opcode))
++ break;
++
++ instr_hi = opcode & 0xf0;
++ instr_lo = opcode & 0x0f;
++ instr++;
++
++ switch (instr_hi) {
++ case 0x20:
++ case 0x30:
++ /*
++ * Values 0x26,0x2E,0x36,0x3E are valid x86 prefixes.
++ * In X86_64 long mode, the CPU will signal invalid
++ * opcode if some of these prefixes are present so
++ * X86_64 will never get here anyway
++ */
++ scan_more = ((instr_lo & 7) == 0x6);
++ break;
++#ifdef CONFIG_X86_64
++ case 0x40:
++ /*
++ * In AMD64 long mode 0x40..0x4F are valid REX prefixes
++ * Need to figure out under what instruction mode the
++ * instruction was issued. Could check the LDT for lm,
++ * but for now it's good enough to assume that long
++ * mode only uses well known segments or kernel.
++ */
++ scan_more = (!user_mode(regs)) || (regs->cs == __USER_CS);
++ break;
++#endif
++ case 0x60:
++ /* 0x64 thru 0x67 are valid prefixes in all modes. */
++ scan_more = (instr_lo & 0xC) == 0x4;
++ break;
++ case 0xF0:
++ /* 0xF0, 0xF2, 0xF3 are valid prefixes in all modes. */
++ scan_more = !instr_lo || (instr_lo>>1) == 1;
++ break;
++ case 0x00:
++ /* Prefetch instruction is 0x0F0D or 0x0F18 */
++ scan_more = 0;
++
++ if (probe_kernel_address(instr, opcode))
++ break;
++ prefetch = (instr_lo == 0xF) &&
++ (opcode == 0x0D || opcode == 0x18);
++ break;
++ default:
++ scan_more = 0;
++ break;
++ }
++ }
++ return prefetch;
++}
++
++static void force_sig_info_fault(int si_signo, int si_code,
++ unsigned long address, struct task_struct *tsk)
++{
++ siginfo_t info;
++
++ info.si_signo = si_signo;
++ info.si_errno = 0;
++ info.si_code = si_code;
++ info.si_addr = (void __user *)address;
++ force_sig_info(si_signo, &info, tsk);
++}
++
++#ifdef CONFIG_X86_64
++static int bad_address(void *p)
++{
++ unsigned long dummy;
++ return probe_kernel_address((unsigned long *)p, dummy);
++}
++#endif
++
++static void dump_pagetable(unsigned long address)
++{
++#ifdef CONFIG_X86_32
++ __typeof__(pte_val(__pte(0))) page;
++
++ page = read_cr3();
++ page = ((__typeof__(page) *) __va(page))[address >> PGDIR_SHIFT];
++#ifdef CONFIG_X86_PAE
++ printk("*pdpt = %016Lx ", page);
++ if ((page >> PAGE_SHIFT) < max_low_pfn
++ && page & _PAGE_PRESENT) {
++ page &= PAGE_MASK;
++ page = ((__typeof__(page) *) __va(page))[(address >> PMD_SHIFT)
++ & (PTRS_PER_PMD - 1)];
++ printk(KERN_CONT "*pde = %016Lx ", page);
++ page &= ~_PAGE_NX;
++ }
++#else
++ printk("*pde = %08lx ", page);
++#endif
++
++ /*
++ * We must not directly access the pte in the highpte
++ * case if the page table is located in highmem.
++ * And let's rather not kmap-atomic the pte, just in case
++ * it's allocated already.
++ */
++ if ((page >> PAGE_SHIFT) < max_low_pfn
++ && (page & _PAGE_PRESENT)
++ && !(page & _PAGE_PSE)) {
++ page &= PAGE_MASK;
++ page = ((__typeof__(page) *) __va(page))[(address >> PAGE_SHIFT)
++ & (PTRS_PER_PTE - 1)];
++ printk("*pte = %0*Lx ", sizeof(page)*2, (u64)page);
++ }
++
++ printk("\n");
++#else /* CONFIG_X86_64 */
++ pgd_t *pgd;
++ pud_t *pud;
++ pmd_t *pmd;
++ pte_t *pte;
++
++ pgd = (pgd_t *)read_cr3();
++
++ pgd = __va((unsigned long)pgd & PHYSICAL_PAGE_MASK);
++ pgd += pgd_index(address);
++ if (bad_address(pgd)) goto bad;
++ printk("PGD %lx ", pgd_val(*pgd));
++ if (!pgd_present(*pgd)) goto ret;
++
++ pud = pud_offset(pgd, address);
++ if (bad_address(pud)) goto bad;
++ printk("PUD %lx ", pud_val(*pud));
++ if (!pud_present(*pud) || pud_large(*pud))
++ goto ret;
++
++ pmd = pmd_offset(pud, address);
++ if (bad_address(pmd)) goto bad;
++ printk("PMD %lx ", pmd_val(*pmd));
++ if (!pmd_present(*pmd) || pmd_large(*pmd)) goto ret;
++
++ pte = pte_offset_kernel(pmd, address);
++ if (bad_address(pte)) goto bad;
++ printk("PTE %lx", pte_val(*pte));
++ret:
++ printk("\n");
++ return;
++bad:
++ printk("BAD\n");
++#endif
++}
++
++#ifdef CONFIG_X86_32
++static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
++{
++ unsigned index = pgd_index(address);
++ pgd_t *pgd_k;
++ pud_t *pud, *pud_k;
++ pmd_t *pmd, *pmd_k;
++
++ pgd += index;
++ pgd_k = init_mm.pgd + index;
++
++ if (!pgd_present(*pgd_k))
++ return NULL;
++
++ /*
++ * set_pgd(pgd, *pgd_k); here would be useless on PAE
++ * and redundant with the set_pmd() on non-PAE. As would
++ * set_pud.
++ */
++
++ pud = pud_offset(pgd, address);
++ pud_k = pud_offset(pgd_k, address);
++ if (!pud_present(*pud_k))
++ return NULL;
++
++ pmd = pmd_offset(pud, address);
++ pmd_k = pmd_offset(pud_k, address);
++ if (!pmd_present(*pmd_k))
++ return NULL;
++ if (!pmd_present(*pmd)) {
++ set_pmd(pmd, *pmd_k);
++ arch_flush_lazy_mmu_mode();
++ } else
++ BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));
++ return pmd_k;
++}
++#endif
++
++#ifdef CONFIG_X86_64
++static const char errata93_warning[] =
++KERN_ERR "******* Your BIOS seems to not contain a fix for K8 errata #93\n"
++KERN_ERR "******* Working around it, but it may cause SEGVs or burn power.\n"
++KERN_ERR "******* Please consider a BIOS update.\n"
++KERN_ERR "******* Disabling USB legacy in the BIOS may also help.\n";
++#endif
++
++/* Workaround for K8 erratum #93 & buggy BIOS.
++ BIOS SMM functions are required to use a specific workaround
++ to avoid corruption of the 64bit RIP register on C stepping K8.
++ A lot of BIOS that didn't get tested properly miss this.
++ The OS sees this as a page fault with the upper 32bits of RIP cleared.
++ Try to work around it here.
++ Note we only handle faults in kernel here.
++ Does nothing for X86_32
++ */
++static int is_errata93(struct pt_regs *regs, unsigned long address)
++{
++#ifdef CONFIG_X86_64
++ static int warned;
++ if (address != regs->ip)
++ return 0;
++ if ((address >> 32) != 0)
++ return 0;
++ address |= 0xffffffffUL << 32;
++ if ((address >= (u64)_stext && address <= (u64)_etext) ||
++ (address >= MODULES_VADDR && address <= MODULES_END)) {
++ if (!warned) {
++ printk(errata93_warning);
++ warned = 1;
++ }
++ regs->ip = address;
++ return 1;
++ }
++#endif
++ return 0;
++}
++
++/*
++ * Work around K8 erratum #100 K8 in compat mode occasionally jumps to illegal
++ * addresses >4GB. We catch this in the page fault handler because these
++ * addresses are not reachable. Just detect this case and return. Any code
++ * segment in LDT is compatibility mode.
++ */
++static int is_errata100(struct pt_regs *regs, unsigned long address)
++{
++#ifdef CONFIG_X86_64
++ if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) &&
++ (address >> 32))
++ return 1;
++#endif
++ return 0;
++}
++
++void do_invalid_op(struct pt_regs *, unsigned long);
++
++static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
++{
++#ifdef CONFIG_X86_F00F_BUG
++ unsigned long nr;
++ /*
++ * Pentium F0 0F C7 C8 bug workaround.
++ */
++ if (boot_cpu_data.f00f_bug) {
++ nr = (address - idt_descr.address) >> 3;
++
++ if (nr == 6) {
++ do_invalid_op(regs, 0);
++ return 1;
++ }
++ }
++#endif
++ return 0;
++}
++
++static void show_fault_oops(struct pt_regs *regs, unsigned long error_code,
++ unsigned long address)
++{
++#ifdef CONFIG_X86_32
++ if (!oops_may_print())
++ return;
++#endif
++
++#ifdef CONFIG_X86_PAE
++ if (error_code & PF_INSTR) {
++ unsigned int level;
++ pte_t *pte = lookup_address(address, &level);
++
++ if (pte && pte_present(*pte) && !pte_exec(*pte))
++ printk(KERN_CRIT "kernel tried to execute "
++ "NX-protected page - exploit attempt? "
++ "(uid: %d)\n", current->uid);
++ }
++#endif
++
++ printk(KERN_ALERT "BUG: unable to handle kernel ");
++ if (address < PAGE_SIZE)
++ printk(KERN_CONT "NULL pointer dereference");
++ else
++ printk(KERN_CONT "paging request");
++ printk(KERN_CONT " at %p\n", (void *) address);
++ printk(KERN_ALERT "IP:");
++ printk_address(regs->ip, 1);
++ dump_pagetable(address);
++}
++
++#ifdef CONFIG_X86_64
++static noinline void pgtable_bad(unsigned long address, struct pt_regs *regs,
++ unsigned long error_code)
++{
++ unsigned long flags = oops_begin();
++ struct task_struct *tsk;
++
++ printk(KERN_ALERT "%s: Corrupted page table at address %lx\n",
++ current->comm, address);
++ dump_pagetable(address);
++ tsk = current;
++ tsk->thread.cr2 = address;
++ tsk->thread.trap_no = 14;
++ tsk->thread.error_code = error_code;
++ if (__die("Bad pagetable", regs, error_code))
++ regs = NULL;
++ oops_end(flags, regs, SIGKILL);
++}
++#endif
++
++static int spurious_fault_check(unsigned long error_code, pte_t *pte)
++{
++ if ((error_code & PF_WRITE) && !pte_write(*pte))
++ return 0;
++ if ((error_code & PF_INSTR) && !pte_exec(*pte))
++ return 0;
++
++ return 1;
++}
++
++/*
++ * Handle a spurious fault caused by a stale TLB entry. This allows
++ * us to lazily refresh the TLB when increasing the permissions of a
++ * kernel page (RO -> RW or NX -> X). Doing it eagerly is very
++ * expensive since that implies doing a full cross-processor TLB
++ * flush, even if no stale TLB entries exist on other processors.
++ * There are no security implications to leaving a stale TLB when
++ * increasing the permissions on a page.
++ */
++static int spurious_fault(unsigned long address,
++ unsigned long error_code)
++{
++ pgd_t *pgd;
++ pud_t *pud;
++ pmd_t *pmd;
++ pte_t *pte;
++
++ /* Reserved-bit violation or user access to kernel space? */
++ if (error_code & (PF_USER | PF_RSVD))
++ return 0;
++
++ pgd = init_mm.pgd + pgd_index(address);
++ if (!pgd_present(*pgd))
++ return 0;
++
++ pud = pud_offset(pgd, address);
++ if (!pud_present(*pud))
++ return 0;
++
++ if (pud_large(*pud))
++ return spurious_fault_check(error_code, (pte_t *) pud);
++
++ pmd = pmd_offset(pud, address);
++ if (!pmd_present(*pmd))
++ return 0;
++
++ if (pmd_large(*pmd))
++ return spurious_fault_check(error_code, (pte_t *) pmd);
++
++ pte = pte_offset_kernel(pmd, address);
++ if (!pte_present(*pte))
++ return 0;
++
++ return spurious_fault_check(error_code, pte);
++}
++
++/*
++ * X86_32
++ * Handle a fault on the vmalloc or module mapping area
++ *
++ * X86_64
++ * Handle a fault on the vmalloc area
++ *
++ * This assumes no large pages in there.
++ */
++static int vmalloc_fault(unsigned long address)
++{
++#ifdef CONFIG_X86_32
++ unsigned long pgd_paddr;
++ pmd_t *pmd_k;
++ pte_t *pte_k;
++
++ /* Make sure we are in vmalloc area */
++ if (!(address >= VMALLOC_START && address < VMALLOC_END))
++ return -1;
++
++ /*
++ * Synchronize this task's top level page-table
++ * with the 'reference' page table.
++ *
++ * Do _not_ use "current" here. We might be inside
++ * an interrupt in the middle of a task switch..
++ */
++ pgd_paddr = read_cr3();
++ pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
++ if (!pmd_k)
++ return -1;
++ pte_k = pte_offset_kernel(pmd_k, address);
++ if (!pte_present(*pte_k))
++ return -1;
++ return 0;
++#else
++ pgd_t *pgd, *pgd_ref;
++ pud_t *pud, *pud_ref;
++ pmd_t *pmd, *pmd_ref;
++ pte_t *pte, *pte_ref;
++
++ /* Make sure we are in vmalloc area */
++ if (!(address >= VMALLOC_START && address < VMALLOC_END))
++ return -1;
++
++ /* Copy kernel mappings over when needed. This can also
++ happen within a race in page table update. In the later
++ case just flush. */
++
++ pgd = pgd_offset(current->active_mm, address);
++ pgd_ref = pgd_offset_k(address);
++ if (pgd_none(*pgd_ref))
++ return -1;
++ if (pgd_none(*pgd))
++ set_pgd(pgd, *pgd_ref);
++ else
++ BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
++
++ /* Below here mismatches are bugs because these lower tables
++ are shared */
++
++ pud = pud_offset(pgd, address);
++ pud_ref = pud_offset(pgd_ref, address);
++ if (pud_none(*pud_ref))
++ return -1;
++ if (pud_none(*pud) || pud_page_vaddr(*pud) != pud_page_vaddr(*pud_ref))
++ BUG();
++ pmd = pmd_offset(pud, address);
++ pmd_ref = pmd_offset(pud_ref, address);
++ if (pmd_none(*pmd_ref))
++ return -1;
++ if (pmd_none(*pmd) || pmd_page(*pmd) != pmd_page(*pmd_ref))
++ BUG();
++ pte_ref = pte_offset_kernel(pmd_ref, address);
++ if (!pte_present(*pte_ref))
++ return -1;
++ pte = pte_offset_kernel(pmd, address);
++ /* Don't use pte_page here, because the mappings can point
++ outside mem_map, and the NUMA hash lookup cannot handle
++ that. */
++ if (!pte_present(*pte) || pte_pfn(*pte) != pte_pfn(*pte_ref))
++ BUG();
++ return 0;
++#endif
++}
++
++int show_unhandled_signals = 1;
++
++/*
++ * This routine handles page faults. It determines the address,
++ * and the problem, and then passes it off to one of the appropriate
++ * routines.
++ */
++#ifdef CONFIG_X86_64
++asmlinkage
++#endif
++void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
++{
++ struct task_struct *tsk;
++ struct mm_struct *mm;
++ struct vm_area_struct *vma;
++ unsigned long address;
++ int write, si_code;
++ int fault;
++#ifdef CONFIG_X86_64
++ unsigned long flags;
++#endif
++
++ /*
++ * We can fault from pretty much anywhere, with unknown IRQ state.
++ */
++ trace_hardirqs_fixup();
++
++ tsk = current;
++ mm = tsk->mm;
++ prefetchw(&mm->mmap_sem);
++
++ /* get the address */
++ address = read_cr2();
++
++ si_code = SEGV_MAPERR;
++
++ if (notify_page_fault(regs))
++ return;
++ if (unlikely(kmmio_fault(regs, address)))
++ return;
++
++ /*
++ * We fault-in kernel-space virtual memory on-demand. The
++ * 'reference' page table is init_mm.pgd.
++ *
++ * NOTE! We MUST NOT take any locks for this case. We may
++ * be in an interrupt or a critical region, and should
++ * only copy the information from the master page table,
++ * nothing more.
++ *
++ * This verifies that the fault happens in kernel space
++ * (error_code & 4) == 0, and that the fault was not a
++ * protection error (error_code & 9) == 0.
++ */
++#ifdef CONFIG_X86_32
++ if (unlikely(address >= TASK_SIZE)) {
++#else
++ if (unlikely(address >= TASK_SIZE64)) {
++#endif
++ if (!(error_code & (PF_RSVD|PF_USER|PF_PROT)) &&
++ vmalloc_fault(address) >= 0)
++ return;
++
++ /* Can handle a stale RO->RW TLB */
++ if (spurious_fault(address, error_code))
++ return;
++
++ /*
++ * Don't take the mm semaphore here. If we fixup a prefetch
++ * fault we could otherwise deadlock.
++ */
++ goto bad_area_nosemaphore;
++ }
++
++
++#ifdef CONFIG_X86_32
++ /* It's safe to allow irq's after cr2 has been saved and the vmalloc
++ fault has been handled. */
++ if (regs->flags & (X86_EFLAGS_IF | X86_VM_MASK))
++ local_irq_enable();
++
++ /*
++ * If we're in an interrupt, have no user context or are running in an
++ * atomic region then we must not take the fault.
++ */
++ if (in_atomic() || !mm)
++ goto bad_area_nosemaphore;
++#else /* CONFIG_X86_64 */
++ if (likely(regs->flags & X86_EFLAGS_IF))
++ local_irq_enable();
++
++ if (unlikely(error_code & PF_RSVD))
++ pgtable_bad(address, regs, error_code);
++
++ /*
++ * If we're in an interrupt, have no user context or are running in an
++ * atomic region then we must not take the fault.
++ */
++ if (unlikely(in_atomic() || !mm))
++ goto bad_area_nosemaphore;
++
++ /*
++ * User-mode registers count as a user access even for any
++ * potential system fault or CPU buglet.
++ */
++ if (user_mode_vm(regs))
++ error_code |= PF_USER;
++again:
++#endif
++ /* When running in the kernel we expect faults to occur only to
++ * addresses in user space. All other faults represent errors in the
++ * kernel and should generate an OOPS. Unfortunately, in the case of an
++ * erroneous fault occurring in a code path which already holds mmap_sem
++ * we will deadlock attempting to validate the fault against the
++ * address space. Luckily the kernel only validly references user
++ * space from well defined areas of code, which are listed in the
++ * exceptions table.
++ *
++ * As the vast majority of faults will be valid we will only perform
++ * the source reference check when there is a possibility of a deadlock.
++ * Attempt to lock the address space, if we cannot we then validate the
++ * source. If this is invalid we can skip the address space check,
++ * thus avoiding the deadlock.
++ */
++ if (!down_read_trylock(&mm->mmap_sem)) {
++ if ((error_code & PF_USER) == 0 &&
++ !search_exception_tables(regs->ip))
++ goto bad_area_nosemaphore;
++ down_read(&mm->mmap_sem);
++ }
++
++ vma = find_vma(mm, address);
++ if (!vma)
++ goto bad_area;
++ if (vma->vm_start <= address)
++ goto good_area;
++ if (!(vma->vm_flags & VM_GROWSDOWN))
++ goto bad_area;
++ if (error_code & PF_USER) {
++ /*
++ * Accessing the stack below %sp is always a bug.
++ * The large cushion allows instructions like enter
++ * and pusha to work. ("enter $65535,$31" pushes
++ * 32 pointers and then decrements %sp by 65535.)
++ */
++ if (address + 65536 + 32 * sizeof(unsigned long) < regs->sp)
++ goto bad_area;
++ }
++ if (expand_stack(vma, address))
++ goto bad_area;
++/*
++ * Ok, we have a good vm_area for this memory access, so
++ * we can handle it..
++ */
++good_area:
++ si_code = SEGV_ACCERR;
++ write = 0;
++ switch (error_code & (PF_PROT|PF_WRITE)) {
++ default: /* 3: write, present */
++ /* fall through */
++ case PF_WRITE: /* write, not present */
++ if (!(vma->vm_flags & VM_WRITE))
++ goto bad_area;
++ write++;
++ break;
++ case PF_PROT: /* read, present */
++ goto bad_area;
++ case 0: /* read, not present */
++ if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
++ goto bad_area;
++ }
++
++#ifdef CONFIG_X86_32
++survive:
++#endif
++ /*
++ * If for any reason at all we couldn't handle the fault,
++ * make sure we exit gracefully rather than endlessly redo
++ * the fault.
++ */
++ fault = handle_mm_fault(mm, vma, address, write);
++ if (unlikely(fault & VM_FAULT_ERROR)) {
++ if (fault & VM_FAULT_OOM)
++ goto out_of_memory;
++ else if (fault & VM_FAULT_SIGBUS)
++ goto do_sigbus;
++ BUG();
++ }
++ if (fault & VM_FAULT_MAJOR)
++ tsk->maj_flt++;
++ else
++ tsk->min_flt++;
++
++#ifdef CONFIG_X86_32
++ /*
++ * Did it hit the DOS screen memory VA from vm86 mode?
++ */
++ if (v8086_mode(regs)) {
++ unsigned long bit = (address - 0xA0000) >> PAGE_SHIFT;
++ if (bit < 32)
++ tsk->thread.screen_bitmap |= 1 << bit;
++ }
++#endif
++ up_read(&mm->mmap_sem);
++ return;
++
++/*
++ * Something tried to access memory that isn't in our memory map..
++ * Fix it, but check if it's kernel or user first..
++ */
++bad_area:
++ up_read(&mm->mmap_sem);
++
++bad_area_nosemaphore:
++ /* User mode accesses just cause a SIGSEGV */
++ if (error_code & PF_USER) {
++ /*
++ * It's possible to have interrupts off here.
++ */
++ local_irq_enable();
++
++ /*
++ * Valid to do another page fault here because this one came
++ * from user space.
++ */
++ if (is_prefetch(regs, address, error_code))
++ return;
++
++ if (is_errata100(regs, address))
++ return;
++
++ if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
++ printk_ratelimit()) {
++ printk(
++ "%s%s[%d]: segfault at %lx ip %p sp %p error %lx",
++ task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
++ tsk->comm, task_pid_nr(tsk), address,
++ (void *) regs->ip, (void *) regs->sp, error_code);
++ print_vma_addr(" in ", regs->ip);
++ printk("\n");
++ }
++
++ tsk->thread.cr2 = address;
++ /* Kernel addresses are always protection faults */
++ tsk->thread.error_code = error_code | (address >= TASK_SIZE);
++ tsk->thread.trap_no = 14;
++ force_sig_info_fault(SIGSEGV, si_code, address, tsk);
++ return;
++ }
++
++ if (is_f00f_bug(regs, address))
++ return;
++
++no_context:
++ /* Are we prepared to handle this kernel fault? */
++ if (fixup_exception(regs))
++ return;
++
++ /*
++ * X86_32
++ * Valid to do another page fault here, because if this fault
++ * had been triggered by is_prefetch fixup_exception would have
++ * handled it.
++ *
++ * X86_64
++ * Hall of shame of CPU/BIOS bugs.
++ */
++ if (is_prefetch(regs, address, error_code))
++ return;
++
++ if (is_errata93(regs, address))
++ return;
++
++/*
++ * Oops. The kernel tried to access some bad page. We'll have to
++ * terminate things with extreme prejudice.
++ */
++#ifdef CONFIG_X86_32
++ bust_spinlocks(1);
++#else
++ flags = oops_begin();
++#endif
++
++ show_fault_oops(regs, error_code, address);
++
++ tsk->thread.cr2 = address;
++ tsk->thread.trap_no = 14;
++ tsk->thread.error_code = error_code;
++
++#ifdef CONFIG_X86_32
++ die("Oops", regs, error_code);
++ bust_spinlocks(0);
++ do_exit(SIGKILL);
++#else
++ if (__die("Oops", regs, error_code))
++ regs = NULL;
++ /* Executive summary in case the body of the oops scrolled away */
++ printk(KERN_EMERG "CR2: %016lx\n", address);
++ oops_end(flags, regs, SIGKILL);
++#endif
++
++/*
++ * We ran out of memory, or some other thing happened to us that made
++ * us unable to handle the page fault gracefully.
++ */
++out_of_memory:
++ up_read(&mm->mmap_sem);
++ if (is_global_init(tsk)) {
++ yield();
++#ifdef CONFIG_X86_32
++ down_read(&mm->mmap_sem);
++ goto survive;
++#else
++ goto again;
++#endif
++ }
++
++ printk("VM: killing process %s\n", tsk->comm);
++ if (error_code & PF_USER)
++ do_group_exit(SIGKILL);
++ goto no_context;
++
++do_sigbus:
++ up_read(&mm->mmap_sem);
++
++ /* Kernel mode? Handle exceptions or die */
++ if (!(error_code & PF_USER))
++ goto no_context;
++#ifdef CONFIG_X86_32
++ /* User space => ok to do another page fault */
++ if (is_prefetch(regs, address, error_code))
++ return;
++#endif
++ tsk->thread.cr2 = address;
++ tsk->thread.error_code = error_code;
++ tsk->thread.trap_no = 14;
++ force_sig_info_fault(SIGBUS, BUS_ADRERR, address, tsk);
++}
++
++DEFINE_SPINLOCK(pgd_lock);
++LIST_HEAD(pgd_list);
++
++void vmalloc_sync_all(void)
++{
++#ifdef CONFIG_X86_32
++ unsigned long start = VMALLOC_START & PGDIR_MASK;
++ unsigned long address;
++
++ if (SHARED_KERNEL_PMD)
++ return;
++
++ BUILD_BUG_ON(TASK_SIZE & ~PGDIR_MASK);
++ for (address = start; address >= TASK_SIZE; address += PGDIR_SIZE) {
++ unsigned long flags;
++ struct page *page;
++
++ spin_lock_irqsave(&pgd_lock, flags);
++ list_for_each_entry(page, &pgd_list, lru) {
++ if (!vmalloc_sync_one(page_address(page),
++ address))
++ break;
++ }
++ spin_unlock_irqrestore(&pgd_lock, flags);
++ }
++#else /* CONFIG_X86_64 */
++ unsigned long start = VMALLOC_START & PGDIR_MASK;
++ unsigned long address;
++
++ for (address = start; address <= VMALLOC_END; address += PGDIR_SIZE) {
++ const pgd_t *pgd_ref = pgd_offset_k(address);
++ unsigned long flags;
++ struct page *page;
++
++ if (pgd_none(*pgd_ref))
++ continue;
++ spin_lock_irqsave(&pgd_lock, flags);
++ list_for_each_entry(page, &pgd_list, lru) {
++ pgd_t *pgd;
++ pgd = (pgd_t *)page_address(page) + pgd_index(address);
++ if (pgd_none(*pgd))
++ set_pgd(pgd, *pgd_ref);
++ else
++ BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
++ }
++ spin_unlock_irqrestore(&pgd_lock, flags);
++ }
++#endif
++}
+diff -Nurb linux-2.6.27-590/drivers/oprofile/cpu_buffer.c linux-2.6.27-591/drivers/oprofile/cpu_buffer.c
+--- linux-2.6.27-590/drivers/oprofile/cpu_buffer.c 2008-10-09 18:13:53.000000000 -0400
++++ linux-2.6.27-591/drivers/oprofile/cpu_buffer.c 2010-01-29 15:43:46.000000000 -0500
+@@ -21,6 +21,7 @@
+ #include <linux/oprofile.h>
+ #include <linux/vmalloc.h>
+ #include <linux/errno.h>
++#include <linux/arrays.h>
+
+ #include "event_buffer.h"
+ #include "cpu_buffer.h"
+@@ -147,6 +148,17 @@
+ b->head_pos = 0;
+ }
+
++#ifdef CONFIG_CHOPSTIX
++
++struct event_spec {
++ unsigned int pc;
++ unsigned long dcookie;
++ unsigned count;
++};
++
++extern void (*rec_event)(void *,unsigned int);
++#endif
++
+ static inline void
+ add_sample(struct oprofile_cpu_buffer * cpu_buf,
+ unsigned long pc, unsigned long event)
+@@ -155,6 +167,7 @@
+ entry->eip = pc;
+ entry->event = event;
+ increment_head(cpu_buf);
++
+ }
+
+ static inline void
+@@ -250,8 +263,28 @@
+ {
+ int is_kernel = !user_mode(regs);
+ unsigned long pc = profile_pc(regs);
++ int res = 0;
+
++#ifdef CONFIG_CHOPSTIX
++ if (rec_event) {
++ struct event esig;
++ struct event_spec espec;
++ esig.task = current;
++ espec.pc = pc;
++ espec.count = 1;
++ esig.event_data = &espec;
++ esig.event_type = event; /* index in the event array currently set up */
++ /* make sure the counters are loaded in the order we want them to show up */
++ (*rec_event)(&esig, 1);
++ }
++ else {
+ oprofile_add_ext_sample(pc, regs, event, is_kernel);
++ }
++#else
++ oprofile_add_ext_sample(pc, regs, event, is_kernel);
++#endif
++
++
+ }
+
+ void oprofile_add_pc(unsigned long pc, int is_kernel, unsigned long event)
+diff -Nurb linux-2.6.27-590/drivers/oprofile/cpu_buffer.c.orig linux-2.6.27-591/drivers/oprofile/cpu_buffer.c.orig
+--- linux-2.6.27-590/drivers/oprofile/cpu_buffer.c.orig 1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.27-591/drivers/oprofile/cpu_buffer.c.orig 2008-10-09 18:13:53.000000000 -0400
+@@ -0,0 +1,307 @@
++/**
++ * @file cpu_buffer.c
++ *
++ * @remark Copyright 2002 OProfile authors
++ * @remark Read the file COPYING
++ *
++ * @author John Levon <levon@movementarian.org>
++ *
++ * Each CPU has a local buffer that stores PC value/event
++ * pairs. We also log context switches when we notice them.
++ * Eventually each CPU's buffer is processed into the global
++ * event buffer by sync_buffer().
++ *
++ * We use a local buffer for two reasons: an NMI or similar
++ * interrupt cannot synchronise, and high sampling rates
++ * would lead to catastrophic global synchronisation if
++ * a global buffer was used.
++ */
++
++#include <linux/sched.h>
++#include <linux/oprofile.h>
++#include <linux/vmalloc.h>
++#include <linux/errno.h>
++
++#include "event_buffer.h"
++#include "cpu_buffer.h"
++#include "buffer_sync.h"
++#include "oprof.h"
++
++DEFINE_PER_CPU(struct oprofile_cpu_buffer, cpu_buffer);
++
++static void wq_sync_buffer(struct work_struct *work);
++
++#define DEFAULT_TIMER_EXPIRE (HZ / 10)
++static int work_enabled;
++
++void free_cpu_buffers(void)
++{
++ int i;
++
++ for_each_online_cpu(i) {
++ vfree(per_cpu(cpu_buffer, i).buffer);
++ per_cpu(cpu_buffer, i).buffer = NULL;
++ }
++}
++
++int alloc_cpu_buffers(void)
++{
++ int i;
++
++ unsigned long buffer_size = fs_cpu_buffer_size;
++
++ for_each_online_cpu(i) {
++ struct oprofile_cpu_buffer *b = &per_cpu(cpu_buffer, i);
++
++ b->buffer = vmalloc_node(sizeof(struct op_sample) * buffer_size,
++ cpu_to_node(i));
++ if (!b->buffer)
++ goto fail;
++
++ b->last_task = NULL;
++ b->last_is_kernel = -1;
++ b->tracing = 0;
++ b->buffer_size = buffer_size;
++ b->tail_pos = 0;
++ b->head_pos = 0;
++ b->sample_received = 0;
++ b->sample_lost_overflow = 0;
++ b->backtrace_aborted = 0;
++ b->sample_invalid_eip = 0;
++ b->cpu = i;
++ INIT_DELAYED_WORK(&b->work, wq_sync_buffer);
++ }
++ return 0;
++
++fail:
++ free_cpu_buffers();
++ return -ENOMEM;
++}
++
++void start_cpu_work(void)
++{
++ int i;
++
++ work_enabled = 1;
++
++ for_each_online_cpu(i) {
++ struct oprofile_cpu_buffer *b = &per_cpu(cpu_buffer, i);
++
++ /*
++ * Spread the work by 1 jiffy per cpu so they dont all
++ * fire at once.
++ */
++ schedule_delayed_work_on(i, &b->work, DEFAULT_TIMER_EXPIRE + i);
++ }
++}
++
++void end_cpu_work(void)
++{
++ int i;
++
++ work_enabled = 0;
++
++ for_each_online_cpu(i) {
++ struct oprofile_cpu_buffer *b = &per_cpu(cpu_buffer, i);
++
++ cancel_delayed_work(&b->work);
++ }
++
++ flush_scheduled_work();
++}
++
++/* Resets the cpu buffer to a sane state. */
++void cpu_buffer_reset(struct oprofile_cpu_buffer * cpu_buf)
++{
++ /* reset these to invalid values; the next sample
++ * collected will populate the buffer with proper
++ * values to initialize the buffer
++ */
++ cpu_buf->last_is_kernel = -1;
++ cpu_buf->last_task = NULL;
++}
++
++/* compute number of available slots in cpu_buffer queue */
++static unsigned long nr_available_slots(struct oprofile_cpu_buffer const * b)
++{
++ unsigned long head = b->head_pos;
++ unsigned long tail = b->tail_pos;
++
++ if (tail > head)
++ return (tail - head) - 1;
++
++ return tail + (b->buffer_size - head) - 1;
++}
++
++static void increment_head(struct oprofile_cpu_buffer * b)
++{
++ unsigned long new_head = b->head_pos + 1;
++
++ /* Ensure anything written to the slot before we
++ * increment is visible */
++ wmb();
++
++ if (new_head < b->buffer_size)
++ b->head_pos = new_head;
++ else
++ b->head_pos = 0;
++}
++
++static inline void
++add_sample(struct oprofile_cpu_buffer * cpu_buf,
++ unsigned long pc, unsigned long event)
++{
++ struct op_sample * entry = &cpu_buf->buffer[cpu_buf->head_pos];
++ entry->eip = pc;
++ entry->event = event;
++ increment_head(cpu_buf);
++}
++
++static inline void
++add_code(struct oprofile_cpu_buffer * buffer, unsigned long value)
++{
++ add_sample(buffer, ESCAPE_CODE, value);
++}
++
++/* This must be safe from any context. It's safe writing here
++ * because of the head/tail separation of the writer and reader
++ * of the CPU buffer.
++ *
++ * is_kernel is needed because on some architectures you cannot
++ * tell if you are in kernel or user space simply by looking at
++ * pc. We tag this in the buffer by generating kernel enter/exit
++ * events whenever is_kernel changes
++ */
++static int log_sample(struct oprofile_cpu_buffer * cpu_buf, unsigned long pc,
++ int is_kernel, unsigned long event)
++{
++ struct task_struct * task;
++
++ cpu_buf->sample_received++;
++
++ if (pc == ESCAPE_CODE) {
++ cpu_buf->sample_invalid_eip++;
++ return 0;
++ }
++
++ if (nr_available_slots(cpu_buf) < 3) {
++ cpu_buf->sample_lost_overflow++;
++ return 0;
++ }
++
++ is_kernel = !!is_kernel;
++
++ task = current;
++
++ /* notice a switch from user->kernel or vice versa */
++ if (cpu_buf->last_is_kernel != is_kernel) {
++ cpu_buf->last_is_kernel = is_kernel;
++ add_code(cpu_buf, is_kernel);
++ }
++
++ /* notice a task switch */
++ if (cpu_buf->last_task != task) {
++ cpu_buf->last_task = task;
++ add_code(cpu_buf, (unsigned long)task);
++ }
++
++ add_sample(cpu_buf, pc, event);
++ return 1;
++}
++
++static int oprofile_begin_trace(struct oprofile_cpu_buffer * cpu_buf)
++{
++ if (nr_available_slots(cpu_buf) < 4) {
++ cpu_buf->sample_lost_overflow++;
++ return 0;
++ }
++
++ add_code(cpu_buf, CPU_TRACE_BEGIN);
++ cpu_buf->tracing = 1;
++ return 1;
++}
++
++static void oprofile_end_trace(struct oprofile_cpu_buffer * cpu_buf)
++{
++ cpu_buf->tracing = 0;
++}
++
++void oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs,
++ unsigned long event, int is_kernel)
++{
++ struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);
++
++ if (!backtrace_depth) {
++ log_sample(cpu_buf, pc, is_kernel, event);
++ return;
++ }
++
++ if (!oprofile_begin_trace(cpu_buf))
++ return;
++
++ /* if log_sample() fail we can't backtrace since we lost the source
++ * of this event */
++ if (log_sample(cpu_buf, pc, is_kernel, event))
++ oprofile_ops.backtrace(regs, backtrace_depth);
++ oprofile_end_trace(cpu_buf);
++}
++
++void oprofile_add_sample(struct pt_regs * const regs, unsigned long event)
++{
++ int is_kernel = !user_mode(regs);
++ unsigned long pc = profile_pc(regs);
++
++ oprofile_add_ext_sample(pc, regs, event, is_kernel);
++}
++
++void oprofile_add_pc(unsigned long pc, int is_kernel, unsigned long event)
++{
++ struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);
++ log_sample(cpu_buf, pc, is_kernel, event);
++}
++
++void oprofile_add_trace(unsigned long pc)
++{
++ struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);
++
++ if (!cpu_buf->tracing)
++ return;
++
++ if (nr_available_slots(cpu_buf) < 1) {
++ cpu_buf->tracing = 0;
++ cpu_buf->sample_lost_overflow++;
++ return;
++ }
++
++ /* broken frame can give an eip with the same value as an escape code,
++ * abort the trace if we get it */
++ if (pc == ESCAPE_CODE) {
++ cpu_buf->tracing = 0;
++ cpu_buf->backtrace_aborted++;
++ return;
++ }
++
++ add_sample(cpu_buf, pc, 0);
++}
++
++/*
++ * This serves to avoid cpu buffer overflow, and makes sure
++ * the task mortuary progresses
++ *
++ * By using schedule_delayed_work_on and then schedule_delayed_work
++ * we guarantee this will stay on the correct cpu
++ */
++static void wq_sync_buffer(struct work_struct *work)
++{
++ struct oprofile_cpu_buffer * b =
++ container_of(work, struct oprofile_cpu_buffer, work.work);
++ if (b->cpu != smp_processor_id()) {
++ printk("WQ on CPU%d, prefer CPU%d\n",
++ smp_processor_id(), b->cpu);
++ }
++ sync_buffer(b->cpu);
++
++ /* don't re-add the work if we're shutting down */
++ if (work_enabled)
++ schedule_delayed_work(&b->work, DEFAULT_TIMER_EXPIRE);
++}
+diff -Nurb linux-2.6.27-590/fs/bio.c linux-2.6.27-591/fs/bio.c
+--- linux-2.6.27-590/fs/bio.c 2008-10-09 18:13:53.000000000 -0400
++++ linux-2.6.27-591/fs/bio.c 2010-01-29 15:43:46.000000000 -0500
+@@ -27,6 +27,7 @@
+ #include <linux/workqueue.h>
+ #include <linux/blktrace_api.h>
+ #include <scsi/sg.h> /* for struct sg_iovec */
++#include <linux/arrays.h>
+
+ static struct kmem_cache *bio_slab __read_mostly;
+
+@@ -44,6 +45,7 @@
+ };
+ #undef BV
+
++
+ /*
+ * fs_bio_set is the bio_set containing bio and iovec memory pools used by
+ * IO code that does not need private memory pools.
+@@ -1171,6 +1173,14 @@
+ }
+ }
+
++struct event_spec {
++ unsigned long pc;
++ unsigned long dcookie;
++ unsigned count;
++ unsigned char reason;
++};
++
++extern void (*rec_event)(void *,unsigned int);
+ /**
+ * bio_endio - end I/O on a bio
+ * @bio: bio
+@@ -1192,6 +1202,24 @@
+ else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
+ error = -EIO;
+
++#ifdef CONFIG_CHOPSTIX
++ if (rec_event) {
++ struct event event;
++ struct event_spec espec;
++ unsigned long eip;
++
++ espec.reason = 1; /* response */
++
++ eip = (unsigned long)bio->bi_end_io;
++ event.event_data = &espec;
++ espec.pc = eip;
++ event.event_type = 3;
++ /* index in the event array currently set up */
++ /* make sure the counters are loaded in the order we want them to show up */
++ (*rec_event)(&event, bio->bi_size);
++ }
++#endif
++
+ if (bio->bi_end_io)
+ bio->bi_end_io(bio, error);
+ }
+diff -Nurb linux-2.6.27-590/fs/bio.c.orig linux-2.6.27-591/fs/bio.c.orig
+--- linux-2.6.27-590/fs/bio.c.orig 1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.27-591/fs/bio.c.orig 2008-10-09 18:13:53.000000000 -0400
+@@ -0,0 +1,1401 @@
++/*
++ * Copyright (C) 2001 Jens Axboe <axboe@kernel.dk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public Licens
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-
++ *
++ */
++#include <linux/mm.h>
++#include <linux/swap.h>
++#include <linux/bio.h>
++#include <linux/blkdev.h>
++#include <linux/slab.h>
++#include <linux/init.h>
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/mempool.h>
++#include <linux/workqueue.h>
++#include <linux/blktrace_api.h>
++#include <scsi/sg.h> /* for struct sg_iovec */
++
++static struct kmem_cache *bio_slab __read_mostly;
++
++mempool_t *bio_split_pool __read_mostly;
++
++/*
++ * if you change this list, also change bvec_alloc or things will
++ * break badly! cannot be bigger than what you can fit into an
++ * unsigned short
++ */
++
++#define BV(x) { .nr_vecs = x, .name = "biovec-"__stringify(x) }
++static struct biovec_slab bvec_slabs[BIOVEC_NR_POOLS] __read_mostly = {
++ BV(1), BV(4), BV(16), BV(64), BV(128), BV(BIO_MAX_PAGES),
++};
++#undef BV
++
++/*
++ * fs_bio_set is the bio_set containing bio and iovec memory pools used by
++ * IO code that does not need private memory pools.
++ */
++struct bio_set *fs_bio_set;
++
++unsigned int bvec_nr_vecs(unsigned short idx)
++{
++ return bvec_slabs[idx].nr_vecs;
++}
++
++struct bio_vec *bvec_alloc_bs(gfp_t gfp_mask, int nr, unsigned long *idx, struct bio_set *bs)
++{
++ struct bio_vec *bvl;
++
++ /*
++ * see comment near bvec_array define!
++ */
++ switch (nr) {
++ case 1 : *idx = 0; break;
++ case 2 ... 4: *idx = 1; break;
++ case 5 ... 16: *idx = 2; break;
++ case 17 ... 64: *idx = 3; break;
++ case 65 ... 128: *idx = 4; break;
++ case 129 ... BIO_MAX_PAGES: *idx = 5; break;
++ default:
++ return NULL;
++ }
++ /*
++ * idx now points to the pool we want to allocate from
++ */
++
++ bvl = mempool_alloc(bs->bvec_pools[*idx], gfp_mask);
++ if (bvl)
++ memset(bvl, 0, bvec_nr_vecs(*idx) * sizeof(struct bio_vec));
++
++ return bvl;
++}
++
++void bio_free(struct bio *bio, struct bio_set *bio_set)
++{
++ if (bio->bi_io_vec) {
++ const int pool_idx = BIO_POOL_IDX(bio);
++
++ BIO_BUG_ON(pool_idx >= BIOVEC_NR_POOLS);
++
++ mempool_free(bio->bi_io_vec, bio_set->bvec_pools[pool_idx]);
++ }
++
++ if (bio_integrity(bio))
++ bio_integrity_free(bio, bio_set);
++
++ mempool_free(bio, bio_set->bio_pool);
++}
++
++/*
++ * default destructor for a bio allocated with bio_alloc_bioset()
++ */
++static void bio_fs_destructor(struct bio *bio)
++{
++ bio_free(bio, fs_bio_set);
++}
++
++void bio_init(struct bio *bio)
++{
++ memset(bio, 0, sizeof(*bio));
++ bio->bi_flags = 1 << BIO_UPTODATE;
++ atomic_set(&bio->bi_cnt, 1);
++}
++
++/**
++ * bio_alloc_bioset - allocate a bio for I/O
++ * @gfp_mask: the GFP_ mask given to the slab allocator
++ * @nr_iovecs: number of iovecs to pre-allocate
++ * @bs: the bio_set to allocate from
++ *
++ * Description:
++ * bio_alloc_bioset will first try it's on mempool to satisfy the allocation.
++ * If %__GFP_WAIT is set then we will block on the internal pool waiting
++ * for a &struct bio to become free.
++ *
++ * allocate bio and iovecs from the memory pools specified by the
++ * bio_set structure.
++ **/
++struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs)
++{
++ struct bio *bio = mempool_alloc(bs->bio_pool, gfp_mask);
++
++ if (likely(bio)) {
++ struct bio_vec *bvl = NULL;
++
++ bio_init(bio);
++ if (likely(nr_iovecs)) {
++ unsigned long uninitialized_var(idx);
++
++ bvl = bvec_alloc_bs(gfp_mask, nr_iovecs, &idx, bs);
++ if (unlikely(!bvl)) {
++ mempool_free(bio, bs->bio_pool);
++ bio = NULL;
++ goto out;
++ }
++ bio->bi_flags |= idx << BIO_POOL_OFFSET;
++ bio->bi_max_vecs = bvec_nr_vecs(idx);
++ }
++ bio->bi_io_vec = bvl;
++ }
++out:
++ return bio;
++}
++
++struct bio *bio_alloc(gfp_t gfp_mask, int nr_iovecs)
++{
++ struct bio *bio = bio_alloc_bioset(gfp_mask, nr_iovecs, fs_bio_set);
++
++ if (bio)
++ bio->bi_destructor = bio_fs_destructor;
++
++ return bio;
++}
++
++void zero_fill_bio(struct bio *bio)
++{
++ unsigned long flags;
++ struct bio_vec *bv;
++ int i;
++
++ bio_for_each_segment(bv, bio, i) {
++ char *data = bvec_kmap_irq(bv, &flags);
++ memset(data, 0, bv->bv_len);
++ flush_dcache_page(bv->bv_page);
++ bvec_kunmap_irq(data, &flags);
++ }
++}
++EXPORT_SYMBOL(zero_fill_bio);
++
++/**
++ * bio_put - release a reference to a bio
++ * @bio: bio to release reference to
++ *
++ * Description:
++ * Put a reference to a &struct bio, either one you have gotten with
++ * bio_alloc or bio_get. The last put of a bio will free it.
++ **/
++void bio_put(struct bio *bio)
++{
++ BIO_BUG_ON(!atomic_read(&bio->bi_cnt));
++
++ /*
++ * last put frees it
++ */
++ if (atomic_dec_and_test(&bio->bi_cnt)) {
++ bio->bi_next = NULL;
++ bio->bi_destructor(bio);
++ }
++}
++
++inline int bio_phys_segments(struct request_queue *q, struct bio *bio)
++{
++ if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
++ blk_recount_segments(q, bio);
++
++ return bio->bi_phys_segments;
++}
++
++inline int bio_hw_segments(struct request_queue *q, struct bio *bio)
++{
++ if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
++ blk_recount_segments(q, bio);
++
++ return bio->bi_hw_segments;
++}
++
++/**
++ * __bio_clone - clone a bio
++ * @bio: destination bio
++ * @bio_src: bio to clone
++ *
++ * Clone a &bio. Caller will own the returned bio, but not
++ * the actual data it points to. Reference count of returned
++ * bio will be one.
++ */
++void __bio_clone(struct bio *bio, struct bio *bio_src)
++{
++ memcpy(bio->bi_io_vec, bio_src->bi_io_vec,
++ bio_src->bi_max_vecs * sizeof(struct bio_vec));
++
++ /*
++ * most users will be overriding ->bi_bdev with a new target,
++ * so we don't set nor calculate new physical/hw segment counts here
++ */
++ bio->bi_sector = bio_src->bi_sector;
++ bio->bi_bdev = bio_src->bi_bdev;
++ bio->bi_flags |= 1 << BIO_CLONED;
++ bio->bi_rw = bio_src->bi_rw;
++ bio->bi_vcnt = bio_src->bi_vcnt;
++ bio->bi_size = bio_src->bi_size;
++ bio->bi_idx = bio_src->bi_idx;
++}
++
++/**
++ * bio_clone - clone a bio
++ * @bio: bio to clone
++ * @gfp_mask: allocation priority
++ *
++ * Like __bio_clone, only also allocates the returned bio
++ */
++struct bio *bio_clone(struct bio *bio, gfp_t gfp_mask)
++{
++ struct bio *b = bio_alloc_bioset(gfp_mask, bio->bi_max_vecs, fs_bio_set);
++
++ if (!b)
++ return NULL;
++
++ b->bi_destructor = bio_fs_destructor;
++ __bio_clone(b, bio);
++
++ if (bio_integrity(bio)) {
++ int ret;
++
++ ret = bio_integrity_clone(b, bio, fs_bio_set);
++
++ if (ret < 0)
++ return NULL;
++ }
++
++ return b;
++}
++
++/**
++ * bio_get_nr_vecs - return approx number of vecs
++ * @bdev: I/O target
++ *
++ * Return the approximate number of pages we can send to this target.
++ * There's no guarantee that you will be able to fit this number of pages
++ * into a bio, it does not account for dynamic restrictions that vary
++ * on offset.
++ */
++int bio_get_nr_vecs(struct block_device *bdev)
++{
++ struct request_queue *q = bdev_get_queue(bdev);
++ int nr_pages;
++
++ nr_pages = ((q->max_sectors << 9) + PAGE_SIZE - 1) >> PAGE_SHIFT;
++ if (nr_pages > q->max_phys_segments)
++ nr_pages = q->max_phys_segments;
++ if (nr_pages > q->max_hw_segments)
++ nr_pages = q->max_hw_segments;
++
++ return nr_pages;
++}
++
++static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page
++ *page, unsigned int len, unsigned int offset,
++ unsigned short max_sectors)
++{
++ int retried_segments = 0;
++ struct bio_vec *bvec;
++
++ /*
++ * cloned bio must not modify vec list
++ */
++ if (unlikely(bio_flagged(bio, BIO_CLONED)))
++ return 0;
++
++ if (((bio->bi_size + len) >> 9) > max_sectors)
++ return 0;
++
++ /*
++ * For filesystems with a blocksize smaller than the pagesize
++ * we will often be called with the same page as last time and
++ * a consecutive offset. Optimize this special case.
++ */
++ if (bio->bi_vcnt > 0) {
++ struct bio_vec *prev = &bio->bi_io_vec[bio->bi_vcnt - 1];
++
++ if (page == prev->bv_page &&
++ offset == prev->bv_offset + prev->bv_len) {
++ prev->bv_len += len;
++
++ if (q->merge_bvec_fn) {
++ struct bvec_merge_data bvm = {
++ .bi_bdev = bio->bi_bdev,
++ .bi_sector = bio->bi_sector,
++ .bi_size = bio->bi_size,
++ .bi_rw = bio->bi_rw,
++ };
++
++ if (q->merge_bvec_fn(q, &bvm, prev) < len) {
++ prev->bv_len -= len;
++ return 0;
++ }
++ }
++
++ goto done;
++ }
++ }
++
++ if (bio->bi_vcnt >= bio->bi_max_vecs)
++ return 0;
++
++ /*
++ * we might lose a segment or two here, but rather that than
++ * make this too complex.
++ */
++
++ while (bio->bi_phys_segments >= q->max_phys_segments
++ || bio->bi_hw_segments >= q->max_hw_segments
++ || BIOVEC_VIRT_OVERSIZE(bio->bi_size)) {
++
++ if (retried_segments)
++ return 0;
++
++ retried_segments = 1;
++ blk_recount_segments(q, bio);
++ }
++
++ /*
++ * setup the new entry, we might clear it again later if we
++ * cannot add the page
++ */
++ bvec = &bio->bi_io_vec[bio->bi_vcnt];
++ bvec->bv_page = page;
++ bvec->bv_len = len;
++ bvec->bv_offset = offset;
++
++ /*
++ * if queue has other restrictions (eg varying max sector size
++ * depending on offset), it can specify a merge_bvec_fn in the
++ * queue to get further control
++ */
++ if (q->merge_bvec_fn) {
++ struct bvec_merge_data bvm = {
++ .bi_bdev = bio->bi_bdev,
++ .bi_sector = bio->bi_sector,
++ .bi_size = bio->bi_size,
++ .bi_rw = bio->bi_rw,
++ };
++
++ /*
++ * merge_bvec_fn() returns number of bytes it can accept
++ * at this offset
++ */
++ if (q->merge_bvec_fn(q, &bvm, bvec) < len) {
++ bvec->bv_page = NULL;
++ bvec->bv_len = 0;
++ bvec->bv_offset = 0;
++ return 0;
++ }
++ }
++
++ /* If we may be able to merge these biovecs, force a recount */
++ if (bio->bi_vcnt && (BIOVEC_PHYS_MERGEABLE(bvec-1, bvec) ||
++ BIOVEC_VIRT_MERGEABLE(bvec-1, bvec)))
++ bio->bi_flags &= ~(1 << BIO_SEG_VALID);
++
++ bio->bi_vcnt++;
++ bio->bi_phys_segments++;
++ bio->bi_hw_segments++;
++ done:
++ bio->bi_size += len;
++ return len;
++}
++
++/**
++ * bio_add_pc_page - attempt to add page to bio
++ * @q: the target queue
++ * @bio: destination bio
++ * @page: page to add
++ * @len: vec entry length
++ * @offset: vec entry offset
++ *
++ * Attempt to add a page to the bio_vec maplist. This can fail for a
++ * number of reasons, such as the bio being full or target block
++ * device limitations. The target block device must allow bio's
++ * smaller than PAGE_SIZE, so it is always possible to add a single
++ * page to an empty bio. This should only be used by REQ_PC bios.
++ */
++int bio_add_pc_page(struct request_queue *q, struct bio *bio, struct page *page,
++ unsigned int len, unsigned int offset)
++{
++ return __bio_add_page(q, bio, page, len, offset, q->max_hw_sectors);
++}
++
++/**
++ * bio_add_page - attempt to add page to bio
++ * @bio: destination bio
++ * @page: page to add
++ * @len: vec entry length
++ * @offset: vec entry offset
++ *
++ * Attempt to add a page to the bio_vec maplist. This can fail for a
++ * number of reasons, such as the bio being full or target block
++ * device limitations. The target block device must allow bio's
++ * smaller than PAGE_SIZE, so it is always possible to add a single
++ * page to an empty bio.
++ */
++int bio_add_page(struct bio *bio, struct page *page, unsigned int len,
++ unsigned int offset)
++{
++ struct request_queue *q = bdev_get_queue(bio->bi_bdev);
++ return __bio_add_page(q, bio, page, len, offset, q->max_sectors);
++}
++
++struct bio_map_data {
++ struct bio_vec *iovecs;
++ int nr_sgvecs;
++ struct sg_iovec *sgvecs;
++};
++
++static void bio_set_map_data(struct bio_map_data *bmd, struct bio *bio,
++ struct sg_iovec *iov, int iov_count)
++{
++ memcpy(bmd->iovecs, bio->bi_io_vec, sizeof(struct bio_vec) * bio->bi_vcnt);
++ memcpy(bmd->sgvecs, iov, sizeof(struct sg_iovec) * iov_count);
++ bmd->nr_sgvecs = iov_count;
++ bio->bi_private = bmd;
++}
++
++static void bio_free_map_data(struct bio_map_data *bmd)
++{
++ kfree(bmd->iovecs);
++ kfree(bmd->sgvecs);
++ kfree(bmd);
++}
++
++static struct bio_map_data *bio_alloc_map_data(int nr_segs, int iov_count,
++ gfp_t gfp_mask)
++{
++ struct bio_map_data *bmd = kmalloc(sizeof(*bmd), gfp_mask);
++
++ if (!bmd)
++ return NULL;
++
++ bmd->iovecs = kmalloc(sizeof(struct bio_vec) * nr_segs, gfp_mask);
++ if (!bmd->iovecs) {
++ kfree(bmd);
++ return NULL;
++ }
++
++ bmd->sgvecs = kmalloc(sizeof(struct sg_iovec) * iov_count, gfp_mask);
++ if (bmd->sgvecs)
++ return bmd;
++
++ kfree(bmd->iovecs);
++ kfree(bmd);
++ return NULL;
++}
++
++static int __bio_copy_iov(struct bio *bio, struct bio_vec *iovecs,
++ struct sg_iovec *iov, int iov_count, int uncopy)
++{
++ int ret = 0, i;
++ struct bio_vec *bvec;
++ int iov_idx = 0;
++ unsigned int iov_off = 0;
++ int read = bio_data_dir(bio) == READ;
++
++ __bio_for_each_segment(bvec, bio, i, 0) {
++ char *bv_addr = page_address(bvec->bv_page);
++ unsigned int bv_len = iovecs[i].bv_len;
++
++ while (bv_len && iov_idx < iov_count) {
++ unsigned int bytes;
++ char *iov_addr;
++
++ bytes = min_t(unsigned int,
++ iov[iov_idx].iov_len - iov_off, bv_len);
++ iov_addr = iov[iov_idx].iov_base + iov_off;
++
++ if (!ret) {
++ if (!read && !uncopy)
++ ret = copy_from_user(bv_addr, iov_addr,
++ bytes);
++ if (read && uncopy)
++ ret = copy_to_user(iov_addr, bv_addr,
++ bytes);
++
++ if (ret)
++ ret = -EFAULT;
++ }
++
++ bv_len -= bytes;
++ bv_addr += bytes;
++ iov_addr += bytes;
++ iov_off += bytes;
++
++ if (iov[iov_idx].iov_len == iov_off) {
++ iov_idx++;
++ iov_off = 0;
++ }
++ }
++
++ if (uncopy)
++ __free_page(bvec->bv_page);
++ }
++
++ return ret;
++}
++
++/**
++ * bio_uncopy_user - finish previously mapped bio
++ * @bio: bio being terminated
++ *
++ * Free pages allocated from bio_copy_user() and write back data
++ * to user space in case of a read.
++ */
++int bio_uncopy_user(struct bio *bio)
++{
++ struct bio_map_data *bmd = bio->bi_private;
++ int ret;
++
++ ret = __bio_copy_iov(bio, bmd->iovecs, bmd->sgvecs, bmd->nr_sgvecs, 1);
++
++ bio_free_map_data(bmd);
++ bio_put(bio);
++ return ret;
++}
++
++/**
++ * bio_copy_user_iov - copy user data to bio
++ * @q: destination block queue
++ * @iov: the iovec.
++ * @iov_count: number of elements in the iovec
++ * @write_to_vm: bool indicating writing to pages or not
++ *
++ * Prepares and returns a bio for indirect user io, bouncing data
++ * to/from kernel pages as necessary. Must be paired with
++ * call bio_uncopy_user() on io completion.
++ */
++struct bio *bio_copy_user_iov(struct request_queue *q, struct sg_iovec *iov,
++ int iov_count, int write_to_vm)
++{
++ struct bio_map_data *bmd;
++ struct bio_vec *bvec;
++ struct page *page;
++ struct bio *bio;
++ int i, ret;
++ int nr_pages = 0;
++ unsigned int len = 0;
++
++ for (i = 0; i < iov_count; i++) {
++ unsigned long uaddr;
++ unsigned long end;
++ unsigned long start;
++
++ uaddr = (unsigned long)iov[i].iov_base;
++ end = (uaddr + iov[i].iov_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
++ start = uaddr >> PAGE_SHIFT;
++
++ nr_pages += end - start;
++ len += iov[i].iov_len;
++ }
++
++ bmd = bio_alloc_map_data(nr_pages, iov_count, GFP_KERNEL);
++ if (!bmd)
++ return ERR_PTR(-ENOMEM);
++
++ ret = -ENOMEM;
++ bio = bio_alloc(GFP_KERNEL, nr_pages);
++ if (!bio)
++ goto out_bmd;
++
++ bio->bi_rw |= (!write_to_vm << BIO_RW);
++
++ ret = 0;
++ while (len) {
++ unsigned int bytes = PAGE_SIZE;
++
++ if (bytes > len)
++ bytes = len;
++
++ page = alloc_page(q->bounce_gfp | GFP_KERNEL);
++ if (!page) {
++ ret = -ENOMEM;
++ break;
++ }
++
++ if (bio_add_pc_page(q, bio, page, bytes, 0) < bytes)
++ break;
++
++ len -= bytes;
++ }
++
++ if (ret)
++ goto cleanup;
++
++ /*
++ * success
++ */
++ if (!write_to_vm) {
++ ret = __bio_copy_iov(bio, bio->bi_io_vec, iov, iov_count, 0);
++ if (ret)
++ goto cleanup;
++ }
++
++ bio_set_map_data(bmd, bio, iov, iov_count);
++ return bio;
++cleanup:
++ bio_for_each_segment(bvec, bio, i)
++ __free_page(bvec->bv_page);
++
++ bio_put(bio);
++out_bmd:
++ bio_free_map_data(bmd);
++ return ERR_PTR(ret);
++}
++
++/**
++ * bio_copy_user - copy user data to bio
++ * @q: destination block queue
++ * @uaddr: start of user address
++ * @len: length in bytes
++ * @write_to_vm: bool indicating writing to pages or not
++ *
++ * Prepares and returns a bio for indirect user io, bouncing data
++ * to/from kernel pages as necessary. Must be paired with
++ * call bio_uncopy_user() on io completion.
++ */
++struct bio *bio_copy_user(struct request_queue *q, unsigned long uaddr,
++ unsigned int len, int write_to_vm)
++{
++ struct sg_iovec iov;
++
++ iov.iov_base = (void __user *)uaddr;
++ iov.iov_len = len;
++
++ return bio_copy_user_iov(q, &iov, 1, write_to_vm);
++}
++
++static struct bio *__bio_map_user_iov(struct request_queue *q,
++ struct block_device *bdev,
++ struct sg_iovec *iov, int iov_count,
++ int write_to_vm)
++{
++ int i, j;
++ int nr_pages = 0;
++ struct page **pages;
++ struct bio *bio;
++ int cur_page = 0;
++ int ret, offset;
++
++ for (i = 0; i < iov_count; i++) {
++ unsigned long uaddr = (unsigned long)iov[i].iov_base;
++ unsigned long len = iov[i].iov_len;
++ unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
++ unsigned long start = uaddr >> PAGE_SHIFT;
++
++ nr_pages += end - start;
++ /*
++ * buffer must be aligned to at least hardsector size for now
++ */
++ if (uaddr & queue_dma_alignment(q))
++ return ERR_PTR(-EINVAL);
++ }
++
++ if (!nr_pages)
++ return ERR_PTR(-EINVAL);
++
++ bio = bio_alloc(GFP_KERNEL, nr_pages);
++ if (!bio)
++ return ERR_PTR(-ENOMEM);
++
++ ret = -ENOMEM;
++ pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
++ if (!pages)
++ goto out;
++
++ for (i = 0; i < iov_count; i++) {
++ unsigned long uaddr = (unsigned long)iov[i].iov_base;
++ unsigned long len = iov[i].iov_len;
++ unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
++ unsigned long start = uaddr >> PAGE_SHIFT;
++ const int local_nr_pages = end - start;
++ const int page_limit = cur_page + local_nr_pages;
++
++ ret = get_user_pages_fast(uaddr, local_nr_pages,
++ write_to_vm, &pages[cur_page]);
++ if (ret < local_nr_pages) {
++ ret = -EFAULT;
++ goto out_unmap;
++ }
++
++ offset = uaddr & ~PAGE_MASK;
++ for (j = cur_page; j < page_limit; j++) {
++ unsigned int bytes = PAGE_SIZE - offset;
++
++ if (len <= 0)
++ break;
++
++ if (bytes > len)
++ bytes = len;
++
++ /*
++ * sorry...
++ */
++ if (bio_add_pc_page(q, bio, pages[j], bytes, offset) <
++ bytes)
++ break;
++
++ len -= bytes;
++ offset = 0;
++ }
++
++ cur_page = j;
++ /*
++ * release the pages we didn't map into the bio, if any
++ */
++ while (j < page_limit)
++ page_cache_release(pages[j++]);
++ }
++
++ kfree(pages);
++
++ /*
++ * set data direction, and check if mapped pages need bouncing
++ */
++ if (!write_to_vm)
++ bio->bi_rw |= (1 << BIO_RW);
++
++ bio->bi_bdev = bdev;
++ bio->bi_flags |= (1 << BIO_USER_MAPPED);
++ return bio;
++
++ out_unmap:
++ for (i = 0; i < nr_pages; i++) {
++ if(!pages[i])
++ break;
++ page_cache_release(pages[i]);
++ }
++ out:
++ kfree(pages);
++ bio_put(bio);
++ return ERR_PTR(ret);
++}
++
++/**
++ * bio_map_user - map user address into bio
++ * @q: the struct request_queue for the bio
++ * @bdev: destination block device
++ * @uaddr: start of user address
++ * @len: length in bytes
++ * @write_to_vm: bool indicating writing to pages or not
++ *
++ * Map the user space address into a bio suitable for io to a block
++ * device. Returns an error pointer in case of error.
++ */
++struct bio *bio_map_user(struct request_queue *q, struct block_device *bdev,
++ unsigned long uaddr, unsigned int len, int write_to_vm)
++{
++ struct sg_iovec iov;
++
++ iov.iov_base = (void __user *)uaddr;
++ iov.iov_len = len;
++
++ return bio_map_user_iov(q, bdev, &iov, 1, write_to_vm);
++}
++
++/**
++ * bio_map_user_iov - map user sg_iovec table into bio
++ * @q: the struct request_queue for the bio
++ * @bdev: destination block device
++ * @iov: the iovec.
++ * @iov_count: number of elements in the iovec
++ * @write_to_vm: bool indicating writing to pages or not
++ *
++ * Map the user space address into a bio suitable for io to a block
++ * device. Returns an error pointer in case of error.
++ */
++struct bio *bio_map_user_iov(struct request_queue *q, struct block_device *bdev,
++ struct sg_iovec *iov, int iov_count,
++ int write_to_vm)
++{
++ struct bio *bio;
++
++ bio = __bio_map_user_iov(q, bdev, iov, iov_count, write_to_vm);
++
++ if (IS_ERR(bio))
++ return bio;
++
++ /*
++ * subtle -- if __bio_map_user() ended up bouncing a bio,
++ * it would normally disappear when its bi_end_io is run.
++ * however, we need it for the unmap, so grab an extra
++ * reference to it
++ */
++ bio_get(bio);
++
++ return bio;
++}
++
++static void __bio_unmap_user(struct bio *bio)
++{
++ struct bio_vec *bvec;
++ int i;
++
++ /*
++ * make sure we dirty pages we wrote to
++ */
++ __bio_for_each_segment(bvec, bio, i, 0) {
++ if (bio_data_dir(bio) == READ)
++ set_page_dirty_lock(bvec->bv_page);
++
++ page_cache_release(bvec->bv_page);
++ }
++
++ bio_put(bio);
++}
++
++/**
++ * bio_unmap_user - unmap a bio
++ * @bio: the bio being unmapped
++ *
++ * Unmap a bio previously mapped by bio_map_user(). Must be called with
++ * a process context.
++ *
++ * bio_unmap_user() may sleep.
++ */
++void bio_unmap_user(struct bio *bio)
++{
++ __bio_unmap_user(bio);
++ bio_put(bio);
++}
++
++static void bio_map_kern_endio(struct bio *bio, int err)
++{
++ bio_put(bio);
++}
++
++
++static struct bio *__bio_map_kern(struct request_queue *q, void *data,
++ unsigned int len, gfp_t gfp_mask)
++{
++ unsigned long kaddr = (unsigned long)data;
++ unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
++ unsigned long start = kaddr >> PAGE_SHIFT;
++ const int nr_pages = end - start;
++ int offset, i;
++ struct bio *bio;
++
++ bio = bio_alloc(gfp_mask, nr_pages);
++ if (!bio)
++ return ERR_PTR(-ENOMEM);
++
++ offset = offset_in_page(kaddr);
++ for (i = 0; i < nr_pages; i++) {
++ unsigned int bytes = PAGE_SIZE - offset;
++
++ if (len <= 0)
++ break;
++
++ if (bytes > len)
++ bytes = len;
++
++ if (bio_add_pc_page(q, bio, virt_to_page(data), bytes,
++ offset) < bytes)
++ break;
++
++ data += bytes;
++ len -= bytes;
++ offset = 0;
++ }
++
++ bio->bi_end_io = bio_map_kern_endio;
++ return bio;
++}
++
++/**
++ * bio_map_kern - map kernel address into bio
++ * @q: the struct request_queue for the bio
++ * @data: pointer to buffer to map
++ * @len: length in bytes
++ * @gfp_mask: allocation flags for bio allocation
++ *
++ * Map the kernel address into a bio suitable for io to a block
++ * device. Returns an error pointer in case of error.
++ */
++struct bio *bio_map_kern(struct request_queue *q, void *data, unsigned int len,
++ gfp_t gfp_mask)
++{
++ struct bio *bio;
++
++ bio = __bio_map_kern(q, data, len, gfp_mask);
++ if (IS_ERR(bio))
++ return bio;
++
++ if (bio->bi_size == len)
++ return bio;
++
++ /*
++ * Don't support partial mappings.
++ */
++ bio_put(bio);
++ return ERR_PTR(-EINVAL);
++}
++
++static void bio_copy_kern_endio(struct bio *bio, int err)
++{
++ struct bio_vec *bvec;
++ const int read = bio_data_dir(bio) == READ;
++ struct bio_map_data *bmd = bio->bi_private;
++ int i;
++ char *p = bmd->sgvecs[0].iov_base;
++
++ __bio_for_each_segment(bvec, bio, i, 0) {
++ char *addr = page_address(bvec->bv_page);
++ int len = bmd->iovecs[i].bv_len;
++
++ if (read && !err)
++ memcpy(p, addr, len);
++
++ __free_page(bvec->bv_page);
++ p += len;
++ }
++
++ bio_free_map_data(bmd);
++ bio_put(bio);
++}
++
++/**
++ * bio_copy_kern - copy kernel address into bio
++ * @q: the struct request_queue for the bio
++ * @data: pointer to buffer to copy
++ * @len: length in bytes
++ * @gfp_mask: allocation flags for bio and page allocation
++ * @reading: data direction is READ
++ *
++ * copy the kernel address into a bio suitable for io to a block
++ * device. Returns an error pointer in case of error.
++ */
++struct bio *bio_copy_kern(struct request_queue *q, void *data, unsigned int len,
++ gfp_t gfp_mask, int reading)
++{
++ unsigned long kaddr = (unsigned long)data;
++ unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
++ unsigned long start = kaddr >> PAGE_SHIFT;
++ const int nr_pages = end - start;
++ struct bio *bio;
++ struct bio_vec *bvec;
++ struct bio_map_data *bmd;
++ int i, ret;
++ struct sg_iovec iov;
++
++ iov.iov_base = data;
++ iov.iov_len = len;
++
++ bmd = bio_alloc_map_data(nr_pages, 1, gfp_mask);
++ if (!bmd)
++ return ERR_PTR(-ENOMEM);
++
++ ret = -ENOMEM;
++ bio = bio_alloc(gfp_mask, nr_pages);
++ if (!bio)
++ goto out_bmd;
++
++ while (len) {
++ struct page *page;
++ unsigned int bytes = PAGE_SIZE;
++
++ if (bytes > len)
++ bytes = len;
++
++ page = alloc_page(q->bounce_gfp | gfp_mask);
++ if (!page) {
++ ret = -ENOMEM;
++ goto cleanup;
++ }
++
++ if (bio_add_pc_page(q, bio, page, bytes, 0) < bytes) {
++ ret = -EINVAL;
++ goto cleanup;
++ }
++
++ len -= bytes;
++ }
++
++ if (!reading) {
++ void *p = data;
++
++ bio_for_each_segment(bvec, bio, i) {
++ char *addr = page_address(bvec->bv_page);
++
++ memcpy(addr, p, bvec->bv_len);
++ p += bvec->bv_len;
++ }
++ }
++
++ bio->bi_private = bmd;
++ bio->bi_end_io = bio_copy_kern_endio;
++
++ bio_set_map_data(bmd, bio, &iov, 1);
++ return bio;
++cleanup:
++ bio_for_each_segment(bvec, bio, i)
++ __free_page(bvec->bv_page);
++
++ bio_put(bio);
++out_bmd:
++ bio_free_map_data(bmd);
++
++ return ERR_PTR(ret);
++}
++
++/*
++ * bio_set_pages_dirty() and bio_check_pages_dirty() are support functions
++ * for performing direct-IO in BIOs.
++ *
++ * The problem is that we cannot run set_page_dirty() from interrupt context
++ * because the required locks are not interrupt-safe. So what we can do is to
++ * mark the pages dirty _before_ performing IO. And in interrupt context,
++ * check that the pages are still dirty. If so, fine. If not, redirty them
++ * in process context.
++ *
++ * We special-case compound pages here: normally this means reads into hugetlb
++ * pages. The logic in here doesn't really work right for compound pages
++ * because the VM does not uniformly chase down the head page in all cases.
++ * But dirtiness of compound pages is pretty meaningless anyway: the VM doesn't
++ * handle them at all. So we skip compound pages here at an early stage.
++ *
++ * Note that this code is very hard to test under normal circumstances because
++ * direct-io pins the pages with get_user_pages(). This makes
++ * is_page_cache_freeable return false, and the VM will not clean the pages.
++ * But other code (eg, pdflush) could clean the pages if they are mapped
++ * pagecache.
++ *
++ * Simply disabling the call to bio_set_pages_dirty() is a good way to test the
++ * deferred bio dirtying paths.
++ */
++
++/*
++ * bio_set_pages_dirty() will mark all the bio's pages as dirty.
++ */
++void bio_set_pages_dirty(struct bio *bio)
++{
++ struct bio_vec *bvec = bio->bi_io_vec;
++ int i;
++
++ for (i = 0; i < bio->bi_vcnt; i++) {
++ struct page *page = bvec[i].bv_page;
++
++ if (page && !PageCompound(page))
++ set_page_dirty_lock(page);
++ }
++}
++
++static void bio_release_pages(struct bio *bio)
++{
++ struct bio_vec *bvec = bio->bi_io_vec;
++ int i;
++
++ for (i = 0; i < bio->bi_vcnt; i++) {
++ struct page *page = bvec[i].bv_page;
++
++ if (page)
++ put_page(page);
++ }
++}
++
++/*
++ * bio_check_pages_dirty() will check that all the BIO's pages are still dirty.
++ * If they are, then fine. If, however, some pages are clean then they must
++ * have been written out during the direct-IO read. So we take another ref on
++ * the BIO and the offending pages and re-dirty the pages in process context.
++ *
++ * It is expected that bio_check_pages_dirty() will wholly own the BIO from
++ * here on. It will run one page_cache_release() against each page and will
++ * run one bio_put() against the BIO.
++ */
++
++static void bio_dirty_fn(struct work_struct *work);
++
++static DECLARE_WORK(bio_dirty_work, bio_dirty_fn);
++static DEFINE_SPINLOCK(bio_dirty_lock);
++static struct bio *bio_dirty_list;
++
++/*
++ * This runs in process context
++ */
++static void bio_dirty_fn(struct work_struct *work)
++{
++ unsigned long flags;
++ struct bio *bio;
++
++ spin_lock_irqsave(&bio_dirty_lock, flags);
++ bio = bio_dirty_list;
++ bio_dirty_list = NULL;
++ spin_unlock_irqrestore(&bio_dirty_lock, flags);
++
++ while (bio) {
++ struct bio *next = bio->bi_private;
++
++ bio_set_pages_dirty(bio);
++ bio_release_pages(bio);
++ bio_put(bio);
++ bio = next;
++ }
++}
++
++void bio_check_pages_dirty(struct bio *bio)
++{
++ struct bio_vec *bvec = bio->bi_io_vec;
++ int nr_clean_pages = 0;
++ int i;
++
++ for (i = 0; i < bio->bi_vcnt; i++) {
++ struct page *page = bvec[i].bv_page;
++
++ if (PageDirty(page) || PageCompound(page)) {
++ page_cache_release(page);
++ bvec[i].bv_page = NULL;
++ } else {
++ nr_clean_pages++;
++ }
++ }
++
++ if (nr_clean_pages) {
++ unsigned long flags;
++
++ spin_lock_irqsave(&bio_dirty_lock, flags);
++ bio->bi_private = bio_dirty_list;
++ bio_dirty_list = bio;
++ spin_unlock_irqrestore(&bio_dirty_lock, flags);
++ schedule_work(&bio_dirty_work);
++ } else {
++ bio_put(bio);
++ }
++}
++
++/**
++ * bio_endio - end I/O on a bio
++ * @bio: bio
++ * @error: error, if any
++ *
++ * Description:
++ * bio_endio() will end I/O on the whole bio. bio_endio() is the
++ * preferred way to end I/O on a bio; it takes care of clearing
++ * BIO_UPTODATE on error. @error is 0 on success, and one of the
++ * established -Exxxx (-EIO, for instance) error values in case
++ * something went wrong. No one should call bi_end_io() directly on a
++ * bio unless they own it and thus know that it has an end_io
++ * function.
++ **/
++void bio_endio(struct bio *bio, int error)
++{
++ if (error)
++ clear_bit(BIO_UPTODATE, &bio->bi_flags);
++ else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
++ error = -EIO;
++
++ if (bio->bi_end_io)
++ bio->bi_end_io(bio, error);
++}
++
++void bio_pair_release(struct bio_pair *bp)
++{
++ if (atomic_dec_and_test(&bp->cnt)) {
++ struct bio *master = bp->bio1.bi_private;
++
++ bio_endio(master, bp->error);
++ mempool_free(bp, bp->bio2.bi_private);
++ }
++}
++
++static void bio_pair_end_1(struct bio *bi, int err)
++{
++ struct bio_pair *bp = container_of(bi, struct bio_pair, bio1);
++
++ if (err)
++ bp->error = err;
++
++ bio_pair_release(bp);
++}
++
++static void bio_pair_end_2(struct bio *bi, int err)
++{
++ struct bio_pair *bp = container_of(bi, struct bio_pair, bio2);
++
++ if (err)
++ bp->error = err;
++
++ bio_pair_release(bp);
++}
++
++/*
++ * split a bio - only worry about a bio with a single page
++ * in its iovec
++ */
++struct bio_pair *bio_split(struct bio *bi, mempool_t *pool, int first_sectors)
++{
++ struct bio_pair *bp = mempool_alloc(pool, GFP_NOIO);
++
++ if (!bp)
++ return bp;
++
++ blk_add_trace_pdu_int(bdev_get_queue(bi->bi_bdev), BLK_TA_SPLIT, bi,
++ bi->bi_sector + first_sectors);
++
++ BUG_ON(bi->bi_vcnt != 1);
++ BUG_ON(bi->bi_idx != 0);
++ atomic_set(&bp->cnt, 3);
++ bp->error = 0;
++ bp->bio1 = *bi;
++ bp->bio2 = *bi;
++ bp->bio2.bi_sector += first_sectors;
++ bp->bio2.bi_size -= first_sectors << 9;
++ bp->bio1.bi_size = first_sectors << 9;
++
++ bp->bv1 = bi->bi_io_vec[0];
++ bp->bv2 = bi->bi_io_vec[0];
++ bp->bv2.bv_offset += first_sectors << 9;
++ bp->bv2.bv_len -= first_sectors << 9;
++ bp->bv1.bv_len = first_sectors << 9;
++
++ bp->bio1.bi_io_vec = &bp->bv1;
++ bp->bio2.bi_io_vec = &bp->bv2;
++
++ bp->bio1.bi_max_vecs = 1;
++ bp->bio2.bi_max_vecs = 1;
++
++ bp->bio1.bi_end_io = bio_pair_end_1;
++ bp->bio2.bi_end_io = bio_pair_end_2;
++
++ bp->bio1.bi_private = bi;
++ bp->bio2.bi_private = pool;
++
++ if (bio_integrity(bi))
++ bio_integrity_split(bi, bp, first_sectors);
++
++ return bp;
++}
++
++
++/*
++ * create memory pools for biovec's in a bio_set.
++ * use the global biovec slabs created for general use.
++ */
++static int biovec_create_pools(struct bio_set *bs, int pool_entries)
++{
++ int i;
++
++ for (i = 0; i < BIOVEC_NR_POOLS; i++) {
++ struct biovec_slab *bp = bvec_slabs + i;
++ mempool_t **bvp = bs->bvec_pools + i;
++
++ *bvp = mempool_create_slab_pool(pool_entries, bp->slab);
++ if (!*bvp)
++ return -ENOMEM;
++ }
++ return 0;
++}
++
++static void biovec_free_pools(struct bio_set *bs)
++{
++ int i;
++
++ for (i = 0; i < BIOVEC_NR_POOLS; i++) {
++ mempool_t *bvp = bs->bvec_pools[i];
++
++ if (bvp)
++ mempool_destroy(bvp);
++ }
++
++}
++
++void bioset_free(struct bio_set *bs)
++{
++ if (bs->bio_pool)
++ mempool_destroy(bs->bio_pool);
++
++ bioset_integrity_free(bs);
++ biovec_free_pools(bs);
++
++ kfree(bs);
++}
++
++struct bio_set *bioset_create(int bio_pool_size, int bvec_pool_size)
++{
++ struct bio_set *bs = kzalloc(sizeof(*bs), GFP_KERNEL);
++
++ if (!bs)
++ return NULL;
++
++ bs->bio_pool = mempool_create_slab_pool(bio_pool_size, bio_slab);
++ if (!bs->bio_pool)
++ goto bad;
++
++ if (bioset_integrity_create(bs, bio_pool_size))
++ goto bad;
++
++ if (!biovec_create_pools(bs, bvec_pool_size))
++ return bs;
++
++bad:
++ bioset_free(bs);
++ return NULL;
++}
++
++static void __init biovec_init_slabs(void)
++{
++ int i;
++
++ for (i = 0; i < BIOVEC_NR_POOLS; i++) {
++ int size;
++ struct biovec_slab *bvs = bvec_slabs + i;
++
++ size = bvs->nr_vecs * sizeof(struct bio_vec);
++ bvs->slab = kmem_cache_create(bvs->name, size, 0,
++ SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
++ }
++}
++
++static int __init init_bio(void)
++{
++ bio_slab = KMEM_CACHE(bio, SLAB_HWCACHE_ALIGN|SLAB_PANIC);
++
++ bio_integrity_init_slab();
++ biovec_init_slabs();
++
++ fs_bio_set = bioset_create(BIO_POOL_SIZE, 2);
++ if (!fs_bio_set)
++ panic("bio: can't allocate bios\n");
++
++ bio_split_pool = mempool_create_kmalloc_pool(BIO_SPLIT_ENTRIES,
++ sizeof(struct bio_pair));
++ if (!bio_split_pool)
++ panic("bio: can't create split pool\n");
++
++ return 0;
++}
++
++subsys_initcall(init_bio);
++
++EXPORT_SYMBOL(bio_alloc);
++EXPORT_SYMBOL(bio_put);
++EXPORT_SYMBOL(bio_free);
++EXPORT_SYMBOL(bio_endio);
++EXPORT_SYMBOL(bio_init);
++EXPORT_SYMBOL(__bio_clone);
++EXPORT_SYMBOL(bio_clone);
++EXPORT_SYMBOL(bio_phys_segments);
++EXPORT_SYMBOL(bio_hw_segments);
++EXPORT_SYMBOL(bio_add_page);
++EXPORT_SYMBOL(bio_add_pc_page);
++EXPORT_SYMBOL(bio_get_nr_vecs);
++EXPORT_SYMBOL(bio_map_user);
++EXPORT_SYMBOL(bio_unmap_user);
++EXPORT_SYMBOL(bio_map_kern);
++EXPORT_SYMBOL(bio_copy_kern);
++EXPORT_SYMBOL(bio_pair_release);
++EXPORT_SYMBOL(bio_split);
++EXPORT_SYMBOL(bio_split_pool);
++EXPORT_SYMBOL(bio_copy_user);
++EXPORT_SYMBOL(bio_uncopy_user);
++EXPORT_SYMBOL(bioset_create);
++EXPORT_SYMBOL(bioset_free);
++EXPORT_SYMBOL(bio_alloc_bioset);
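
The split machinery above is reference-counted: bio_split() sets bp->cnt to 3, each half's completion drops one reference, and the caller drops the third with bio_pair_release(), at which point the original bio is completed with the worst error seen. A minimal sketch of the usual driver-side pattern, assuming a single-page bio as the BUG_ON()s above require; the helper name and boundary calculation are illustrative and not part of this patch:

	#include <linux/bio.h>
	#include <linux/blkdev.h>

	/* Hypothetical helper: submit a bio that straddles `boundary` as two halves. */
	static void submit_split(struct bio *bio, sector_t boundary)
	{
		struct bio_pair *bp;

		bp = bio_split(bio, bio_split_pool, boundary - bio->bi_sector);
		if (!bp) {
			bio_endio(bio, -ENOMEM);	/* mempool exhausted */
			return;
		}

		generic_make_request(&bp->bio1);	/* sectors below the boundary */
		generic_make_request(&bp->bio2);	/* sectors at and above it */
		bio_pair_release(bp);			/* drop the submitter's reference */
	}
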
+diff -Nurb linux-2.6.27-590/fs/exec.c linux-2.6.27-591/fs/exec.c
+--- linux-2.6.27-590/fs/exec.c 2010-01-26 17:49:20.000000000 -0500
++++ linux-2.6.27-591/fs/exec.c 2010-01-29 16:19:58.000000000 -0500
+@@ -27,6 +27,7 @@
+ #include <linux/fdtable.h>
+ #include <linux/mm.h>
+ #include <linux/stat.h>
++#include <linux/dcookies.h>
+ #include <linux/fcntl.h>
+ #include <linux/smp_lock.h>
+ #include <linux/swap.h>
+@@ -39,7 +40,7 @@
+ #include <linux/personality.h>
+ #include <linux/binfmts.h>
+ #include <linux/utsname.h>
+-#include <linux/pid_namespace.h>
++/*#include <linux/pid_namespace.h>*/
+ #include <linux/module.h>
+ #include <linux/namei.h>
+ #include <linux/proc_fs.h>
+@@ -698,6 +699,13 @@
+ goto out;
+ }
+
++ #ifdef CONFIG_CHOPSTIX
++ unsigned long cookie;
++ extern void (*rec_event)(void *, unsigned int);
++ if (rec_event && !nd.path.dentry->d_cookie)
++ get_dcookie(&nd.path, &cookie);
++ #endif
++
+ return file;
+
+ out_path_put:
+diff -Nurb linux-2.6.27-590/fs/exec.c.orig linux-2.6.27-591/fs/exec.c.orig
+--- linux-2.6.27-590/fs/exec.c.orig 1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.27-591/fs/exec.c.orig 2010-01-26 17:49:20.000000000 -0500
+@@ -0,0 +1,1857 @@
++/*
++ * linux/fs/exec.c
++ *
++ * Copyright (C) 1991, 1992 Linus Torvalds
++ */
++
++/*
++ * #!-checking implemented by tytso.
++ */
++/*
++ * Demand-loading implemented 01.12.91 - no need to read anything but
++ * the header into memory. The inode of the executable is put into
++ * "current->executable", and page faults do the actual loading. Clean.
++ *
++ * Once more I can proudly say that linux stood up to being changed: it
++ * was less than 2 hours work to get demand-loading completely implemented.
++ *
++ * Demand loading changed July 1993 by Eric Youngdale. Use mmap instead,
++ * current->executable is only used by the procfs. This allows a dispatch
++ * table to check for several different types of binary formats. We keep
++ * trying until we recognize the file or we run out of supported binary
++ * formats.
++ */
++
++#include <linux/slab.h>
++#include <linux/file.h>
++#include <linux/fdtable.h>
++#include <linux/mm.h>
++#include <linux/stat.h>
++#include <linux/fcntl.h>
++#include <linux/smp_lock.h>
++#include <linux/swap.h>
++#include <linux/string.h>
++#include <linux/init.h>
++#include <linux/pagemap.h>
++#include <linux/highmem.h>
++#include <linux/spinlock.h>
++#include <linux/key.h>
++#include <linux/personality.h>
++#include <linux/binfmts.h>
++#include <linux/utsname.h>
++#include <linux/pid_namespace.h>
++#include <linux/module.h>
++#include <linux/namei.h>
++#include <linux/proc_fs.h>
++#include <linux/mount.h>
++#include <linux/security.h>
++#include <linux/syscalls.h>
++#include <linux/tsacct_kern.h>
++#include <linux/cn_proc.h>
++#include <linux/audit.h>
++#include <linux/tracehook.h>
++
++#include <asm/uaccess.h>
++#include <asm/mmu_context.h>
++#include <asm/tlb.h>
++
++#ifdef CONFIG_KMOD
++#include <linux/kmod.h>
++#endif
++
++#ifdef __alpha__
++/* for /sbin/loader handling in search_binary_handler() */
++#include <linux/a.out.h>
++#endif
++
++int core_uses_pid;
++char core_pattern[CORENAME_MAX_SIZE] = "core";
++int suid_dumpable = 0;
++
++/* The maximal length of core_pattern is also specified in sysctl.c */
++
++static LIST_HEAD(formats);
++static DEFINE_RWLOCK(binfmt_lock);
++
++int register_binfmt(struct linux_binfmt * fmt)
++{
++ if (!fmt)
++ return -EINVAL;
++ write_lock(&binfmt_lock);
++ list_add(&fmt->lh, &formats);
++ write_unlock(&binfmt_lock);
++ return 0;
++}
++
++EXPORT_SYMBOL(register_binfmt);
++
++void unregister_binfmt(struct linux_binfmt * fmt)
++{
++ write_lock(&binfmt_lock);
++ list_del(&fmt->lh);
++ write_unlock(&binfmt_lock);
++}
++
++EXPORT_SYMBOL(unregister_binfmt);
++
++static inline void put_binfmt(struct linux_binfmt * fmt)
++{
++ module_put(fmt->module);
++}
++
++/*
++ * Note that a shared library must be both readable and executable due to
++ * security reasons.
++ *
++ * Also note that the load address is taken from the file itself.
++ */
++SYSCALL_DEFINE1(uselib, const char __user *, library)
++{
++ struct file *file;
++ struct nameidata nd;
++ char *tmp = getname(library);
++ int error = PTR_ERR(tmp);
++
++ if (!IS_ERR(tmp)) {
++ error = path_lookup_open(AT_FDCWD, tmp,
++ LOOKUP_FOLLOW, &nd,
++ FMODE_READ|FMODE_EXEC);
++ putname(tmp);
++ }
++ if (error)
++ goto out;
++
++ error = -EINVAL;
++ if (!S_ISREG(nd.path.dentry->d_inode->i_mode))
++ goto exit;
++
++ error = -EACCES;
++ if (nd.path.mnt->mnt_flags & MNT_NOEXEC)
++ goto exit;
++
++ error = vfs_permission(&nd, MAY_READ | MAY_EXEC | MAY_OPEN);
++ if (error)
++ goto exit;
++
++ file = nameidata_to_filp(&nd, O_RDONLY|O_LARGEFILE);
++ error = PTR_ERR(file);
++ if (IS_ERR(file))
++ goto out;
++
++ error = -ENOEXEC;
++ if(file->f_op) {
++ struct linux_binfmt * fmt;
++
++ read_lock(&binfmt_lock);
++ list_for_each_entry(fmt, &formats, lh) {
++ if (!fmt->load_shlib)
++ continue;
++ if (!try_module_get(fmt->module))
++ continue;
++ read_unlock(&binfmt_lock);
++ error = fmt->load_shlib(file);
++ read_lock(&binfmt_lock);
++ put_binfmt(fmt);
++ if (error != -ENOEXEC)
++ break;
++ }
++ read_unlock(&binfmt_lock);
++ }
++ fput(file);
++out:
++ return error;
++exit:
++ release_open_intent(&nd);
++ path_put(&nd.path);
++ goto out;
++}
++
++#ifdef CONFIG_MMU
++
++static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
++ int write)
++{
++ struct page *page;
++ int ret;
++
++#ifdef CONFIG_STACK_GROWSUP
++ if (write) {
++ ret = expand_stack_downwards(bprm->vma, pos);
++ if (ret < 0)
++ return NULL;
++ }
++#endif
++ ret = get_user_pages(current, bprm->mm, pos,
++ 1, write, 1, &page, NULL);
++ if (ret <= 0)
++ return NULL;
++
++ if (write) {
++ unsigned long size = bprm->vma->vm_end - bprm->vma->vm_start;
++ struct rlimit *rlim;
++
++ /*
++ * We've historically supported up to 32 pages (ARG_MAX)
++ * of argument strings even with small stacks
++ */
++ if (size <= ARG_MAX)
++ return page;
++
++ /*
++ * Limit to 1/4-th the stack size for the argv+env strings.
++ * This ensures that:
++ * - the remaining binfmt code will not run out of stack space,
++ * - the program will have a reasonable amount of stack left
++ * to work from.
++ */
++ rlim = current->signal->rlim;
++ if (size > rlim[RLIMIT_STACK].rlim_cur / 4) {
++ put_page(page);
++ return NULL;
++ }
++ }
++
++ return page;
++}
++
++static void put_arg_page(struct page *page)
++{
++ put_page(page);
++}
++
++static void free_arg_page(struct linux_binprm *bprm, int i)
++{
++}
++
++static void free_arg_pages(struct linux_binprm *bprm)
++{
++}
++
++static void flush_arg_page(struct linux_binprm *bprm, unsigned long pos,
++ struct page *page)
++{
++ flush_cache_page(bprm->vma, pos, page_to_pfn(page));
++}
++
++static int __bprm_mm_init(struct linux_binprm *bprm)
++{
++ int err = -ENOMEM;
++ struct vm_area_struct *vma = NULL;
++ struct mm_struct *mm = bprm->mm;
++
++ bprm->vma = vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
++ if (!vma)
++ goto err;
++
++ down_write(&mm->mmap_sem);
++ vma->vm_mm = mm;
++
++ /*
++ * Place the stack at the largest stack address the architecture
++ * supports. Later, we'll move this to an appropriate place. We don't
++ * use STACK_TOP because that can depend on attributes which aren't
++ * configured yet.
++ */
++ vma->vm_end = STACK_TOP_MAX;
++ vma->vm_start = vma->vm_end - PAGE_SIZE;
++
++ vma->vm_flags = VM_STACK_FLAGS;
++ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
++ err = insert_vm_struct(mm, vma);
++ if (err) {
++ up_write(&mm->mmap_sem);
++ goto err;
++ }
++
++ mm->total_vm = 0;
++ vx_vmpages_inc(mm);
++ mm->stack_vm = 1;
++ up_write(&mm->mmap_sem);
++
++ bprm->p = vma->vm_end - sizeof(void *);
++
++ return 0;
++
++err:
++ if (vma) {
++ bprm->vma = NULL;
++ kmem_cache_free(vm_area_cachep, vma);
++ }
++
++ return err;
++}
++
++static bool valid_arg_len(struct linux_binprm *bprm, long len)
++{
++ return len <= MAX_ARG_STRLEN;
++}
++
++#else
++
++static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
++ int write)
++{
++ struct page *page;
++
++ page = bprm->page[pos / PAGE_SIZE];
++ if (!page && write) {
++ page = alloc_page(GFP_HIGHUSER|__GFP_ZERO);
++ if (!page)
++ return NULL;
++ bprm->page[pos / PAGE_SIZE] = page;
++ }
++
++ return page;
++}
++
++static void put_arg_page(struct page *page)
++{
++}
++
++static void free_arg_page(struct linux_binprm *bprm, int i)
++{
++ if (bprm->page[i]) {
++ __free_page(bprm->page[i]);
++ bprm->page[i] = NULL;
++ }
++}
++
++static void free_arg_pages(struct linux_binprm *bprm)
++{
++ int i;
++
++ for (i = 0; i < MAX_ARG_PAGES; i++)
++ free_arg_page(bprm, i);
++}
++
++static void flush_arg_page(struct linux_binprm *bprm, unsigned long pos,
++ struct page *page)
++{
++}
++
++static int __bprm_mm_init(struct linux_binprm *bprm)
++{
++ bprm->p = PAGE_SIZE * MAX_ARG_PAGES - sizeof(void *);
++ return 0;
++}
++
++static bool valid_arg_len(struct linux_binprm *bprm, long len)
++{
++ return len <= bprm->p;
++}
++
++#endif /* CONFIG_MMU */
++
++/*
++ * Create a new mm_struct and populate it with a temporary stack
++ * vm_area_struct. We don't have enough context at this point to set the stack
++ * flags, permissions, and offset, so we use temporary values. We'll update
++ * them later in setup_arg_pages().
++ */
++int bprm_mm_init(struct linux_binprm *bprm)
++{
++ int err;
++ struct mm_struct *mm = NULL;
++
++ bprm->mm = mm = mm_alloc();
++ err = -ENOMEM;
++ if (!mm)
++ goto err;
++
++ err = init_new_context(current, mm);
++ if (err)
++ goto err;
++
++ err = __bprm_mm_init(bprm);
++ if (err)
++ goto err;
++
++ return 0;
++
++err:
++ if (mm) {
++ bprm->mm = NULL;
++ mmdrop(mm);
++ }
++
++ return err;
++}
++
++/*
++ * count() counts the number of strings in array ARGV.
++ */
++static int count(char __user * __user * argv, int max)
++{
++ int i = 0;
++
++ if (argv != NULL) {
++ for (;;) {
++ char __user * p;
++
++ if (get_user(p, argv))
++ return -EFAULT;
++ if (!p)
++ break;
++ argv++;
++ if(++i > max)
++ return -E2BIG;
++ cond_resched();
++ }
++ }
++ return i;
++}
++
++/*
++ * 'copy_strings()' copies argument/environment strings from the old
++ * process's memory to the new process's stack. The call to get_user_pages()
++ * ensures the destination page is created and not swapped out.
++ */
++static int copy_strings(int argc, char __user * __user * argv,
++ struct linux_binprm *bprm)
++{
++ struct page *kmapped_page = NULL;
++ char *kaddr = NULL;
++ unsigned long kpos = 0;
++ int ret;
++
++ while (argc-- > 0) {
++ char __user *str;
++ int len;
++ unsigned long pos;
++
++ if (get_user(str, argv+argc) ||
++ !(len = strnlen_user(str, MAX_ARG_STRLEN))) {
++ ret = -EFAULT;
++ goto out;
++ }
++
++ if (!valid_arg_len(bprm, len)) {
++ ret = -E2BIG;
++ goto out;
++ }
++
++ /* We're going to work our way backwards. */
++ pos = bprm->p;
++ str += len;
++ bprm->p -= len;
++
++ while (len > 0) {
++ int offset, bytes_to_copy;
++
++ offset = pos % PAGE_SIZE;
++ if (offset == 0)
++ offset = PAGE_SIZE;
++
++ bytes_to_copy = offset;
++ if (bytes_to_copy > len)
++ bytes_to_copy = len;
++
++ offset -= bytes_to_copy;
++ pos -= bytes_to_copy;
++ str -= bytes_to_copy;
++ len -= bytes_to_copy;
++
++ if (!kmapped_page || kpos != (pos & PAGE_MASK)) {
++ struct page *page;
++
++ page = get_arg_page(bprm, pos, 1);
++ if (!page) {
++ ret = -E2BIG;
++ goto out;
++ }
++
++ if (kmapped_page) {
++ flush_kernel_dcache_page(kmapped_page);
++ kunmap(kmapped_page);
++ put_arg_page(kmapped_page);
++ }
++ kmapped_page = page;
++ kaddr = kmap(kmapped_page);
++ kpos = pos & PAGE_MASK;
++ flush_arg_page(bprm, kpos, kmapped_page);
++ }
++ if (copy_from_user(kaddr+offset, str, bytes_to_copy)) {
++ ret = -EFAULT;
++ goto out;
++ }
++ }
++ }
++ ret = 0;
++out:
++ if (kmapped_page) {
++ flush_kernel_dcache_page(kmapped_page);
++ kunmap(kmapped_page);
++ put_arg_page(kmapped_page);
++ }
++ return ret;
++}
++
++/*
++ * Like copy_strings, but get argv and its values from kernel memory.
++ */
++int copy_strings_kernel(int argc,char ** argv, struct linux_binprm *bprm)
++{
++ int r;
++ mm_segment_t oldfs = get_fs();
++ set_fs(KERNEL_DS);
++ r = copy_strings(argc, (char __user * __user *)argv, bprm);
++ set_fs(oldfs);
++ return r;
++}
++EXPORT_SYMBOL(copy_strings_kernel);
++
++#ifdef CONFIG_MMU
++
++/*
++ * During bprm_mm_init(), we create a temporary stack at STACK_TOP_MAX. Once
++ * the binfmt code determines where the new stack should reside, we shift it to
++ * its final location. The process proceeds as follows:
++ *
++ * 1) Use shift to calculate the new vma endpoints.
++ * 2) Extend vma to cover both the old and new ranges. This ensures the
++ * arguments passed to subsequent functions are consistent.
++ * 3) Move vma's page tables to the new range.
++ * 4) Free up any cleared pgd range.
++ * 5) Shrink the vma to cover only the new range.
++ */
++static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
++{
++ struct mm_struct *mm = vma->vm_mm;
++ unsigned long old_start = vma->vm_start;
++ unsigned long old_end = vma->vm_end;
++ unsigned long length = old_end - old_start;
++ unsigned long new_start = old_start - shift;
++ unsigned long new_end = old_end - shift;
++ struct mmu_gather *tlb;
++
++ BUG_ON(new_start > new_end);
++
++ /*
++ * ensure there are no vmas between where we want to go
++ * and where we are
++ */
++ if (vma != find_vma(mm, new_start))
++ return -EFAULT;
++
++ /*
++ * cover the whole range: [new_start, old_end)
++ */
++ vma_adjust(vma, new_start, old_end, vma->vm_pgoff, NULL);
++
++ /*
++ * move the page tables downwards, on failure we rely on
++ * process cleanup to remove whatever mess we made.
++ */
++ if (length != move_page_tables(vma, old_start,
++ vma, new_start, length))
++ return -ENOMEM;
++
++ lru_add_drain();
++ tlb = tlb_gather_mmu(mm, 0);
++ if (new_end > old_start) {
++ /*
++ * when the old and new regions overlap clear from new_end.
++ */
++ free_pgd_range(tlb, new_end, old_end, new_end,
++ vma->vm_next ? vma->vm_next->vm_start : 0);
++ } else {
++ /*
++ * otherwise, clean from old_start; this is done to not touch
++ * the address space in [new_end, old_start), as some architectures
++ * have constraints on va-space that make this illegal (IA64) -
++ * for the others it's just a little faster.
++ */
++ free_pgd_range(tlb, old_start, old_end, new_end,
++ vma->vm_next ? vma->vm_next->vm_start : 0);
++ }
++ tlb_finish_mmu(tlb, new_end, old_end);
++
++ /*
++ * shrink the vma to just the new range.
++ */
++ vma_adjust(vma, new_start, new_end, vma->vm_pgoff, NULL);
++
++ return 0;
++}
++
++#define EXTRA_STACK_VM_PAGES 20 /* random */
++
++/*
++ * Finalizes the stack vm_area_struct. The flags and permissions are updated,
++ * the stack is optionally relocated, and some extra space is added.
++ */
++int setup_arg_pages(struct linux_binprm *bprm,
++ unsigned long stack_top,
++ int executable_stack)
++{
++ unsigned long ret;
++ unsigned long stack_shift;
++ struct mm_struct *mm = current->mm;
++ struct vm_area_struct *vma = bprm->vma;
++ struct vm_area_struct *prev = NULL;
++ unsigned long vm_flags;
++ unsigned long stack_base;
++
++#ifdef CONFIG_STACK_GROWSUP
++ /* Limit stack size to 1GB */
++ stack_base = current->signal->rlim[RLIMIT_STACK].rlim_max;
++ if (stack_base > (1 << 30))
++ stack_base = 1 << 30;
++
++ /* Make sure we didn't let the argument array grow too large. */
++ if (vma->vm_end - vma->vm_start > stack_base)
++ return -ENOMEM;
++
++ stack_base = PAGE_ALIGN(stack_top - stack_base);
++
++ stack_shift = vma->vm_start - stack_base;
++ mm->arg_start = bprm->p - stack_shift;
++ bprm->p = vma->vm_end - stack_shift;
++#else
++ stack_top = arch_align_stack(stack_top);
++ stack_top = PAGE_ALIGN(stack_top);
++ stack_shift = vma->vm_end - stack_top;
++
++ bprm->p -= stack_shift;
++ mm->arg_start = bprm->p;
++#endif
++
++ if (bprm->loader)
++ bprm->loader -= stack_shift;
++ bprm->exec -= stack_shift;
++
++ down_write(&mm->mmap_sem);
++ vm_flags = VM_STACK_FLAGS;
++
++ /*
++ * Adjust stack execute permissions; explicitly enable for
++ * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
++ * (arch default) otherwise.
++ */
++ if (unlikely(executable_stack == EXSTACK_ENABLE_X))
++ vm_flags |= VM_EXEC;
++ else if (executable_stack == EXSTACK_DISABLE_X)
++ vm_flags &= ~VM_EXEC;
++ vm_flags |= mm->def_flags;
++
++ ret = mprotect_fixup(vma, &prev, vma->vm_start, vma->vm_end,
++ vm_flags);
++ if (ret)
++ goto out_unlock;
++ BUG_ON(prev != vma);
++
++ /* Move stack pages down in memory. */
++ if (stack_shift) {
++ ret = shift_arg_pages(vma, stack_shift);
++ if (ret) {
++ up_write(&mm->mmap_sem);
++ return ret;
++ }
++ }
++
++#ifdef CONFIG_STACK_GROWSUP
++ stack_base = vma->vm_end + EXTRA_STACK_VM_PAGES * PAGE_SIZE;
++#else
++ stack_base = vma->vm_start - EXTRA_STACK_VM_PAGES * PAGE_SIZE;
++#endif
++ ret = expand_stack(vma, stack_base);
++ if (ret)
++ ret = -EFAULT;
++
++out_unlock:
++ up_write(&mm->mmap_sem);
++ return 0;
++}
++EXPORT_SYMBOL(setup_arg_pages);
++
++#endif /* CONFIG_MMU */
++
++struct file *open_exec(const char *name)
++{
++ struct nameidata nd;
++ struct file *file;
++ int err;
++
++ err = path_lookup_open(AT_FDCWD, name, LOOKUP_FOLLOW, &nd,
++ FMODE_READ|FMODE_EXEC);
++ if (err)
++ goto out;
++
++ err = -EACCES;
++ if (!S_ISREG(nd.path.dentry->d_inode->i_mode))
++ goto out_path_put;
++
++ if (nd.path.mnt->mnt_flags & MNT_NOEXEC)
++ goto out_path_put;
++
++ err = vfs_permission(&nd, MAY_EXEC | MAY_OPEN);
++ if (err)
++ goto out_path_put;
++
++ file = nameidata_to_filp(&nd, O_RDONLY|O_LARGEFILE);
++ if (IS_ERR(file))
++ return file;
++
++ err = deny_write_access(file);
++ if (err) {
++ fput(file);
++ goto out;
++ }
++
++ return file;
++
++ out_path_put:
++ release_open_intent(&nd);
++ path_put(&nd.path);
++ out:
++ return ERR_PTR(err);
++}
++EXPORT_SYMBOL(open_exec);
++
++int kernel_read(struct file *file, unsigned long offset,
++ char *addr, unsigned long count)
++{
++ mm_segment_t old_fs;
++ loff_t pos = offset;
++ int result;
++
++ old_fs = get_fs();
++ set_fs(get_ds());
++ /* The cast to a user pointer is valid due to the set_fs() */
++ result = vfs_read(file, (void __user *)addr, count, &pos);
++ set_fs(old_fs);
++ return result;
++}
++
++EXPORT_SYMBOL(kernel_read);
++
++static int exec_mmap(struct mm_struct *mm)
++{
++ struct task_struct *tsk;
++ struct mm_struct * old_mm, *active_mm;
++
++ /* Notify parent that we're no longer interested in the old VM */
++ tsk = current;
++ old_mm = current->mm;
++ mm_release(tsk, old_mm);
++
++ if (old_mm) {
++ /*
++ * Make sure that if there is a core dump in progress
++ * for the old mm, we get out and die instead of going
++ * through with the exec. We must hold mmap_sem around
++ * checking core_state and changing tsk->mm.
++ */
++ down_read(&old_mm->mmap_sem);
++ if (unlikely(old_mm->core_state)) {
++ up_read(&old_mm->mmap_sem);
++ return -EINTR;
++ }
++ }
++ task_lock(tsk);
++ active_mm = tsk->active_mm;
++ tsk->mm = mm;
++ tsk->active_mm = mm;
++ activate_mm(active_mm, mm);
++ task_unlock(tsk);
++ arch_pick_mmap_layout(mm);
++ if (old_mm) {
++ up_read(&old_mm->mmap_sem);
++ BUG_ON(active_mm != old_mm);
++ mm_update_next_owner(old_mm);
++ mmput(old_mm);
++ return 0;
++ }
++ mmdrop(active_mm);
++ return 0;
++}
++
++/*
++ * This function makes sure the current process has its own signal table,
++ * so that flush_signal_handlers can later reset the handlers without
++ * disturbing other processes. (Other processes might share the signal
++ * table via the CLONE_SIGHAND option to clone().)
++ */
++static int de_thread(struct task_struct *tsk)
++{
++ struct signal_struct *sig = tsk->signal;
++ struct sighand_struct *oldsighand = tsk->sighand;
++ spinlock_t *lock = &oldsighand->siglock;
++ struct task_struct *leader = NULL;
++ int count;
++
++ if (thread_group_empty(tsk))
++ goto no_thread_group;
++
++ /*
++ * Kill all other threads in the thread group.
++ */
++ spin_lock_irq(lock);
++ if (signal_group_exit(sig)) {
++ /*
++ * Another group action in progress, just
++ * return so that the signal is processed.
++ */
++ spin_unlock_irq(lock);
++ return -EAGAIN;
++ }
++ sig->group_exit_task = tsk;
++ zap_other_threads(tsk);
++
++ /* Account for the thread group leader hanging around: */
++ count = thread_group_leader(tsk) ? 1 : 2;
++ sig->notify_count = count;
++ while (atomic_read(&sig->count) > count) {
++ __set_current_state(TASK_UNINTERRUPTIBLE);
++ spin_unlock_irq(lock);
++ schedule();
++ spin_lock_irq(lock);
++ }
++ spin_unlock_irq(lock);
++
++ /*
++ * At this point all other threads have exited, all we have to
++ * do is to wait for the thread group leader to become inactive,
++ * and to assume its PID:
++ */
++ if (!thread_group_leader(tsk)) {
++ leader = tsk->group_leader;
++
++ sig->notify_count = -1; /* for exit_notify() */
++ for (;;) {
++ write_lock_irq(&tasklist_lock);
++ if (likely(leader->exit_state))
++ break;
++ __set_current_state(TASK_UNINTERRUPTIBLE);
++ write_unlock_irq(&tasklist_lock);
++ schedule();
++ }
++
++ if (unlikely(task_child_reaper(tsk) == leader))
++ task_active_pid_ns(tsk)->child_reaper = tsk;
++ /*
++ * The only record we have of the real-time age of a
++ * process, regardless of execs it's done, is start_time.
++ * All the past CPU time is accumulated in signal_struct
++ * from sister threads now dead. But in this non-leader
++ * exec, nothing survives from the original leader thread,
++ * whose birth marks the true age of this process now.
++ * When we take on its identity by switching to its PID, we
++ * also take its birthdate (always earlier than our own).
++ */
++ tsk->start_time = leader->start_time;
++
++ BUG_ON(!same_thread_group(leader, tsk));
++ BUG_ON(has_group_leader_pid(tsk));
++ /*
++ * An exec() starts a new thread group with the
++ * TGID of the previous thread group. Rehash the
++ * two threads with a switched PID, and release
++ * the former thread group leader:
++ */
++
++ /* Become a process group leader with the old leader's pid.
++ * The old leader becomes a thread of this thread group.
++ * Note: The old leader also uses this pid until release_task
++ * is called. Odd but simple and correct.
++ */
++ detach_pid(tsk, PIDTYPE_PID);
++ tsk->pid = leader->pid;
++ attach_pid(tsk, PIDTYPE_PID, task_pid(leader));
++ transfer_pid(leader, tsk, PIDTYPE_PGID);
++ transfer_pid(leader, tsk, PIDTYPE_SID);
++ list_replace_rcu(&leader->tasks, &tsk->tasks);
++
++ tsk->group_leader = tsk;
++ leader->group_leader = tsk;
++
++ tsk->exit_signal = SIGCHLD;
++
++ BUG_ON(leader->exit_state != EXIT_ZOMBIE);
++ leader->exit_state = EXIT_DEAD;
++
++ write_unlock_irq(&tasklist_lock);
++ }
++
++ sig->group_exit_task = NULL;
++ sig->notify_count = 0;
++
++no_thread_group:
++ exit_itimers(sig);
++ flush_itimer_signals();
++ if (leader)
++ release_task(leader);
++
++ if (atomic_read(&oldsighand->count) != 1) {
++ struct sighand_struct *newsighand;
++ /*
++ * This ->sighand is shared with the CLONE_SIGHAND
++ * but not CLONE_THREAD task, switch to the new one.
++ */
++ newsighand = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
++ if (!newsighand)
++ return -ENOMEM;
++
++ atomic_set(&newsighand->count, 1);
++ memcpy(newsighand->action, oldsighand->action,
++ sizeof(newsighand->action));
++
++ write_lock_irq(&tasklist_lock);
++ spin_lock(&oldsighand->siglock);
++ rcu_assign_pointer(tsk->sighand, newsighand);
++ spin_unlock(&oldsighand->siglock);
++ write_unlock_irq(&tasklist_lock);
++
++ __cleanup_sighand(oldsighand);
++ }
++
++ BUG_ON(!thread_group_leader(tsk));
++ return 0;
++}
++
++/*
++ * This function flushes out all traces of the currently running executable
++ * so that a new one can be started.
++ */
++static void flush_old_files(struct files_struct * files)
++{
++ long j = -1;
++ struct fdtable *fdt;
++
++ spin_lock(&files->file_lock);
++ for (;;) {
++ unsigned long set, i;
++
++ j++;
++ i = j * __NFDBITS;
++ fdt = files_fdtable(files);
++ if (i >= fdt->max_fds)
++ break;
++ set = fdt->close_on_exec->fds_bits[j];
++ if (!set)
++ continue;
++ fdt->close_on_exec->fds_bits[j] = 0;
++ spin_unlock(&files->file_lock);
++ for ( ; set ; i++,set >>= 1) {
++ if (set & 1) {
++ sys_close(i);
++ }
++ }
++ spin_lock(&files->file_lock);
++
++ }
++ spin_unlock(&files->file_lock);
++}
++
++char *get_task_comm(char *buf, struct task_struct *tsk)
++{
++ /* buf must be at least sizeof(tsk->comm) in size */
++ task_lock(tsk);
++ strncpy(buf, tsk->comm, sizeof(tsk->comm));
++ task_unlock(tsk);
++ return buf;
++}
++
++void set_task_comm(struct task_struct *tsk, char *buf)
++{
++ task_lock(tsk);
++ strlcpy(tsk->comm, buf, sizeof(tsk->comm));
++ task_unlock(tsk);
++}
++
++int flush_old_exec(struct linux_binprm * bprm)
++{
++ char * name;
++ int i, ch, retval;
++ char tcomm[sizeof(current->comm)];
++
++ /*
++ * Make sure we have a private signal table and that
++ * we are unassociated from the previous thread group.
++ */
++ retval = de_thread(current);
++ if (retval)
++ goto out;
++
++ set_mm_exe_file(bprm->mm, bprm->file);
++
++ /*
++ * Release all of the old mmap stuff
++ */
++ retval = exec_mmap(bprm->mm);
++ if (retval)
++ goto out;
++
++ bprm->mm = NULL; /* We're using it now */
++
++ /* This is the point of no return */
++ current->sas_ss_sp = current->sas_ss_size = 0;
++
++ if (current->euid == current->uid && current->egid == current->gid)
++ set_dumpable(current->mm, 1);
++ else
++ set_dumpable(current->mm, suid_dumpable);
++
++ name = bprm->filename;
++
++ /* Copies the binary name from after last slash */
++ for (i=0; (ch = *(name++)) != '\0';) {
++ if (ch == '/')
++ i = 0; /* overwrite what we wrote */
++ else
++ if (i < (sizeof(tcomm) - 1))
++ tcomm[i++] = ch;
++ }
++ tcomm[i] = '\0';
++ set_task_comm(current, tcomm);
++
++ current->flags &= ~PF_RANDOMIZE;
++ flush_thread();
++
++ /* Set the new mm task size. We have to do that late because it may
++ * depend on TIF_32BIT which is only updated in flush_thread() on
++ * some architectures like powerpc
++ */
++ current->mm->task_size = TASK_SIZE;
++
++ if (bprm->e_uid != current->euid || bprm->e_gid != current->egid) {
++ suid_keys(current);
++ set_dumpable(current->mm, suid_dumpable);
++ current->pdeath_signal = 0;
++ } else if (file_permission(bprm->file, MAY_READ) ||
++ (bprm->interp_flags & BINPRM_FLAGS_ENFORCE_NONDUMP)) {
++ suid_keys(current);
++ set_dumpable(current->mm, suid_dumpable);
++ }
++
++ /* An exec changes our domain. We are no longer part of the thread
++ group */
++
++ current->self_exec_id++;
++
++ flush_signal_handlers(current, 0);
++ flush_old_files(current->files);
++
++ return 0;
++
++out:
++ return retval;
++}
++
++EXPORT_SYMBOL(flush_old_exec);
++
++/*
++ * Fill the binprm structure from the inode.
++ * Check permissions, then read the first 128 (BINPRM_BUF_SIZE) bytes
++ */
++int prepare_binprm(struct linux_binprm *bprm)
++{
++ int mode;
++ struct inode * inode = bprm->file->f_path.dentry->d_inode;
++ int retval;
++
++ mode = inode->i_mode;
++ if (bprm->file->f_op == NULL)
++ return -EACCES;
++
++ bprm->e_uid = current->euid;
++ bprm->e_gid = current->egid;
++
++ if(!(bprm->file->f_path.mnt->mnt_flags & MNT_NOSUID)) {
++ /* Set-uid? */
++ if (mode & S_ISUID) {
++ current->personality &= ~PER_CLEAR_ON_SETID;
++ bprm->e_uid = inode->i_uid;
++ }
++
++ /* Set-gid? */
++ /*
++ * If setgid is set but no group execute bit then this
++ * is a candidate for mandatory locking, not a setgid
++ * executable.
++ */
++ if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) {
++ current->personality &= ~PER_CLEAR_ON_SETID;
++ bprm->e_gid = inode->i_gid;
++ }
++ }
++
++ /* fill in binprm security blob */
++ retval = security_bprm_set(bprm);
++ if (retval)
++ return retval;
++
++ memset(bprm->buf,0,BINPRM_BUF_SIZE);
++ return kernel_read(bprm->file,0,bprm->buf,BINPRM_BUF_SIZE);
++}
++
++EXPORT_SYMBOL(prepare_binprm);
++
++static int unsafe_exec(struct task_struct *p)
++{
++ int unsafe = tracehook_unsafe_exec(p);
++
++ if (atomic_read(&p->fs->count) > 1 ||
++ atomic_read(&p->files->count) > 1 ||
++ atomic_read(&p->sighand->count) > 1)
++ unsafe |= LSM_UNSAFE_SHARE;
++
++ return unsafe;
++}
++
++void compute_creds(struct linux_binprm *bprm)
++{
++ int unsafe;
++
++ if (bprm->e_uid != current->uid) {
++ suid_keys(current);
++ current->pdeath_signal = 0;
++ }
++ exec_keys(current);
++
++ task_lock(current);
++ unsafe = unsafe_exec(current);
++ security_bprm_apply_creds(bprm, unsafe);
++ task_unlock(current);
++ security_bprm_post_apply_creds(bprm);
++}
++EXPORT_SYMBOL(compute_creds);
++
++/*
++ * Arguments are '\0' separated strings found at the location bprm->p
++ * points to; chop off the first by relocating bprm->p to right after
++ * the first '\0' encountered.
++ */
++int remove_arg_zero(struct linux_binprm *bprm)
++{
++ int ret = 0;
++ unsigned long offset;
++ char *kaddr;
++ struct page *page;
++
++ if (!bprm->argc)
++ return 0;
++
++ do {
++ offset = bprm->p & ~PAGE_MASK;
++ page = get_arg_page(bprm, bprm->p, 0);
++ if (!page) {
++ ret = -EFAULT;
++ goto out;
++ }
++ kaddr = kmap_atomic(page, KM_USER0);
++
++ for (; offset < PAGE_SIZE && kaddr[offset];
++ offset++, bprm->p++)
++ ;
++
++ kunmap_atomic(kaddr, KM_USER0);
++ put_arg_page(page);
++
++ if (offset == PAGE_SIZE)
++ free_arg_page(bprm, (bprm->p >> PAGE_SHIFT) - 1);
++ } while (offset == PAGE_SIZE);
++
++ bprm->p++;
++ bprm->argc--;
++ ret = 0;
++
++out:
++ return ret;
++}
++EXPORT_SYMBOL(remove_arg_zero);
++
++/*
++ * cycle the list of binary formats handler, until one recognizes the image
++ */
++int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs)
++{
++ unsigned int depth = bprm->recursion_depth;
++ int try,retval;
++ struct linux_binfmt *fmt;
++#ifdef __alpha__
++ /* handle /sbin/loader.. */
++ {
++ struct exec * eh = (struct exec *) bprm->buf;
++
++ if (!bprm->loader && eh->fh.f_magic == 0x183 &&
++ (eh->fh.f_flags & 0x3000) == 0x3000)
++ {
++ struct file * file;
++ unsigned long loader;
++
++ allow_write_access(bprm->file);
++ fput(bprm->file);
++ bprm->file = NULL;
++
++ loader = bprm->vma->vm_end - sizeof(void *);
++
++ file = open_exec("/sbin/loader");
++ retval = PTR_ERR(file);
++ if (IS_ERR(file))
++ return retval;
++
++ /* Remember if the application is TASO. */
++ bprm->sh_bang = eh->ah.entry < 0x100000000UL;
++
++ bprm->file = file;
++ bprm->loader = loader;
++ retval = prepare_binprm(bprm);
++ if (retval<0)
++ return retval;
++ /* should call search_binary_handler recursively here,
++ but it does not matter */
++ }
++ }
++#endif
++ retval = security_bprm_check(bprm);
++ if (retval)
++ return retval;
++
++ /* kernel module loader fixup */
++ /* so we don't try to run modprobe in kernel space. */
++ set_fs(USER_DS);
++
++ retval = audit_bprm(bprm);
++ if (retval)
++ return retval;
++
++ retval = -ENOENT;
++ for (try=0; try<2; try++) {
++ read_lock(&binfmt_lock);
++ list_for_each_entry(fmt, &formats, lh) {
++ int (*fn)(struct linux_binprm *, struct pt_regs *) = fmt->load_binary;
++ if (!fn)
++ continue;
++ if (!try_module_get(fmt->module))
++ continue;
++ read_unlock(&binfmt_lock);
++ retval = fn(bprm, regs);
++ /*
++ * Restore the depth counter to its starting value
++ * in this call, so we don't have to rely on every
++ * load_binary function to restore it on return.
++ */
++ bprm->recursion_depth = depth;
++ if (retval >= 0) {
++ if (depth == 0)
++ tracehook_report_exec(fmt, bprm, regs);
++ put_binfmt(fmt);
++ allow_write_access(bprm->file);
++ if (bprm->file)
++ fput(bprm->file);
++ bprm->file = NULL;
++ current->did_exec = 1;
++ proc_exec_connector(current);
++ return retval;
++ }
++ read_lock(&binfmt_lock);
++ put_binfmt(fmt);
++ if (retval != -ENOEXEC || bprm->mm == NULL)
++ break;
++ if (!bprm->file) {
++ read_unlock(&binfmt_lock);
++ return retval;
++ }
++ }
++ read_unlock(&binfmt_lock);
++ if (retval != -ENOEXEC || bprm->mm == NULL) {
++ break;
++#ifdef CONFIG_KMOD
++ }else{
++#define printable(c) (((c)=='\t') || ((c)=='\n') || (0x20<=(c) && (c)<=0x7e))
++ if (printable(bprm->buf[0]) &&
++ printable(bprm->buf[1]) &&
++ printable(bprm->buf[2]) &&
++ printable(bprm->buf[3]))
++ break; /* -ENOEXEC */
++ request_module("binfmt-%04x", *(unsigned short *)(&bprm->buf[2]));
++#endif
++ }
++ }
++ return retval;
++}
++
++EXPORT_SYMBOL(search_binary_handler);
++
++void free_bprm(struct linux_binprm *bprm)
++{
++ free_arg_pages(bprm);
++ kfree(bprm);
++}
++
++/*
++ * sys_execve() executes a new program.
++ */
++int do_execve(char * filename,
++ char __user *__user *argv,
++ char __user *__user *envp,
++ struct pt_regs * regs)
++{
++ struct linux_binprm *bprm;
++ struct file *file;
++ struct files_struct *displaced;
++ int retval;
++
++ retval = unshare_files(&displaced);
++ if (retval)
++ goto out_ret;
++
++ retval = -ENOMEM;
++ bprm = kzalloc(sizeof(*bprm), GFP_KERNEL);
++ if (!bprm)
++ goto out_files;
++
++ file = open_exec(filename);
++ retval = PTR_ERR(file);
++ if (IS_ERR(file))
++ goto out_kfree;
++
++ sched_exec();
++
++ bprm->file = file;
++ bprm->filename = filename;
++ bprm->interp = filename;
++
++ retval = bprm_mm_init(bprm);
++ if (retval)
++ goto out_file;
++
++ bprm->argc = count(argv, MAX_ARG_STRINGS);
++ if ((retval = bprm->argc) < 0)
++ goto out_mm;
++
++ bprm->envc = count(envp, MAX_ARG_STRINGS);
++ if ((retval = bprm->envc) < 0)
++ goto out_mm;
++
++ retval = security_bprm_alloc(bprm);
++ if (retval)
++ goto out;
++
++ retval = prepare_binprm(bprm);
++ if (retval < 0)
++ goto out;
++
++ retval = copy_strings_kernel(1, &bprm->filename, bprm);
++ if (retval < 0)
++ goto out;
++
++ bprm->exec = bprm->p;
++ retval = copy_strings(bprm->envc, envp, bprm);
++ if (retval < 0)
++ goto out;
++
++ retval = copy_strings(bprm->argc, argv, bprm);
++ if (retval < 0)
++ goto out;
++
++ current->flags &= ~PF_KTHREAD;
++ retval = search_binary_handler(bprm,regs);
++ if (retval >= 0) {
++ /* execve success */
++ security_bprm_free(bprm);
++ acct_update_integrals(current);
++ free_bprm(bprm);
++ if (displaced)
++ put_files_struct(displaced);
++ return retval;
++ }
++
++out:
++ if (bprm->security)
++ security_bprm_free(bprm);
++
++out_mm:
++ if (bprm->mm)
++ mmput (bprm->mm);
++
++out_file:
++ if (bprm->file) {
++ allow_write_access(bprm->file);
++ fput(bprm->file);
++ }
++out_kfree:
++ free_bprm(bprm);
++
++out_files:
++ if (displaced)
++ reset_files_struct(displaced);
++out_ret:
++ return retval;
++}
++
++int set_binfmt(struct linux_binfmt *new)
++{
++ struct linux_binfmt *old = current->binfmt;
++
++ if (new) {
++ if (!try_module_get(new->module))
++ return -1;
++ }
++ current->binfmt = new;
++ if (old)
++ module_put(old->module);
++ return 0;
++}
++
++EXPORT_SYMBOL(set_binfmt);
++
++/* format_corename will inspect the pattern parameter, and output a
++ * name into corename, which must have space for at least
++ * CORENAME_MAX_SIZE bytes plus one byte for the zero terminator.
++ */
++static int format_corename(char *corename, int nr_threads, long signr)
++{
++ const char *pat_ptr = core_pattern;
++ int ispipe = (*pat_ptr == '|');
++ char *out_ptr = corename;
++ char *const out_end = corename + CORENAME_MAX_SIZE;
++ int rc;
++ int pid_in_pattern = 0;
++
++ /* Repeat as long as we have more pattern to process and more output
++ space */
++ while (*pat_ptr) {
++ if (*pat_ptr != '%') {
++ if (out_ptr == out_end)
++ goto out;
++ *out_ptr++ = *pat_ptr++;
++ } else {
++ switch (*++pat_ptr) {
++ case 0:
++ goto out;
++ /* Double percent, output one percent */
++ case '%':
++ if (out_ptr == out_end)
++ goto out;
++ *out_ptr++ = '%';
++ break;
++ /* pid */
++ case 'p':
++ pid_in_pattern = 1;
++ rc = snprintf(out_ptr, out_end - out_ptr,
++ "%d", task_tgid_vnr(current));
++ if (rc > out_end - out_ptr)
++ goto out;
++ out_ptr += rc;
++ break;
++ /* uid */
++ case 'u':
++ rc = snprintf(out_ptr, out_end - out_ptr,
++ "%d", current->uid);
++ if (rc > out_end - out_ptr)
++ goto out;
++ out_ptr += rc;
++ break;
++ /* gid */
++ case 'g':
++ rc = snprintf(out_ptr, out_end - out_ptr,
++ "%d", current->gid);
++ if (rc > out_end - out_ptr)
++ goto out;
++ out_ptr += rc;
++ break;
++ /* signal that caused the coredump */
++ case 's':
++ rc = snprintf(out_ptr, out_end - out_ptr,
++ "%ld", signr);
++ if (rc > out_end - out_ptr)
++ goto out;
++ out_ptr += rc;
++ break;
++ /* UNIX time of coredump */
++ case 't': {
++ struct timeval tv;
++ vx_gettimeofday(&tv);
++ rc = snprintf(out_ptr, out_end - out_ptr,
++ "%lu", tv.tv_sec);
++ if (rc > out_end - out_ptr)
++ goto out;
++ out_ptr += rc;
++ break;
++ }
++ /* hostname */
++ case 'h':
++ down_read(&uts_sem);
++ rc = snprintf(out_ptr, out_end - out_ptr,
++ "%s", utsname()->nodename);
++ up_read(&uts_sem);
++ if (rc > out_end - out_ptr)
++ goto out;
++ out_ptr += rc;
++ break;
++ /* executable */
++ case 'e':
++ rc = snprintf(out_ptr, out_end - out_ptr,
++ "%s", current->comm);
++ if (rc > out_end - out_ptr)
++ goto out;
++ out_ptr += rc;
++ break;
++ /* core limit size */
++ case 'c':
++ rc = snprintf(out_ptr, out_end - out_ptr,
++ "%lu", current->signal->rlim[RLIMIT_CORE].rlim_cur);
++ if (rc > out_end - out_ptr)
++ goto out;
++ out_ptr += rc;
++ break;
++ default:
++ break;
++ }
++ ++pat_ptr;
++ }
++ }
++ /* Backward compatibility with core_uses_pid:
++ *
++ * If core_pattern does not include a %p (as is the default)
++ * and core_uses_pid is set, then .%pid will be appended to
++ * the filename. Do not do this for piped commands. */
++ if (!ispipe && !pid_in_pattern
++ && (core_uses_pid || nr_threads)) {
++ rc = snprintf(out_ptr, out_end - out_ptr,
++ ".%d", task_tgid_vnr(current));
++ if (rc > out_end - out_ptr)
++ goto out;
++ out_ptr += rc;
++ }
++out:
++ *out_ptr = 0;
++ return ispipe;
++}
++
++static int zap_process(struct task_struct *start)
++{
++ struct task_struct *t;
++ int nr = 0;
++
++ start->signal->flags = SIGNAL_GROUP_EXIT;
++ start->signal->group_stop_count = 0;
++
++ t = start;
++ do {
++ if (t != current && t->mm) {
++ sigaddset(&t->pending.signal, SIGKILL);
++ signal_wake_up(t, 1);
++ nr++;
++ }
++ } while_each_thread(start, t);
++
++ return nr;
++}
++
++static inline int zap_threads(struct task_struct *tsk, struct mm_struct *mm,
++ struct core_state *core_state, int exit_code)
++{
++ struct task_struct *g, *p;
++ unsigned long flags;
++ int nr = -EAGAIN;
++
++ spin_lock_irq(&tsk->sighand->siglock);
++ if (!signal_group_exit(tsk->signal)) {
++ mm->core_state = core_state;
++ tsk->signal->group_exit_code = exit_code;
++ nr = zap_process(tsk);
++ }
++ spin_unlock_irq(&tsk->sighand->siglock);
++ if (unlikely(nr < 0))
++ return nr;
++
++ if (atomic_read(&mm->mm_users) == nr + 1)
++ goto done;
++ /*
++ * We should find and kill all tasks which use this mm, and we should
++ * count them correctly into ->nr_threads. We don't take tasklist
++ * lock, but this is safe wrt:
++ *
++ * fork:
++ * None of sub-threads can fork after zap_process(leader). All
++ * processes which were created before this point should be
++ * visible to zap_threads() because copy_process() adds the new
++ * process to the tail of init_task.tasks list, and lock/unlock
++ * of ->siglock provides a memory barrier.
++ *
++ * do_exit:
++ * The caller holds mm->mmap_sem. This means that the task which
++ * uses this mm can't pass exit_mm(), so it can't exit or clear
++ * its ->mm.
++ *
++ * de_thread:
++ * It does list_replace_rcu(&leader->tasks, &current->tasks),
++ * we must see either old or new leader, this does not matter.
++ * However, it can change p->sighand, so lock_task_sighand(p)
++ * must be used. Since p->mm != NULL and we hold ->mmap_sem
++ * it can't fail.
++ *
++ * Note also that "g" can be the old leader with ->mm == NULL
++ * and already unhashed and thus removed from ->thread_group.
++ * This is OK, __unhash_process()->list_del_rcu() does not
++ * clear the ->next pointer, we will find the new leader via
++ * next_thread().
++ */
++ rcu_read_lock();
++ for_each_process(g) {
++ if (g == tsk->group_leader)
++ continue;
++ if (g->flags & PF_KTHREAD)
++ continue;
++ p = g;
++ do {
++ if (p->mm) {
++ if (unlikely(p->mm == mm)) {
++ lock_task_sighand(p, &flags);
++ nr += zap_process(p);
++ unlock_task_sighand(p, &flags);
++ }
++ break;
++ }
++ } while_each_thread(g, p);
++ }
++ rcu_read_unlock();
++done:
++ atomic_set(&core_state->nr_threads, nr);
++ return nr;
++}
++
++static int coredump_wait(int exit_code, struct core_state *core_state)
++{
++ struct task_struct *tsk = current;
++ struct mm_struct *mm = tsk->mm;
++ struct completion *vfork_done;
++ int core_waiters;
++
++ init_completion(&core_state->startup);
++ core_state->dumper.task = tsk;
++ core_state->dumper.next = NULL;
++ core_waiters = zap_threads(tsk, mm, core_state, exit_code);
++ up_write(&mm->mmap_sem);
++
++ if (unlikely(core_waiters < 0))
++ goto fail;
++
++ /*
++ * Make sure nobody is waiting for us to release the VM,
++ * otherwise we can deadlock when we wait on each other
++ */
++ vfork_done = tsk->vfork_done;
++ if (vfork_done) {
++ tsk->vfork_done = NULL;
++ complete(vfork_done);
++ }
++
++ if (core_waiters)
++ wait_for_completion(&core_state->startup);
++fail:
++ return core_waiters;
++}
++
++static void coredump_finish(struct mm_struct *mm)
++{
++ struct core_thread *curr, *next;
++ struct task_struct *task;
++
++ next = mm->core_state->dumper.next;
++ while ((curr = next) != NULL) {
++ next = curr->next;
++ task = curr->task;
++ /*
++ * see exit_mm(), curr->task must not see
++ * ->task == NULL before we read ->next.
++ */
++ smp_mb();
++ curr->task = NULL;
++ wake_up_process(task);
++ }
++
++ mm->core_state = NULL;
++}
++
++/*
++ * set_dumpable converts traditional three-value dumpable to two flags and
++ * stores them into mm->flags. It modifies lower two bits of mm->flags, but
++ * these bits are not changed atomically. So get_dumpable can observe the
++ * intermediate state. To avoid unexpected behavior, get_dumpable returns
++ * either the old value or the new one by paying attention to the order of
++ * modifying the bits.
++ *
++ * dumpable | mm->flags (binary)
++ * old new | initial interim final
++ * ---------+-----------------------
++ * 0 1 | 00 01 01
++ * 0 2 | 00 10(*) 11
++ * 1 0 | 01 00 00
++ * 1 2 | 01 11 11
++ * 2 0 | 11 10(*) 00
++ * 2 1 | 11 11 01
++ *
++ * (*) get_dumpable regards interim value of 10 as 11.
++ */
++void set_dumpable(struct mm_struct *mm, int value)
++{
++ switch (value) {
++ case 0:
++ clear_bit(MMF_DUMPABLE, &mm->flags);
++ smp_wmb();
++ clear_bit(MMF_DUMP_SECURELY, &mm->flags);
++ break;
++ case 1:
++ set_bit(MMF_DUMPABLE, &mm->flags);
++ smp_wmb();
++ clear_bit(MMF_DUMP_SECURELY, &mm->flags);
++ break;
++ case 2:
++ set_bit(MMF_DUMP_SECURELY, &mm->flags);
++ smp_wmb();
++ set_bit(MMF_DUMPABLE, &mm->flags);
++ break;
++ }
++}
++
++int get_dumpable(struct mm_struct *mm)
++{
++ int ret;
++
++ ret = mm->flags & 0x3;
++ return (ret >= 2) ? 2 : ret;
++}
++
++int do_coredump(long signr, int exit_code, struct pt_regs * regs)
++{
++ struct core_state core_state;
++ char corename[CORENAME_MAX_SIZE + 1];
++ struct mm_struct *mm = current->mm;
++ struct linux_binfmt * binfmt;
++ struct inode * inode;
++ struct file * file;
++ int retval = 0;
++ int fsuid = current->fsuid;
++ int flag = 0;
++ int ispipe = 0;
++ unsigned long core_limit = current->signal->rlim[RLIMIT_CORE].rlim_cur;
++ char **helper_argv = NULL;
++ int helper_argc = 0;
++ char *delimit;
++
++ audit_core_dumps(signr);
++
++ binfmt = current->binfmt;
++ if (!binfmt || !binfmt->core_dump)
++ goto fail;
++ down_write(&mm->mmap_sem);
++ /*
++ * If another thread got here first, or we are not dumpable, bail out.
++ */
++ if (mm->core_state || !get_dumpable(mm)) {
++ up_write(&mm->mmap_sem);
++ goto fail;
++ }
++
++ /*
++ * We cannot trust fsuid as being the "true" uid of the
++ * process nor do we know its entire history. We only know it
++ * was tainted so we dump it as root in mode 2.
++ */
++ if (get_dumpable(mm) == 2) { /* Setuid core dump mode */
++ flag = O_EXCL; /* Stop rewrite attacks */
++ current->fsuid = 0; /* Dump root private */
++ }
++
++ retval = coredump_wait(exit_code, &core_state);
++ if (retval < 0)
++ goto fail;
++
++ /*
++ * Clear any false indication of pending signals that might
++ * be seen by the filesystem code called to write the core file.
++ */
++ clear_thread_flag(TIF_SIGPENDING);
++
++ /*
++ * lock_kernel() because format_corename() is controlled by sysctl, which
++ * uses lock_kernel()
++ */
++ lock_kernel();
++ ispipe = format_corename(corename, retval, signr);
++ unlock_kernel();
++ /*
++ * Don't bother to check the RLIMIT_CORE value if core_pattern points
++ * to a pipe. Since we're not writing directly to the filesystem
++ * RLIMIT_CORE doesn't really apply, as no actual core file will be
++ * created unless the pipe reader chooses to write out the core file
++ * at which point file size limits and permissions will be imposed
++ * as it does with any other process
++ */
++ if ((!ispipe) && (core_limit < binfmt->min_coredump))
++ goto fail_unlock;
++
++ if (ispipe) {
++ helper_argv = argv_split(GFP_KERNEL, corename+1, &helper_argc);
++ /* Terminate the string before the first option */
++ delimit = strchr(corename, ' ');
++ if (delimit)
++ *delimit = '\0';
++ delimit = strrchr(helper_argv[0], '/');
++ if (delimit)
++ delimit++;
++ else
++ delimit = helper_argv[0];
++ if (!strcmp(delimit, current->comm)) {
++ printk(KERN_NOTICE "Recursive core dump detected, "
++ "aborting\n");
++ goto fail_unlock;
++ }
++
++ core_limit = RLIM_INFINITY;
++
++ /* SIGPIPE can happen, but it's just never processed */
++ if (call_usermodehelper_pipe(corename+1, helper_argv, NULL,
++ &file)) {
++ printk(KERN_INFO "Core dump to %s pipe failed\n",
++ corename);
++ goto fail_unlock;
++ }
++ } else
++ file = filp_open(corename,
++ O_CREAT | 2 | O_NOFOLLOW | O_LARGEFILE | flag,
++ 0600);
++ if (IS_ERR(file))
++ goto fail_unlock;
++ inode = file->f_path.dentry->d_inode;
++ if (inode->i_nlink > 1)
++ goto close_fail; /* multiple links - don't dump */
++ if (!ispipe && d_unhashed(file->f_path.dentry))
++ goto close_fail;
++
++ /* AK: actually i see no reason to not allow this for named pipes etc.,
++ but keep the previous behaviour for now. */
++ if (!ispipe && !S_ISREG(inode->i_mode))
++ goto close_fail;
++ /*
++ * Don't allow local users to get cute and trick others to coredump
++ * into their pre-created files:
++ */
++ if (inode->i_uid != current->fsuid)
++ goto close_fail;
++ if (!file->f_op)
++ goto close_fail;
++ if (!file->f_op->write)
++ goto close_fail;
++ if (!ispipe && do_truncate(file->f_path.dentry, 0, 0, file) != 0)
++ goto close_fail;
++
++ retval = binfmt->core_dump(signr, regs, file, core_limit);
++
++ if (retval)
++ current->signal->group_exit_code |= 0x80;
++close_fail:
++ filp_close(file, NULL);
++fail_unlock:
++ if (helper_argv)
++ argv_free(helper_argv);
++
++ current->fsuid = fsuid;
++ coredump_finish(mm);
++fail:
++ return retval;
++}
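
format_corename() above is what gives /proc/sys/kernel/core_pattern its template language: %p, %u, %g, %s, %t, %h, %e and %c expand as in the switch above, a leading '|' routes the dump through call_usermodehelper_pipe(), and a plain pattern without %p falls back to appending the pid when core_uses_pid is set (or the dump has multiple threads). A small userspace illustration of installing a pattern that exercises three of those specifiers; the path and values are examples only, not taken from this patch:

	#include <stdio.h>

	int main(void)
	{
		FILE *f = fopen("/proc/sys/kernel/core_pattern", "w");

		if (!f)
			return 1;
		/* %e = comm of the dumping task, %p = its tgid, %t = time of the dump */
		fprintf(f, "/tmp/core.%%e.%%p.%%t\n");
		return fclose(f) ? 1 : 0;
	}
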
+diff -Nurb linux-2.6.27-590/include/linux/arrays.h linux-2.6.27-591/include/linux/arrays.h
+--- linux-2.6.27-590/include/linux/arrays.h 1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.27-591/include/linux/arrays.h 2010-01-29 15:43:46.000000000 -0500
+@@ -0,0 +1,36 @@
++#ifndef __ARRAYS_H__
++#define __ARRAYS_H__
++#include <linux/list.h>
++
++#define SAMPLING_METHOD_DEFAULT 0
++#define SAMPLING_METHOD_LOG 1
++
++/* Every probe has an array handler */
++
++/* XXX - Optimize this structure */
++
++extern void (*rec_event)(void *,unsigned int);
++struct array_handler {
++ struct list_head link;
++ unsigned int (*hash_func)(void *);
++ unsigned int (*sampling_func)(void *,int,void *);
++ unsigned short size;
++ unsigned int threshold;
++ unsigned char **expcount;
++ unsigned int sampling_method;
++ unsigned int **arrays;
++ unsigned int arraysize;
++ unsigned int num_samples[2];
++ void **epoch_samples; /* size-sized lists of samples */
++ unsigned int (*serialize)(void *, void *);
++ unsigned char code[5];
++};
++
++struct event {
++ struct list_head link;
++ void *event_data;
++ unsigned int count;
++ unsigned int event_type;
++ struct task_struct *task;
++};
++#endif
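
This header is the user-facing surface of the Chopstix hook: probe sites test the rec_event function pointer (as the fs/exec.c hunk above does) and, when a collector module has installed a handler, pass it a stack-allocated struct event. A minimal sketch of such a call site; the payload pointer, the event-type value, and the helper name are illustrative and not defined by this patch:

	#include <linux/arrays.h>
	#include <linux/sched.h>

	/* Hypothetical probe: report one sample of `weight` units for the current task. */
	static void chopstix_sample(void *payload, unsigned int weight)
	{
		struct event ev = {
			.event_data = payload,	/* probe-specific data (illustrative) */
			.event_type = 0,	/* hypothetical type id */
			.count      = weight,
			.task       = current,
		};

		if (rec_event)			/* NULL until a collector hooks in */
			(*rec_event)(&ev, weight);
	}
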
+diff -Nurb linux-2.6.27-590/include/linux/sched.h.orig linux-2.6.27-591/include/linux/sched.h.orig
+--- linux-2.6.27-590/include/linux/sched.h.orig 1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.27-591/include/linux/sched.h.orig 2010-01-26 17:49:20.000000000 -0500
+@@ -0,0 +1,2244 @@
++#ifndef _LINUX_SCHED_H
++#define _LINUX_SCHED_H
++
++/*
++ * cloning flags:
++ */
++#define CSIGNAL 0x000000ff /* signal mask to be sent at exit */
++#define CLONE_VM 0x00000100 /* set if VM shared between processes */
++#define CLONE_FS 0x00000200 /* set if fs info shared between processes */
++#define CLONE_FILES 0x00000400 /* set if open files shared between processes */
++#define CLONE_SIGHAND 0x00000800 /* set if signal handlers and blocked signals shared */
++#define CLONE_PTRACE 0x00002000 /* set if we want to let tracing continue on the child too */
++#define CLONE_VFORK 0x00004000 /* set if the parent wants the child to wake it up on mm_release */
++#define CLONE_PARENT 0x00008000 /* set if we want to have the same parent as the cloner */
++#define CLONE_THREAD 0x00010000 /* Same thread group? */
++#define CLONE_NEWNS 0x00020000 /* New namespace group? */
++#define CLONE_SYSVSEM 0x00040000 /* share system V SEM_UNDO semantics */
++#define CLONE_SETTLS 0x00080000 /* create a new TLS for the child */
++#define CLONE_PARENT_SETTID 0x00100000 /* set the TID in the parent */
++#define CLONE_CHILD_CLEARTID 0x00200000 /* clear the TID in the child */
++#define CLONE_DETACHED 0x00400000 /* Unused, ignored */
++#define CLONE_UNTRACED 0x00800000 /* set if the tracing process can't force CLONE_PTRACE on this clone */
++#define CLONE_CHILD_SETTID 0x01000000 /* set the TID in the child */
++#define CLONE_STOPPED 0x02000000 /* Start in stopped state */
++#define CLONE_NEWUTS 0x04000000 /* New utsname group? */
++#define CLONE_NEWIPC 0x08000000 /* New ipcs */
++#define CLONE_NEWUSER 0x10000000 /* New user namespace */
++#define CLONE_NEWPID 0x20000000 /* New pid namespace */
++#define CLONE_NEWNET 0x40000000 /* New network namespace */
++#define CLONE_IO 0x80000000 /* Clone io context */
++
++/*
++ * Scheduling policies
++ */
++#define SCHED_NORMAL 0
++#define SCHED_FIFO 1
++#define SCHED_RR 2
++#define SCHED_BATCH 3
++/* SCHED_ISO: reserved but not implemented yet */
++#define SCHED_IDLE 5
++
++#ifdef __KERNEL__
++
++struct sched_param {
++ int sched_priority;
++};
++
++#include <asm/param.h> /* for HZ */
++
++#include <linux/capability.h>
++#include <linux/threads.h>
++#include <linux/kernel.h>
++#include <linux/types.h>
++#include <linux/timex.h>
++#include <linux/jiffies.h>
++#include <linux/rbtree.h>
++#include <linux/thread_info.h>
++#include <linux/cpumask.h>
++#include <linux/errno.h>
++#include <linux/nodemask.h>
++#include <linux/mm_types.h>
++
++#include <asm/system.h>
++#include <asm/page.h>
++#include <asm/ptrace.h>
++#include <asm/cputime.h>
++
++#include <linux/smp.h>
++#include <linux/sem.h>
++#include <linux/signal.h>
++#include <linux/fs_struct.h>
++#include <linux/compiler.h>
++#include <linux/completion.h>
++#include <linux/percpu.h>
++#include <linux/topology.h>
++#include <linux/proportions.h>
++#include <linux/seccomp.h>
++#include <linux/rcupdate.h>
++#include <linux/rtmutex.h>
++
++#include <linux/time.h>
++#include <linux/param.h>
++#include <linux/resource.h>
++#include <linux/timer.h>
++#include <linux/hrtimer.h>
++#include <linux/task_io_accounting.h>
++#include <linux/kobject.h>
++#include <linux/latencytop.h>
++#include <linux/cred.h>
++#include <linux/pid.h>
++
++#include <asm/processor.h>
++
++struct mem_cgroup;
++struct exec_domain;
++struct futex_pi_state;
++struct robust_list_head;
++struct bio;
++
++/*
++ * List of flags we want to share for kernel threads,
++ * if only because they are not used by them anyway.
++ */
++#define CLONE_KERNEL (CLONE_FS | CLONE_FILES | CLONE_SIGHAND)
++
++/*
++ * These are the constant used to fake the fixed-point load-average
++ * counting. Some notes:
++ * - 11 bit fractions expand to 22 bits by the multiplies: this gives
++ * a load-average precision of 10 bits integer + 11 bits fractional
++ * - if you want to count load-averages more often, you need more
++ * precision, or rounding will get you. With 2-second counting freq,
++ * the EXP_n values would be 1981, 2034 and 2043 if still using only
++ * 11 bit fractions.
++ */
++extern unsigned long avenrun[]; /* Load averages */
++
++#define FSHIFT 11 /* nr of bits of precision */
++#define FIXED_1 (1<<FSHIFT) /* 1.0 as fixed-point */
++#define LOAD_FREQ (5*HZ+1) /* 5 sec intervals */
++#define EXP_1 1884 /* 1/exp(5sec/1min) as fixed-point */
++#define EXP_5 2014 /* 1/exp(5sec/5min) */
++#define EXP_15 2037 /* 1/exp(5sec/15min) */
++
++#define CALC_LOAD(load,exp,n) \
++ load *= exp; \
++ load += n*(FIXED_1-exp); \
++ load >>= FSHIFT;
++
++extern unsigned long total_forks;
++extern int nr_threads;
++DECLARE_PER_CPU(unsigned long, process_counts);
++extern int nr_processes(void);
++extern unsigned long nr_running(void);
++extern unsigned long nr_uninterruptible(void);
++extern unsigned long nr_active(void);
++extern unsigned long nr_iowait(void);
++
++struct seq_file;
++struct cfs_rq;
++struct task_group;
++#ifdef CONFIG_SCHED_DEBUG
++extern void proc_sched_show_task(struct task_struct *p, struct seq_file *m);
++extern void proc_sched_set_task(struct task_struct *p);
++extern void
++print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq);
++#else
++static inline void
++proc_sched_show_task(struct task_struct *p, struct seq_file *m)
++{
++}
++static inline void proc_sched_set_task(struct task_struct *p)
++{
++}
++static inline void
++print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
++{
++}
++#endif
++
++extern unsigned long long time_sync_thresh;
++
++/*
++ * Task state bitmask. NOTE! These bits are also
++ * encoded in fs/proc/array.c: get_task_state().
++ *
++ * We have two separate sets of flags: task->state
++ * is about runnability, while task->exit_state are
++ * about the task exiting. Confusing, but this way
++ * modifying one set can't modify the other one by
++ * mistake.
++ */
++#define TASK_RUNNING 0
++#define TASK_INTERRUPTIBLE 1
++#define TASK_UNINTERRUPTIBLE 2
++#define __TASK_STOPPED 4
++#define __TASK_TRACED 8
++/* in tsk->exit_state */
++#define EXIT_ZOMBIE 16
++#define EXIT_DEAD 32
++/* in tsk->state again */
++#define TASK_DEAD 64
++#define TASK_WAKEKILL 128
++
++/* Convenience macros for the sake of set_task_state */
++#define TASK_KILLABLE (TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
++#define TASK_STOPPED (TASK_WAKEKILL | __TASK_STOPPED)
++#define TASK_TRACED (TASK_WAKEKILL | __TASK_TRACED)
++
++/* Convenience macros for the sake of wake_up */
++#define TASK_NORMAL (TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
++#define TASK_ALL (TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED)
++
++/* get_task_state() */
++#define TASK_REPORT (TASK_RUNNING | TASK_INTERRUPTIBLE | \
++ TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
++ __TASK_TRACED)
++
++#define task_is_traced(task) ((task->state & __TASK_TRACED) != 0)
++#define task_is_stopped(task) ((task->state & __TASK_STOPPED) != 0)
++#define task_is_stopped_or_traced(task) \
++ ((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
++#define task_contributes_to_load(task) \
++ ((task->state & TASK_UNINTERRUPTIBLE) != 0)
++
++#define __set_task_state(tsk, state_value) \
++ do { (tsk)->state = (state_value); } while (0)
++#define set_task_state(tsk, state_value) \
++ set_mb((tsk)->state, (state_value))
++
++/*
++ * set_current_state() includes a barrier so that the write of current->state
++ * is correctly serialised wrt the caller's subsequent test of whether to
++ * actually sleep:
++ *
++ * set_current_state(TASK_UNINTERRUPTIBLE);
++ * if (do_i_need_to_sleep())
++ * schedule();
++ *
++ * If the caller does not need such serialisation then use __set_current_state()
++ */
++#define __set_current_state(state_value) \
++ do { current->state = (state_value); } while (0)
++#define set_current_state(state_value) \
++ set_mb(current->state, (state_value))
++
++/* Task command name length */
++#define TASK_COMM_LEN 16
++
++#include <linux/spinlock.h>
++
++/*
++ * This serializes "schedule()" and also protects
++ * the run-queue from deletions/modifications (but
++ * _adding_ to the beginning of the run-queue has
++ * a separate lock).
++ */
++extern rwlock_t tasklist_lock;
++extern spinlock_t mmlist_lock;
++
++struct task_struct;
++
++extern void sched_init(void);
++extern void sched_init_smp(void);
++extern asmlinkage void schedule_tail(struct task_struct *prev);
++extern void init_idle(struct task_struct *idle, int cpu);
++extern void init_idle_bootup_task(struct task_struct *idle);
++
++extern int runqueue_is_locked(void);
++
++extern cpumask_t nohz_cpu_mask;
++#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ)
++extern int select_nohz_load_balancer(int cpu);
++#else
++static inline int select_nohz_load_balancer(int cpu)
++{
++ return 0;
++}
++#endif
++
++extern unsigned long rt_needs_cpu(int cpu);
++
++/*
++ * Only dump TASK_* tasks. (0 for all tasks)
++ */
++extern void show_state_filter(unsigned long state_filter);
++
++static inline void show_state(void)
++{
++ show_state_filter(0);
++}
++
++extern void show_regs(struct pt_regs *);
++
++/*
++ * TASK is a pointer to the task whose backtrace we want to see (or NULL for current
++ * task), SP is the stack pointer of the first frame that should be shown in the back
++ * trace (or NULL if the entire call-chain of the task should be shown).
++ */
++extern void show_stack(struct task_struct *task, unsigned long *sp);
++
++void io_schedule(void);
++long io_schedule_timeout(long timeout);
++
++extern void cpu_init (void);
++extern void trap_init(void);
++extern void account_process_tick(struct task_struct *task, int user);
++extern void update_process_times(int user);
++extern void scheduler_tick(void);
++extern void hrtick_resched(void);
++
++extern void sched_show_task(struct task_struct *p);
++
++#ifdef CONFIG_DETECT_SOFTLOCKUP
++extern void softlockup_tick(void);
++extern void touch_softlockup_watchdog(void);
++extern void touch_all_softlockup_watchdogs(void);
++extern unsigned int softlockup_panic;
++extern unsigned long sysctl_hung_task_check_count;
++extern unsigned long sysctl_hung_task_timeout_secs;
++extern unsigned long sysctl_hung_task_warnings;
++extern int softlockup_thresh;
++#else
++static inline void softlockup_tick(void)
++{
++}
++static inline void spawn_softlockup_task(void)
++{
++}
++static inline void touch_softlockup_watchdog(void)
++{
++}
++static inline void touch_all_softlockup_watchdogs(void)
++{
++}
++#endif
++
++
++/* Attach to any functions which should be ignored in wchan output. */
++#define __sched __attribute__((__section__(".sched.text")))
++
++/* Linker adds these: start and end of __sched functions */
++extern char __sched_text_start[], __sched_text_end[];
++
++/* Is this address in the __sched functions? */
++extern int in_sched_functions(unsigned long addr);
++
++#define MAX_SCHEDULE_TIMEOUT LONG_MAX
++extern signed long schedule_timeout(signed long timeout);
++extern signed long schedule_timeout_interruptible(signed long timeout);
++extern signed long schedule_timeout_killable(signed long timeout);
++extern signed long schedule_timeout_uninterruptible(signed long timeout);
++asmlinkage void schedule(void);
++
++struct nsproxy;
++struct user_namespace;
++
++/* Maximum number of active map areas.. This is a random (large) number */
++#define DEFAULT_MAX_MAP_COUNT 65536
++
++extern int sysctl_max_map_count;
++
++#include <linux/aio.h>
++
++extern unsigned long
++arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
++ unsigned long, unsigned long);
++extern unsigned long
++arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
++ unsigned long len, unsigned long pgoff,
++ unsigned long flags);
++extern void arch_unmap_area(struct mm_struct *, unsigned long);
++extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long);
++
++#if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS
++/*
++ * The mm counters are not protected by its page_table_lock,
++ * so must be incremented atomically.
++ */
++#define __set_mm_counter(mm, member, value) \
++ atomic_long_set(&(mm)->_##member, value)
++#define get_mm_counter(mm, member) \
++ ((unsigned long)atomic_long_read(&(mm)->_##member))
++#else /* NR_CPUS < CONFIG_SPLIT_PTLOCK_CPUS */
++/*
++ * The mm counters are protected by its page_table_lock,
++ * so can be incremented directly.
++ */
++#define __set_mm_counter(mm, member, value) (mm)->_##member = (value)
++#define get_mm_counter(mm, member) ((mm)->_##member)
++
++#endif /* NR_CPUS < CONFIG_SPLIT_PTLOCK_CPUS */
++
++#define set_mm_counter(mm, member, value) \
++ vx_ ## member ## pages_sub((mm), (get_mm_counter(mm, member) - value))
++#define add_mm_counter(mm, member, value) \
++ vx_ ## member ## pages_add((mm), (value))
++#define inc_mm_counter(mm, member) vx_ ## member ## pages_inc((mm))
++#define dec_mm_counter(mm, member) vx_ ## member ## pages_dec((mm))
++
++#define get_mm_rss(mm) \
++ (get_mm_counter(mm, file_rss) + get_mm_counter(mm, anon_rss))
++#define update_hiwater_rss(mm) do { \
++ unsigned long _rss = get_mm_rss(mm); \
++ if ((mm)->hiwater_rss < _rss) \
++ (mm)->hiwater_rss = _rss; \
++} while (0)
++#define update_hiwater_vm(mm) do { \
++ if ((mm)->hiwater_vm < (mm)->total_vm) \
++ (mm)->hiwater_vm = (mm)->total_vm; \
++} while (0)
++
++extern void set_dumpable(struct mm_struct *mm, int value);
++extern int get_dumpable(struct mm_struct *mm);
++
++/* mm flags */
++/* dumpable bits */
++#define MMF_DUMPABLE 0 /* core dump is permitted */
++#define MMF_DUMP_SECURELY 1 /* core file is readable only by root */
++#define MMF_DUMPABLE_BITS 2
++
++/* coredump filter bits */
++#define MMF_DUMP_ANON_PRIVATE 2
++#define MMF_DUMP_ANON_SHARED 3
++#define MMF_DUMP_MAPPED_PRIVATE 4
++#define MMF_DUMP_MAPPED_SHARED 5
++#define MMF_DUMP_ELF_HEADERS 6
++#define MMF_DUMP_FILTER_SHIFT MMF_DUMPABLE_BITS
++#define MMF_DUMP_FILTER_BITS 5
++#define MMF_DUMP_FILTER_MASK \
++ (((1 << MMF_DUMP_FILTER_BITS) - 1) << MMF_DUMP_FILTER_SHIFT)
++#define MMF_DUMP_FILTER_DEFAULT \
++ ((1 << MMF_DUMP_ANON_PRIVATE) | (1 << MMF_DUMP_ANON_SHARED))
++
++struct sighand_struct {
++ atomic_t count;
++ struct k_sigaction action[_NSIG];
++ spinlock_t siglock;
++ wait_queue_head_t signalfd_wqh;
++};
++
++struct pacct_struct {
++ int ac_flag;
++ long ac_exitcode;
++ unsigned long ac_mem;
++ cputime_t ac_utime, ac_stime;
++ unsigned long ac_minflt, ac_majflt;
++};
++
++/*
++ * NOTE! "signal_struct" does not have it's own
++ * locking, because a shared signal_struct always
++ * implies a shared sighand_struct, so locking
++ * sighand_struct is always a proper superset of
++ * the locking of signal_struct.
++ */
++struct signal_struct {
++ atomic_t count;
++ atomic_t live;
++
++ wait_queue_head_t wait_chldexit; /* for wait4() */
++
++ /* current thread group signal load-balancing target: */
++ struct task_struct *curr_target;
++
++ /* shared signal handling: */
++ struct sigpending shared_pending;
++
++ /* thread group exit support */
++ int group_exit_code;
++ /* overloaded:
++ * - notify group_exit_task when ->count is equal to notify_count
++ * - everyone except group_exit_task is stopped during signal delivery
++ * of fatal signals, group_exit_task processes the signal.
++ */
++ struct task_struct *group_exit_task;
++ int notify_count;
++
++ /* thread group stop support, overloads group_exit_code too */
++ int group_stop_count;
++ unsigned int flags; /* see SIGNAL_* flags below */
++
++ /* POSIX.1b Interval Timers */
++ struct list_head posix_timers;
++
++ /* ITIMER_REAL timer for the process */
++ struct hrtimer real_timer;
++ struct pid *leader_pid;
++ ktime_t it_real_incr;
++
++ /* ITIMER_PROF and ITIMER_VIRTUAL timers for the process */
++ cputime_t it_prof_expires, it_virt_expires;
++ cputime_t it_prof_incr, it_virt_incr;
++
++ /* job control IDs */
++
++ /*
++ * pgrp and session fields are deprecated.
++ * use the task_session_Xnr and task_pgrp_Xnr routines below
++ */
++
++ union {
++ pid_t pgrp __deprecated;
++ pid_t __pgrp;
++ };
++
++ struct pid *tty_old_pgrp;
++
++ union {
++ pid_t session __deprecated;
++ pid_t __session;
++ };
++
++ /* boolean value for session group leader */
++ int leader;
++
++ struct tty_struct *tty; /* NULL if no tty */
++
++ /*
++ * Cumulative resource counters for dead threads in the group,
++ * and for reaped dead child processes forked by this group.
++ * Live threads maintain their own counters and add to these
++ * in __exit_signal, except for the group leader.
++ */
++ cputime_t utime, stime, cutime, cstime;
++ cputime_t gtime;
++ cputime_t cgtime;
++ unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
++ unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt;
++ unsigned long inblock, oublock, cinblock, coublock;
++ struct task_io_accounting ioac;
++
++ /*
++ * Cumulative ns of scheduled CPU time for dead threads in the
++ * group, not including a zombie group leader. (This only differs
++ * from jiffies_to_ns(utime + stime) if sched_clock uses something
++ * other than jiffies.)
++ */
++ unsigned long long sum_sched_runtime;
++
++ /*
++ * We don't bother to synchronize most readers of this at all,
++ * because there is no reader checking a limit that actually needs
++ * to get both rlim_cur and rlim_max atomically, and either one
++ * alone is a single word that can safely be read normally.
++ * getrlimit/setrlimit use task_lock(current->group_leader) to
++ * protect this instead of the siglock, because they really
++ * have no need to disable irqs.
++ */
++ struct rlimit rlim[RLIM_NLIMITS];
++
++ struct list_head cpu_timers[3];
++
++ /* keep the process-shared keyrings here so that they do the right
++ * thing in threads created with CLONE_THREAD */
++#ifdef CONFIG_KEYS
++ struct key *session_keyring; /* keyring inherited over fork */
++ struct key *process_keyring; /* keyring private to this process */
++#endif
++#ifdef CONFIG_BSD_PROCESS_ACCT
++ struct pacct_struct pacct; /* per-process accounting information */
++#endif
++#ifdef CONFIG_TASKSTATS
++ struct taskstats *stats;
++#endif
++#ifdef CONFIG_AUDIT
++ unsigned audit_tty;
++ struct tty_audit_buf *tty_audit_buf;
++#endif
++};
++
++/* Context switch must be unlocked if interrupts are to be enabled */
++#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
++# define __ARCH_WANT_UNLOCKED_CTXSW
++#endif
++
++/*
++ * Bits in flags field of signal_struct.
++ */
++#define SIGNAL_STOP_STOPPED 0x00000001 /* job control stop in effect */
++#define SIGNAL_STOP_DEQUEUED 0x00000002 /* stop signal dequeued */
++#define SIGNAL_STOP_CONTINUED 0x00000004 /* SIGCONT since WCONTINUED reap */
++#define SIGNAL_GROUP_EXIT 0x00000008 /* group exit in progress */
++/*
++ * Pending notifications to parent.
++ */
++#define SIGNAL_CLD_STOPPED 0x00000010
++#define SIGNAL_CLD_CONTINUED 0x00000020
++#define SIGNAL_CLD_MASK (SIGNAL_CLD_STOPPED|SIGNAL_CLD_CONTINUED)
++
++#define SIGNAL_UNKILLABLE 0x00000040 /* for init: ignore fatal signals */
++
++/* If true, all threads except ->group_exit_task have pending SIGKILL */
++static inline int signal_group_exit(const struct signal_struct *sig)
++{
++ return (sig->flags & SIGNAL_GROUP_EXIT) ||
++ (sig->group_exit_task != NULL);
++}
++
++/*
++ * Some day this will be a full-fledged user tracking system..
++ */
++struct user_struct {
++ atomic_t __count; /* reference count */
++ atomic_t processes; /* How many processes does this user have? */
++ atomic_t files; /* How many open files does this user have? */
++ atomic_t sigpending; /* How many pending signals does this user have? */
++#ifdef CONFIG_INOTIFY_USER
++ atomic_t inotify_watches; /* How many inotify watches does this user have? */
++ atomic_t inotify_devs; /* How many inotify devs does this user have opened? */
++#endif
++#ifdef CONFIG_EPOLL
++ atomic_t epoll_watches; /* The number of file descriptors currently watched */
++#endif
++#ifdef CONFIG_POSIX_MQUEUE
++ /* protected by mq_lock */
++ unsigned long mq_bytes; /* How many bytes can be allocated to mqueue? */
++#endif
++ unsigned long locked_shm; /* How many pages of mlocked shm ? */
++
++#ifdef CONFIG_KEYS
++ struct key *uid_keyring; /* UID specific keyring */
++ struct key *session_keyring; /* UID's default session keyring */
++#endif
++
++ /* Hash table maintenance information */
++ struct hlist_node uidhash_node;
++ uid_t uid;
++
++#ifdef CONFIG_USER_SCHED
++ struct task_group *tg;
++#ifdef CONFIG_SYSFS
++ struct kobject kobj;
++ struct work_struct work;
++#endif
++#endif
++};
++
++extern int uids_sysfs_init(void);
++
++extern struct user_struct *find_user(uid_t);
++
++extern struct user_struct root_user;
++#define INIT_USER (&root_user)
++
++struct backing_dev_info;
++struct reclaim_state;
++
++#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
++struct sched_info {
++ /* cumulative counters */
++ unsigned long pcount; /* # of times run on this cpu */
++ unsigned long long cpu_time, /* time spent on the cpu */
++ run_delay; /* time spent waiting on a runqueue */
++
++ /* timestamps */
++ unsigned long long last_arrival,/* when we last ran on a cpu */
++ last_queued; /* when we were last queued to run */
++#ifdef CONFIG_SCHEDSTATS
++ /* BKL stats */
++ unsigned int bkl_count;
++#endif
++};
++#endif /* defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) */
++
++#ifdef CONFIG_SCHEDSTATS
++extern const struct file_operations proc_schedstat_operations;
++#endif /* CONFIG_SCHEDSTATS */
++
++#ifdef CONFIG_TASK_DELAY_ACCT
++struct task_delay_info {
++ spinlock_t lock;
++ unsigned int flags; /* Private per-task flags */
++
++ /* For each stat XXX, add following, aligned appropriately
++ *
++ * struct timespec XXX_start, XXX_end;
++ * u64 XXX_delay;
++ * u32 XXX_count;
++ *
++ * Atomicity of updates to XXX_delay, XXX_count protected by
++ * single lock above (split into XXX_lock if contention is an issue).
++ */
++
++ /*
++ * XXX_count is incremented on every XXX operation, the delay
++ * associated with the operation is added to XXX_delay.
++ * XXX_delay contains the accumulated delay time in nanoseconds.
++ */
++ struct timespec blkio_start, blkio_end; /* Shared by blkio, swapin */
++ u64 blkio_delay; /* wait for sync block io completion */
++ u64 swapin_delay; /* wait for swapin block io completion */
++ u32 blkio_count; /* total count of the number of sync block */
++ /* io operations performed */
++ u32 swapin_count; /* total count of the number of swapin block */
++ /* io operations performed */
++
++ struct timespec freepages_start, freepages_end;
++ u64 freepages_delay; /* wait for memory reclaim */
++ u32 freepages_count; /* total count of memory reclaim */
++};
++#endif /* CONFIG_TASK_DELAY_ACCT */
++
++static inline int sched_info_on(void)
++{
++#ifdef CONFIG_SCHEDSTATS
++ return 1;
++#elif defined(CONFIG_TASK_DELAY_ACCT)
++ extern int delayacct_on;
++ return delayacct_on;
++#else
++ return 0;
++#endif
++}
++
++enum cpu_idle_type {
++ CPU_IDLE,
++ CPU_NOT_IDLE,
++ CPU_NEWLY_IDLE,
++ CPU_MAX_IDLE_TYPES
++};
++
++/*
++ * sched-domains (multiprocessor balancing) declarations:
++ */
++
++/*
++ * Increase resolution of nice-level calculations:
++ */
++#define SCHED_LOAD_SHIFT 10
++#define SCHED_LOAD_SCALE (1L << SCHED_LOAD_SHIFT)
++
++#define SCHED_LOAD_SCALE_FUZZ SCHED_LOAD_SCALE
++
++#ifdef CONFIG_SMP
++#define SD_LOAD_BALANCE 1 /* Do load balancing on this domain. */
++#define SD_BALANCE_NEWIDLE 2 /* Balance when about to become idle */
++#define SD_BALANCE_EXEC 4 /* Balance on exec */
++#define SD_BALANCE_FORK 8 /* Balance on fork, clone */
++#define SD_WAKE_IDLE 16 /* Wake to idle CPU on task wakeup */
++#define SD_WAKE_AFFINE 32 /* Wake task to waking CPU */
++#define SD_WAKE_BALANCE 64 /* Perform balancing at task wakeup */
++#define SD_SHARE_CPUPOWER 128 /* Domain members share cpu power */
++#define SD_POWERSAVINGS_BALANCE 256 /* Balance for power savings */
++#define SD_SHARE_PKG_RESOURCES 512 /* Domain members share cpu pkg resources */
++#define SD_SERIALIZE 1024 /* Only a single load balancing instance */
++#define SD_WAKE_IDLE_FAR 2048 /* Gain latency sacrificing cache hit */
++
++#define BALANCE_FOR_MC_POWER \
++ (sched_smt_power_savings ? SD_POWERSAVINGS_BALANCE : 0)
++
++#define BALANCE_FOR_PKG_POWER \
++ ((sched_mc_power_savings || sched_smt_power_savings) ? \
++ SD_POWERSAVINGS_BALANCE : 0)
++
++#define test_sd_parent(sd, flag) ((sd->parent && \
++ (sd->parent->flags & flag)) ? 1 : 0)
++
++
++struct sched_group {
++ struct sched_group *next; /* Must be a circular list */
++ cpumask_t cpumask;
++
++ /*
++ * CPU power of this group, SCHED_LOAD_SCALE being max power for a
++ * single CPU. This is read only (except for setup, hotplug CPU).
++ * Note : Never change cpu_power without recompute its reciprocal
++ */
++ unsigned int __cpu_power;
++ /*
++ * reciprocal value of cpu_power to avoid expensive divides
++ * (see include/linux/reciprocal_div.h)
++ */
++ u32 reciprocal_cpu_power;
++};
++
++enum sched_domain_level {
++ SD_LV_NONE = 0,
++ SD_LV_SIBLING,
++ SD_LV_MC,
++ SD_LV_CPU,
++ SD_LV_NODE,
++ SD_LV_ALLNODES,
++ SD_LV_MAX
++};
++
++struct sched_domain_attr {
++ int relax_domain_level;
++};
++
++#define SD_ATTR_INIT (struct sched_domain_attr) { \
++ .relax_domain_level = -1, \
++}
++
++struct sched_domain {
++ /* These fields must be setup */
++ struct sched_domain *parent; /* top domain must be null terminated */
++ struct sched_domain *child; /* bottom domain must be null terminated */
++ struct sched_group *groups; /* the balancing groups of the domain */
++ cpumask_t span; /* span of all CPUs in this domain */
++ unsigned long min_interval; /* Minimum balance interval ms */
++ unsigned long max_interval; /* Maximum balance interval ms */
++ unsigned int busy_factor; /* less balancing by factor if busy */
++ unsigned int imbalance_pct; /* No balance until over watermark */
++ unsigned int cache_nice_tries; /* Leave cache hot tasks for # tries */
++ unsigned int busy_idx;
++ unsigned int idle_idx;
++ unsigned int newidle_idx;
++ unsigned int wake_idx;
++ unsigned int forkexec_idx;
++ int flags; /* See SD_* */
++ enum sched_domain_level level;
++
++ /* Runtime fields. */
++ unsigned long last_balance; /* init to jiffies. units in jiffies */
++ unsigned int balance_interval; /* initialise to 1. units in ms. */
++ unsigned int nr_balance_failed; /* initialise to 0 */
++
++ u64 last_update;
++
++#ifdef CONFIG_SCHEDSTATS
++ /* load_balance() stats */
++ unsigned int lb_count[CPU_MAX_IDLE_TYPES];
++ unsigned int lb_failed[CPU_MAX_IDLE_TYPES];
++ unsigned int lb_balanced[CPU_MAX_IDLE_TYPES];
++ unsigned int lb_imbalance[CPU_MAX_IDLE_TYPES];
++ unsigned int lb_gained[CPU_MAX_IDLE_TYPES];
++ unsigned int lb_hot_gained[CPU_MAX_IDLE_TYPES];
++ unsigned int lb_nobusyg[CPU_MAX_IDLE_TYPES];
++ unsigned int lb_nobusyq[CPU_MAX_IDLE_TYPES];
++
++ /* Active load balancing */
++ unsigned int alb_count;
++ unsigned int alb_failed;
++ unsigned int alb_pushed;
++
++ /* SD_BALANCE_EXEC stats */
++ unsigned int sbe_count;
++ unsigned int sbe_balanced;
++ unsigned int sbe_pushed;
++
++ /* SD_BALANCE_FORK stats */
++ unsigned int sbf_count;
++ unsigned int sbf_balanced;
++ unsigned int sbf_pushed;
++
++ /* try_to_wake_up() stats */
++ unsigned int ttwu_wake_remote;
++ unsigned int ttwu_move_affine;
++ unsigned int ttwu_move_balance;
++#endif
++};
++
++extern void partition_sched_domains(int ndoms_new, cpumask_t *doms_new,
++ struct sched_domain_attr *dattr_new);
++extern int arch_reinit_sched_domains(void);
++
++#else /* CONFIG_SMP */
++
++struct sched_domain_attr;
++
++static inline void
++partition_sched_domains(int ndoms_new, cpumask_t *doms_new,
++ struct sched_domain_attr *dattr_new)
++{
++}
++#endif /* !CONFIG_SMP */
++
++struct io_context; /* See blkdev.h */
++#define NGROUPS_SMALL 32
++#define NGROUPS_PER_BLOCK ((unsigned int)(PAGE_SIZE / sizeof(gid_t)))
++struct group_info {
++ int ngroups;
++ atomic_t usage;
++ gid_t small_block[NGROUPS_SMALL];
++ int nblocks;
++ gid_t *blocks[0];
++};
++
++/*
++ * get_group_info() must be called with the owning task locked (via task_lock())
++ * when task != current. The reason being that the vast majority of callers are
++ * looking at current->group_info, which can not be changed except by the
++ * current task. Changing current->group_info requires the task lock, too.
++ */
++#define get_group_info(group_info) do { \
++ atomic_inc(&(group_info)->usage); \
++} while (0)
++
++#define put_group_info(group_info) do { \
++ if (atomic_dec_and_test(&(group_info)->usage)) \
++ groups_free(group_info); \
++} while (0)
++
++extern struct group_info *groups_alloc(int gidsetsize);
++extern void groups_free(struct group_info *group_info);
++extern int set_current_groups(struct group_info *group_info);
++extern int groups_search(struct group_info *group_info, gid_t grp);
++/* access the groups "array" with this macro */
++#define GROUP_AT(gi, i) \
++ ((gi)->blocks[(i)/NGROUPS_PER_BLOCK][(i)%NGROUPS_PER_BLOCK])
++
++#ifdef ARCH_HAS_PREFETCH_SWITCH_STACK
++extern void prefetch_stack(struct task_struct *t);
++#else
++static inline void prefetch_stack(struct task_struct *t) { }
++#endif
++
++struct audit_context; /* See audit.c */
++struct mempolicy;
++struct pipe_inode_info;
++struct uts_namespace;
++
++struct rq;
++struct sched_domain;
++
++struct sched_class {
++ const struct sched_class *next;
++
++ void (*enqueue_task) (struct rq *rq, struct task_struct *p, int wakeup);
++ void (*dequeue_task) (struct rq *rq, struct task_struct *p, int sleep);
++ void (*yield_task) (struct rq *rq);
++ int (*select_task_rq)(struct task_struct *p, int sync);
++
++ void (*check_preempt_curr) (struct rq *rq, struct task_struct *p);
++
++ struct task_struct * (*pick_next_task) (struct rq *rq);
++ void (*put_prev_task) (struct rq *rq, struct task_struct *p);
++
++#ifdef CONFIG_SMP
++ unsigned long (*load_balance) (struct rq *this_rq, int this_cpu,
++ struct rq *busiest, unsigned long max_load_move,
++ struct sched_domain *sd, enum cpu_idle_type idle,
++ int *all_pinned, int *this_best_prio);
++
++ int (*move_one_task) (struct rq *this_rq, int this_cpu,
++ struct rq *busiest, struct sched_domain *sd,
++ enum cpu_idle_type idle);
++ void (*pre_schedule) (struct rq *this_rq, struct task_struct *task);
++ void (*post_schedule) (struct rq *this_rq);
++ void (*task_wake_up) (struct rq *this_rq, struct task_struct *task);
++#endif
++
++ void (*set_curr_task) (struct rq *rq);
++ void (*task_tick) (struct rq *rq, struct task_struct *p, int queued);
++ void (*task_new) (struct rq *rq, struct task_struct *p);
++ void (*set_cpus_allowed)(struct task_struct *p,
++ const cpumask_t *newmask);
++
++ void (*rq_online)(struct rq *rq);
++ void (*rq_offline)(struct rq *rq);
++
++ void (*switched_from) (struct rq *this_rq, struct task_struct *task,
++ int running);
++ void (*switched_to) (struct rq *this_rq, struct task_struct *task,
++ int running);
++ void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
++ int oldprio, int running);
++
++#ifdef CONFIG_FAIR_GROUP_SCHED
++ void (*moved_group) (struct task_struct *p);
++#endif
++};
++
++struct load_weight {
++ unsigned long weight, inv_weight;
++};
++
++/*
++ * CFS stats for a schedulable entity (task, task-group etc)
++ *
++ * Current field usage histogram:
++ *
++ * 4 se->block_start
++ * 4 se->run_node
++ * 4 se->sleep_start
++ * 6 se->load.weight
++ */
++struct sched_entity {
++ struct load_weight load; /* for load-balancing */
++ struct rb_node run_node;
++ struct list_head group_node;
++ unsigned int on_rq;
++
++ u64 exec_start;
++ u64 sum_exec_runtime;
++ u64 vruntime;
++ u64 prev_sum_exec_runtime;
++
++ u64 last_wakeup;
++ u64 avg_overlap;
++
++#ifdef CONFIG_SCHEDSTATS
++ u64 wait_start;
++ u64 wait_max;
++ u64 wait_count;
++ u64 wait_sum;
++
++ u64 sleep_start;
++ u64 sleep_max;
++ s64 sum_sleep_runtime;
++
++ u64 block_start;
++ u64 block_max;
++ u64 exec_max;
++ u64 slice_max;
++
++ u64 nr_migrations;
++ u64 nr_migrations_cold;
++ u64 nr_failed_migrations_affine;
++ u64 nr_failed_migrations_running;
++ u64 nr_failed_migrations_hot;
++ u64 nr_forced_migrations;
++ u64 nr_forced2_migrations;
++
++ u64 nr_wakeups;
++ u64 nr_wakeups_sync;
++ u64 nr_wakeups_migrate;
++ u64 nr_wakeups_local;
++ u64 nr_wakeups_remote;
++ u64 nr_wakeups_affine;
++ u64 nr_wakeups_affine_attempts;
++ u64 nr_wakeups_passive;
++ u64 nr_wakeups_idle;
++#endif
++
++#ifdef CONFIG_FAIR_GROUP_SCHED
++ struct sched_entity *parent;
++ /* rq on which this entity is (to be) queued: */
++ struct cfs_rq *cfs_rq;
++ /* rq "owned" by this entity/group: */
++ struct cfs_rq *my_q;
++#endif
++};
++
++struct sched_rt_entity {
++ struct list_head run_list;
++ unsigned int time_slice;
++ unsigned long timeout;
++ int nr_cpus_allowed;
++
++ struct sched_rt_entity *back;
++#ifdef CONFIG_RT_GROUP_SCHED
++ struct sched_rt_entity *parent;
++ /* rq on which this entity is (to be) queued: */
++ struct rt_rq *rt_rq;
++ /* rq "owned" by this entity/group: */
++ struct rt_rq *my_q;
++#endif
++};
++
++struct task_struct {
++ volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */
++ void *stack;
++ atomic_t usage;
++ unsigned int flags; /* per process flags, defined below */
++ unsigned int ptrace;
++
++ int lock_depth; /* BKL lock depth */
++
++#ifdef CONFIG_SMP
++#ifdef __ARCH_WANT_UNLOCKED_CTXSW
++ int oncpu;
++#endif
++#endif
++
++ int prio, static_prio, normal_prio;
++ unsigned int rt_priority;
++ const struct sched_class *sched_class;
++ struct sched_entity se;
++ struct sched_rt_entity rt;
++
++#ifdef CONFIG_PREEMPT_NOTIFIERS
++ /* list of struct preempt_notifier: */
++ struct hlist_head preempt_notifiers;
++#endif
++
++ /*
++ * fpu_counter contains the number of consecutive context switches
++ * that the FPU is used. If this is over a threshold, the lazy fpu
++ * saving becomes unlazy to save the trap. This is an unsigned char
++ * so that after 256 times the counter wraps and the behavior turns
++ * lazy again; this to deal with bursty apps that only use FPU for
++ * a short time
++ */
++ unsigned char fpu_counter;
++ s8 oomkilladj; /* OOM kill score adjustment (bit shift). */
++#ifdef CONFIG_BLK_DEV_IO_TRACE
++ unsigned int btrace_seq;
++#endif
++
++ unsigned int policy;
++ cpumask_t cpus_allowed;
++
++#ifdef CONFIG_PREEMPT_RCU
++ int rcu_read_lock_nesting;
++ int rcu_flipctr_idx;
++#endif /* #ifdef CONFIG_PREEMPT_RCU */
++
++#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
++ struct sched_info sched_info;
++#endif
++
++ struct list_head tasks;
++
++ struct mm_struct *mm, *active_mm;
++
++/* task state */
++ struct linux_binfmt *binfmt;
++ int exit_state;
++ int exit_code, exit_signal;
++ int pdeath_signal; /* The signal sent when the parent dies */
++ /* ??? */
++ unsigned int personality;
++ unsigned did_exec:1;
++ pid_t pid;
++ pid_t tgid;
++
++#ifdef CONFIG_CC_STACKPROTECTOR
++ /* Canary value for the -fstack-protector gcc feature */
++ unsigned long stack_canary;
++#endif
++ /*
++ * pointers to (original) parent process, youngest child, younger sibling,
++ * older sibling, respectively. (p->father can be replaced with
++ * p->real_parent->pid)
++ */
++ struct task_struct *real_parent; /* real parent process */
++ struct task_struct *parent; /* recipient of SIGCHLD, wait4() reports */
++ /*
++ * children/sibling forms the list of my natural children
++ */
++ struct list_head children; /* list of my children */
++ struct list_head sibling; /* linkage in my parent's children list */
++ struct task_struct *group_leader; /* threadgroup leader */
++
++ /*
++ * ptraced is the list of tasks this task is using ptrace on.
++ * This includes both natural children and PTRACE_ATTACH targets.
++ * p->ptrace_entry is p's link on the p->parent->ptraced list.
++ */
++ struct list_head ptraced;
++ struct list_head ptrace_entry;
++
++ /* PID/PID hash table linkage. */
++ struct pid_link pids[PIDTYPE_MAX];
++ struct list_head thread_group;
++
++ struct completion *vfork_done; /* for vfork() */
++ int __user *set_child_tid; /* CLONE_CHILD_SETTID */
++ int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
++
++ cputime_t utime, stime, utimescaled, stimescaled;
++ cputime_t gtime;
++ cputime_t prev_utime, prev_stime;
++ unsigned long nvcsw, nivcsw; /* context switch counts */
++ struct timespec start_time; /* monotonic time */
++ struct timespec real_start_time; /* boot based time */
++/* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */
++ unsigned long min_flt, maj_flt;
++
++ cputime_t it_prof_expires, it_virt_expires;
++ unsigned long long it_sched_expires;
++ struct list_head cpu_timers[3];
++
++/* process credentials */
++ uid_t uid,euid,suid,fsuid;
++ gid_t gid,egid,sgid,fsgid;
++ struct group_info *group_info;
++ kernel_cap_t cap_effective, cap_inheritable, cap_permitted, cap_bset;
++ struct user_struct *user;
++ unsigned securebits;
++#ifdef CONFIG_KEYS
++ unsigned char jit_keyring; /* default keyring to attach requested keys to */
++ struct key *request_key_auth; /* assumed request_key authority */
++ struct key *thread_keyring; /* keyring private to this thread */
++#endif
++ char comm[TASK_COMM_LEN]; /* executable name excluding path
++ - access with [gs]et_task_comm (which lock
++ it with task_lock())
++ - initialized normally by flush_old_exec */
++/* file system info */
++ int link_count, total_link_count;
++#ifdef CONFIG_SYSVIPC
++/* ipc stuff */
++ struct sysv_sem sysvsem;
++#endif
++#ifdef CONFIG_DETECT_SOFTLOCKUP
++/* hung task detection */
++ unsigned long last_switch_timestamp;
++ unsigned long last_switch_count;
++#endif
++/* CPU-specific state of this task */
++ struct thread_struct thread;
++/* filesystem information */
++ struct fs_struct *fs;
++/* open file information */
++ struct files_struct *files;
++/* namespaces */
++ struct nsproxy *nsproxy;
++/* signal handlers */
++ struct signal_struct *signal;
++ struct sighand_struct *sighand;
++
++ sigset_t blocked, real_blocked;
++ sigset_t saved_sigmask; /* restored if set_restore_sigmask() was used */
++ struct sigpending pending;
++
++ unsigned long sas_ss_sp;
++ size_t sas_ss_size;
++ int (*notifier)(void *priv);
++ void *notifier_data;
++ sigset_t *notifier_mask;
++#ifdef CONFIG_SECURITY
++ void *security;
++#endif
++ struct audit_context *audit_context;
++#ifdef CONFIG_AUDITSYSCALL
++ uid_t loginuid;
++ unsigned int sessionid;
++#endif
++ seccomp_t seccomp;
++
++/* vserver context data */
++ struct vx_info *vx_info;
++ struct nx_info *nx_info;
++
++ xid_t xid;
++ nid_t nid;
++ tag_t tag;
++
++/* Thread group tracking */
++ u32 parent_exec_id;
++ u32 self_exec_id;
++/* Protection of (de-)allocation: mm, files, fs, tty, keyrings */
++ spinlock_t alloc_lock;
++
++ /* Protection of the PI data structures: */
++ spinlock_t pi_lock;
++
++#ifdef CONFIG_RT_MUTEXES
++ /* PI waiters blocked on a rt_mutex held by this task */
++ struct plist_head pi_waiters;
++ /* Deadlock detection and priority inheritance handling */
++ struct rt_mutex_waiter *pi_blocked_on;
++#endif
++
++#ifdef CONFIG_DEBUG_MUTEXES
++ /* mutex deadlock detection */
++ struct mutex_waiter *blocked_on;
++#endif
++#ifdef CONFIG_TRACE_IRQFLAGS
++ unsigned int irq_events;
++ int hardirqs_enabled;
++ unsigned long hardirq_enable_ip;
++ unsigned int hardirq_enable_event;
++ unsigned long hardirq_disable_ip;
++ unsigned int hardirq_disable_event;
++ int softirqs_enabled;
++ unsigned long softirq_disable_ip;
++ unsigned int softirq_disable_event;
++ unsigned long softirq_enable_ip;
++ unsigned int softirq_enable_event;
++ int hardirq_context;
++ int softirq_context;
++#endif
++#ifdef CONFIG_LOCKDEP
++# define MAX_LOCK_DEPTH 48UL
++ u64 curr_chain_key;
++ int lockdep_depth;
++ unsigned int lockdep_recursion;
++ struct held_lock held_locks[MAX_LOCK_DEPTH];
++#endif
++
++/* journalling filesystem info */
++ void *journal_info;
++
++/* stacked block device info */
++ struct bio *bio_list, **bio_tail;
++
++/* VM state */
++ struct reclaim_state *reclaim_state;
++
++ struct backing_dev_info *backing_dev_info;
++
++ struct io_context *io_context;
++
++ unsigned long ptrace_message;
++ siginfo_t *last_siginfo; /* For ptrace use. */
++ struct task_io_accounting ioac;
++#if defined(CONFIG_TASK_XACCT)
++ u64 acct_rss_mem1; /* accumulated rss usage */
++ u64 acct_vm_mem1; /* accumulated virtual memory usage */
++ cputime_t acct_timexpd; /* stime + utime since last update */
++#endif
++#ifdef CONFIG_CPUSETS
++ nodemask_t mems_allowed;
++ int cpuset_mems_generation;
++ int cpuset_mem_spread_rotor;
++#endif
++#ifdef CONFIG_CGROUPS
++ /* Control Group info protected by css_set_lock */
++ struct css_set *cgroups;
++ /* cg_list protected by css_set_lock and tsk->alloc_lock */
++ struct list_head cg_list;
++#endif
++#ifdef CONFIG_FUTEX
++ struct robust_list_head __user *robust_list;
++#ifdef CONFIG_COMPAT
++ struct compat_robust_list_head __user *compat_robust_list;
++#endif
++ struct list_head pi_state_list;
++ struct futex_pi_state *pi_state_cache;
++#endif
++#ifdef CONFIG_NUMA
++ struct mempolicy *mempolicy;
++ short il_next;
++#endif
++ atomic_t fs_excl; /* holding fs exclusive resources */
++ struct rcu_head rcu;
++
++ struct list_head *scm_work_list;
++
++/*
++ * cache last used pipe for splice
++ */
++ struct pipe_inode_info *splice_pipe;
++#ifdef CONFIG_TASK_DELAY_ACCT
++ struct task_delay_info *delays;
++#endif
++#ifdef CONFIG_FAULT_INJECTION
++ int make_it_fail;
++#endif
++ struct prop_local_single dirties;
++#ifdef CONFIG_LATENCYTOP
++ int latency_record_count;
++ struct latency_record latency_record[LT_SAVECOUNT];
++#endif
++};
++
++/*
++ * Priority of a process goes from 0..MAX_PRIO-1, valid RT
++ * priority is 0..MAX_RT_PRIO-1, and SCHED_NORMAL/SCHED_BATCH
++ * tasks are in the range MAX_RT_PRIO..MAX_PRIO-1. Priority
++ * values are inverted: lower p->prio value means higher priority.
++ *
++ * The MAX_USER_RT_PRIO value allows the actual maximum
++ * RT priority to be separate from the value exported to
++ * user-space. This allows kernel threads to set their
++ * priority to a value higher than any user task. Note:
++ * MAX_RT_PRIO must not be smaller than MAX_USER_RT_PRIO.
++ */
++
++#define MAX_USER_RT_PRIO 100
++#define MAX_RT_PRIO MAX_USER_RT_PRIO
++
++#define MAX_PRIO (MAX_RT_PRIO + 40)
++#define DEFAULT_PRIO (MAX_RT_PRIO + 20)
++
++static inline int rt_prio(int prio)
++{
++ if (unlikely(prio < MAX_RT_PRIO))
++ return 1;
++ return 0;
++}
++
++static inline int rt_task(struct task_struct *p)
++{
++ return rt_prio(p->prio);
++}
++
++static inline void set_task_session(struct task_struct *tsk, pid_t session)
++{
++ tsk->signal->__session = session;
++}
++
++static inline void set_task_pgrp(struct task_struct *tsk, pid_t pgrp)
++{
++ tsk->signal->__pgrp = pgrp;
++}
++
++static inline struct pid *task_pid(struct task_struct *task)
++{
++ return task->pids[PIDTYPE_PID].pid;
++}
++
++static inline struct pid *task_tgid(struct task_struct *task)
++{
++ return task->group_leader->pids[PIDTYPE_PID].pid;
++}
++
++static inline struct pid *task_pgrp(struct task_struct *task)
++{
++ return task->group_leader->pids[PIDTYPE_PGID].pid;
++}
++
++static inline struct pid *task_session(struct task_struct *task)
++{
++ return task->group_leader->pids[PIDTYPE_SID].pid;
++}
++
++struct pid_namespace;
++
++/*
++ * the helpers to get the task's different pids as they are seen
++ * from various namespaces
++ *
++ * task_xid_nr() : global id, i.e. the id seen from the init namespace;
++ * task_xid_vnr() : virtual id, i.e. the id seen from the pid namespace of
++ * current.
++ * task_xid_nr_ns() : id seen from the ns specified;
++ *
++ * set_task_vxid() : assigns a virtual id to a task;
++ *
++ * see also pid_nr() etc in include/linux/pid.h
++ */
++
++#include <linux/vserver/base.h>
++#include <linux/vserver/context.h>
++#include <linux/vserver/debug.h>
++#include <linux/vserver/pid.h>
++
++static inline pid_t task_pid_nr(struct task_struct *tsk)
++{
++ return tsk->pid;
++}
++
++pid_t task_pid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);
++
++static inline pid_t task_pid_vnr(struct task_struct *tsk)
++{
++ return vx_map_pid(pid_vnr(task_pid(tsk)));
++}
++
++
++static inline pid_t task_tgid_nr(struct task_struct *tsk)
++{
++ return tsk->tgid;
++}
++
++pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);
++
++static inline pid_t task_tgid_vnr(struct task_struct *tsk)
++{
++ return vx_map_tgid(pid_vnr(task_tgid(tsk)));
++}
++
++
++static inline pid_t task_pgrp_nr(struct task_struct *tsk)
++{
++ return tsk->signal->__pgrp;
++}
++
++pid_t task_pgrp_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);
++
++static inline pid_t task_pgrp_vnr(struct task_struct *tsk)
++{
++ return pid_vnr(task_pgrp(tsk));
++}
++
++
++static inline pid_t task_session_nr(struct task_struct *tsk)
++{
++ return tsk->signal->__session;
++}
++
++pid_t task_session_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);
++
++static inline pid_t task_session_vnr(struct task_struct *tsk)
++{
++ return pid_vnr(task_session(tsk));
++}
++
++
++/**
++ * pid_alive - check that a task structure is not stale
++ * @p: Task structure to be checked.
++ *
++ * Test if a process is not yet dead (at most zombie state)
++ * If pid_alive fails, then pointers within the task structure
++ * can be stale and must not be dereferenced.
++ */
++static inline int pid_alive(struct task_struct *p)
++{
++ return p->pids[PIDTYPE_PID].pid != NULL;
++}
++
++/**
++ * is_global_init - check if a task structure is init
++ * @tsk: Task structure to be checked.
++ *
++ * Check if a task structure is the first user space task the kernel created.
++ */
++static inline int is_global_init(struct task_struct *tsk)
++{
++ return tsk->pid == 1;
++}
++
++/*
++ * is_container_init:
++ * check whether in the task is init in its own pid namespace.
++ */
++extern int is_container_init(struct task_struct *tsk);
++
++extern struct pid *cad_pid;
++
++extern void free_task(struct task_struct *tsk);
++#define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)
++
++extern void __put_task_struct(struct task_struct *t);
++
++static inline void put_task_struct(struct task_struct *t)
++{
++ if (atomic_dec_and_test(&t->usage))
++ __put_task_struct(t);
++}
++
++extern cputime_t task_utime(struct task_struct *p);
++extern cputime_t task_stime(struct task_struct *p);
++extern cputime_t task_gtime(struct task_struct *p);
++
++/*
++ * Per process flags
++ */
++#define PF_ALIGNWARN 0x00000001 /* Print alignment warning msgs */
++ /* Not implemented yet, only for 486*/
++#define PF_STARTING 0x00000002 /* being created */
++#define PF_EXITING 0x00000004 /* getting shut down */
++#define PF_EXITPIDONE 0x00000008 /* pi exit done on shut down */
++#define PF_VCPU 0x00000010 /* I'm a virtual CPU */
++#define PF_FORKNOEXEC 0x00000040 /* forked but didn't exec */
++#define PF_SUPERPRIV 0x00000100 /* used super-user privileges */
++#define PF_DUMPCORE 0x00000200 /* dumped core */
++#define PF_SIGNALED 0x00000400 /* killed by a signal */
++#define PF_MEMALLOC 0x00000800 /* Allocating memory */
++#define PF_FLUSHER 0x00001000 /* responsible for disk writeback */
++#define PF_USED_MATH 0x00002000 /* if unset the fpu must be initialized before use */
++#define PF_NOFREEZE 0x00008000 /* this thread should not be frozen */
++#define PF_FROZEN 0x00010000 /* frozen for system suspend */
++#define PF_FSTRANS 0x00020000 /* inside a filesystem transaction */
++#define PF_KSWAPD 0x00040000 /* I am kswapd */
++#define PF_SWAPOFF 0x00080000 /* I am in swapoff */
++#define PF_LESS_THROTTLE 0x00100000 /* Throttle me less: I clean memory */
++#define PF_KTHREAD 0x00200000 /* I am a kernel thread */
++#define PF_RANDOMIZE 0x00400000 /* randomize virtual address space */
++#define PF_SWAPWRITE 0x00800000 /* Allowed to write to swap */
++#define PF_SPREAD_PAGE 0x01000000 /* Spread page cache over cpuset */
++#define PF_SPREAD_SLAB 0x02000000 /* Spread some slab caches over cpuset */
++#define PF_THREAD_BOUND 0x04000000 /* Thread bound to specific cpu */
++#define PF_MEMPOLICY 0x10000000 /* Non-default NUMA mempolicy */
++#define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */
++#define PF_FREEZER_SKIP 0x40000000 /* Freezer should not count it as freezeable */
++#define PF_FREEZER_NOSIG 0x80000000 /* Freezer won't send signals to it */
++
++/*
++ * Only the _current_ task can read/write to tsk->flags, but other
++ * tasks can access tsk->flags in readonly mode for example
++ * with tsk_used_math (like during threaded core dumping).
++ * There is however an exception to this rule during ptrace
++ * or during fork: the ptracer task is allowed to write to the
++ * child->flags of its traced child (same goes for fork, the parent
++ * can write to the child->flags), because we're guaranteed the
++ * child is not running and in turn not changing child->flags
++ * at the same time the parent does it.
++ */
++#define clear_stopped_child_used_math(child) do { (child)->flags &= ~PF_USED_MATH; } while (0)
++#define set_stopped_child_used_math(child) do { (child)->flags |= PF_USED_MATH; } while (0)
++#define clear_used_math() clear_stopped_child_used_math(current)
++#define set_used_math() set_stopped_child_used_math(current)
++#define conditional_stopped_child_used_math(condition, child) \
++ do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0)
++#define conditional_used_math(condition) \
++ conditional_stopped_child_used_math(condition, current)
++#define copy_to_stopped_child_used_math(child) \
++ do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0)
++/* NOTE: this will return 0 or PF_USED_MATH, it will never return 1 */
++#define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
++#define used_math() tsk_used_math(current)
++
++#ifdef CONFIG_SMP
++extern int set_cpus_allowed_ptr(struct task_struct *p,
++ const cpumask_t *new_mask);
++#else
++static inline int set_cpus_allowed_ptr(struct task_struct *p,
++ const cpumask_t *new_mask)
++{
++ if (!cpu_isset(0, *new_mask))
++ return -EINVAL;
++ return 0;
++}
++#endif
++static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
++{
++ return set_cpus_allowed_ptr(p, &new_mask);
++}
++
++extern unsigned long long sched_clock(void);
++
++extern void sched_clock_init(void);
++extern u64 sched_clock_cpu(int cpu);
++
++#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
++static inline void sched_clock_tick(void)
++{
++}
++
++static inline void sched_clock_idle_sleep_event(void)
++{
++}
++
++static inline void sched_clock_idle_wakeup_event(u64 delta_ns)
++{
++}
++#else
++extern void sched_clock_tick(void);
++extern void sched_clock_idle_sleep_event(void);
++extern void sched_clock_idle_wakeup_event(u64 delta_ns);
++#endif
++
++/*
++ * For kernel-internal use: high-speed (but slightly incorrect) per-cpu
++ * clock constructed from sched_clock():
++ */
++extern unsigned long long cpu_clock(int cpu);
++
++extern unsigned long long
++task_sched_runtime(struct task_struct *task);
++
++/* sched_exec is called by processes performing an exec */
++#ifdef CONFIG_SMP
++extern void sched_exec(void);
++#else
++#define sched_exec() {}
++#endif
++
++extern void sched_clock_idle_sleep_event(void);
++extern void sched_clock_idle_wakeup_event(u64 delta_ns);
++
++#ifdef CONFIG_HOTPLUG_CPU
++extern void idle_task_exit(void);
++#else
++static inline void idle_task_exit(void) {}
++#endif
++
++extern void sched_idle_next(void);
++
++#if defined(CONFIG_NO_HZ) && defined(CONFIG_SMP)
++extern void wake_up_idle_cpu(int cpu);
++#else
++static inline void wake_up_idle_cpu(int cpu) { }
++#endif
++
++#ifdef CONFIG_SCHED_DEBUG
++extern unsigned int sysctl_sched_latency;
++extern unsigned int sysctl_sched_min_granularity;
++extern unsigned int sysctl_sched_wakeup_granularity;
++extern unsigned int sysctl_sched_child_runs_first;
++extern unsigned int sysctl_sched_features;
++extern unsigned int sysctl_sched_migration_cost;
++extern unsigned int sysctl_sched_nr_migrate;
++extern unsigned int sysctl_sched_shares_ratelimit;
++
++int sched_nr_latency_handler(struct ctl_table *table, int write,
++ struct file *file, void __user *buffer, size_t *length,
++ loff_t *ppos);
++#endif
++extern unsigned int sysctl_sched_rt_period;
++extern int sysctl_sched_rt_runtime;
++
++int sched_rt_handler(struct ctl_table *table, int write,
++ struct file *filp, void __user *buffer, size_t *lenp,
++ loff_t *ppos);
++
++extern unsigned int sysctl_sched_compat_yield;
++
++#ifdef CONFIG_RT_MUTEXES
++extern int rt_mutex_getprio(struct task_struct *p);
++extern void rt_mutex_setprio(struct task_struct *p, int prio);
++extern void rt_mutex_adjust_pi(struct task_struct *p);
++#else
++static inline int rt_mutex_getprio(struct task_struct *p)
++{
++ return p->normal_prio;
++}
++# define rt_mutex_adjust_pi(p) do { } while (0)
++#endif
++
++extern void set_user_nice(struct task_struct *p, long nice);
++extern int task_prio(const struct task_struct *p);
++extern int task_nice(const struct task_struct *p);
++extern int can_nice(const struct task_struct *p, const int nice);
++extern int task_curr(const struct task_struct *p);
++extern int idle_cpu(int cpu);
++extern int sched_setscheduler(struct task_struct *, int, struct sched_param *);
++extern int sched_setscheduler_nocheck(struct task_struct *, int,
++ struct sched_param *);
++extern struct task_struct *idle_task(int cpu);
++extern struct task_struct *curr_task(int cpu);
++extern void set_curr_task(int cpu, struct task_struct *p);
++
++void yield(void);
++
++/*
++ * The default (Linux) execution domain.
++ */
++extern struct exec_domain default_exec_domain;
++
++union thread_union {
++ struct thread_info thread_info;
++ unsigned long stack[THREAD_SIZE/sizeof(long)];
++};
++
++#ifndef __HAVE_ARCH_KSTACK_END
++static inline int kstack_end(void *addr)
++{
++ /* Reliable end of stack detection:
++ * Some APM bios versions misalign the stack
++ */
++ return !(((unsigned long)addr+sizeof(void*)-1) & (THREAD_SIZE-sizeof(void*)));
++}
++#endif
++
++extern union thread_union init_thread_union;
++extern struct task_struct init_task;
++
++extern struct mm_struct init_mm;
++
++extern struct pid_namespace init_pid_ns;
++
++/*
++ * find a task by one of its numerical ids
++ *
++ * find_task_by_pid_type_ns():
++ * it is the most generic call - it finds a task by all id,
++ * type and namespace specified
++ * find_task_by_pid_ns():
++ * finds a task by its pid in the specified namespace
++ * find_task_by_vpid():
++ * finds a task by its virtual pid
++ *
++ * see also find_vpid() etc in include/linux/pid.h
++ */
++
++extern struct task_struct *find_task_by_pid_type_ns(int type, int pid,
++ struct pid_namespace *ns);
++
++extern struct task_struct *find_task_by_vpid(pid_t nr);
++extern struct task_struct *find_task_by_pid_ns(pid_t nr,
++ struct pid_namespace *ns);
++
++extern void __set_special_pids(struct pid *pid);
++
++/* per-UID process charging. */
++extern struct user_struct * alloc_uid(struct user_namespace *, uid_t);
++static inline struct user_struct *get_uid(struct user_struct *u)
++{
++ atomic_inc(&u->__count);
++ return u;
++}
++extern void free_uid(struct user_struct *);
++extern void switch_uid(struct user_struct *);
++extern void release_uids(struct user_namespace *ns);
++
++#include <asm/current.h>
++
++extern void do_timer(unsigned long ticks);
++
++extern int wake_up_state(struct task_struct *tsk, unsigned int state);
++extern int wake_up_process(struct task_struct *tsk);
++extern void wake_up_new_task(struct task_struct *tsk,
++ unsigned long clone_flags);
++#ifdef CONFIG_SMP
++ extern void kick_process(struct task_struct *tsk);
++#else
++ static inline void kick_process(struct task_struct *tsk) { }
++#endif
++extern void sched_fork(struct task_struct *p, int clone_flags);
++extern void sched_dead(struct task_struct *p);
++
++extern int in_group_p(gid_t);
++extern int in_egroup_p(gid_t);
++
++extern void proc_caches_init(void);
++extern void flush_signals(struct task_struct *);
++extern void ignore_signals(struct task_struct *);
++extern void flush_signal_handlers(struct task_struct *, int force_default);
++extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info);
++
++static inline int dequeue_signal_lock(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
++{
++ unsigned long flags;
++ int ret;
++
++ spin_lock_irqsave(&tsk->sighand->siglock, flags);
++ ret = dequeue_signal(tsk, mask, info);
++ spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
++
++ return ret;
++}
++
++extern void block_all_signals(int (*notifier)(void *priv), void *priv,
++ sigset_t *mask);
++extern void unblock_all_signals(void);
++extern void release_task(struct task_struct * p);
++extern int send_sig_info(int, struct siginfo *, struct task_struct *);
++extern int force_sigsegv(int, struct task_struct *);
++extern int force_sig_info(int, struct siginfo *, struct task_struct *);
++extern int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp);
++extern int kill_pid_info(int sig, struct siginfo *info, struct pid *pid);
++extern int kill_pid_info_as_uid(int, struct siginfo *, struct pid *, uid_t, uid_t, u32);
++extern int kill_pgrp(struct pid *pid, int sig, int priv);
++extern int kill_pid(struct pid *pid, int sig, int priv);
++extern int kill_proc_info(int, struct siginfo *, pid_t);
++extern int do_notify_parent(struct task_struct *, int);
++extern void force_sig(int, struct task_struct *);
++extern void force_sig_specific(int, struct task_struct *);
++extern int send_sig(int, struct task_struct *, int);
++extern void zap_other_threads(struct task_struct *p);
++extern struct sigqueue *sigqueue_alloc(void);
++extern void sigqueue_free(struct sigqueue *);
++extern int send_sigqueue(struct sigqueue *, struct task_struct *, int group);
++extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *);
++extern int do_sigaltstack(const stack_t __user *, stack_t __user *, unsigned long);
++
++static inline int kill_cad_pid(int sig, int priv)
++{
++ return kill_pid(cad_pid, sig, priv);
++}
++
++/* These can be the second arg to send_sig_info/send_group_sig_info. */
++#define SEND_SIG_NOINFO ((struct siginfo *) 0)
++#define SEND_SIG_PRIV ((struct siginfo *) 1)
++#define SEND_SIG_FORCED ((struct siginfo *) 2)
++
++static inline int is_si_special(const struct siginfo *info)
++{
++ return info <= SEND_SIG_FORCED;
++}
++
++/* True if we are on the alternate signal stack. */
++
++static inline int on_sig_stack(unsigned long sp)
++{
++ return (sp - current->sas_ss_sp < current->sas_ss_size);
++}
++
++static inline int sas_ss_flags(unsigned long sp)
++{
++ return (current->sas_ss_size == 0 ? SS_DISABLE
++ : on_sig_stack(sp) ? SS_ONSTACK : 0);
++}
++
++/*
++ * Routines for handling mm_structs
++ */
++extern struct mm_struct * mm_alloc(void);
++
++/* mmdrop drops the mm and the page tables */
++extern void __mmdrop(struct mm_struct *);
++static inline void mmdrop(struct mm_struct * mm)
++{
++ if (unlikely(atomic_dec_and_test(&mm->mm_count)))
++ __mmdrop(mm);
++}
++
++/* mmput gets rid of the mappings and all user-space */
++extern void mmput(struct mm_struct *);
++/* Grab a reference to a task's mm, if it is not already going away */
++extern struct mm_struct *get_task_mm(struct task_struct *task);
++/* Remove the current tasks stale references to the old mm_struct */
++extern void mm_release(struct task_struct *, struct mm_struct *);
++/* Allocate a new mm structure and copy contents from tsk->mm */
++extern struct mm_struct *dup_mm(struct task_struct *tsk);
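++
++/*
++ * Minimal usage sketch for the helpers above (hypothetical function, shown
++ * only to make the reference-counting contract concrete): get_task_mm()
++ * pins the mm so it cannot disappear while we look at it, and mmput()
++ * releases that reference, possibly freeing the mappings.
++ */
++static inline int example_task_has_mm(struct task_struct *task)
++{
++ struct mm_struct *mm = get_task_mm(task);
++
++ if (!mm)
++ return 0; /* kernel thread, or the mm is already going away */
++ /* ... it is safe to inspect *mm here, the reference keeps it alive ... */
++ mmput(mm); /* drop the reference taken by get_task_mm() */
++ return 1;
++}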
++
++extern int copy_thread(int, unsigned long, unsigned long, unsigned long, struct task_struct *, struct pt_regs *);
++extern void flush_thread(void);
++extern void exit_thread(void);
++
++extern void exit_files(struct task_struct *);
++extern void __cleanup_signal(struct signal_struct *);
++extern void __cleanup_sighand(struct sighand_struct *);
++
++extern void exit_itimers(struct signal_struct *);
++extern void flush_itimer_signals(void);
++
++extern NORET_TYPE void do_group_exit(int);
++
++extern void daemonize(const char *, ...);
++extern int allow_signal(int);
++extern int disallow_signal(int);
++
++extern int do_execve(char *, char __user * __user *, char __user * __user *, struct pt_regs *);
++extern long do_fork(unsigned long, unsigned long, struct pt_regs *, unsigned long, int __user *, int __user *);
++struct task_struct *fork_idle(int);
++
++extern void set_task_comm(struct task_struct *tsk, char *from);
++extern char *get_task_comm(char *to, struct task_struct *tsk);
++
++#ifdef CONFIG_SMP
++extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
++#else
++static inline unsigned long wait_task_inactive(struct task_struct *p,
++ long match_state)
++{
++ return 1;
++}
++#endif
++
++#define next_task(p) list_entry(rcu_dereference((p)->tasks.next), struct task_struct, tasks)
++
++#define for_each_process(p) \
++ for (p = &init_task ; (p = next_task(p)) != &init_task ; )
++
++/*
++ * Careful: do_each_thread/while_each_thread is a double loop so
++ * 'break' will not work as expected - use goto instead.
++ */
++#define do_each_thread(g, t) \
++ for (g = t = &init_task ; (g = t = next_task(g)) != &init_task ; ) do
++
++#define while_each_thread(g, t) \
++ while ((t = next_thread(t)) != g)
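++
++/*
++ * Sketch of how the iterators above are meant to be used (hypothetical
++ * helper; assumes read_lock(&tasklist_lock) is enough to keep the task
++ * lists stable for the duration of the walk).
++ */
++static inline int example_count_all_threads(void)
++{
++ struct task_struct *g, *t;
++ int nr = 0;
++
++ read_lock(&tasklist_lock);
++ do_each_thread(g, t) {
++ nr++; /* visits every thread of every process exactly once */
++ } while_each_thread(g, t);
++ read_unlock(&tasklist_lock);
++
++ return nr;
++}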
++
++/* de_thread depends on thread_group_leader not being a pid based check */
++#define thread_group_leader(p) (p == p->group_leader)
++
++/* Due to the insanities of de_thread it is possible for a process
++ * to have the pid of the thread group leader without actually being
++ * the thread group leader. For iteration through the pids in proc
++ * all we care about is that we have a task with the appropriate
++ * pid, we don't actually care if we have the right task.
++ */
++static inline int has_group_leader_pid(struct task_struct *p)
++{
++ return p->pid == p->tgid;
++}
++
++static inline
++int same_thread_group(struct task_struct *p1, struct task_struct *p2)
++{
++ return p1->tgid == p2->tgid;
++}
++
++static inline struct task_struct *next_thread(const struct task_struct *p)
++{
++ return list_entry(rcu_dereference(p->thread_group.next),
++ struct task_struct, thread_group);
++}
++
++static inline int thread_group_empty(struct task_struct *p)
++{
++ return list_empty(&p->thread_group);
++}
++
++#define delay_group_leader(p) \
++ (thread_group_leader(p) && !thread_group_empty(p))
++
++/*
++ * Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring
++ * subscriptions and synchronises with wait4(). Also used in procfs. Also
++ * pins the final release of task.io_context. Also protects ->cpuset and
++ * ->cgroup.subsys[].
++ *
++ * Nests both inside and outside of read_lock(&tasklist_lock).
++ * It must not be nested with write_lock_irq(&tasklist_lock),
++ * neither inside nor outside.
++ */
++static inline void task_lock(struct task_struct *p)
++{
++ spin_lock(&p->alloc_lock);
++}
++
++static inline void task_unlock(struct task_struct *p)
++{
++ spin_unlock(&p->alloc_lock);
++}
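++
++/*
++ * Sketch of task_lock() usage (hypothetical helper): per the comment above,
++ * the lock stabilises ->mm, so it is safe to compare it against current->mm
++ * while the lock is held.
++ */
++static inline int example_shares_mm_with_current(struct task_struct *p)
++{
++ int same;
++
++ task_lock(p);
++ same = (p->mm != NULL && p->mm == current->mm);
++ task_unlock(p);
++
++ return same;
++}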
++
++extern struct sighand_struct *lock_task_sighand(struct task_struct *tsk,
++ unsigned long *flags);
++
++static inline void unlock_task_sighand(struct task_struct *tsk,
++ unsigned long *flags)
++{
++ spin_unlock_irqrestore(&tsk->sighand->siglock, *flags);
++}
++
++#ifndef __HAVE_THREAD_FUNCTIONS
++
++#define task_thread_info(task) ((struct thread_info *)(task)->stack)
++#define task_stack_page(task) ((task)->stack)
++
++static inline void setup_thread_stack(struct task_struct *p, struct task_struct *org)
++{
++ *task_thread_info(p) = *task_thread_info(org);
++ task_thread_info(p)->task = p;
++}
++
++static inline unsigned long *end_of_stack(struct task_struct *p)
++{
++ return (unsigned long *)(task_thread_info(p) + 1);
++}
++
++#endif
++
++static inline int object_is_on_stack(void *obj)
++{
++ void *stack = task_stack_page(current);
++
++ return (obj >= stack) && (obj < (stack + THREAD_SIZE));
++}
++
++extern void thread_info_cache_init(void);
++
++/* set thread flags in other task's structures
++ * - see asm/thread_info.h for TIF_xxxx flags available
++ */
++static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag)
++{
++ set_ti_thread_flag(task_thread_info(tsk), flag);
++}
++
++static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag)
++{
++ clear_ti_thread_flag(task_thread_info(tsk), flag);
++}
++
++static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
++{
++ return test_and_set_ti_thread_flag(task_thread_info(tsk), flag);
++}
++
++static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag)
++{
++ return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag);
++}
++
++static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
++{
++ return test_ti_thread_flag(task_thread_info(tsk), flag);
++}
++
++static inline void set_tsk_need_resched(struct task_struct *tsk)
++{
++ set_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
++}
++
++static inline void clear_tsk_need_resched(struct task_struct *tsk)
++{
++ clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
++}
++
++static inline int test_tsk_need_resched(struct task_struct *tsk)
++{
++ return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
++}
++
++static inline int signal_pending(struct task_struct *p)
++{
++ return unlikely(test_tsk_thread_flag(p,TIF_SIGPENDING));
++}
++
++extern int __fatal_signal_pending(struct task_struct *p);
++
++static inline int fatal_signal_pending(struct task_struct *p)
++{
++ return signal_pending(p) && __fatal_signal_pending(p);
++}
++
++static inline int signal_pending_state(long state, struct task_struct *p)
++{
++ if (!(state & (TASK_INTERRUPTIBLE | TASK_WAKEKILL)))
++ return 0;
++ if (!signal_pending(p))
++ return 0;
++
++ return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p);
++}
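++
++/*
++ * Sketch of the usual way signal_pending() is consumed (hypothetical wait
++ * loop over a caller-provided flag): an interruptible sleep must bail out
++ * with -ERESTARTSYS once a signal is queued, otherwise the task would keep
++ * sleeping with TIF_SIGPENDING set.
++ */
++static inline int example_wait_for_flag(volatile int *flag)
++{
++ for (;;) {
++ set_current_state(TASK_INTERRUPTIBLE);
++ if (*flag)
++ break;
++ if (signal_pending(current)) {
++ __set_current_state(TASK_RUNNING);
++ return -ERESTARTSYS; /* let the signal be delivered instead */
++ }
++ schedule();
++ }
++ __set_current_state(TASK_RUNNING);
++ return 0;
++}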
++
++static inline int need_resched(void)
++{
++ return unlikely(test_thread_flag(TIF_NEED_RESCHED));
++}
++
++/*
++ * cond_resched() and cond_resched_lock(): latency reduction via
++ * explicit rescheduling in places that are safe. The return
++ * value indicates whether a reschedule was done in fact.
++ * cond_resched_lock() will drop the spinlock before scheduling,
++ * cond_resched_softirq() will enable bhs before scheduling.
++ */
++extern int _cond_resched(void);
++#ifdef CONFIG_PREEMPT_BKL
++static inline int cond_resched(void)
++{
++ return 0;
++}
++#else
++static inline int cond_resched(void)
++{
++ return _cond_resched();
++}
++#endif
++extern int cond_resched_lock(spinlock_t * lock);
++extern int cond_resched_softirq(void);
++static inline int cond_resched_bkl(void)
++{
++ return _cond_resched();
++}
++
++/*
++ * Does a critical section need to be broken due to another
++ * task waiting?: (technically does not depend on CONFIG_PREEMPT,
++ * but a general need for low latency)
++ */
++static inline int spin_needbreak(spinlock_t *lock)
++{
++#ifdef CONFIG_PREEMPT
++ return spin_is_contended(lock);
++#else
++ return 0;
++#endif
++}
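++
++/*
++ * Sketch of the pattern the helpers above support (hypothetical list walk,
++ * assuming "lock" protects "head"): a long traversal under a spinlock
++ * periodically offers to drop the lock and reschedule, which is exactly
++ * the contention that spin_needbreak() probes for inside
++ * cond_resched_lock().
++ */
++static inline void example_walk_long_list(spinlock_t *lock,
++ struct list_head *head)
++{
++ struct list_head *pos;
++
++ spin_lock(lock);
++ list_for_each(pos, head) {
++ /* ... process one entry ... */
++ if (cond_resched_lock(lock)) {
++ /* the lock was dropped and retaken: restart the walk */
++ pos = head;
++ }
++ }
++ spin_unlock(lock);
++}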
++
++/*
++ * Reevaluate whether the task has signals pending delivery.
++ * Wake the task if so.
++ * This is required every time the blocked sigset_t changes.
++ * callers must hold sighand->siglock.
++ */
++extern void recalc_sigpending_and_wake(struct task_struct *t);
++extern void recalc_sigpending(void);
++
++extern void signal_wake_up(struct task_struct *t, int resume_stopped);
++
++/*
++ * Wrappers for p->thread_info->cpu access. No-op on UP.
++ */
++#ifdef CONFIG_SMP
++
++static inline unsigned int task_cpu(const struct task_struct *p)
++{
++ return task_thread_info(p)->cpu;
++}
++
++extern void set_task_cpu(struct task_struct *p, unsigned int cpu);
++
++#else
++
++static inline unsigned int task_cpu(const struct task_struct *p)
++{
++ return 0;
++}
++
++static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
++{
++}
++
++#endif /* CONFIG_SMP */
++
++extern void arch_pick_mmap_layout(struct mm_struct *mm);
++
++#ifdef CONFIG_TRACING
++extern void
++__trace_special(void *__tr, void *__data,
++ unsigned long arg1, unsigned long arg2, unsigned long arg3);
++#else
++static inline void
++__trace_special(void *__tr, void *__data,
++ unsigned long arg1, unsigned long arg2, unsigned long arg3)
++{
++}
++#endif
++
++extern long sched_setaffinity(pid_t pid, const cpumask_t *new_mask);
++extern long sched_getaffinity(pid_t pid, cpumask_t *mask);
++
++extern int sched_mc_power_savings, sched_smt_power_savings;
++
++extern void normalize_rt_tasks(void);
++
++#ifdef CONFIG_GROUP_SCHED
++
++extern struct task_group init_task_group;
++#ifdef CONFIG_USER_SCHED
++extern struct task_group root_task_group;
++#endif
++
++extern struct task_group *sched_create_group(struct task_group *parent);
++extern void sched_destroy_group(struct task_group *tg);
++extern void sched_move_task(struct task_struct *tsk);
++#ifdef CONFIG_FAIR_GROUP_SCHED
++extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);
++extern unsigned long sched_group_shares(struct task_group *tg);
++#endif
++#ifdef CONFIG_RT_GROUP_SCHED
++extern int sched_group_set_rt_runtime(struct task_group *tg,
++ long rt_runtime_us);
++extern long sched_group_rt_runtime(struct task_group *tg);
++extern int sched_group_set_rt_period(struct task_group *tg,
++ long rt_period_us);
++extern long sched_group_rt_period(struct task_group *tg);
++#endif
++#endif
++
++#ifdef CONFIG_TASK_XACCT
++static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
++{
++ tsk->ioac.rchar += amt;
++}
++
++static inline void add_wchar(struct task_struct *tsk, ssize_t amt)
++{
++ tsk->ioac.wchar += amt;
++}
++
++static inline void inc_syscr(struct task_struct *tsk)
++{
++ tsk->ioac.syscr++;
++}
++
++static inline void inc_syscw(struct task_struct *tsk)
++{
++ tsk->ioac.syscw++;
++}
++#else
++static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
++{
++}
++
++static inline void add_wchar(struct task_struct *tsk, ssize_t amt)
++{
++}
++
++static inline void inc_syscr(struct task_struct *tsk)
++{
++}
++
++static inline void inc_syscw(struct task_struct *tsk)
++{
++}
++#endif
++
++#ifndef TASK_SIZE_OF
++#define TASK_SIZE_OF(tsk) TASK_SIZE
++#endif
++
++#ifdef CONFIG_MM_OWNER
++extern void mm_update_next_owner(struct mm_struct *mm);
++extern void mm_init_owner(struct mm_struct *mm, struct task_struct *p);
++#else
++static inline void mm_update_next_owner(struct mm_struct *mm)
++{
++}
++
++static inline void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
++{
++}
++#endif /* CONFIG_MM_OWNER */
++
++#define TASK_STATE_TO_CHAR_STR "RSDTtZX"
++
++#endif /* __KERNEL__ */
++
++#endif
+diff -Nurb linux-2.6.27-590/include/linux/sched.h.rej linux-2.6.27-591/include/linux/sched.h.rej
+--- linux-2.6.27-590/include/linux/sched.h.rej 1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.27-591/include/linux/sched.h.rej 2010-01-29 15:43:46.000000000 -0500
+@@ -0,0 +1,19 @@
++***************
++*** 850,855 ****
++ #endif
++ unsigned long sleep_avg;
++ unsigned long long timestamp, last_ran;
++ unsigned long long sched_time; /* sched_clock time spent running */
++ enum sleep_type sleep_type;
++
++--- 850,859 ----
++ #endif
++ unsigned long sleep_avg;
++ unsigned long long timestamp, last_ran;
+++ #ifdef CONFIG_CHOPSTIX
+++ unsigned long last_interrupted, last_ran_j;
+++ #endif
+++
++ unsigned long long sched_time; /* sched_clock time spent running */
++ enum sleep_type sleep_type;
++
+diff -Nurb linux-2.6.27-590/kernel/sched.c linux-2.6.27-591/kernel/sched.c
+--- linux-2.6.27-590/kernel/sched.c 2010-01-26 17:49:20.000000000 -0500
++++ linux-2.6.27-591/kernel/sched.c 2010-01-29 15:43:46.000000000 -0500
+@@ -10,7 +10,7 @@
+ * 1998-11-19 Implemented schedule_timeout() and related stuff
+ * by Andrea Arcangeli
+ * 2002-01-04 New ultra-scalable O(1) scheduler by Ingo Molnar:
+- * hybrid priority-list and round-robin design with
++ * hybrid priority-list and round-robin design with
+ * an array-switch method of distributing timeslices
+ * and per-CPU runqueues. Cleanups and useful suggestions
+ * by Davide Libenzi, preemptible kernel bits by Robert Love.
+@@ -79,6 +79,9 @@
+
+ #include "sched_cpupri.h"
+
++#define INTERRUPTIBLE -1
++#define RUNNING 0
++
+ /*
+ * Convert user-nice values [ -20 ... 0 ... 19 ]
+ * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ],
+@@ -5369,6 +5372,7 @@
+ get_task_struct(p);
+ read_unlock(&tasklist_lock);
+
++
+ retval = -EPERM;
+ if ((current->euid != p->euid) && (current->euid != p->uid) &&
+ !capable(CAP_SYS_NICE))
+diff -Nurb linux-2.6.27-590/kernel/sched.c.orig linux-2.6.27-591/kernel/sched.c.orig
+--- linux-2.6.27-590/kernel/sched.c.orig 1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.27-591/kernel/sched.c.orig 2010-01-26 17:49:20.000000000 -0500
+@@ -0,0 +1,9298 @@
++/*
++ * kernel/sched.c
++ *
++ * Kernel scheduler and related syscalls
++ *
++ * Copyright (C) 1991-2002 Linus Torvalds
++ *
++ * 1996-12-23 Modified by Dave Grothe to fix bugs in semaphores and
++ * make semaphores SMP safe
++ * 1998-11-19 Implemented schedule_timeout() and related stuff
++ * by Andrea Arcangeli
++ * 2002-01-04 New ultra-scalable O(1) scheduler by Ingo Molnar:
++ * hybrid priority-list and round-robin design with
++ * an array-switch method of distributing timeslices
++ * and per-CPU runqueues. Cleanups and useful suggestions
++ * by Davide Libenzi, preemptible kernel bits by Robert Love.
++ * 2003-09-03 Interactivity tuning by Con Kolivas.
++ * 2004-04-02 Scheduler domains code by Nick Piggin
++ * 2007-04-15 Work begun on replacing all interactivity tuning with a
++ * fair scheduling design by Con Kolivas.
++ * 2007-05-05 Load balancing (smp-nice) and other improvements
++ * by Peter Williams
++ * 2007-05-06 Interactivity improvements to CFS by Mike Galbraith
++ * 2007-07-01 Group scheduling enhancements by Srivatsa Vaddagiri
++ * 2007-11-29 RT balancing improvements by Steven Rostedt, Gregory Haskins,
++ * Thomas Gleixner, Mike Kravetz
++ */
++
++#include <linux/mm.h>
++#include <linux/module.h>
++#include <linux/nmi.h>
++#include <linux/init.h>
++#include <linux/uaccess.h>
++#include <linux/highmem.h>
++#include <linux/smp_lock.h>
++#include <asm/mmu_context.h>
++#include <linux/interrupt.h>
++#include <linux/capability.h>
++#include <linux/completion.h>
++#include <linux/kernel_stat.h>
++#include <linux/debug_locks.h>
++#include <linux/security.h>
++#include <linux/notifier.h>
++#include <linux/profile.h>
++#include <linux/freezer.h>
++#include <linux/vmalloc.h>
++#include <linux/blkdev.h>
++#include <linux/delay.h>
++#include <linux/pid_namespace.h>
++#include <linux/smp.h>
++#include <linux/threads.h>
++#include <linux/timer.h>
++#include <linux/rcupdate.h>
++#include <linux/cpu.h>
++#include <linux/cpuset.h>
++#include <linux/percpu.h>
++#include <linux/kthread.h>
++#include <linux/seq_file.h>
++#include <linux/sysctl.h>
++#include <linux/syscalls.h>
++#include <linux/times.h>
++#include <linux/tsacct_kern.h>
++#include <linux/kprobes.h>
++#include <linux/delayacct.h>
++#include <linux/reciprocal_div.h>
++#include <linux/unistd.h>
++#include <linux/pagemap.h>
++#include <linux/hrtimer.h>
++#include <linux/tick.h>
++#include <linux/bootmem.h>
++#include <linux/debugfs.h>
++#include <linux/ctype.h>
++#include <linux/ftrace.h>
++#include <linux/vs_sched.h>
++#include <linux/vs_cvirt.h>
++
++#include <asm/tlb.h>
++#include <asm/irq_regs.h>
++
++#include "sched_cpupri.h"
++
++/*
++ * Convert user-nice values [ -20 ... 0 ... 19 ]
++ * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ],
++ * and back.
++ */
++#define NICE_TO_PRIO(nice) (MAX_RT_PRIO + (nice) + 20)
++#define PRIO_TO_NICE(prio) ((prio) - MAX_RT_PRIO - 20)
++#define TASK_NICE(p) PRIO_TO_NICE((p)->static_prio)
++
++/*
++ * 'User priority' is the nice value converted to something we
++ * can work with better when scaling various scheduler parameters,
++ * it's a [ 0 ... 39 ] range.
++ */
++#define USER_PRIO(p) ((p)-MAX_RT_PRIO)
++#define TASK_USER_PRIO(p) USER_PRIO((p)->static_prio)
++#define MAX_USER_PRIO (USER_PRIO(MAX_PRIO))
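++
++/*
++ * Worked example of the mappings above (with the usual MAX_RT_PRIO == 100,
++ * MAX_PRIO == 140): nice 0 maps to static priority 120, nice -20 to 100,
++ * nice +19 to 139. USER_PRIO() folds these back into the 0..39 range, so
++ * TASK_USER_PRIO() of a nice-0 task is 20 and MAX_USER_PRIO is 40.
++ */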
++
++/*
++ * Helpers for converting nanosecond timing to jiffy resolution
++ */
++#define NS_TO_JIFFIES(TIME) ((unsigned long)(TIME) / (NSEC_PER_SEC / HZ))
++
++#define NICE_0_LOAD SCHED_LOAD_SCALE
++#define NICE_0_SHIFT SCHED_LOAD_SHIFT
++
++/*
++ * These are the 'tuning knobs' of the scheduler:
++ *
++ * default timeslice is 100 msecs (used only for SCHED_RR tasks).
++ * Timeslices get refilled after they expire.
++ */
++#define DEF_TIMESLICE (100 * HZ / 1000)
++
++/*
++ * single value that denotes runtime == period, ie unlimited time.
++ */
++#define RUNTIME_INF ((u64)~0ULL)
++
++#ifdef CONFIG_SMP
++/*
++ * Divide a load by a sched group cpu_power : (load / sg->__cpu_power)
++ * Since cpu_power is a 'constant', we can use a reciprocal divide.
++ */
++static inline u32 sg_div_cpu_power(const struct sched_group *sg, u32 load)
++{
++ return reciprocal_divide(load, sg->reciprocal_cpu_power);
++}
++
++/*
++ * Each time a sched group cpu_power is changed,
++ * we must compute its reciprocal value
++ */
++static inline void sg_inc_cpu_power(struct sched_group *sg, u32 val)
++{
++ sg->__cpu_power += val;
++ sg->reciprocal_cpu_power = reciprocal_value(sg->__cpu_power);
++}
++#endif
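++
++/*
++ * Illustration of the reciprocal-divide trick above (hypothetical numbers):
++ * for a group with __cpu_power == 2048, reciprocal_value(2048) is computed
++ * once in sg_inc_cpu_power(), and sg_div_cpu_power(sg, load) then yields
++ * approximately load / 2048 using a multiply and a shift rather than a
++ * hardware divide on every load-balance pass.
++ */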
++
++static inline int rt_policy(int policy)
++{
++ if (unlikely(policy == SCHED_FIFO || policy == SCHED_RR))
++ return 1;
++ return 0;
++}
++
++static inline int task_has_rt_policy(struct task_struct *p)
++{
++ return rt_policy(p->policy);
++}
++
++/*
++ * This is the priority-queue data structure of the RT scheduling class:
++ */
++struct rt_prio_array {
++ DECLARE_BITMAP(bitmap, MAX_RT_PRIO+1); /* include 1 bit for delimiter */
++ struct list_head queue[MAX_RT_PRIO];
++};
++
++struct rt_bandwidth {
++ /* nests inside the rq lock: */
++ spinlock_t rt_runtime_lock;
++ ktime_t rt_period;
++ u64 rt_runtime;
++ struct hrtimer rt_period_timer;
++};
++
++static struct rt_bandwidth def_rt_bandwidth;
++
++static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);
++
++static enum hrtimer_restart sched_rt_period_timer(struct hrtimer *timer)
++{
++ struct rt_bandwidth *rt_b =
++ container_of(timer, struct rt_bandwidth, rt_period_timer);
++ ktime_t now;
++ int overrun;
++ int idle = 0;
++
++ for (;;) {
++ now = hrtimer_cb_get_time(timer);
++ overrun = hrtimer_forward(timer, now, rt_b->rt_period);
++
++ if (!overrun)
++ break;
++
++ idle = do_sched_rt_period_timer(rt_b, overrun);
++ }
++
++ return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
++}
++
++static
++void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
++{
++ rt_b->rt_period = ns_to_ktime(period);
++ rt_b->rt_runtime = runtime;
++
++ spin_lock_init(&rt_b->rt_runtime_lock);
++
++ hrtimer_init(&rt_b->rt_period_timer,
++ CLOCK_MONOTONIC, HRTIMER_MODE_REL);
++ rt_b->rt_period_timer.function = sched_rt_period_timer;
++ rt_b->rt_period_timer.cb_mode = HRTIMER_CB_IRQSAFE_UNLOCKED;
++}
++
++static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
++{
++ ktime_t now;
++
++ if (rt_b->rt_runtime == RUNTIME_INF)
++ return;
++
++ if (hrtimer_active(&rt_b->rt_period_timer))
++ return;
++
++ spin_lock(&rt_b->rt_runtime_lock);
++ for (;;) {
++ if (hrtimer_active(&rt_b->rt_period_timer))
++ break;
++
++ now = hrtimer_cb_get_time(&rt_b->rt_period_timer);
++ hrtimer_forward(&rt_b->rt_period_timer, now, rt_b->rt_period);
++ hrtimer_start(&rt_b->rt_period_timer,
++ rt_b->rt_period_timer.expires,
++ HRTIMER_MODE_ABS);
++ }
++ spin_unlock(&rt_b->rt_runtime_lock);
++}
++
++#ifdef CONFIG_RT_GROUP_SCHED
++static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b)
++{
++ hrtimer_cancel(&rt_b->rt_period_timer);
++}
++#endif
++
++/*
++ * sched_domains_mutex serializes calls to arch_init_sched_domains,
++ * detach_destroy_domains and partition_sched_domains.
++ */
++static DEFINE_MUTEX(sched_domains_mutex);
++
++#ifdef CONFIG_GROUP_SCHED
++
++#include <linux/cgroup.h>
++
++struct cfs_rq;
++
++static LIST_HEAD(task_groups);
++
++/* task group related information */
++struct task_group {
++#ifdef CONFIG_CGROUP_SCHED
++ struct cgroup_subsys_state css;
++#endif
++
++#ifdef CONFIG_FAIR_GROUP_SCHED
++ /* schedulable entities of this group on each cpu */
++ struct sched_entity **se;
++ /* runqueue "owned" by this group on each cpu */
++ struct cfs_rq **cfs_rq;
++ unsigned long shares;
++#endif
++
++#ifdef CONFIG_RT_GROUP_SCHED
++ struct sched_rt_entity **rt_se;
++ struct rt_rq **rt_rq;
++
++ struct rt_bandwidth rt_bandwidth;
++#endif
++
++ struct rcu_head rcu;
++ struct list_head list;
++
++ struct task_group *parent;
++ struct list_head siblings;
++ struct list_head children;
++};
++
++#ifdef CONFIG_USER_SCHED
++
++/*
++ * Root task group.
++ * Every UID task group (including init_task_group aka UID-0) will
++ * be a child to this group.
++ */
++struct task_group root_task_group;
++
++#ifdef CONFIG_FAIR_GROUP_SCHED
++/* Default task group's sched entity on each cpu */
++static DEFINE_PER_CPU(struct sched_entity, init_sched_entity);
++/* Default task group's cfs_rq on each cpu */
++static DEFINE_PER_CPU(struct cfs_rq, init_cfs_rq) ____cacheline_aligned_in_smp;
++#endif /* CONFIG_FAIR_GROUP_SCHED */
++
++#ifdef CONFIG_RT_GROUP_SCHED
++static DEFINE_PER_CPU(struct sched_rt_entity, init_sched_rt_entity);
++static DEFINE_PER_CPU(struct rt_rq, init_rt_rq) ____cacheline_aligned_in_smp;
++#endif /* CONFIG_RT_GROUP_SCHED */
++#else /* !CONFIG_USER_SCHED */
++#define root_task_group init_task_group
++#endif /* CONFIG_USER_SCHED */
++
++/* task_group_lock serializes add/remove of task groups and also changes to
++ * a task group's cpu shares.
++ */
++static DEFINE_SPINLOCK(task_group_lock);
++
++#ifdef CONFIG_FAIR_GROUP_SCHED
++#ifdef CONFIG_USER_SCHED
++# define INIT_TASK_GROUP_LOAD (2*NICE_0_LOAD)
++#else /* !CONFIG_USER_SCHED */
++# define INIT_TASK_GROUP_LOAD NICE_0_LOAD
++#endif /* CONFIG_USER_SCHED */
++
++/*
++ * A weight of 0 or 1 can cause arithmetic problems.
++ * The weight of a cfs_rq is the sum of the weights of the entities
++ * queued on that cfs_rq, so the weight of an entity should not be
++ * too large, and neither should the shares value of a task group.
++ * (The default weight is 1024 - so there's no practical
++ * limitation from this.)
++ */
++#define MIN_SHARES 2
++#define MAX_SHARES (1UL << 18)
++
++static int init_task_group_load = INIT_TASK_GROUP_LOAD;
++#endif
++
++/* Default task group.
++ * Every task in the system belongs to this group at bootup.
++ */
++struct task_group init_task_group;
++
++/* return group to which a task belongs */
++static inline struct task_group *task_group(struct task_struct *p)
++{
++ struct task_group *tg;
++
++#ifdef CONFIG_USER_SCHED
++ tg = p->user->tg;
++#elif defined(CONFIG_CGROUP_SCHED)
++ tg = container_of(task_subsys_state(p, cpu_cgroup_subsys_id),
++ struct task_group, css);
++#else
++ tg = &init_task_group;
++#endif
++ return tg;
++}
++
++/* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
++static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
++{
++#ifdef CONFIG_FAIR_GROUP_SCHED
++ p->se.cfs_rq = task_group(p)->cfs_rq[cpu];
++ p->se.parent = task_group(p)->se[cpu];
++#endif
++
++#ifdef CONFIG_RT_GROUP_SCHED
++ p->rt.rt_rq = task_group(p)->rt_rq[cpu];
++ p->rt.parent = task_group(p)->rt_se[cpu];
++#endif
++}
++
++#else
++
++static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { }
++static inline struct task_group *task_group(struct task_struct *p)
++{
++ return NULL;
++}
++
++#endif /* CONFIG_GROUP_SCHED */
++
++/* CFS-related fields in a runqueue */
++struct cfs_rq {
++ struct load_weight load;
++ unsigned long nr_running;
++
++ u64 exec_clock;
++ u64 min_vruntime;
++ u64 pair_start;
++
++ struct rb_root tasks_timeline;
++ struct rb_node *rb_leftmost;
++
++ struct list_head tasks;
++ struct list_head *balance_iterator;
++
++ /*
++ * 'curr' points to currently running entity on this cfs_rq.
++ * It is set to NULL otherwise (i.e. when none are currently running).
++ */
++ struct sched_entity *curr, *next;
++
++ unsigned long nr_spread_over;
++
++#ifdef CONFIG_FAIR_GROUP_SCHED
++ struct rq *rq; /* cpu runqueue to which this cfs_rq is attached */
++
++ /*
++ * leaf cfs_rqs are those that hold tasks (lowest schedulable entity in
++ * a hierarchy). Non-leaf lrqs hold other higher schedulable entities
++ * (like users, containers etc.)
++ *
++ * leaf_cfs_rq_list ties together list of leaf cfs_rq's in a cpu. This
++ * list is used during load balance.
++ */
++ struct list_head leaf_cfs_rq_list;
++ struct task_group *tg; /* group that "owns" this runqueue */
++
++#ifdef CONFIG_SMP
++ /*
++ * the part of load.weight contributed by tasks
++ */
++ unsigned long task_weight;
++
++ /*
++ * h_load = weight * f(tg)
++ *
++ * Where f(tg) is the recursive weight fraction assigned to
++ * this group.
++ */
++ unsigned long h_load;
++
++ /*
++ * this cpu's part of tg->shares
++ */
++ unsigned long shares;
++
++ /*
++ * load.weight at the time we set shares
++ */
++ unsigned long rq_weight;
++#endif
++#endif
++};
++
++/* Real-Time classes' related field in a runqueue: */
++struct rt_rq {
++ struct rt_prio_array active;
++ unsigned long rt_nr_running;
++#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
++ int highest_prio; /* highest queued rt task prio */
++#endif
++#ifdef CONFIG_SMP
++ unsigned long rt_nr_migratory;
++ int overloaded;
++#endif
++ int rt_throttled;
++ u64 rt_time;
++ u64 rt_runtime;
++ /* Nests inside the rq lock: */
++ spinlock_t rt_runtime_lock;
++
++#ifdef CONFIG_RT_GROUP_SCHED
++ unsigned long rt_nr_boosted;
++
++ struct rq *rq;
++ struct list_head leaf_rt_rq_list;
++ struct task_group *tg;
++ struct sched_rt_entity *rt_se;
++#endif
++};
++
++#ifdef CONFIG_SMP
++
++/*
++ * We add the notion of a root-domain which will be used to define per-domain
++ * variables. Each exclusive cpuset essentially defines an island domain by
++ * fully partitioning the member cpus from any other cpuset. Whenever a new
++ * exclusive cpuset is created, we also create and attach a new root-domain
++ * object.
++ *
++ */
++struct root_domain {
++ atomic_t refcount;
++ cpumask_t span;
++ cpumask_t online;
++
++ /*
++ * The "RT overload" flag: it gets set if a CPU has more than
++ * one runnable RT task.
++ */
++ cpumask_t rto_mask;
++ atomic_t rto_count;
++#ifdef CONFIG_SMP
++ struct cpupri cpupri;
++#endif
++};
++
++/*
++ * By default the system creates a single root-domain with all cpus as
++ * members (mimicking the global state we have today).
++ */
++static struct root_domain def_root_domain;
++
++#endif
++ unsigned long norm_time;
++ unsigned long idle_time;
++#ifdef CONFIG_VSERVER_IDLETIME
++ int idle_skip;
++#endif
++#ifdef CONFIG_VSERVER_HARDCPU
++ struct list_head hold_queue;
++ unsigned long nr_onhold;
++ int idle_tokens;
++#endif
++
++/*
++ * This is the main, per-CPU runqueue data structure.
++ *
++ * Locking rule: those places that want to lock multiple runqueues
++ * (such as the load balancing or the thread migration code), lock
++ * acquire operations must be ordered by ascending &runqueue.
++ */
++struct rq {
++ /* runqueue lock: */
++ spinlock_t lock;
++
++ /*
++ * nr_running and cpu_load should be in the same cacheline because
++ * remote CPUs use both these fields when doing load calculation.
++ */
++ unsigned long nr_running;
++ #define CPU_LOAD_IDX_MAX 5
++ unsigned long cpu_load[CPU_LOAD_IDX_MAX];
++ unsigned char idle_at_tick;
++#ifdef CONFIG_NO_HZ
++ unsigned long last_tick_seen;
++ unsigned char in_nohz_recently;
++#endif
++ /* capture load from *all* tasks on this cpu: */
++ struct load_weight load;
++ unsigned long nr_load_updates;
++ u64 nr_switches;
++
++ struct cfs_rq cfs;
++ struct rt_rq rt;
++
++#ifdef CONFIG_FAIR_GROUP_SCHED
++ /* list of leaf cfs_rq on this cpu: */
++ struct list_head leaf_cfs_rq_list;
++#endif
++#ifdef CONFIG_RT_GROUP_SCHED
++ struct list_head leaf_rt_rq_list;
++#endif
++
++ /*
++ * This is part of a global counter where only the total sum
++ * over all CPUs matters. A task can increase this counter on
++ * one CPU and if it got migrated afterwards it may decrease
++ * it on another CPU. Always updated under the runqueue lock:
++ */
++ unsigned long nr_uninterruptible;
++
++ struct task_struct *curr, *idle;
++ unsigned long next_balance;
++ struct mm_struct *prev_mm;
++
++ u64 clock;
++
++ atomic_t nr_iowait;
++
++#ifdef CONFIG_SMP
++ struct root_domain *rd;
++ struct sched_domain *sd;
++
++ /* For active balancing */
++ int active_balance;
++ int push_cpu;
++ /* cpu of this runqueue: */
++ int cpu;
++ int online;
++
++ unsigned long avg_load_per_task;
++
++ struct task_struct *migration_thread;
++ struct list_head migration_queue;
++#endif
++
++#ifdef CONFIG_SCHED_HRTICK
++#ifdef CONFIG_SMP
++ int hrtick_csd_pending;
++ struct call_single_data hrtick_csd;
++#endif
++ struct hrtimer hrtick_timer;
++#endif
++
++#ifdef CONFIG_SCHEDSTATS
++ /* latency stats */
++ struct sched_info rq_sched_info;
++
++ /* sys_sched_yield() stats */
++ unsigned int yld_exp_empty;
++ unsigned int yld_act_empty;
++ unsigned int yld_both_empty;
++ unsigned int yld_count;
++
++ /* schedule() stats */
++ unsigned int sched_switch;
++ unsigned int sched_count;
++ unsigned int sched_goidle;
++
++ /* try_to_wake_up() stats */
++ unsigned int ttwu_count;
++ unsigned int ttwu_local;
++
++ /* BKL stats */
++ unsigned int bkl_count;
++#endif
++};
++
++static DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
++
++static inline void check_preempt_curr(struct rq *rq, struct task_struct *p)
++{
++ rq->curr->sched_class->check_preempt_curr(rq, p);
++}
++
++static inline int cpu_of(struct rq *rq)
++{
++#ifdef CONFIG_SMP
++ return rq->cpu;
++#else
++ return 0;
++#endif
++}
++
++/*
++ * The domain tree (rq->sd) is protected by RCU's quiescent state transition.
++ * See detach_destroy_domains: synchronize_sched for details.
++ *
++ * The domain tree of any CPU may only be accessed from within
++ * preempt-disabled sections.
++ */
++#define for_each_domain(cpu, __sd) \
++ for (__sd = rcu_dereference(cpu_rq(cpu)->sd); __sd; __sd = __sd->parent)
++
++#define cpu_rq(cpu) (&per_cpu(runqueues, (cpu)))
++#define this_rq() (&__get_cpu_var(runqueues))
++#define task_rq(p) cpu_rq(task_cpu(p))
++#define cpu_curr(cpu) (cpu_rq(cpu)->curr)
++
++static inline void update_rq_clock(struct rq *rq)
++{
++ rq->clock = sched_clock_cpu(cpu_of(rq));
++}
++
++/*
++ * Tunables that become constants when CONFIG_SCHED_DEBUG is off:
++ */
++#ifdef CONFIG_SCHED_DEBUG
++# define const_debug __read_mostly
++#else
++# define const_debug static const
++#endif
++
++/**
++ * runqueue_is_locked
++ *
++ * Returns true if the current cpu runqueue is locked.
++ * This interface allows printk to be called with the runqueue lock
++ * held and know whether or not it is OK to wake up the klogd.
++ */
++int runqueue_is_locked(void)
++{
++ int cpu = get_cpu();
++ struct rq *rq = cpu_rq(cpu);
++ int ret;
++
++ ret = spin_is_locked(&rq->lock);
++ put_cpu();
++ return ret;
++}
++
++/*
++ * Debugging: various feature bits
++ */
++
++#define SCHED_FEAT(name, enabled) \
++ __SCHED_FEAT_##name ,
++
++enum {
++#include "sched_features.h"
++};
++
++#undef SCHED_FEAT
++
++#define SCHED_FEAT(name, enabled) \
++ (1UL << __SCHED_FEAT_##name) * enabled |
++
++const_debug unsigned int sysctl_sched_features =
++#include "sched_features.h"
++ 0;
++
++#undef SCHED_FEAT
++
++#ifdef CONFIG_SCHED_DEBUG
++#define SCHED_FEAT(name, enabled) \
++ #name ,
++
++static __read_mostly char *sched_feat_names[] = {
++#include "sched_features.h"
++ NULL
++};
++
++#undef SCHED_FEAT
++
++static int sched_feat_open(struct inode *inode, struct file *filp)
++{
++ filp->private_data = inode->i_private;
++ return 0;
++}
++
++static ssize_t
++sched_feat_read(struct file *filp, char __user *ubuf,
++ size_t cnt, loff_t *ppos)
++{
++ char *buf;
++ int r = 0;
++ int len = 0;
++ int i;
++
++ for (i = 0; sched_feat_names[i]; i++) {
++ len += strlen(sched_feat_names[i]);
++ len += 4;
++ }
++
++ buf = kmalloc(len + 2, GFP_KERNEL);
++ if (!buf)
++ return -ENOMEM;
++
++ for (i = 0; sched_feat_names[i]; i++) {
++ if (sysctl_sched_features & (1UL << i))
++ r += sprintf(buf + r, "%s ", sched_feat_names[i]);
++ else
++ r += sprintf(buf + r, "NO_%s ", sched_feat_names[i]);
++ }
++
++ r += sprintf(buf + r, "\n");
++ WARN_ON(r >= len + 2);
++
++ r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
++
++ kfree(buf);
++
++ return r;
++}
++
++static ssize_t
++sched_feat_write(struct file *filp, const char __user *ubuf,
++ size_t cnt, loff_t *ppos)
++{
++ char buf[64];
++ char *cmp = buf;
++ int neg = 0;
++ int i;
++
++ if (cnt > 63)
++ cnt = 63;
++
++ if (copy_from_user(&buf, ubuf, cnt))
++ return -EFAULT;
++
++ buf[cnt] = 0;
++
++ if (strncmp(buf, "NO_", 3) == 0) {
++ neg = 1;
++ cmp += 3;
++ }
++
++ for (i = 0; sched_feat_names[i]; i++) {
++ int len = strlen(sched_feat_names[i]);
++
++ if (strncmp(cmp, sched_feat_names[i], len) == 0) {
++ if (neg)
++ sysctl_sched_features &= ~(1UL << i);
++ else
++ sysctl_sched_features |= (1UL << i);
++ break;
++ }
++ }
++
++ if (!sched_feat_names[i])
++ return -EINVAL;
++
++ filp->f_pos += cnt;
++
++ return cnt;
++}
++
++static struct file_operations sched_feat_fops = {
++ .open = sched_feat_open,
++ .read = sched_feat_read,
++ .write = sched_feat_write,
++};
++
++static __init int sched_init_debug(void)
++{
++ debugfs_create_file("sched_features", 0644, NULL, NULL,
++ &sched_feat_fops);
++
++ return 0;
++}
++late_initcall(sched_init_debug);
++
++#endif
++
++#define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x))
++
++/*
++ * Number of tasks to iterate in a single balance run.
++ * Limited because this is done with IRQs disabled.
++ */
++const_debug unsigned int sysctl_sched_nr_migrate = 32;
++
++/*
++ * ratelimit for updating the group shares.
++ * default: 0.25ms
++ */
++unsigned int sysctl_sched_shares_ratelimit = 250000;
++
++/*
++ * period over which we measure -rt task cpu usage in us.
++ * default: 1s
++ */
++unsigned int sysctl_sched_rt_period = 1000000;
++
++static __read_mostly int scheduler_running;
++
++/*
++ * part of the period that we allow rt tasks to run in us.
++ * default: 0.95s
++ */
++int sysctl_sched_rt_runtime = 950000;
++
++static inline u64 global_rt_period(void)
++{
++ return (u64)sysctl_sched_rt_period * NSEC_PER_USEC;
++}
++
++static inline u64 global_rt_runtime(void)
++{
++ if (sysctl_sched_rt_runtime < 0)
++ return RUNTIME_INF;
++
++ return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC;
++}
++
++#ifndef prepare_arch_switch
++# define prepare_arch_switch(next) do { } while (0)
++#endif
++#ifndef finish_arch_switch
++# define finish_arch_switch(prev) do { } while (0)
++#endif
++
++static inline int task_current(struct rq *rq, struct task_struct *p)
++{
++ return rq->curr == p;
++}
++
++#ifndef __ARCH_WANT_UNLOCKED_CTXSW
++static inline int task_running(struct rq *rq, struct task_struct *p)
++{
++ return task_current(rq, p);
++}
++
++static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
++{
++}
++
++static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
++{
++#ifdef CONFIG_DEBUG_SPINLOCK
++ /* this is a valid case when another task releases the spinlock */
++ rq->lock.owner = current;
++#endif
++ /*
++ * If we are tracking spinlock dependencies then we have to
++ * fix up the runqueue lock - which gets 'carried over' from
++ * prev into current:
++ */
++ spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);
++
++ spin_unlock_irq(&rq->lock);
++}
++
++#else /* __ARCH_WANT_UNLOCKED_CTXSW */
++static inline int task_running(struct rq *rq, struct task_struct *p)
++{
++#ifdef CONFIG_SMP
++ return p->oncpu;
++#else
++ return task_current(rq, p);
++#endif
++}
++
++static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
++{
++#ifdef CONFIG_SMP
++ /*
++ * We can optimise this out completely for !SMP, because the
++ * SMP rebalancing from interrupt is the only thing that cares
++ * here.
++ */
++ next->oncpu = 1;
++#endif
++#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
++ spin_unlock_irq(&rq->lock);
++#else
++ spin_unlock(&rq->lock);
++#endif
++}
++
++static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
++{
++#ifdef CONFIG_SMP
++ /*
++ * After ->oncpu is cleared, the task can be moved to a different CPU.
++ * We must ensure this doesn't happen until the switch is completely
++ * finished.
++ */
++ smp_wmb();
++ prev->oncpu = 0;
++#endif
++#ifndef __ARCH_WANT_INTERRUPTS_ON_CTXSW
++ local_irq_enable();
++#endif
++}
++#endif /* __ARCH_WANT_UNLOCKED_CTXSW */
++
++/*
++ * __task_rq_lock - lock the runqueue a given task resides on.
++ * Must be called with interrupts disabled.
++ */
++static inline struct rq *__task_rq_lock(struct task_struct *p)
++ __acquires(rq->lock)
++{
++ for (;;) {
++ struct rq *rq = task_rq(p);
++ spin_lock(&rq->lock);
++ if (likely(rq == task_rq(p)))
++ return rq;
++ spin_unlock(&rq->lock);
++ }
++}
++
++/*
++ * task_rq_lock - lock the runqueue a given task resides on and disable
++ * interrupts. Note the ordering: we can safely lookup the task_rq without
++ * explicitly disabling preemption.
++ */
++static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
++ __acquires(rq->lock)
++{
++ struct rq *rq;
++
++ for (;;) {
++ local_irq_save(*flags);
++ rq = task_rq(p);
++ spin_lock(&rq->lock);
++ if (likely(rq == task_rq(p)))
++ return rq;
++ spin_unlock_irqrestore(&rq->lock, *flags);
++ }
++}
++
++static void __task_rq_unlock(struct rq *rq)
++ __releases(rq->lock)
++{
++ spin_unlock(&rq->lock);
++}
++
++static inline void task_rq_unlock(struct rq *rq, unsigned long *flags)
++ __releases(rq->lock)
++{
++ spin_unlock_irqrestore(&rq->lock, *flags);
++}
++
++/*
++ * this_rq_lock - lock this runqueue and disable interrupts.
++ */
++static struct rq *this_rq_lock(void)
++ __acquires(rq->lock)
++{
++ struct rq *rq;
++
++ local_irq_disable();
++ rq = this_rq();
++ spin_lock(&rq->lock);
++
++ return rq;
++}
++
++#ifdef CONFIG_SCHED_HRTICK
++/*
++ * Use HR-timers to deliver accurate preemption points.
++ *
++ * It's all a bit involved since we cannot program an hrt while holding the
++ * rq->lock. So what we do is store the state in rq->hrtick_* and ask for a
++ * reschedule event.
++ *
++ * When we get rescheduled we reprogram the hrtick_timer outside of the
++ * rq->lock.
++ */
++
++/*
++ * Use hrtick when:
++ * - enabled by features
++ * - hrtimer is actually high res
++ */
++static inline int hrtick_enabled(struct rq *rq)
++{
++ if (!sched_feat(HRTICK))
++ return 0;
++ if (!cpu_active(cpu_of(rq)))
++ return 0;
++ return hrtimer_is_hres_active(&rq->hrtick_timer);
++}
++
++static void hrtick_clear(struct rq *rq)
++{
++ if (hrtimer_active(&rq->hrtick_timer))
++ hrtimer_cancel(&rq->hrtick_timer);
++}
++
++/*
++ * High-resolution timer tick.
++ * Runs from hardirq context with interrupts disabled.
++ */
++static enum hrtimer_restart hrtick(struct hrtimer *timer)
++{
++ struct rq *rq = container_of(timer, struct rq, hrtick_timer);
++
++ WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());
++
++ spin_lock(&rq->lock);
++ update_rq_clock(rq);
++ rq->curr->sched_class->task_tick(rq, rq->curr, 1);
++ spin_unlock(&rq->lock);
++
++ return HRTIMER_NORESTART;
++}
++
++#ifdef CONFIG_SMP
++/*
++ * called from hardirq (IPI) context
++ */
++static void __hrtick_start(void *arg)
++{
++ struct rq *rq = arg;
++
++ spin_lock(&rq->lock);
++ hrtimer_restart(&rq->hrtick_timer);
++ rq->hrtick_csd_pending = 0;
++ spin_unlock(&rq->lock);
++}
++
++/*
++ * Called to set the hrtick timer state.
++ *
++ * called with rq->lock held and irqs disabled
++ */
++static void hrtick_start(struct rq *rq, u64 delay)
++{
++ struct hrtimer *timer = &rq->hrtick_timer;
++ ktime_t time = ktime_add_ns(timer->base->get_time(), delay);
++
++ timer->expires = time;
++
++ if (rq == this_rq()) {
++ hrtimer_restart(timer);
++ } else if (!rq->hrtick_csd_pending) {
++ __smp_call_function_single(cpu_of(rq), &rq->hrtick_csd);
++ rq->hrtick_csd_pending = 1;
++ }
++}
++
++static int
++hotplug_hrtick(struct notifier_block *nfb, unsigned long action, void *hcpu)
++{
++ int cpu = (int)(long)hcpu;
++
++ switch (action) {
++ case CPU_UP_CANCELED:
++ case CPU_UP_CANCELED_FROZEN:
++ case CPU_DOWN_PREPARE:
++ case CPU_DOWN_PREPARE_FROZEN:
++ case CPU_DEAD:
++ case CPU_DEAD_FROZEN:
++ hrtick_clear(cpu_rq(cpu));
++ return NOTIFY_OK;
++ }
++
++ return NOTIFY_DONE;
++}
++
++static __init void init_hrtick(void)
++{
++ hotcpu_notifier(hotplug_hrtick, 0);
++}
++#else
++/*
++ * Called to set the hrtick timer state.
++ *
++ * called with rq->lock held and irqs disabled
++ */
++static void hrtick_start(struct rq *rq, u64 delay)
++{
++ hrtimer_start(&rq->hrtick_timer, ns_to_ktime(delay), HRTIMER_MODE_REL);
++}
++
++static void init_hrtick(void)
++{
++}
++#endif /* CONFIG_SMP */
++
++static void init_rq_hrtick(struct rq *rq)
++{
++#ifdef CONFIG_SMP
++ rq->hrtick_csd_pending = 0;
++
++ rq->hrtick_csd.flags = 0;
++ rq->hrtick_csd.func = __hrtick_start;
++ rq->hrtick_csd.info = rq;
++#endif
++
++ hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
++ rq->hrtick_timer.function = hrtick;
++ rq->hrtick_timer.cb_mode = HRTIMER_CB_IRQSAFE_PERCPU;
++}
++#else
++static inline void hrtick_clear(struct rq *rq)
++{
++}
++
++static inline void init_rq_hrtick(struct rq *rq)
++{
++}
++
++static inline void init_hrtick(void)
++{
++}
++#endif
++
++/*
++ * resched_task - mark a task 'to be rescheduled now'.
++ *
++ * On UP this means the setting of the need_resched flag, on SMP it
++ * might also involve a cross-CPU call to trigger the scheduler on
++ * the target CPU.
++ */
++#ifdef CONFIG_SMP
++
++#ifndef tsk_is_polling
++#define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG)
++#endif
++
++static void resched_task(struct task_struct *p)
++{
++ int cpu;
++
++ assert_spin_locked(&task_rq(p)->lock);
++
++ if (unlikely(test_tsk_thread_flag(p, TIF_NEED_RESCHED)))
++ return;
++
++ set_tsk_thread_flag(p, TIF_NEED_RESCHED);
++
++ cpu = task_cpu(p);
++ if (cpu == smp_processor_id())
++ return;
++
++ /* NEED_RESCHED must be visible before we test polling */
++ smp_mb();
++ if (!tsk_is_polling(p))
++ smp_send_reschedule(cpu);
++}
++
++static void resched_cpu(int cpu)
++{
++ struct rq *rq = cpu_rq(cpu);
++ unsigned long flags;
++
++ if (!spin_trylock_irqsave(&rq->lock, flags))
++ return;
++ resched_task(cpu_curr(cpu));
++ spin_unlock_irqrestore(&rq->lock, flags);
++}
++
++#ifdef CONFIG_NO_HZ
++/*
++ * When add_timer_on() enqueues a timer into the timer wheel of an
++ * idle CPU then this timer might expire before the next timer event
++ * which is scheduled to wake up that CPU. In case of a completely
++ * idle system the next event might even be infinite time into the
++ * future. wake_up_idle_cpu() ensures that the CPU is woken up and
++ * leaves the inner idle loop so the newly added timer is taken into
++ * account when the CPU goes back to idle and evaluates the timer
++ * wheel for the next timer event.
++ */
++void wake_up_idle_cpu(int cpu)
++{
++ struct rq *rq = cpu_rq(cpu);
++
++ if (cpu == smp_processor_id())
++ return;
++
++ /*
++ * This is safe, as this function is called with the timer
++ * wheel base lock of (cpu) held. When the CPU is on the way
++ * to idle and has not yet set rq->curr to idle then it will
++ * be serialized on the timer wheel base lock and take the new
++ * timer into account automatically.
++ */
++ if (rq->curr != rq->idle)
++ return;
++
++ /*
++ * We can set TIF_RESCHED on the idle task of the other CPU
++ * lockless. The worst case is that the other CPU runs the
++ * idle task through an additional NOOP schedule()
++ */
++ set_tsk_thread_flag(rq->idle, TIF_NEED_RESCHED);
++
++ /* NEED_RESCHED must be visible before we test polling */
++ smp_mb();
++ if (!tsk_is_polling(rq->idle))
++ smp_send_reschedule(cpu);
++}
++#endif /* CONFIG_NO_HZ */
++
++#else /* !CONFIG_SMP */
++static void resched_task(struct task_struct *p)
++{
++ assert_spin_locked(&task_rq(p)->lock);
++ set_tsk_need_resched(p);
++}
++#endif /* CONFIG_SMP */
++
++#if BITS_PER_LONG == 32
++# define WMULT_CONST (~0UL)
++#else
++# define WMULT_CONST (1UL << 32)
++#endif
++
++#define WMULT_SHIFT 32
++
++/*
++ * Shift right and round:
++ */
++#define SRR(x, y) (((x) + (1UL << ((y) - 1))) >> (y))
++
++/*
++ * delta *= weight / lw
++ */
++static unsigned long
++calc_delta_mine(unsigned long delta_exec, unsigned long weight,
++ struct load_weight *lw)
++{
++ u64 tmp;
++
++ if (!lw->inv_weight) {
++ if (BITS_PER_LONG > 32 && unlikely(lw->weight >= WMULT_CONST))
++ lw->inv_weight = 1;
++ else
++ lw->inv_weight = 1 + (WMULT_CONST-lw->weight/2)
++ / (lw->weight+1);
++ }
++
++ tmp = (u64)delta_exec * weight;
++ /*
++ * Check whether we'd overflow the 64-bit multiplication:
++ */
++ if (unlikely(tmp > WMULT_CONST))
++ tmp = SRR(SRR(tmp, WMULT_SHIFT/2) * lw->inv_weight,
++ WMULT_SHIFT/2);
++ else
++ tmp = SRR(tmp * lw->inv_weight, WMULT_SHIFT);
++
++ return (unsigned long)min(tmp, (u64)(unsigned long)LONG_MAX);
++}
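++
++/*
++ * Worked example for calc_delta_mine() (illustrative numbers): with
++ * delta_exec == 1000000, weight == NICE_0_LOAD (1024) and lw->weight ==
++ * 2048, the result is approximately 1000000 * 1024 / 2048 == 500000, i.e.
++ * the delta is scaled by the ratio weight / lw->weight, with the division
++ * replaced by a multiplication with the cached inverse weight.
++ */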
++
++static inline void update_load_add(struct load_weight *lw, unsigned long inc)
++{
++ lw->weight += inc;
++ lw->inv_weight = 0;
++}
++
++static inline void update_load_sub(struct load_weight *lw, unsigned long dec)
++{
++ lw->weight -= dec;
++ lw->inv_weight = 0;
++}
++
++/*
++ * To aid in avoiding the subversion of "niceness" due to uneven distribution
++ * of tasks with abnormal "nice" values across CPUs the contribution that
++ * each task makes to its run queue's load is weighted according to its
++ * scheduling class and "nice" value. For SCHED_NORMAL tasks this is just a
++ * scaled version of the new time slice allocation that they receive on time
++ * slice expiry etc.
++ */
++
++#define WEIGHT_IDLEPRIO 2
++#define WMULT_IDLEPRIO (1 << 31)
++
++/*
++ * Nice levels are multiplicative, with a gentle 10% change for every
++ * nice level changed. I.e. when a CPU-bound task goes from nice 0 to
++ * nice 1, it will get ~10% less CPU time than another CPU-bound task
++ * that remained on nice 0.
++ *
++ * The "10% effect" is relative and cumulative: from _any_ nice level,
++ * if you go up 1 level, it's -10% CPU usage, if you go down 1 level
++ * it's +10% CPU usage. (to achieve that we use a multiplier of 1.25.
++ * If a task goes up by ~10% and another task goes down by ~10% then
++ * the relative distance between them is ~25%.)
++ */
++static const int prio_to_weight[40] = {
++ /* -20 */ 88761, 71755, 56483, 46273, 36291,
++ /* -15 */ 29154, 23254, 18705, 14949, 11916,
++ /* -10 */ 9548, 7620, 6100, 4904, 3906,
++ /* -5 */ 3121, 2501, 1991, 1586, 1277,
++ /* 0 */ 1024, 820, 655, 526, 423,
++ /* 5 */ 335, 272, 215, 172, 137,
++ /* 10 */ 110, 87, 70, 56, 45,
++ /* 15 */ 36, 29, 23, 18, 15,
++};
++
++/*
++ * Inverse (2^32/x) values of the prio_to_weight[] array, precalculated.
++ *
++ * In cases where the weight does not change often, we can use the
++ * precalculated inverse to speed up arithmetics by turning divisions
++ * into multiplications:
++ */
++static const u32 prio_to_wmult[40] = {
++ /* -20 */ 48388, 59856, 76040, 92818, 118348,
++ /* -15 */ 147320, 184698, 229616, 287308, 360437,
++ /* -10 */ 449829, 563644, 704093, 875809, 1099582,
++ /* -5 */ 1376151, 1717300, 2157191, 2708050, 3363326,
++ /* 0 */ 4194304, 5237765, 6557202, 8165337, 10153587,
++ /* 5 */ 12820798, 15790321, 19976592, 24970740, 31350126,
++ /* 10 */ 39045157, 49367440, 61356676, 76695844, 95443717,
++ /* 15 */ 119304647, 148102320, 186737708, 238609294, 286331153,
++};
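++
++/*
++ * Worked example for the two tables above: prio_to_weight[20] (nice 0) is
++ * 1024 and prio_to_weight[21] (nice +1) is 820; 1024/820 is roughly 1.25,
++ * which is where the ~10% relative CPU-time change per nice level comes
++ * from. The second table caches 2^32/weight, e.g. prio_to_wmult[20] ==
++ * 4194304 == 2^32/1024, so dividing by a weight can be done as a multiply
++ * and a shift in calc_delta_mine() above.
++ */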
++
++static void activate_task(struct rq *rq, struct task_struct *p, int wakeup);
++
++/*
++ * runqueue iterator, to support SMP load-balancing between different
++ * scheduling classes, without having to expose their internal data
++ * structures to the load-balancing proper:
++ */
++struct rq_iterator {
++ void *arg;
++ struct task_struct *(*start)(void *);
++ struct task_struct *(*next)(void *);
++};
++
++#ifdef CONFIG_SMP
++static unsigned long
++balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
++ unsigned long max_load_move, struct sched_domain *sd,
++ enum cpu_idle_type idle, int *all_pinned,
++ int *this_best_prio, struct rq_iterator *iterator);
++
++static int
++iter_move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest,
++ struct sched_domain *sd, enum cpu_idle_type idle,
++ struct rq_iterator *iterator);
++#endif
++
++#ifdef CONFIG_CGROUP_CPUACCT
++static void cpuacct_charge(struct task_struct *tsk, u64 cputime);
++#else
++static inline void cpuacct_charge(struct task_struct *tsk, u64 cputime) {}
++#endif
++
++static inline void inc_cpu_load(struct rq *rq, unsigned long load)
++{
++ update_load_add(&rq->load, load);
++}
++
++static inline void dec_cpu_load(struct rq *rq, unsigned long load)
++{
++ update_load_sub(&rq->load, load);
++}
++
++#ifdef CONFIG_SMP
++static unsigned long source_load(int cpu, int type);
++static unsigned long target_load(int cpu, int type);
++static int task_hot(struct task_struct *p, u64 now, struct sched_domain *sd);
++
++static unsigned long cpu_avg_load_per_task(int cpu)
++{
++ struct rq *rq = cpu_rq(cpu);
++
++ if (rq->nr_running)
++ rq->avg_load_per_task = rq->load.weight / rq->nr_running;
++
++ return rq->avg_load_per_task;
++}
++
++#ifdef CONFIG_FAIR_GROUP_SCHED
++
++typedef void (*tg_visitor)(struct task_group *, int, struct sched_domain *);
++
++/*
++ * Iterate the full tree, calling @down when first entering a node and @up when
++ * leaving it for the final time.
++ */
++static void
++walk_tg_tree(tg_visitor down, tg_visitor up, int cpu, struct sched_domain *sd)
++{
++ struct task_group *parent, *child;
++
++ rcu_read_lock();
++ parent = &root_task_group;
++down:
++ (*down)(parent, cpu, sd);
++ list_for_each_entry_rcu(child, &parent->children, siblings) {
++ parent = child;
++ goto down;
++
++up:
++ continue;
++ }
++ (*up)(parent, cpu, sd);
++
++ child = parent;
++ parent = parent->parent;
++ if (parent)
++ goto up;
++ rcu_read_unlock();
++}
++
++static void __set_se_shares(struct sched_entity *se, unsigned long shares);
++
++/*
++ * Calculate and set the cpu's group shares.
++ */
++static void
++__update_group_shares_cpu(struct task_group *tg, int cpu,
++ unsigned long sd_shares, unsigned long sd_rq_weight)
++{
++ int boost = 0;
++ unsigned long shares;
++ unsigned long rq_weight;
++
++ if (!tg->se[cpu])
++ return;
++
++ rq_weight = tg->cfs_rq[cpu]->load.weight;
++
++ /*
++ * If there are currently no tasks on the cpu pretend there is one of
++ * average load so that when a new task gets to run here it will not
++ * get delayed by group starvation.
++ */
++ if (!rq_weight) {
++ boost = 1;
++ rq_weight = NICE_0_LOAD;
++ }
++
++ if (unlikely(rq_weight > sd_rq_weight))
++ rq_weight = sd_rq_weight;
++
++ /*
++ * \Sum shares * rq_weight
++ * shares = -----------------------
++ * \Sum rq_weight
++ *
++ */
++ shares = (sd_shares * rq_weight) / (sd_rq_weight + 1);
++
++ /*
++ * record the actual number of shares, not the boosted amount.
++ */
++ tg->cfs_rq[cpu]->shares = boost ? 0 : shares;
++ tg->cfs_rq[cpu]->rq_weight = rq_weight;
++
++ if (shares < MIN_SHARES)
++ shares = MIN_SHARES;
++ else if (shares > MAX_SHARES)
++ shares = MAX_SHARES;
++
++ __set_se_shares(tg->se[cpu], shares);
++}
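++
++/*
++ * Worked example for the shares formula above (illustrative numbers): if
++ * the group has sd_shares == 1024 over a domain whose summed runqueue
++ * weight is sd_rq_weight == 3072 and this cpu contributes rq_weight ==
++ * 1024 of it, then shares == 1024 * 1024 / (3072 + 1), i.e. roughly a
++ * third of the group's shares land on this cpu, clamped to
++ * [MIN_SHARES, MAX_SHARES] before __set_se_shares() applies them.
++ */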
++
++/*
++ * Re-compute the task group's per-cpu shares over the given domain.
++ * This needs to be done in a bottom-up fashion because the rq weight of a
++ * parent group depends on the shares of its child groups.
++ */
++static void
++tg_shares_up(struct task_group *tg, int cpu, struct sched_domain *sd)
++{
++ unsigned long rq_weight = 0;
++ unsigned long shares = 0;
++ int i;
++
++ for_each_cpu_mask(i, sd->span) {
++ rq_weight += tg->cfs_rq[i]->load.weight;
++ shares += tg->cfs_rq[i]->shares;
++ }
++
++ if ((!shares && rq_weight) || shares > tg->shares)
++ shares = tg->shares;
++
++ if (!sd->parent || !(sd->parent->flags & SD_LOAD_BALANCE))
++ shares = tg->shares;
++
++ if (!rq_weight)
++ rq_weight = cpus_weight(sd->span) * NICE_0_LOAD;
++
++ for_each_cpu_mask(i, sd->span) {
++ struct rq *rq = cpu_rq(i);
++ unsigned long flags;
++
++ spin_lock_irqsave(&rq->lock, flags);
++ __update_group_shares_cpu(tg, i, shares, rq_weight);
++ spin_unlock_irqrestore(&rq->lock, flags);
++ }
++}
++
++/*
++ * Compute the cpu's hierarchical load factor for each task group.
++ * This needs to be done in a top-down fashion because the load of a child
++ * group is a fraction of its parents load.
++ */
++static void
++tg_load_down(struct task_group *tg, int cpu, struct sched_domain *sd)
++{
++ unsigned long load;
++
++ if (!tg->parent) {
++ load = cpu_rq(cpu)->load.weight;
++ } else {
++ load = tg->parent->cfs_rq[cpu]->h_load;
++ load *= tg->cfs_rq[cpu]->shares;
++ load /= tg->parent->cfs_rq[cpu]->load.weight + 1;
++ }
++
++ tg->cfs_rq[cpu]->h_load = load;
++}
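++
++/*
++ * Worked example for tg_load_down() (illustrative numbers): if the parent
++ * group has h_load == 2048 on this cpu and this child owns shares == 512
++ * of a parent cfs_rq weighing 1024, the child's h_load becomes roughly
++ * 2048 * 512 / (1024 + 1), i.e. about half of the parent's hierarchical
++ * load is attributed to this child.
++ */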
++
++static void
++tg_nop(struct task_group *tg, int cpu, struct sched_domain *sd)
++{
++}
++
++static void update_shares(struct sched_domain *sd)
++{
++ u64 now = cpu_clock(raw_smp_processor_id());
++ s64 elapsed = now - sd->last_update;
++
++ if (elapsed >= (s64)(u64)sysctl_sched_shares_ratelimit) {
++ sd->last_update = now;
++ walk_tg_tree(tg_nop, tg_shares_up, 0, sd);
++ }
++}
++
++static void update_shares_locked(struct rq *rq, struct sched_domain *sd)
++{
++ spin_unlock(&rq->lock);
++ update_shares(sd);
++ spin_lock(&rq->lock);
++}
++
++static void update_h_load(int cpu)
++{
++ walk_tg_tree(tg_load_down, tg_nop, cpu, NULL);
++}
++
++#else
++
++static inline void update_shares(struct sched_domain *sd)
++{
++}
++
++static inline void update_shares_locked(struct rq *rq, struct sched_domain *sd)
++{
++}
++
++#endif
++
++#endif
++
++#ifdef CONFIG_FAIR_GROUP_SCHED
++static void cfs_rq_set_shares(struct cfs_rq *cfs_rq, unsigned long shares)
++{
++#ifdef CONFIG_SMP
++ cfs_rq->shares = shares;
++#endif
++}
++#endif
++
++#include "sched_stats.h"
++#include "sched_idletask.c"
++#include "sched_fair.c"
++#include "sched_rt.c"
++#ifdef CONFIG_SCHED_DEBUG
++# include "sched_debug.c"
++#endif
++
++#define sched_class_highest (&rt_sched_class)
++#define for_each_class(class) \
++ for (class = sched_class_highest; class; class = class->next)
++
++static void inc_nr_running(struct rq *rq)
++{
++ rq->nr_running++;
++}
++
++static void dec_nr_running(struct rq *rq)
++{
++ rq->nr_running--;
++}
++
++static void set_load_weight(struct task_struct *p)
++{
++ if (task_has_rt_policy(p)) {
++ p->se.load.weight = prio_to_weight[0] * 2;
++ p->se.load.inv_weight = prio_to_wmult[0] >> 1;
++ return;
++ }
++
++ /*
++ * SCHED_IDLE tasks get minimal weight:
++ */
++ if (p->policy == SCHED_IDLE) {
++ p->se.load.weight = WEIGHT_IDLEPRIO;
++ p->se.load.inv_weight = WMULT_IDLEPRIO;
++ return;
++ }
++
++ p->se.load.weight = prio_to_weight[p->static_prio - MAX_RT_PRIO];
++ p->se.load.inv_weight = prio_to_wmult[p->static_prio - MAX_RT_PRIO];
++}
++
++static void update_avg(u64 *avg, u64 sample)
++{
++ s64 diff = sample - *avg;
++ *avg += diff >> 3;
++}
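++
++/*
++ * update_avg() above keeps an exponentially weighted moving average with a
++ * 1/8 weight per new sample: e.g. with *avg == 800 and sample == 1600 the
++ * diff is 800, and the new *avg becomes 800 + (800 >> 3) == 900.
++ */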
++
++static void enqueue_task(struct rq *rq, struct task_struct *p, int wakeup)
++{
++ // BUG_ON(p->state & TASK_ONHOLD);
++ sched_info_queued(p);
++ p->sched_class->enqueue_task(rq, p, wakeup);
++ p->se.on_rq = 1;
++}
++
++static void dequeue_task(struct rq *rq, struct task_struct *p, int sleep)
++{
++ if (sleep && p->se.last_wakeup) {
++ update_avg(&p->se.avg_overlap,
++ p->se.sum_exec_runtime - p->se.last_wakeup);
++ p->se.last_wakeup = 0;
++ }
++
++ sched_info_dequeued(p);
++ p->sched_class->dequeue_task(rq, p, sleep);
++ p->se.on_rq = 0;
++}
++
++/*
++ * __normal_prio - return the priority that is based on the static prio
++ */
++static inline int __normal_prio(struct task_struct *p)
++{
++ return p->static_prio;
++}
++
++/*
++ * Calculate the expected normal priority: i.e. priority
++ * without taking RT-inheritance into account. Might be
++ * boosted by interactivity modifiers. Changes upon fork,
++ * setprio syscalls, and whenever the interactivity
++ * estimator recalculates.
++ */
++static inline int normal_prio(struct task_struct *p)
++{
++ int prio;
++
++ if (task_has_rt_policy(p))
++ prio = MAX_RT_PRIO-1 - p->rt_priority;
++ else
++ prio = __normal_prio(p);
++ return prio;
++}
++
++/*
++ * Calculate the current priority, i.e. the priority
++ * taken into account by the scheduler. This value might
++ * be boosted by RT tasks, or might be boosted by
++ * interactivity modifiers. Will be RT if the task got
++ * RT-boosted. If not then it returns p->normal_prio.
++ */
++static int effective_prio(struct task_struct *p)
++{
++ p->normal_prio = normal_prio(p);
++ /*
++ * If we are RT tasks or we were boosted to RT priority,
++ * keep the priority unchanged. Otherwise, update priority
++ * to the normal priority:
++ */
++ if (!rt_prio(p->prio))
++ return p->normal_prio;
++ return p->prio;
++}
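++
++/*
++ * Editor's note (illustrative, assuming the usual MAX_RT_PRIO of 100 and
++ * MAX_PRIO of 140): the resulting prio values share one 0..139 scale,
++ * lower meaning more important.  For example a SCHED_FIFO task with
++ * rt_priority 50 gets prio 100 - 1 - 50 = 49, while a SCHED_NORMAL task
++ * at nice 0 keeps prio == static_prio == 120, so every RT task
++ * (prio < 100) outranks every fair-class task.
++ */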
++
++/*
++ * activate_task - move a task to the runqueue.
++ */
++static void activate_task(struct rq *rq, struct task_struct *p, int wakeup)
++{
++ if (task_contributes_to_load(p))
++ rq->nr_uninterruptible--;
++
++ enqueue_task(rq, p, wakeup);
++ inc_nr_running(rq);
++}
++
++/*
++ * deactivate_task - remove a task from the runqueue.
++ */
++static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep)
++{
++ if (task_contributes_to_load(p))
++ rq->nr_uninterruptible++;
++
++ dequeue_task(rq, p, sleep);
++ dec_nr_running(rq);
++}
++
++/**
++ * task_curr - is this task currently executing on a CPU?
++ * @p: the task in question.
++ */
++inline int task_curr(const struct task_struct *p)
++{
++ return cpu_curr(task_cpu(p)) == p;
++}
++
++static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
++{
++ set_task_rq(p, cpu);
++#ifdef CONFIG_SMP
++ /*
++ * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be
++ * successfully executed on another CPU. We must ensure that updates of
++ * per-task data have been completed by this moment.
++ */
++ smp_wmb();
++ task_thread_info(p)->cpu = cpu;
++#endif
++}
++
++static inline void check_class_changed(struct rq *rq, struct task_struct *p,
++ const struct sched_class *prev_class,
++ int oldprio, int running)
++{
++ if (prev_class != p->sched_class) {
++ if (prev_class->switched_from)
++ prev_class->switched_from(rq, p, running);
++ p->sched_class->switched_to(rq, p, running);
++ } else
++ p->sched_class->prio_changed(rq, p, oldprio, running);
++}
++
++#ifdef CONFIG_SMP
++
++/* Used instead of source_load when we know the type == 0 */
++static unsigned long weighted_cpuload(const int cpu)
++{
++ return cpu_rq(cpu)->load.weight;
++}
++
++/*
++ * Is this task likely cache-hot:
++ */
++static int
++task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
++{
++ s64 delta;
++
++ /*
++ * Buddy candidates are cache hot:
++ */
++ if (sched_feat(CACHE_HOT_BUDDY) && (&p->se == cfs_rq_of(&p->se)->next))
++ return 1;
++
++ if (p->sched_class != &fair_sched_class)
++ return 0;
++
++ if (sysctl_sched_migration_cost == -1)
++ return 1;
++ if (sysctl_sched_migration_cost == 0)
++ return 0;
++
++ delta = now - p->se.exec_start;
++
++ return delta < (s64)sysctl_sched_migration_cost;
++}
++
++
++void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
++{
++ int old_cpu = task_cpu(p);
++ struct rq *old_rq = cpu_rq(old_cpu), *new_rq = cpu_rq(new_cpu);
++ struct cfs_rq *old_cfsrq = task_cfs_rq(p),
++ *new_cfsrq = cpu_cfs_rq(old_cfsrq, new_cpu);
++ u64 clock_offset;
++
++ clock_offset = old_rq->clock - new_rq->clock;
++
++#ifdef CONFIG_SCHEDSTATS
++ if (p->se.wait_start)
++ p->se.wait_start -= clock_offset;
++ if (p->se.sleep_start)
++ p->se.sleep_start -= clock_offset;
++ if (p->se.block_start)
++ p->se.block_start -= clock_offset;
++ if (old_cpu != new_cpu) {
++ schedstat_inc(p, se.nr_migrations);
++ if (task_hot(p, old_rq->clock, NULL))
++ schedstat_inc(p, se.nr_forced2_migrations);
++ }
++#endif
++ p->se.vruntime -= old_cfsrq->min_vruntime -
++ new_cfsrq->min_vruntime;
++
++ __set_task_cpu(p, new_cpu);
++}
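++
++/*
++ * Editor's note: the adjustments above keep per-entity timestamps
++ * meaningful across the move.  The schedstats timestamps are shifted by
++ * the difference between the two runqueue clocks, and vruntime is
++ * rebased against the destination cfs_rq: e.g. if the old queue's
++ * min_vruntime is 10ms ahead of the new one's, the task's vruntime is
++ * reduced by 10ms so its position relative to min_vruntime is unchanged.
++ */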
++
++struct migration_req {
++ struct list_head list;
++
++ struct task_struct *task;
++ int dest_cpu;
++
++ struct completion done;
++};
++
++#include "sched_mon.h"
++
++
++/*
++ * The task's runqueue lock must be held.
++ * Returns true if you have to wait for migration thread.
++ */
++static int
++migrate_task(struct task_struct *p, int dest_cpu, struct migration_req *req)
++{
++ struct rq *rq = task_rq(p);
++
++ vxm_migrate_task(p, rq, dest_cpu);
++ /*
++ * If the task is not on a runqueue (and not running), then
++ * it is sufficient to simply update the task's cpu field.
++ */
++ if (!p->se.on_rq && !task_running(rq, p)) {
++ set_task_cpu(p, dest_cpu);
++ return 0;
++ }
++
++ init_completion(&req->done);
++ req->task = p;
++ req->dest_cpu = dest_cpu;
++ list_add(&req->list, &rq->migration_queue);
++
++ return 1;
++}
++
++/*
++ * wait_task_inactive - wait for a thread to unschedule.
++ *
++ * If @match_state is nonzero, it's the @p->state value just checked and
++ * not expected to change. If it changes, i.e. @p might have woken up,
++ * then return zero. When we succeed in waiting for @p to be off its CPU,
++ * we return a positive number (its total switch count). If a second call
++ * a short while later returns the same number, the caller can be sure that
++ * @p has remained unscheduled the whole time.
++ *
++ * The caller must ensure that the task *will* unschedule sometime soon,
++ * else this function might spin for a *long* time. This function can't
++ * be called with interrupts off, or it may introduce deadlock with
++ * smp_call_function() if an IPI is sent by the same process we are
++ * waiting to become inactive.
++ */
++unsigned long wait_task_inactive(struct task_struct *p, long match_state)
++{
++ unsigned long flags;
++ int running, on_rq;
++ unsigned long ncsw;
++ struct rq *rq;
++
++ for (;;) {
++ /*
++ * We do the initial early heuristics without holding
++ * any task-queue locks at all. We'll only try to get
++ * the runqueue lock when things look like they will
++ * work out!
++ */
++ rq = task_rq(p);
++
++ /*
++ * If the task is actively running on another CPU
++ * still, just relax and busy-wait without holding
++ * any locks.
++ *
++ * NOTE! Since we don't hold any locks, it's not
++ * even sure that "rq" stays as the right runqueue!
++ * But we don't care, since "task_running()" will
++ * return false if the runqueue has changed and p
++ * is actually now running somewhere else!
++ */
++ while (task_running(rq, p)) {
++ if (match_state && unlikely(p->state != match_state))
++ return 0;
++ cpu_relax();
++ }
++
++ /*
++ * Ok, time to look more closely! We need the rq
++ * lock now, to be *sure*. If we're wrong, we'll
++ * just go back and repeat.
++ */
++ rq = task_rq_lock(p, &flags);
++ running = task_running(rq, p);
++ on_rq = p->se.on_rq;
++ ncsw = 0;
++ if (!match_state || p->state == match_state) {
++ ncsw = p->nivcsw + p->nvcsw;
++ if (unlikely(!ncsw))
++ ncsw = 1;
++ }
++ task_rq_unlock(rq, &flags);
++
++ /*
++ * If it changed from the expected state, bail out now.
++ */
++ if (unlikely(!ncsw))
++ break;
++
++ /*
++ * Was it really running after all now that we
++ * checked with the proper locks actually held?
++ *
++ * Oops. Go back and try again..
++ */
++ if (unlikely(running)) {
++ cpu_relax();
++ continue;
++ }
++
++ /*
++ * It's not enough that it's not actively running,
++ * it must be off the runqueue _entirely_, and not
++ * preempted!
++ *
++ * So if it was still runnable (but just not actively
++ * running right now), it's preempted, and we should
++ * yield - it could be a while.
++ */
++ if (unlikely(on_rq)) {
++ schedule_timeout_uninterruptible(1);
++ continue;
++ }
++
++ /*
++ * Ahh, all good. It wasn't running, and it wasn't
++ * runnable, which means that it will never become
++ * running in the future either. We're all done!
++ */
++ break;
++ }
++
++ return ncsw;
++}
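++
++/*
++ * Editor's note: a minimal sketch of the two-call pattern described in
++ * the comment above (hypothetical caller, not part of this patch):
++ *
++ *	unsigned long ncsw = wait_task_inactive(p, TASK_TRACED);
++ *	... do some work ...
++ *	if (ncsw && wait_task_inactive(p, TASK_TRACED) == ncsw)
++ *		;	p stayed off the CPU the whole time
++ *
++ * A zero return means @p left @match_state before it got off the CPU,
++ * so the caller must re-examine the task instead of assuming it slept.
++ */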
++
++/***
++ * kick_process - kick a running thread to enter/exit the kernel
++ * @p: the to-be-kicked thread
++ *
++ * Cause a process which is running on another CPU to enter
++ * kernel-mode, without any delay. (to get signals handled.)
++ *
++ * NOTE: this function doesn't have to take the runqueue lock,
++ * because all it wants to ensure is that the remote task enters
++ * the kernel. If the IPI races and the task has been migrated
++ * to another CPU then no harm is done and the purpose has been
++ * achieved as well.
++ */
++void kick_process(struct task_struct *p)
++{
++ int cpu;
++
++ preempt_disable();
++ cpu = task_cpu(p);
++ if ((cpu != smp_processor_id()) && task_curr(p))
++ smp_send_reschedule(cpu);
++ preempt_enable();
++}
++
++/*
++ * Return a low guess at the load of a migration-source cpu weighted
++ * according to the scheduling class and "nice" value.
++ *
++ * We want to under-estimate the load of migration sources, to
++ * balance conservatively.
++ */
++static unsigned long source_load(int cpu, int type)
++{
++ struct rq *rq = cpu_rq(cpu);
++ unsigned long total = weighted_cpuload(cpu);
++
++ if (type == 0 || !sched_feat(LB_BIAS))
++ return total;
++
++ return min(rq->cpu_load[type-1], total);
++}
++
++/*
++ * Return a high guess at the load of a migration-target cpu weighted
++ * according to the scheduling class and "nice" value.
++ */
++static unsigned long target_load(int cpu, int type)
++{
++ struct rq *rq = cpu_rq(cpu);
++ unsigned long total = weighted_cpuload(cpu);
++
++ if (type == 0 || !sched_feat(LB_BIAS))
++ return total;
++
++ return max(rq->cpu_load[type-1], total);
++}
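++
++/*
++ * Editor's note: source_load() and target_load() deliberately bias in
++ * opposite directions.  Illustrative numbers: if a cpu's decayed
++ * cpu_load[type-1] is 2048 while its instantaneous weighted load is
++ * 1024, source_load() reports 1024 (the min) and target_load() reports
++ * 2048 (the max).  Under-estimating sources and over-estimating targets
++ * makes the balancer move load only when the imbalance is unambiguous.
++ */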
++
++/*
++ * find_idlest_group finds and returns the least busy CPU group within the
++ * domain.
++ */
++static struct sched_group *
++find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu)
++{
++ struct sched_group *idlest = NULL, *this = NULL, *group = sd->groups;
++ unsigned long min_load = ULONG_MAX, this_load = 0;
++ int load_idx = sd->forkexec_idx;
++ int imbalance = 100 + (sd->imbalance_pct-100)/2;
++
++ do {
++ unsigned long load, avg_load;
++ int local_group;
++ int i;
++
++ /* Skip over this group if it has no CPUs allowed */
++ if (!cpus_intersects(group->cpumask, p->cpus_allowed))
++ continue;
++
++ local_group = cpu_isset(this_cpu, group->cpumask);
++
++ /* Tally up the load of all CPUs in the group */
++ avg_load = 0;
++
++ for_each_cpu_mask_nr(i, group->cpumask) {
++ /* Bias balancing toward cpus of our domain */
++ if (local_group)
++ load = source_load(i, load_idx);
++ else
++ load = target_load(i, load_idx);
++
++ avg_load += load;
++ }
++
++ /* Adjust by relative CPU power of the group */
++ avg_load = sg_div_cpu_power(group,
++ avg_load * SCHED_LOAD_SCALE);
++
++ if (local_group) {
++ this_load = avg_load;
++ this = group;
++ } else if (avg_load < min_load) {
++ min_load = avg_load;
++ idlest = group;
++ }
++ } while (group = group->next, group != sd->groups);
++
++ if (!idlest || 100*this_load < imbalance*min_load)
++ return NULL;
++ return idlest;
++}
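++
++/*
++ * Editor's note (illustrative, assuming the common imbalance_pct of
++ * 125): imbalance above works out to 100 + 25/2 = 112, so the final
++ * test "100*this_load < imbalance*min_load" returns NULL (keep the task
++ * local) unless the local group carries at least ~12% more load than
++ * the idlest remote group, which keeps fork/exec placement from
++ * bouncing tasks around for marginal differences.
++ */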
++
++/*
++ * find_idlest_cpu - find the idlest cpu among the cpus in group.
++ */
++static int
++find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu,
++ cpumask_t *tmp)
++{
++ unsigned long load, min_load = ULONG_MAX;
++ int idlest = -1;
++ int i;
++
++ /* Traverse only the allowed CPUs */
++ cpus_and(*tmp, group->cpumask, p->cpus_allowed);
++
++ for_each_cpu_mask_nr(i, *tmp) {
++ load = weighted_cpuload(i);
++
++ if (load < min_load || (load == min_load && i == this_cpu)) {
++ min_load = load;
++ idlest = i;
++ }
++ }
++
++ return idlest;
++}
++
++/*
++ * sched_balance_self: balance the current task (running on cpu) in domains
++ * that have the 'flag' flag set. In practice, this is SD_BALANCE_FORK and
++ * SD_BALANCE_EXEC.
++ *
++ * Balance, ie. select the least loaded group.
++ *
++ * Returns the target CPU number, or the same CPU if no balancing is needed.
++ *
++ * preempt must be disabled.
++ */
++static int sched_balance_self(int cpu, int flag)
++{
++ struct task_struct *t = current;
++ struct sched_domain *tmp, *sd = NULL;
++
++ for_each_domain(cpu, tmp) {
++ /*
++ * If power savings logic is enabled for a domain, stop there.
++ */
++ if (tmp->flags & SD_POWERSAVINGS_BALANCE)
++ break;
++ if (tmp->flags & flag)
++ sd = tmp;
++ }
++
++ if (sd)
++ update_shares(sd);
++
++ while (sd) {
++ cpumask_t span, tmpmask;
++ struct sched_group *group;
++ int new_cpu, weight;
++
++ if (!(sd->flags & flag)) {
++ sd = sd->child;
++ continue;
++ }
++
++ span = sd->span;
++ group = find_idlest_group(sd, t, cpu);
++ if (!group) {
++ sd = sd->child;
++ continue;
++ }
++
++ new_cpu = find_idlest_cpu(group, t, cpu, &tmpmask);
++ if (new_cpu == -1 || new_cpu == cpu) {
++ /* Now try balancing at a lower domain level of cpu */
++ sd = sd->child;
++ continue;
++ }
++
++ /* Now try balancing at a lower domain level of new_cpu */
++ cpu = new_cpu;
++ sd = NULL;
++ weight = cpus_weight(span);
++ for_each_domain(cpu, tmp) {
++ if (weight <= cpus_weight(tmp->span))
++ break;
++ if (tmp->flags & flag)
++ sd = tmp;
++ }
++ /* while loop will break here if sd == NULL */
++ }
++
++ return cpu;
++}
++
++#endif /* CONFIG_SMP */
++
++/***
++ * try_to_wake_up - wake up a thread
++ * @p: the to-be-woken-up thread
++ * @state: the mask of task states that can be woken
++ * @sync: do a synchronous wakeup?
++ *
++ * Put it on the run-queue if it's not already there. The "current"
++ * thread is always on the run-queue (except when the actual
++ * re-schedule is in progress), and as such you're allowed to do
++ * the simpler "current->state = TASK_RUNNING" to mark yourself
++ * runnable without the overhead of this.
++ *
++ * returns failure only if the task is already active.
++ */
++static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
++{
++ int cpu, orig_cpu, this_cpu, success = 0;
++ unsigned long flags;
++ long old_state;
++ struct rq *rq;
++
++ if (!sched_feat(SYNC_WAKEUPS))
++ sync = 0;
++
++#ifdef CONFIG_SMP
++ if (sched_feat(LB_WAKEUP_UPDATE)) {
++ struct sched_domain *sd;
++
++ this_cpu = raw_smp_processor_id();
++ cpu = task_cpu(p);
++
++ for_each_domain(this_cpu, sd) {
++ if (cpu_isset(cpu, sd->span)) {
++ update_shares(sd);
++ break;
++ }
++ }
++ }
++#endif
++
++ smp_wmb();
++ rq = task_rq_lock(p, &flags);
++ old_state = p->state;
++ if (!(old_state & state))
++ goto out;
++
++ if (p->se.on_rq)
++ goto out_running;
++
++ cpu = task_cpu(p);
++ orig_cpu = cpu;
++ this_cpu = smp_processor_id();
++
++#ifdef CONFIG_SMP
++ if (unlikely(task_running(rq, p)))
++ goto out_activate;
++
++ cpu = p->sched_class->select_task_rq(p, sync);
++ if (cpu != orig_cpu) {
++ set_task_cpu(p, cpu);
++ task_rq_unlock(rq, &flags);
++ /* might preempt at this point */
++ rq = task_rq_lock(p, &flags);
++ old_state = p->state;
++
++ /* we need to unhold suspended tasks
++ if (old_state & TASK_ONHOLD) {
++ vx_unhold_task(p, rq);
++ old_state = p->state;
++ } */
++ if (!(old_state & state))
++ goto out;
++ if (p->se.on_rq)
++ goto out_running;
++
++ this_cpu = smp_processor_id();
++ cpu = task_cpu(p);
++ }
++
++#ifdef CONFIG_SCHEDSTATS
++ schedstat_inc(rq, ttwu_count);
++ if (cpu == this_cpu)
++ schedstat_inc(rq, ttwu_local);
++ else {
++ struct sched_domain *sd;
++ for_each_domain(this_cpu, sd) {
++ if (cpu_isset(cpu, sd->span)) {
++ schedstat_inc(sd, ttwu_wake_remote);
++ break;
++ }
++ }
++ }
++#endif /* CONFIG_SCHEDSTATS */
++
++out_activate:
++#endif /* CONFIG_SMP */
++ schedstat_inc(p, se.nr_wakeups);
++ if (sync)
++ schedstat_inc(p, se.nr_wakeups_sync);
++ if (orig_cpu != cpu)
++ schedstat_inc(p, se.nr_wakeups_migrate);
++ if (cpu == this_cpu)
++ schedstat_inc(p, se.nr_wakeups_local);
++ else
++ schedstat_inc(p, se.nr_wakeups_remote);
++ update_rq_clock(rq);
++ activate_task(rq, p, 1);
++ success = 1;
++
++out_running:
++ trace_mark(kernel_sched_wakeup,
++ "pid %d state %ld ## rq %p task %p rq->curr %p",
++ p->pid, p->state, rq, p, rq->curr);
++ check_preempt_curr(rq, p);
++
++ p->state = TASK_RUNNING;
++#ifdef CONFIG_SMP
++ if (p->sched_class->task_wake_up)
++ p->sched_class->task_wake_up(rq, p);
++#endif
++out:
++ current->se.last_wakeup = current->se.sum_exec_runtime;
++
++ task_rq_unlock(rq, &flags);
++
++ return success;
++}
++
++int wake_up_process(struct task_struct *p)
++{
++ return try_to_wake_up(p, TASK_ALL, 0);
++}
++EXPORT_SYMBOL(wake_up_process);
++
++int wake_up_state(struct task_struct *p, unsigned int state)
++{
++ return try_to_wake_up(p, state, 0);
++}
++
++/*
++ * Perform scheduler related setup for a newly forked process p.
++ * p is forked by current.
++ *
++ * __sched_fork() is basic setup used by init_idle() too:
++ */
++static void __sched_fork(struct task_struct *p)
++{
++ p->se.exec_start = 0;
++ p->se.sum_exec_runtime = 0;
++ p->se.prev_sum_exec_runtime = 0;
++ p->se.last_wakeup = 0;
++ p->se.avg_overlap = 0;
++
++#ifdef CONFIG_SCHEDSTATS
++ p->se.wait_start = 0;
++ p->se.sum_sleep_runtime = 0;
++ p->se.sleep_start = 0;
++ p->se.block_start = 0;
++ p->se.sleep_max = 0;
++ p->se.block_max = 0;
++ p->se.exec_max = 0;
++ p->se.slice_max = 0;
++ p->se.wait_max = 0;
++#endif
++
++ INIT_LIST_HEAD(&p->rt.run_list);
++ p->se.on_rq = 0;
++ INIT_LIST_HEAD(&p->se.group_node);
++
++#ifdef CONFIG_PREEMPT_NOTIFIERS
++ INIT_HLIST_HEAD(&p->preempt_notifiers);
++#endif
++
++ /*
++ * We mark the process as running here, but have not actually
++ * inserted it onto the runqueue yet. This guarantees that
++ * nobody will actually run it, and a signal or other external
++ * event cannot wake it up and insert it on the runqueue either.
++ */
++ p->state = TASK_RUNNING;
++}
++
++/*
++ * fork()/clone()-time setup:
++ */
++void sched_fork(struct task_struct *p, int clone_flags)
++{
++ int cpu = get_cpu();
++
++ __sched_fork(p);
++
++#ifdef CONFIG_SMP
++ cpu = sched_balance_self(cpu, SD_BALANCE_FORK);
++#endif
++ set_task_cpu(p, cpu);
++
++ /*
++ * Make sure we do not leak PI boosting priority to the child:
++ */
++ p->prio = current->normal_prio;
++ if (!rt_prio(p->prio))
++ p->sched_class = &fair_sched_class;
++
++#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
++ if (likely(sched_info_on()))
++ memset(&p->sched_info, 0, sizeof(p->sched_info));
++#endif
++#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
++ p->oncpu = 0;
++#endif
++#ifdef CONFIG_PREEMPT
++ /* Want to start with kernel preemption disabled. */
++ task_thread_info(p)->preempt_count = 1;
++#endif
++ put_cpu();
++}
++
++/*
++ * wake_up_new_task - wake up a newly created task for the first time.
++ *
++ * This function will do some initial scheduler statistics housekeeping
++ * that must be done for every newly created context, then puts the task
++ * on the runqueue and wakes it.
++ */
++void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
++{
++ unsigned long flags;
++ struct rq *rq;
++
++ rq = task_rq_lock(p, &flags);
++ BUG_ON(p->state != TASK_RUNNING);
++ update_rq_clock(rq);
++
++ p->prio = effective_prio(p);
++
++ if (!p->sched_class->task_new || !current->se.on_rq) {
++ activate_task(rq, p, 0);
++ } else {
++ /*
++ * Let the scheduling class do new task startup
++ * management (if any):
++ */
++ p->sched_class->task_new(rq, p);
++ inc_nr_running(rq);
++ }
++ trace_mark(kernel_sched_wakeup_new,
++ "pid %d state %ld ## rq %p task %p rq->curr %p",
++ p->pid, p->state, rq, p, rq->curr);
++ check_preempt_curr(rq, p);
++#ifdef CONFIG_SMP
++ if (p->sched_class->task_wake_up)
++ p->sched_class->task_wake_up(rq, p);
++#endif
++ task_rq_unlock(rq, &flags);
++}
++
++#ifdef CONFIG_PREEMPT_NOTIFIERS
++
++/**
++ * preempt_notifier_register - tell me when current is being preempted & rescheduled
++ * @notifier: notifier struct to register
++ */
++void preempt_notifier_register(struct preempt_notifier *notifier)
++{
++ hlist_add_head(&notifier->link, &current->preempt_notifiers);
++}
++EXPORT_SYMBOL_GPL(preempt_notifier_register);
++
++/**
++ * preempt_notifier_unregister - no longer interested in preemption notifications
++ * @notifier: notifier struct to unregister
++ *
++ * This is safe to call from within a preemption notifier.
++ */
++void preempt_notifier_unregister(struct preempt_notifier *notifier)
++{
++ hlist_del(&notifier->link);
++}
++EXPORT_SYMBOL_GPL(preempt_notifier_unregister);
++
++static void fire_sched_in_preempt_notifiers(struct task_struct *curr)
++{
++ struct preempt_notifier *notifier;
++ struct hlist_node *node;
++
++ hlist_for_each_entry(notifier, node, &curr->preempt_notifiers, link)
++ notifier->ops->sched_in(notifier, raw_smp_processor_id());
++}
++
++static void
++fire_sched_out_preempt_notifiers(struct task_struct *curr,
++ struct task_struct *next)
++{
++ struct preempt_notifier *notifier;
++ struct hlist_node *node;
++
++ hlist_for_each_entry(notifier, node, &curr->preempt_notifiers, link)
++ notifier->ops->sched_out(notifier, next);
++}
++
++#else /* !CONFIG_PREEMPT_NOTIFIERS */
++
++static void fire_sched_in_preempt_notifiers(struct task_struct *curr)
++{
++}
++
++static void
++fire_sched_out_preempt_notifiers(struct task_struct *curr,
++ struct task_struct *next)
++{
++}
++
++#endif /* CONFIG_PREEMPT_NOTIFIERS */
++
++/**
++ * prepare_task_switch - prepare to switch tasks
++ * @rq: the runqueue preparing to switch
++ * @prev: the current task that is being switched out
++ * @next: the task we are going to switch to.
++ *
++ * This is called with the rq lock held and interrupts off. It must
++ * be paired with a subsequent finish_task_switch after the context
++ * switch.
++ *
++ * prepare_task_switch sets up locking and calls architecture specific
++ * hooks.
++ */
++static inline void
++prepare_task_switch(struct rq *rq, struct task_struct *prev,
++ struct task_struct *next)
++{
++ fire_sched_out_preempt_notifiers(prev, next);
++ prepare_lock_switch(rq, next);
++ prepare_arch_switch(next);
++}
++
++/**
++ * finish_task_switch - clean up after a task-switch
++ * @rq: runqueue associated with task-switch
++ * @prev: the thread we just switched away from.
++ *
++ * finish_task_switch must be called after the context switch, paired
++ * with a prepare_task_switch call before the context switch.
++ * finish_task_switch will reconcile locking set up by prepare_task_switch,
++ * and do any other architecture-specific cleanup actions.
++ *
++ * Note that we may have delayed dropping an mm in context_switch(). If
++ * so, we finish that here outside of the runqueue lock. (Doing it
++ * with the lock held can cause deadlocks; see schedule() for
++ * details.)
++ */
++static void finish_task_switch(struct rq *rq, struct task_struct *prev)
++ __releases(rq->lock)
++{
++ struct mm_struct *mm = rq->prev_mm;
++ long prev_state;
++
++ rq->prev_mm = NULL;
++
++ /*
++ * A task struct has one reference for the use as "current".
++ * If a task dies, then it sets TASK_DEAD in tsk->state and calls
++ * schedule one last time. The schedule call will never return, and
++ * the scheduled task must drop that reference.
++ * The test for TASK_DEAD must occur while the runqueue locks are
++ * still held, otherwise prev could be scheduled on another cpu, die
++ * there before we look at prev->state, and then the reference would
++ * be dropped twice.
++ * Manfred Spraul <manfred@colorfullife.com>
++ */
++ prev_state = prev->state;
++ finish_arch_switch(prev);
++ finish_lock_switch(rq, prev);
++#ifdef CONFIG_SMP
++ if (current->sched_class->post_schedule)
++ current->sched_class->post_schedule(rq);
++#endif
++
++ fire_sched_in_preempt_notifiers(current);
++ if (mm)
++ mmdrop(mm);
++ if (unlikely(prev_state == TASK_DEAD)) {
++ /*
++ * Remove function-return probe instances associated with this
++ * task and put them back on the free list.
++ */
++ kprobe_flush_task(prev);
++ put_task_struct(prev);
++ }
++}
++
++/**
++ * schedule_tail - first thing a freshly forked thread must call.
++ * @prev: the thread we just switched away from.
++ */
++asmlinkage void schedule_tail(struct task_struct *prev)
++ __releases(rq->lock)
++{
++ struct rq *rq = this_rq();
++
++ finish_task_switch(rq, prev);
++#ifdef __ARCH_WANT_UNLOCKED_CTXSW
++ /* In this case, finish_task_switch does not reenable preemption */
++ preempt_enable();
++#endif
++ if (current->set_child_tid)
++ put_user(task_pid_vnr(current), current->set_child_tid);
++}
++
++/*
++ * context_switch - switch to the new MM and the new
++ * thread's register state.
++ */
++static inline void
++context_switch(struct rq *rq, struct task_struct *prev,
++ struct task_struct *next)
++{
++ struct mm_struct *mm, *oldmm;
++
++ prepare_task_switch(rq, prev, next);
++ trace_mark(kernel_sched_schedule,
++ "prev_pid %d next_pid %d prev_state %ld "
++ "## rq %p prev %p next %p",
++ prev->pid, next->pid, prev->state,
++ rq, prev, next);
++ mm = next->mm;
++ oldmm = prev->active_mm;
++ /*
++ * For paravirt, this is coupled with an exit in switch_to to
++ * combine the page table reload and the switch backend into
++ * one hypercall.
++ */
++ arch_enter_lazy_cpu_mode();
++
++ if (unlikely(!mm)) {
++ next->active_mm = oldmm;
++ atomic_inc(&oldmm->mm_count);
++ enter_lazy_tlb(oldmm, next);
++ } else
++ switch_mm(oldmm, mm, next);
++
++ if (unlikely(!prev->mm)) {
++ prev->active_mm = NULL;
++ rq->prev_mm = oldmm;
++ }
++ /*
++ * The runqueue lock will be released by the next task (which
++ * is an invalid locking op, but in the case of the scheduler
++ * it's an obvious special-case), so we do an early lockdep
++ * release here:
++ */
++#ifndef __ARCH_WANT_UNLOCKED_CTXSW
++ spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
++#endif
++
++ /* Here we just switch the register state and the stack. */
++ switch_to(prev, next, prev);
++
++ barrier();
++ /*
++ * this_rq must be evaluated again because prev may have moved
++ * CPUs since it called schedule(), thus the 'rq' on its stack
++ * frame will be invalid.
++ */
++ finish_task_switch(this_rq(), prev);
++}
++
++/*
++ * nr_running, nr_uninterruptible and nr_context_switches:
++ *
++ * externally visible scheduler statistics: current number of runnable
++ * threads, current number of uninterruptible-sleeping threads, total
++ * number of context switches performed since bootup.
++ */
++unsigned long nr_running(void)
++{
++ unsigned long i, sum = 0;
++
++ for_each_online_cpu(i)
++ sum += cpu_rq(i)->nr_running;
++
++ return sum;
++}
++
++unsigned long nr_uninterruptible(void)
++{
++ unsigned long i, sum = 0;
++
++ for_each_possible_cpu(i)
++ sum += cpu_rq(i)->nr_uninterruptible;
++
++ /*
++ * Since we read the counters lockless, it might be slightly
++ * inaccurate. Do not allow it to go below zero though:
++ */
++ if (unlikely((long)sum < 0))
++ sum = 0;
++
++ return sum;
++}
++
++unsigned long long nr_context_switches(void)
++{
++ int i;
++ unsigned long long sum = 0;
++
++ for_each_possible_cpu(i)
++ sum += cpu_rq(i)->nr_switches;
++
++ return sum;
++}
++
++unsigned long nr_iowait(void)
++{
++ unsigned long i, sum = 0;
++
++ for_each_possible_cpu(i)
++ sum += atomic_read(&cpu_rq(i)->nr_iowait);
++
++ return sum;
++}
++
++unsigned long nr_active(void)
++{
++ unsigned long i, running = 0, uninterruptible = 0;
++
++ for_each_online_cpu(i) {
++ running += cpu_rq(i)->nr_running;
++ uninterruptible += cpu_rq(i)->nr_uninterruptible;
++ }
++
++ if (unlikely((long)uninterruptible < 0))
++ uninterruptible = 0;
++
++ return running + uninterruptible;
++}
++
++/*
++ * Update rq->cpu_load[] statistics. This function is usually called every
++ * scheduler tick (TICK_NSEC).
++ */
++static void update_cpu_load(struct rq *this_rq)
++{
++ unsigned long this_load = this_rq->load.weight;
++ int i, scale;
++
++ this_rq->nr_load_updates++;
++
++ /* Update our load: */
++ for (i = 0, scale = 1; i < CPU_LOAD_IDX_MAX; i++, scale += scale) {
++ unsigned long old_load, new_load;
++
++ /* scale is effectively 1 << i now, and >> i divides by scale */
++
++ old_load = this_rq->cpu_load[i];
++ new_load = this_load;
++ /*
++ * Round up the averaging division if load is increasing. This
++ * prevents us from getting stuck on 9 if the load is 10, for
++ * example.
++ */
++ if (new_load > old_load)
++ new_load += scale-1;
++ this_rq->cpu_load[i] = (old_load*(scale-1) + new_load) >> i;
++ }
++}
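++
++/*
++ * Editor's note (worked example): the update above is
++ *
++ *	cpu_load[i] = (old * (2^i - 1) + new) / 2^i
++ *
++ * so higher indices decay more slowly.  With old == 1000 and new == 0,
++ * cpu_load[0] drops straight to 0, cpu_load[1] to 500 and cpu_load[3]
++ * to 875 on one tick.  The round-up on increases keeps a rising load
++ * from stalling just below its target, as the comment above notes.
++ */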
++
++#ifdef CONFIG_SMP
++
++/*
++ * double_rq_lock - safely lock two runqueues
++ *
++ * Note this does not disable interrupts like task_rq_lock,
++ * you need to do so manually before calling.
++ */
++static void double_rq_lock(struct rq *rq1, struct rq *rq2)
++ __acquires(rq1->lock)
++ __acquires(rq2->lock)
++{
++ BUG_ON(!irqs_disabled());
++ if (rq1 == rq2) {
++ spin_lock(&rq1->lock);
++ __acquire(rq2->lock); /* Fake it out ;) */
++ } else {
++ if (rq1 < rq2) {
++ spin_lock(&rq1->lock);
++ spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING);
++ } else {
++ spin_lock(&rq2->lock);
++ spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
++ }
++ }
++ update_rq_clock(rq1);
++ update_rq_clock(rq2);
++}
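++
++/*
++ * Editor's note: the address comparison above establishes a global lock
++ * order (lower-addressed runqueue first), which prevents ABBA deadlock
++ * when two cpus balance against each other concurrently: whichever
++ * order the arguments arrive in, both cpus take the locks in the same
++ * order.  double_lock_balance() below must cope with this_rq already
++ * held, so it trylocks the busiest runqueue and, on failure, drops and
++ * retakes both locks in address order.
++ */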
++
++/*
++ * double_rq_unlock - safely unlock two runqueues
++ *
++ * Note this does not restore interrupts like task_rq_unlock,
++ * you need to do so manually after calling.
++ */
++static void double_rq_unlock(struct rq *rq1, struct rq *rq2)
++ __releases(rq1->lock)
++ __releases(rq2->lock)
++{
++ spin_unlock(&rq1->lock);
++ if (rq1 != rq2)
++ spin_unlock(&rq2->lock);
++ else
++ __release(rq2->lock);
++}
++
++/*
++ * double_lock_balance - lock the busiest runqueue, this_rq is locked already.
++ */
++static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
++ __releases(this_rq->lock)
++ __acquires(busiest->lock)
++ __acquires(this_rq->lock)
++{
++ int ret = 0;
++
++ if (unlikely(!irqs_disabled())) {
++ /* printk() doesn't work well under rq->lock */
++ spin_unlock(&this_rq->lock);
++ BUG_ON(1);
++ }
++ if (unlikely(!spin_trylock(&busiest->lock))) {
++ if (busiest < this_rq) {
++ spin_unlock(&this_rq->lock);
++ spin_lock(&busiest->lock);
++ spin_lock_nested(&this_rq->lock, SINGLE_DEPTH_NESTING);
++ ret = 1;
++ } else
++ spin_lock_nested(&busiest->lock, SINGLE_DEPTH_NESTING);
++ }
++ return ret;
++}
++
++static void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
++ __releases(busiest->lock)
++{
++ spin_unlock(&busiest->lock);
++ lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
++}
++
++/*
++ * If dest_cpu is allowed for this process, migrate the task to it.
++ * This is accomplished by forcing the cpu_allowed mask to only
++ * allow dest_cpu, which will force the cpu onto dest_cpu. Then
++ * the cpu_allowed mask is restored.
++ */
++static void sched_migrate_task(struct task_struct *p, int dest_cpu)
++{
++ struct migration_req req;
++ unsigned long flags;
++ struct rq *rq;
++
++ rq = task_rq_lock(p, &flags);
++ if (!cpu_isset(dest_cpu, p->cpus_allowed)
++ || unlikely(!cpu_active(dest_cpu)))
++ goto out;
++
++ /* force the process onto the specified CPU */
++ if (migrate_task(p, dest_cpu, &req)) {
++ /* Need to wait for migration thread (might exit: take ref). */
++ struct task_struct *mt = rq->migration_thread;
++
++ get_task_struct(mt);
++ task_rq_unlock(rq, &flags);
++ wake_up_process(mt);
++ put_task_struct(mt);
++ wait_for_completion(&req.done);
++
++ return;
++ }
++out:
++ task_rq_unlock(rq, &flags);
++}
++
++/*
++ * sched_exec - execve() is a valuable balancing opportunity, because at
++ * this point the task has the smallest effective memory and cache footprint.
++ */
++void sched_exec(void)
++{
++ int new_cpu, this_cpu = get_cpu();
++ new_cpu = sched_balance_self(this_cpu, SD_BALANCE_EXEC);
++ put_cpu();
++ if (new_cpu != this_cpu)
++ sched_migrate_task(current, new_cpu);
++}
++
++/*
++ * pull_task - move a task from a remote runqueue to the local runqueue.
++ * Both runqueues must be locked.
++ */
++static void pull_task(struct rq *src_rq, struct task_struct *p,
++ struct rq *this_rq, int this_cpu)
++{
++ deactivate_task(src_rq, p, 0);
++ set_task_cpu(p, this_cpu);
++ activate_task(this_rq, p, 0);
++ /*
++ * Note that idle threads have a prio of MAX_PRIO, so this test
++ * is always true for them.
++ */
++ check_preempt_curr(this_rq, p);
++}
++
++/*
++ * can_migrate_task - may task p from runqueue rq be migrated to this_cpu?
++ */
++static
++int can_migrate_task(struct task_struct *p, struct rq *rq, int this_cpu,
++ struct sched_domain *sd, enum cpu_idle_type idle,
++ int *all_pinned)
++{
++ /*
++ * We do not migrate tasks that are:
++ * 1) running (obviously), or
++ * 2) cannot be migrated to this CPU due to cpus_allowed, or
++ * 3) are cache-hot on their current CPU.
++ */
++ if (!cpu_isset(this_cpu, p->cpus_allowed)) {
++ schedstat_inc(p, se.nr_failed_migrations_affine);
++ return 0;
++ }
++ *all_pinned = 0;
++
++ if (task_running(rq, p)) {
++ schedstat_inc(p, se.nr_failed_migrations_running);
++ return 0;
++ }
++
++ /*
++ * Aggressive migration if:
++ * 1) task is cache cold, or
++ * 2) too many balance attempts have failed.
++ */
++
++ if (!task_hot(p, rq->clock, sd) ||
++ sd->nr_balance_failed > sd->cache_nice_tries) {
++#ifdef CONFIG_SCHEDSTATS
++ if (task_hot(p, rq->clock, sd)) {
++ schedstat_inc(sd, lb_hot_gained[idle]);
++ schedstat_inc(p, se.nr_forced_migrations);
++ }
++#endif
++ return 1;
++ }
++
++ if (task_hot(p, rq->clock, sd)) {
++ schedstat_inc(p, se.nr_failed_migrations_hot);
++ return 0;
++ }
++ return 1;
++}
++
++static unsigned long
++balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
++ unsigned long max_load_move, struct sched_domain *sd,
++ enum cpu_idle_type idle, int *all_pinned,
++ int *this_best_prio, struct rq_iterator *iterator)
++{
++ int loops = 0, pulled = 0, pinned = 0;
++ struct task_struct *p;
++ long rem_load_move = max_load_move;
++
++ if (max_load_move == 0)
++ goto out;
++
++ pinned = 1;
++
++ /*
++ * Start the load-balancing iterator:
++ */
++ p = iterator->start(iterator->arg);
++next:
++ if (!p || loops++ > sysctl_sched_nr_migrate)
++ goto out;
++
++ if ((p->se.load.weight >> 1) > rem_load_move ||
++ !can_migrate_task(p, busiest, this_cpu, sd, idle, &pinned)) {
++ p = iterator->next(iterator->arg);
++ goto next;
++ }
++
++ pull_task(busiest, p, this_rq, this_cpu);
++ pulled++;
++ rem_load_move -= p->se.load.weight;
++
++ /*
++ * We only want to steal up to the prescribed amount of weighted load.
++ */
++ if (rem_load_move > 0) {
++ if (p->prio < *this_best_prio)
++ *this_best_prio = p->prio;
++ p = iterator->next(iterator->arg);
++ goto next;
++ }
++out:
++ /*
++ * Right now, this is one of only two places pull_task() is called,
++ * so we can safely collect pull_task() stats here rather than
++ * inside pull_task().
++ */
++ schedstat_add(sd, lb_gained[idle], pulled);
++
++ if (all_pinned)
++ *all_pinned = pinned;
++
++ return max_load_move - rem_load_move;
++}
++
++/*
++ * move_tasks tries to move up to max_load_move weighted load from busiest to
++ * this_rq, as part of a balancing operation within domain "sd".
++ * Returns 1 if successful and 0 otherwise.
++ *
++ * Called with both runqueues locked.
++ */
++static int move_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
++ unsigned long max_load_move,
++ struct sched_domain *sd, enum cpu_idle_type idle,
++ int *all_pinned)
++{
++ const struct sched_class *class = sched_class_highest;
++ unsigned long total_load_moved = 0;
++ int this_best_prio = this_rq->curr->prio;
++
++ do {
++ total_load_moved +=
++ class->load_balance(this_rq, this_cpu, busiest,
++ max_load_move - total_load_moved,
++ sd, idle, all_pinned, &this_best_prio);
++ class = class->next;
++
++ if (idle == CPU_NEWLY_IDLE && this_rq->nr_running)
++ break;
++
++ } while (class && max_load_move > total_load_moved);
++
++ return total_load_moved > 0;
++}
++
++static int
++iter_move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest,
++ struct sched_domain *sd, enum cpu_idle_type idle,
++ struct rq_iterator *iterator)
++{
++ struct task_struct *p = iterator->start(iterator->arg);
++ int pinned = 0;
++
++ while (p) {
++ if (can_migrate_task(p, busiest, this_cpu, sd, idle, &pinned)) {
++ pull_task(busiest, p, this_rq, this_cpu);
++ /*
++ * Right now, this is only the second place pull_task()
++ * is called, so we can safely collect pull_task()
++ * stats here rather than inside pull_task().
++ */
++ schedstat_inc(sd, lb_gained[idle]);
++
++ return 1;
++ }
++ p = iterator->next(iterator->arg);
++ }
++
++ return 0;
++}
++
++/*
++ * move_one_task tries to move exactly one task from busiest to this_rq, as
++ * part of active balancing operations within "domain".
++ * Returns 1 if successful and 0 otherwise.
++ *
++ * Called with both runqueues locked.
++ */
++static int move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest,
++ struct sched_domain *sd, enum cpu_idle_type idle)
++{
++ const struct sched_class *class;
++
++ for (class = sched_class_highest; class; class = class->next)
++ if (class->move_one_task(this_rq, this_cpu, busiest, sd, idle))
++ return 1;
++
++ return 0;
++}
++
++/*
++ * find_busiest_group finds and returns the busiest CPU group within the
++ * domain. It calculates and returns the amount of weighted load which
++ * should be moved to restore balance via the imbalance parameter.
++ */
++static struct sched_group *
++find_busiest_group(struct sched_domain *sd, int this_cpu,
++ unsigned long *imbalance, enum cpu_idle_type idle,
++ int *sd_idle, const cpumask_t *cpus, int *balance)
++{
++ struct sched_group *busiest = NULL, *this = NULL, *group = sd->groups;
++ unsigned long max_load, avg_load, total_load, this_load, total_pwr;
++ unsigned long max_pull;
++ unsigned long busiest_load_per_task, busiest_nr_running;
++ unsigned long this_load_per_task, this_nr_running;
++ int load_idx, group_imb = 0;
++#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
++ int power_savings_balance = 1;
++ unsigned long leader_nr_running = 0, min_load_per_task = 0;
++ unsigned long min_nr_running = ULONG_MAX;
++ struct sched_group *group_min = NULL, *group_leader = NULL;
++#endif
++
++ max_load = this_load = total_load = total_pwr = 0;
++ busiest_load_per_task = busiest_nr_running = 0;
++ this_load_per_task = this_nr_running = 0;
++
++ if (idle == CPU_NOT_IDLE)
++ load_idx = sd->busy_idx;
++ else if (idle == CPU_NEWLY_IDLE)
++ load_idx = sd->newidle_idx;
++ else
++ load_idx = sd->idle_idx;
++
++ do {
++ unsigned long load, group_capacity, max_cpu_load, min_cpu_load;
++ int local_group;
++ int i;
++ int __group_imb = 0;
++ unsigned int balance_cpu = -1, first_idle_cpu = 0;
++ unsigned long sum_nr_running, sum_weighted_load;
++ unsigned long sum_avg_load_per_task;
++ unsigned long avg_load_per_task;
++
++ local_group = cpu_isset(this_cpu, group->cpumask);
++
++ if (local_group)
++ balance_cpu = first_cpu(group->cpumask);
++
++ /* Tally up the load of all CPUs in the group */
++ sum_weighted_load = sum_nr_running = avg_load = 0;
++ sum_avg_load_per_task = avg_load_per_task = 0;
++
++ max_cpu_load = 0;
++ min_cpu_load = ~0UL;
++
++ for_each_cpu_mask_nr(i, group->cpumask) {
++ struct rq *rq;
++
++ if (!cpu_isset(i, *cpus))
++ continue;
++
++ rq = cpu_rq(i);
++
++ if (*sd_idle && rq->nr_running)
++ *sd_idle = 0;
++
++ /* Bias balancing toward cpus of our domain */
++ if (local_group) {
++ if (idle_cpu(i) && !first_idle_cpu) {
++ first_idle_cpu = 1;
++ balance_cpu = i;
++ }
++
++ load = target_load(i, load_idx);
++ } else {
++ load = source_load(i, load_idx);
++ if (load > max_cpu_load)
++ max_cpu_load = load;
++ if (min_cpu_load > load)
++ min_cpu_load = load;
++ }
++
++ avg_load += load;
++ sum_nr_running += rq->nr_running;
++ sum_weighted_load += weighted_cpuload(i);
++
++ sum_avg_load_per_task += cpu_avg_load_per_task(i);
++ }
++
++ /*
++ * First idle cpu or the first cpu (busiest) in this sched group
++ * is eligible for doing load balancing at this and above
++ * domains. In the newly idle case, we will allow all the cpus
++ * to do the newly idle load balance.
++ */
++ if (idle != CPU_NEWLY_IDLE && local_group &&
++ balance_cpu != this_cpu && balance) {
++ *balance = 0;
++ goto ret;
++ }
++
++ total_load += avg_load;
++ total_pwr += group->__cpu_power;
++
++ /* Adjust by relative CPU power of the group */
++ avg_load = sg_div_cpu_power(group,
++ avg_load * SCHED_LOAD_SCALE);
++
++
++ /*
++ * Consider the group unbalanced when the imbalance is larger
++ * than the average weight of two tasks.
++ *
++ * APZ: with cgroup the avg task weight can vary wildly and
++ * might not be a suitable number - should we keep a
++ * normalized nr_running number somewhere that negates
++ * the hierarchy?
++ */
++ avg_load_per_task = sg_div_cpu_power(group,
++ sum_avg_load_per_task * SCHED_LOAD_SCALE);
++
++ if ((max_cpu_load - min_cpu_load) > 2*avg_load_per_task)
++ __group_imb = 1;
++
++ group_capacity = group->__cpu_power / SCHED_LOAD_SCALE;
++
++ if (local_group) {
++ this_load = avg_load;
++ this = group;
++ this_nr_running = sum_nr_running;
++ this_load_per_task = sum_weighted_load;
++ } else if (avg_load > max_load &&
++ (sum_nr_running > group_capacity || __group_imb)) {
++ max_load = avg_load;
++ busiest = group;
++ busiest_nr_running = sum_nr_running;
++ busiest_load_per_task = sum_weighted_load;
++ group_imb = __group_imb;
++ }
++
++#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
++ /*
++ * Busy processors will not participate in power savings
++ * balance.
++ */
++ if (idle == CPU_NOT_IDLE ||
++ !(sd->flags & SD_POWERSAVINGS_BALANCE))
++ goto group_next;
++
++ /*
++ * If the local group is idle or completely loaded
++ * no need to do power savings balance at this domain
++ */
++ if (local_group && (this_nr_running >= group_capacity ||
++ !this_nr_running))
++ power_savings_balance = 0;
++
++ /*
++ * If a group is already running at full capacity or idle,
++ * don't include that group in power savings calculations
++ */
++ if (!power_savings_balance || sum_nr_running >= group_capacity
++ || !sum_nr_running)
++ goto group_next;
++
++ /*
++ * Calculate the group which has the least non-idle load.
++ * This is the group from where we need to pick up the load
++ * for saving power
++ */
++ if ((sum_nr_running < min_nr_running) ||
++ (sum_nr_running == min_nr_running &&
++ first_cpu(group->cpumask) <
++ first_cpu(group_min->cpumask))) {
++ group_min = group;
++ min_nr_running = sum_nr_running;
++ min_load_per_task = sum_weighted_load /
++ sum_nr_running;
++ }
++
++ /*
++ * Calculate the group which is nearly at its capacity
++ * but still has some space to pick up some load from
++ * other groups and save more power.
++ */
++ if (sum_nr_running <= group_capacity - 1) {
++ if (sum_nr_running > leader_nr_running ||
++ (sum_nr_running == leader_nr_running &&
++ first_cpu(group->cpumask) >
++ first_cpu(group_leader->cpumask))) {
++ group_leader = group;
++ leader_nr_running = sum_nr_running;
++ }
++ }
++group_next:
++#endif
++ group = group->next;
++ } while (group != sd->groups);
++
++ if (!busiest || this_load >= max_load || busiest_nr_running == 0)
++ goto out_balanced;
++
++ avg_load = (SCHED_LOAD_SCALE * total_load) / total_pwr;
++
++ if (this_load >= avg_load ||
++ 100*max_load <= sd->imbalance_pct*this_load)
++ goto out_balanced;
++
++ busiest_load_per_task /= busiest_nr_running;
++ if (group_imb)
++ busiest_load_per_task = min(busiest_load_per_task, avg_load);
++
++ /*
++ * We're trying to get all the cpus to the average_load, so we don't
++ * want to push ourselves above the average load, nor do we wish to
++ * reduce the max loaded cpu below the average load, as either of these
++ * actions would just result in more rebalancing later, and ping-pong
++ * tasks around. Thus we look for the minimum possible imbalance.
++ * Negative imbalances (*we* are more loaded than anyone else) will
++ * be counted as no imbalance for these purposes -- we can't fix that
++ * by pulling tasks to us. Be careful of negative numbers as they'll
++ * appear as very large values with unsigned longs.
++ */
++ if (max_load <= busiest_load_per_task)
++ goto out_balanced;
++
++ /*
++ * In the presence of smp nice balancing, certain scenarios can have
++ * max load less than avg load (as we skip the groups at or below
++ * its cpu_power while calculating max_load).
++ */
++ if (max_load < avg_load) {
++ *imbalance = 0;
++ goto small_imbalance;
++ }
++
++ /* Don't want to pull so many tasks that a group would go idle */
++ max_pull = min(max_load - avg_load, max_load - busiest_load_per_task);
++
++ /* How much load to actually move to equalise the imbalance */
++ *imbalance = min(max_pull * busiest->__cpu_power,
++ (avg_load - this_load) * this->__cpu_power)
++ / SCHED_LOAD_SCALE;
++
++ /*
++ * If *imbalance is less than the average load per runnable task,
++ * there is no guarantee that any tasks will be moved, so we may
++ * need to bump its value to force at least one task to be moved.
++ */
++ if (*imbalance < busiest_load_per_task) {
++ unsigned long tmp, pwr_now, pwr_move;
++ unsigned int imbn;
++
++small_imbalance:
++ pwr_move = pwr_now = 0;
++ imbn = 2;
++ if (this_nr_running) {
++ this_load_per_task /= this_nr_running;
++ if (busiest_load_per_task > this_load_per_task)
++ imbn = 1;
++ } else
++ this_load_per_task = cpu_avg_load_per_task(this_cpu);
++
++ if (max_load - this_load + 2*busiest_load_per_task >=
++ busiest_load_per_task * imbn) {
++ *imbalance = busiest_load_per_task;
++ return busiest;
++ }
++
++ /*
++ * OK, we don't have enough imbalance to justify moving tasks,
++ * however we may be able to increase total CPU power used by
++ * moving them.
++ */
++
++ pwr_now += busiest->__cpu_power *
++ min(busiest_load_per_task, max_load);
++ pwr_now += this->__cpu_power *
++ min(this_load_per_task, this_load);
++ pwr_now /= SCHED_LOAD_SCALE;
++
++ /* Amount of load we'd subtract */
++ tmp = sg_div_cpu_power(busiest,
++ busiest_load_per_task * SCHED_LOAD_SCALE);
++ if (max_load > tmp)
++ pwr_move += busiest->__cpu_power *
++ min(busiest_load_per_task, max_load - tmp);
++
++ /* Amount of load we'd add */
++ if (max_load * busiest->__cpu_power <
++ busiest_load_per_task * SCHED_LOAD_SCALE)
++ tmp = sg_div_cpu_power(this,
++ max_load * busiest->__cpu_power);
++ else
++ tmp = sg_div_cpu_power(this,
++ busiest_load_per_task * SCHED_LOAD_SCALE);
++ pwr_move += this->__cpu_power *
++ min(this_load_per_task, this_load + tmp);
++ pwr_move /= SCHED_LOAD_SCALE;
++
++ /* Move if we gain throughput */
++ if (pwr_move > pwr_now)
++ *imbalance = busiest_load_per_task;
++ }
++
++ return busiest;
++
++out_balanced:
++#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
++ if (idle == CPU_NOT_IDLE || !(sd->flags & SD_POWERSAVINGS_BALANCE))
++ goto ret;
++
++ if (this == group_leader && group_leader != group_min) {
++ *imbalance = min_load_per_task;
++ return group_min;
++ }
++#endif
++ret:
++ *imbalance = 0;
++ return NULL;
++}
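++
++/*
++ * Editor's note: a worked example of the imbalance computation above,
++ * assuming SCHED_LOAD_SCALE == 1024 and single-cpu groups whose
++ * __cpu_power is also 1024:
++ *
++ *	avg_load = 2048, this_load = 1024, max_load = 3072,
++ *	busiest_load_per_task = 512
++ *	max_pull   = min(3072 - 2048, 3072 - 512) = 1024
++ *	*imbalance = min(1024 * 1024, (2048 - 1024) * 1024) / 1024 = 1024
++ *
++ * We pull just enough to bring the busiest group down toward the domain
++ * average without pushing this group above it; the small_imbalance path
++ * only bumps the value when that minimum could not move a single task.
++ */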
++
++/*
++ * find_busiest_queue - find the busiest runqueue among the cpus in group.
++ */
++static struct rq *
++find_busiest_queue(struct sched_group *group, enum cpu_idle_type idle,
++ unsigned long imbalance, const cpumask_t *cpus)
++{
++ struct rq *busiest = NULL, *rq;
++ unsigned long max_load = 0;
++ int i;
++
++ for_each_cpu_mask_nr(i, group->cpumask) {
++ unsigned long wl;
++
++ if (!cpu_isset(i, *cpus))
++ continue;
++
++ rq = cpu_rq(i);
++ wl = weighted_cpuload(i);
++
++ if (rq->nr_running == 1 && wl > imbalance)
++ continue;
++
++ if (wl > max_load) {
++ max_load = wl;
++ busiest = rq;
++ }
++ }
++
++ return busiest;
++}
++
++/*
++ * Max backoff if we encounter pinned tasks. The value is fairly
++ * arbitrary; it only needs to be large enough.
++ */
++#define MAX_PINNED_INTERVAL 512
++
++/*
++ * Check this_cpu to ensure it is balanced within domain. Attempt to move
++ * tasks if there is an imbalance.
++ */
++static int load_balance(int this_cpu, struct rq *this_rq,
++ struct sched_domain *sd, enum cpu_idle_type idle,
++ int *balance, cpumask_t *cpus)
++{
++ int ld_moved, all_pinned = 0, active_balance = 0, sd_idle = 0;
++ struct sched_group *group;
++ unsigned long imbalance;
++ struct rq *busiest;
++ unsigned long flags;
++
++ cpus_setall(*cpus);
++
++ /*
++ * When power savings policy is enabled for the parent domain, idle
++ * sibling can pick up load irrespective of busy siblings. In this case,
++ * let the state of idle sibling percolate up as CPU_IDLE, instead of
++ * portraying it as CPU_NOT_IDLE.
++ */
++ if (idle != CPU_NOT_IDLE && sd->flags & SD_SHARE_CPUPOWER &&
++ !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
++ sd_idle = 1;
++
++ schedstat_inc(sd, lb_count[idle]);
++
++redo:
++ update_shares(sd);
++ group = find_busiest_group(sd, this_cpu, &imbalance, idle, &sd_idle,
++ cpus, balance);
++
++ if (*balance == 0)
++ goto out_balanced;
++
++ if (!group) {
++ schedstat_inc(sd, lb_nobusyg[idle]);
++ goto out_balanced;
++ }
++
++ busiest = find_busiest_queue(group, idle, imbalance, cpus);
++ if (!busiest) {
++ schedstat_inc(sd, lb_nobusyq[idle]);
++ goto out_balanced;
++ }
++
++ BUG_ON(busiest == this_rq);
++
++ schedstat_add(sd, lb_imbalance[idle], imbalance);
++
++ ld_moved = 0;
++ if (busiest->nr_running > 1) {
++ /*
++ * Attempt to move tasks. If find_busiest_group has found
++ * an imbalance but busiest->nr_running <= 1, the group is
++ * still unbalanced. ld_moved simply stays zero, so it is
++ * correctly treated as an imbalance.
++ */
++ local_irq_save(flags);
++ double_rq_lock(this_rq, busiest);
++ ld_moved = move_tasks(this_rq, this_cpu, busiest,
++ imbalance, sd, idle, &all_pinned);
++ double_rq_unlock(this_rq, busiest);
++ local_irq_restore(flags);
++
++ /*
++ * some other cpu did the load balance for us.
++ */
++ if (ld_moved && this_cpu != smp_processor_id())
++ resched_cpu(this_cpu);
++
++ /* All tasks on this runqueue were pinned by CPU affinity */
++ if (unlikely(all_pinned)) {
++ cpu_clear(cpu_of(busiest), *cpus);
++ if (!cpus_empty(*cpus))
++ goto redo;
++ goto out_balanced;
++ }
++ }
++
++ if (!ld_moved) {
++ schedstat_inc(sd, lb_failed[idle]);
++ sd->nr_balance_failed++;
++
++ if (unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2)) {
++
++ spin_lock_irqsave(&busiest->lock, flags);
++
++ /* don't kick the migration_thread, if the curr
++ * task on busiest cpu can't be moved to this_cpu
++ */
++ if (!cpu_isset(this_cpu, busiest->curr->cpus_allowed)) {
++ spin_unlock_irqrestore(&busiest->lock, flags);
++ all_pinned = 1;
++ goto out_one_pinned;
++ }
++
++ if (!busiest->active_balance) {
++ busiest->active_balance = 1;
++ busiest->push_cpu = this_cpu;
++ active_balance = 1;
++ }
++ spin_unlock_irqrestore(&busiest->lock, flags);
++ if (active_balance)
++ wake_up_process(busiest->migration_thread);
++
++ /*
++ * We've kicked active balancing, reset the failure
++ * counter.
++ */
++ sd->nr_balance_failed = sd->cache_nice_tries+1;
++ }
++ } else
++ sd->nr_balance_failed = 0;
++
++ if (likely(!active_balance)) {
++ /* We were unbalanced, so reset the balancing interval */
++ sd->balance_interval = sd->min_interval;
++ } else {
++ /*
++ * If we've begun active balancing, start to back off. This
++ * case may not be covered by the all_pinned logic if there
++ * is only 1 task on the busy runqueue (because we don't call
++ * move_tasks).
++ */
++ if (sd->balance_interval < sd->max_interval)
++ sd->balance_interval *= 2;
++ }
++
++ if (!ld_moved && !sd_idle && sd->flags & SD_SHARE_CPUPOWER &&
++ !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
++ ld_moved = -1;
++
++ goto out;
++
++out_balanced:
++ schedstat_inc(sd, lb_balanced[idle]);
++
++ sd->nr_balance_failed = 0;
++
++out_one_pinned:
++ /* tune up the balancing interval */
++ if ((all_pinned && sd->balance_interval < MAX_PINNED_INTERVAL) ||
++ (sd->balance_interval < sd->max_interval))
++ sd->balance_interval *= 2;
++
++ if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER &&
++ !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
++ ld_moved = -1;
++ else
++ ld_moved = 0;
++out:
++ if (ld_moved)
++ update_shares(sd);
++ return ld_moved;
++}
++
++/*
++ * Check this_cpu to ensure it is balanced within domain. Attempt to move
++ * tasks if there is an imbalance.
++ *
++ * Called from schedule when this_rq is about to become idle (CPU_NEWLY_IDLE).
++ * this_rq is locked.
++ */
++static int
++load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd,
++ cpumask_t *cpus)
++{
++ struct sched_group *group;
++ struct rq *busiest = NULL;
++ unsigned long imbalance;
++ int ld_moved = 0;
++ int sd_idle = 0;
++ int all_pinned = 0;
++
++ cpus_setall(*cpus);
++
++ /*
++ * When power savings policy is enabled for the parent domain, idle
++ * sibling can pick up load irrespective of busy siblings. In this case,
++ * let the state of idle sibling percolate up as IDLE, instead of
++ * portraying it as CPU_NOT_IDLE.
++ */
++ if (sd->flags & SD_SHARE_CPUPOWER &&
++ !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
++ sd_idle = 1;
++
++ schedstat_inc(sd, lb_count[CPU_NEWLY_IDLE]);
++redo:
++ update_shares_locked(this_rq, sd);
++ group = find_busiest_group(sd, this_cpu, &imbalance, CPU_NEWLY_IDLE,
++ &sd_idle, cpus, NULL);
++ if (!group) {
++ schedstat_inc(sd, lb_nobusyg[CPU_NEWLY_IDLE]);
++ goto out_balanced;
++ }
++
++ busiest = find_busiest_queue(group, CPU_NEWLY_IDLE, imbalance, cpus);
++ if (!busiest) {
++ schedstat_inc(sd, lb_nobusyq[CPU_NEWLY_IDLE]);
++ goto out_balanced;
++ }
++
++ BUG_ON(busiest == this_rq);
++
++ schedstat_add(sd, lb_imbalance[CPU_NEWLY_IDLE], imbalance);
++
++ ld_moved = 0;
++ if (busiest->nr_running > 1) {
++ /* Attempt to move tasks */
++ double_lock_balance(this_rq, busiest);
++ /* this_rq->clock is already updated */
++ update_rq_clock(busiest);
++ ld_moved = move_tasks(this_rq, this_cpu, busiest,
++ imbalance, sd, CPU_NEWLY_IDLE,
++ &all_pinned);
++ double_unlock_balance(this_rq, busiest);
++
++ if (unlikely(all_pinned)) {
++ cpu_clear(cpu_of(busiest), *cpus);
++ if (!cpus_empty(*cpus))
++ goto redo;
++ }
++ }
++
++ if (!ld_moved) {
++ schedstat_inc(sd, lb_failed[CPU_NEWLY_IDLE]);
++ if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER &&
++ !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
++ return -1;
++ } else
++ sd->nr_balance_failed = 0;
++
++ update_shares_locked(this_rq, sd);
++ return ld_moved;
++
++out_balanced:
++ schedstat_inc(sd, lb_balanced[CPU_NEWLY_IDLE]);
++ if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER &&
++ !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
++ return -1;
++ sd->nr_balance_failed = 0;
++
++ return 0;
++}
++
++/*
++ * idle_balance is called by schedule() if this_cpu is about to become
++ * idle. Attempts to pull tasks from other CPUs.
++ */
++static void idle_balance(int this_cpu, struct rq *this_rq)
++{
++ struct sched_domain *sd;
++ int pulled_task = -1;
++ unsigned long next_balance = jiffies + HZ;
++ cpumask_t tmpmask;
++
++ for_each_domain(this_cpu, sd) {
++ unsigned long interval;
++
++ if (!(sd->flags & SD_LOAD_BALANCE))
++ continue;
++
++ if (sd->flags & SD_BALANCE_NEWIDLE)
++ /* If we've pulled tasks over stop searching: */
++ pulled_task = load_balance_newidle(this_cpu, this_rq,
++ sd, &tmpmask);
++
++ interval = msecs_to_jiffies(sd->balance_interval);
++ if (time_after(next_balance, sd->last_balance + interval))
++ next_balance = sd->last_balance + interval;
++ if (pulled_task)
++ break;
++ }
++ if (pulled_task || time_after(jiffies, this_rq->next_balance)) {
++ /*
++ * We are going idle. next_balance may be set based on
++ * a busy processor. So reset next_balance.
++ */
++ this_rq->next_balance = next_balance;
++ }
++}
++
++/*
++ * active_load_balance is run by migration threads. It pushes running tasks
++ * off the busiest CPU onto idle CPUs. It requires at least 1 task to be
++ * running on each physical CPU where possible, and avoids physical /
++ * logical imbalances.
++ *
++ * Called with busiest_rq locked.
++ */
++static void active_load_balance(struct rq *busiest_rq, int busiest_cpu)
++{
++ int target_cpu = busiest_rq->push_cpu;
++ struct sched_domain *sd;
++ struct rq *target_rq;
++
++ /* Is there any task to move? */
++ if (busiest_rq->nr_running <= 1)
++ return;
++
++ target_rq = cpu_rq(target_cpu);
++
++ /*
++ * This condition is "impossible", if it occurs
++ * we need to fix it. Originally reported by
++ * Bjorn Helgaas on a 128-cpu setup.
++ */
++ BUG_ON(busiest_rq == target_rq);
++
++ /* move a task from busiest_rq to target_rq */
++ double_lock_balance(busiest_rq, target_rq);
++ update_rq_clock(busiest_rq);
++ update_rq_clock(target_rq);
++
++ /* Search for an sd spanning us and the target CPU. */
++ for_each_domain(target_cpu, sd) {
++ if ((sd->flags & SD_LOAD_BALANCE) &&
++ cpu_isset(busiest_cpu, sd->span))
++ break;
++ }
++
++ if (likely(sd)) {
++ schedstat_inc(sd, alb_count);
++
++ if (move_one_task(target_rq, target_cpu, busiest_rq,
++ sd, CPU_IDLE))
++ schedstat_inc(sd, alb_pushed);
++ else
++ schedstat_inc(sd, alb_failed);
++ }
++ double_unlock_balance(busiest_rq, target_rq);
++}
++
++#ifdef CONFIG_NO_HZ
++static struct {
++ atomic_t load_balancer;
++ cpumask_t cpu_mask;
++} nohz ____cacheline_aligned = {
++ .load_balancer = ATOMIC_INIT(-1),
++ .cpu_mask = CPU_MASK_NONE,
++};
++
++/*
++ * This routine will try to nominate the ilb (idle load balancing)
++ * owner among the cpus whose ticks are stopped. ilb owner will do the idle
++ * load balancing on behalf of all those cpus. If all the cpus in the system
++ * go into this tickless mode, then there will be no ilb owner (as there is
++ * no need for one) and all the cpus will sleep till the next wakeup event
++ * arrives...
++ *
++ * For the ilb owner, the tick is not stopped, and this tick will be
++ * used for idle load balancing. The ilb owner will still be part of
++ * nohz.cpu_mask.
++ *
++ * While stopping the tick, this cpu will become the ilb owner if there
++ * is no other owner, and it will remain the owner until this cpu becomes
++ * busy or until all cpus in the system stop their ticks, at which point
++ * there is no need for an ilb owner.
++ *
++ * When the ilb owner becomes busy, it nominates another owner, during the
++ * next busy scheduler_tick()
++ */
++int select_nohz_load_balancer(int stop_tick)
++{
++ int cpu = smp_processor_id();
++
++ if (stop_tick) {
++ cpu_set(cpu, nohz.cpu_mask);
++ cpu_rq(cpu)->in_nohz_recently = 1;
++
++ /*
++ * If we are going offline and still the leader, give up!
++ */
++ if (!cpu_active(cpu) &&
++ atomic_read(&nohz.load_balancer) == cpu) {
++ if (atomic_cmpxchg(&nohz.load_balancer, cpu, -1) != cpu)
++ BUG();
++ return 0;
++ }
++
++ /* time for ilb owner also to sleep */
++ if (cpus_weight(nohz.cpu_mask) == num_online_cpus()) {
++ if (atomic_read(&nohz.load_balancer) == cpu)
++ atomic_set(&nohz.load_balancer, -1);
++ return 0;
++ }
++
++ if (atomic_read(&nohz.load_balancer) == -1) {
++ /* make me the ilb owner */
++ if (atomic_cmpxchg(&nohz.load_balancer, -1, cpu) == -1)
++ return 1;
++ } else if (atomic_read(&nohz.load_balancer) == cpu)
++ return 1;
++ } else {
++ if (!cpu_isset(cpu, nohz.cpu_mask))
++ return 0;
++
++ cpu_clear(cpu, nohz.cpu_mask);
++
++ if (atomic_read(&nohz.load_balancer) == cpu)
++ if (atomic_cmpxchg(&nohz.load_balancer, cpu, -1) != cpu)
++ BUG();
++ }
++ return 0;
++}
++#endif
++
++static DEFINE_SPINLOCK(balancing);
++
++/*
++ * It checks each scheduling domain to see if it is due to be balanced,
++ * and initiates a balancing operation if so.
++ *
++ * Balancing parameters are set up in arch_init_sched_domains.
++ */
++static void rebalance_domains(int cpu, enum cpu_idle_type idle)
++{
++ int balance = 1;
++ struct rq *rq = cpu_rq(cpu);
++ unsigned long interval;
++ struct sched_domain *sd;
++ /* Earliest time when we have to do rebalance again */
++ unsigned long next_balance = jiffies + 60*HZ;
++ int update_next_balance = 0;
++ int need_serialize;
++ cpumask_t tmp;
++
++ for_each_domain(cpu, sd) {
++ if (!(sd->flags & SD_LOAD_BALANCE))
++ continue;
++
++ interval = sd->balance_interval;
++ if (idle != CPU_IDLE)
++ interval *= sd->busy_factor;
++
++ /* scale ms to jiffies */
++ interval = msecs_to_jiffies(interval);
++ if (unlikely(!interval))
++ interval = 1;
++ if (interval > HZ*NR_CPUS/10)
++ interval = HZ*NR_CPUS/10;
++
++ need_serialize = sd->flags & SD_SERIALIZE;
++
++ if (need_serialize) {
++ if (!spin_trylock(&balancing))
++ goto out;
++ }
++
++ if (time_after_eq(jiffies, sd->last_balance + interval)) {
++ if (load_balance(cpu, rq, sd, idle, &balance, &tmp)) {
++ /*
++ * We've pulled tasks over so either we're no
++ * longer idle, or one of our SMT siblings is
++ * not idle.
++ */
++ idle = CPU_NOT_IDLE;
++ }
++ sd->last_balance = jiffies;
++ }
++ if (need_serialize)
++ spin_unlock(&balancing);
++out:
++ if (time_after(next_balance, sd->last_balance + interval)) {
++ next_balance = sd->last_balance + interval;
++ update_next_balance = 1;
++ }
++
++ /*
++ * Stop the load balance at this level. There is another
++ * CPU in our sched group which is doing load balancing more
++ * actively.
++ */
++ if (!balance)
++ break;
++ }
++
++ /*
++ * next_balance will be updated only when there is a need.
++ * For example, when the cpu is attached to the null domain it will
++ * not be updated.
++ */
++ if (likely(update_next_balance))
++ rq->next_balance = next_balance;
++}
++
++/*
++ * run_rebalance_domains is triggered when needed from the scheduler tick.
++ * In CONFIG_NO_HZ case, the idle load balance owner will do the
++ * rebalancing for all the cpus for whom scheduler ticks are stopped.
++ */
++static void run_rebalance_domains(struct softirq_action *h)
++{
++ int this_cpu = smp_processor_id();
++ struct rq *this_rq = cpu_rq(this_cpu);
++ enum cpu_idle_type idle = this_rq->idle_at_tick ?
++ CPU_IDLE : CPU_NOT_IDLE;
++
++ rebalance_domains(this_cpu, idle);
++
++#ifdef CONFIG_NO_HZ
++ /*
++ * If this cpu is the owner for idle load balancing, then do the
++ * balancing on behalf of the other idle cpus whose ticks are
++ * stopped.
++ */
++ if (this_rq->idle_at_tick &&
++ atomic_read(&nohz.load_balancer) == this_cpu) {
++ cpumask_t cpus = nohz.cpu_mask;
++ struct rq *rq;
++ int balance_cpu;
++
++ cpu_clear(this_cpu, cpus);
++ for_each_cpu_mask_nr(balance_cpu, cpus) {
++ /*
++ * If this cpu gets work to do, stop the load balancing
++ * work being done for other cpus. Next load
++ * balancing owner will pick it up.
++ */
++ if (need_resched())
++ break;
++
++ rebalance_domains(balance_cpu, CPU_IDLE);
++
++ rq = cpu_rq(balance_cpu);
++ if (time_after(this_rq->next_balance, rq->next_balance))
++ this_rq->next_balance = rq->next_balance;
++ }
++ }
++#endif
++}
++
++/*
++ * Trigger the SCHED_SOFTIRQ if it is time to do periodic load balancing.
++ *
++ * In case of CONFIG_NO_HZ, this is the place where we nominate a new
++ * idle load balancing owner or decide to stop the periodic load balancing,
++ * if the whole system is idle.
++ */
++static inline void trigger_load_balance(struct rq *rq, int cpu)
++{
++#ifdef CONFIG_NO_HZ
++ /*
++ * If we were in the nohz mode recently and busy at the current
++ * scheduler tick, then check if we need to nominate a new idle
++ * load balancer.
++ */
++ if (rq->in_nohz_recently && !rq->idle_at_tick) {
++ rq->in_nohz_recently = 0;
++
++ if (atomic_read(&nohz.load_balancer) == cpu) {
++ cpu_clear(cpu, nohz.cpu_mask);
++ atomic_set(&nohz.load_balancer, -1);
++ }
++
++ if (atomic_read(&nohz.load_balancer) == -1) {
++ /*
++ * simple selection for now: Nominate the
++ * first cpu in the nohz list to be the next
++ * ilb owner.
++ *
++ * TBD: Traverse the sched domains and nominate
++ * the nearest cpu in the nohz.cpu_mask.
++ */
++ int ilb = first_cpu(nohz.cpu_mask);
++
++ if (ilb < nr_cpu_ids)
++ resched_cpu(ilb);
++ }
++ }
++
++ /*
++ * If this cpu is idle and doing idle load balancing for all the
++ * cpus with ticks stopped, is it time for that to stop?
++ */
++ if (rq->idle_at_tick && atomic_read(&nohz.load_balancer) == cpu &&
++ cpus_weight(nohz.cpu_mask) == num_online_cpus()) {
++ resched_cpu(cpu);
++ return;
++ }
++
++ /*
++ * If this cpu is idle and the idle load balancing is done by
++ * someone else, then there is no need to raise the SCHED_SOFTIRQ.
++ */
++ if (rq->idle_at_tick && atomic_read(&nohz.load_balancer) != cpu &&
++ cpu_isset(cpu, nohz.cpu_mask))
++ return;
++#endif
++ if (time_after_eq(jiffies, rq->next_balance))
++ raise_softirq(SCHED_SOFTIRQ);
++}
++
++#else /* CONFIG_SMP */
++
++/*
++ * on UP we do not need to balance between CPUs:
++ */
++static inline void idle_balance(int cpu, struct rq *rq)
++{
++}
++
++#endif
++
++DEFINE_PER_CPU(struct kernel_stat, kstat);
++
++EXPORT_PER_CPU_SYMBOL(kstat);
++
++/*
++ * Return p->sum_exec_runtime plus any more ns on the sched_clock
++ * that have not yet been banked in case the task is currently running.
++ */
++unsigned long long task_sched_runtime(struct task_struct *p)
++{
++ unsigned long flags;
++ u64 ns, delta_exec;
++ struct rq *rq;
++
++ rq = task_rq_lock(p, &flags);
++ ns = p->se.sum_exec_runtime;
++ if (task_current(rq, p)) {
++ update_rq_clock(rq);
++ delta_exec = rq->clock - p->se.exec_start;
++ if ((s64)delta_exec > 0)
++ ns += delta_exec;
++ }
++ task_rq_unlock(rq, &flags);
++
++ return ns;
++}
++
++/*
++ * Account user cpu time to a process.
++ * @p: the process that the cpu time gets accounted to
++ * @cputime: the cpu time spent in user space since the last update
++ */
++void account_user_time(struct task_struct *p, cputime_t cputime)
++{
++ struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
++ struct vx_info *vxi = p->vx_info; /* p is _always_ current */
++ cputime64_t tmp;
++ int nice = (TASK_NICE(p) > 0);
++
++ p->utime = cputime_add(p->utime, cputime);
++ vx_account_user(vxi, cputime, nice);
++
++ /* Add user time to cpustat. */
++ tmp = cputime_to_cputime64(cputime);
++ if (nice)
++ cpustat->nice = cputime64_add(cpustat->nice, tmp);
++ else
++ cpustat->user = cputime64_add(cpustat->user, tmp);
++ /* Account for user time used */
++ acct_update_integrals(p);
++}
++
++/*
++ * Account guest cpu time to a process.
++ * @p: the process that the cpu time gets accounted to
++ * @cputime: the cpu time spent in virtual machine since the last update
++ */
++static void account_guest_time(struct task_struct *p, cputime_t cputime)
++{
++ cputime64_t tmp;
++ struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
++
++ tmp = cputime_to_cputime64(cputime);
++
++ p->utime = cputime_add(p->utime, cputime);
++ p->gtime = cputime_add(p->gtime, cputime);
++
++ cpustat->user = cputime64_add(cpustat->user, tmp);
++ cpustat->guest = cputime64_add(cpustat->guest, tmp);
++}
++
++/*
++ * Account scaled user cpu time to a process.
++ * @p: the process that the cpu time gets accounted to
++ * @cputime: the cpu time spent in user space since the last update
++ */
++void account_user_time_scaled(struct task_struct *p, cputime_t cputime)
++{
++ p->utimescaled = cputime_add(p->utimescaled, cputime);
++}
++
++/*
++ * Account system cpu time to a process.
++ * @p: the process that the cpu time gets accounted to
++ * @hardirq_offset: the offset to subtract from hardirq_count()
++ * @cputime: the cpu time spent in kernel space since the last update
++ */
++void account_system_time(struct task_struct *p, int hardirq_offset,
++ cputime_t cputime)
++{
++ struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
++ struct vx_info *vxi = p->vx_info; /* p is _always_ current */
++ struct rq *rq = this_rq();
++ cputime64_t tmp;
++
++ if ((p->flags & PF_VCPU) && (irq_count() - hardirq_offset == 0)) {
++ account_guest_time(p, cputime);
++ return;
++ }
++
++ p->stime = cputime_add(p->stime, cputime);
++ vx_account_system(vxi, cputime, (p == rq->idle));
++
++ /* Add system time to cpustat. */
++ tmp = cputime_to_cputime64(cputime);
++ if (hardirq_count() - hardirq_offset)
++ cpustat->irq = cputime64_add(cpustat->irq, tmp);
++ else if (softirq_count())
++ cpustat->softirq = cputime64_add(cpustat->softirq, tmp);
++ else if (p != rq->idle)
++ cpustat->system = cputime64_add(cpustat->system, tmp);
++ else if (atomic_read(&rq->nr_iowait) > 0)
++ cpustat->iowait = cputime64_add(cpustat->iowait, tmp);
++ else
++ cpustat->idle = cputime64_add(cpustat->idle, tmp);
++ /* Account for system time used */
++ acct_update_integrals(p);
++}
++
++/*
++ * Account scaled system cpu time to a process.
++ * @p: the process that the cpu time gets accounted to
++ * @hardirq_offset: the offset to subtract from hardirq_count()
++ * @cputime: the cpu time spent in kernel space since the last update
++ */
++void account_system_time_scaled(struct task_struct *p, cputime_t cputime)
++{
++ p->stimescaled = cputime_add(p->stimescaled, cputime);
++}
++
++/*
++ * Account for involuntary wait time.
++ * @p: the process from which the cpu time has been stolen
++ * @steal: the cpu time spent in involuntary wait
++ */
++void account_steal_time(struct task_struct *p, cputime_t steal)
++{
++ struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
++ cputime64_t tmp = cputime_to_cputime64(steal);
++ struct rq *rq = this_rq();
++
++ if (p == rq->idle) {
++ p->stime = cputime_add(p->stime, steal);
++ if (atomic_read(&rq->nr_iowait) > 0)
++ cpustat->iowait = cputime64_add(cpustat->iowait, tmp);
++ else
++ cpustat->idle = cputime64_add(cpustat->idle, tmp);
++ } else
++ cpustat->steal = cputime64_add(cpustat->steal, tmp);
++}
++
++/*
++ * Use precise platform statistics if available:
++ */
++#ifdef CONFIG_VIRT_CPU_ACCOUNTING
++cputime_t task_utime(struct task_struct *p)
++{
++ return p->utime;
++}
++
++cputime_t task_stime(struct task_struct *p)
++{
++ return p->stime;
++}
++#else
++cputime_t task_utime(struct task_struct *p)
++{
++ clock_t utime = cputime_to_clock_t(p->utime),
++ total = utime + cputime_to_clock_t(p->stime);
++ u64 temp;
++
++ /*
++ * Use CFS's precise accounting:
++ */
++ temp = (u64)nsec_to_clock_t(p->se.sum_exec_runtime);
++
++ if (total) {
++ temp *= utime;
++ do_div(temp, total);
++ }
++ utime = (clock_t)temp;
++
++ p->prev_utime = max(p->prev_utime, clock_t_to_cputime(utime));
++ return p->prev_utime;
++}
++
++cputime_t task_stime(struct task_struct *p)
++{
++ clock_t stime;
++
++ /*
++ * Use CFS's precise accounting. (we subtract utime from
++ * the total, to make sure the total observed by userspace
++ * grows monotonically - apps rely on that):
++ */
++ stime = nsec_to_clock_t(p->se.sum_exec_runtime) -
++ cputime_to_clock_t(task_utime(p));
++
++ if (stime >= 0)
++ p->prev_stime = max(p->prev_stime, clock_t_to_cputime(stime));
++
++ return p->prev_stime;
++}
++#endif
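++/*
++ * Illustrative worked example (not part of the original code): in the
++ * !CONFIG_VIRT_CPU_ACCOUNTING case above, task_utime() scales the precise
++ * CFS runtime by the ratio utime / (utime + stime). With utime = 30 ticks,
++ * stime = 10 ticks and sum_exec_runtime equivalent to 48 ticks, the
++ * reported utime is 48 * 30 / 40 = 36 ticks, and task_stime() then reports
++ * 48 - 36 = 12 ticks, so the two always sum to the precise total.
++ */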
++
++inline cputime_t task_gtime(struct task_struct *p)
++{
++ return p->gtime;
++}
++
++/*
++ * This function gets called by the timer code, with HZ frequency.
++ * We call it with interrupts disabled.
++ *
++ * It also gets called by the fork code, when changing the parent's
++ * timeslices.
++ */
++void scheduler_tick(void)
++{
++ int cpu = smp_processor_id();
++ struct rq *rq = cpu_rq(cpu);
++ struct task_struct *curr = rq->curr;
++
++ sched_clock_tick();
++
++ spin_lock(&rq->lock);
++ update_rq_clock(rq);
++ update_cpu_load(rq);
++ curr->sched_class->task_tick(rq, curr, 0);
++ spin_unlock(&rq->lock);
++
++#ifdef CONFIG_SMP
++ rq->idle_at_tick = idle_cpu(cpu);
++ trigger_load_balance(rq, cpu);
++#endif
++}
++
++#if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \
++ defined(CONFIG_PREEMPT_TRACER))
++
++static inline unsigned long get_parent_ip(unsigned long addr)
++{
++ if (in_lock_functions(addr)) {
++ addr = CALLER_ADDR2;
++ if (in_lock_functions(addr))
++ addr = CALLER_ADDR3;
++ }
++ return addr;
++}
++
++void __kprobes add_preempt_count(int val)
++{
++#ifdef CONFIG_DEBUG_PREEMPT
++ /*
++ * Underflow?
++ */
++ if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0)))
++ return;
++#endif
++ preempt_count() += val;
++#ifdef CONFIG_DEBUG_PREEMPT
++ /*
++ * Spinlock count overflowing soon?
++ */
++ DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >=
++ PREEMPT_MASK - 10);
++#endif
++ if (preempt_count() == val)
++ trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
++}
++EXPORT_SYMBOL(add_preempt_count);
++
++void __kprobes sub_preempt_count(int val)
++{
++#ifdef CONFIG_DEBUG_PREEMPT
++ /*
++ * Underflow?
++ */
++ if (DEBUG_LOCKS_WARN_ON(val > preempt_count()))
++ return;
++ /*
++ * Is the spinlock portion underflowing?
++ */
++ if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) &&
++ !(preempt_count() & PREEMPT_MASK)))
++ return;
++#endif
++
++ if (preempt_count() == val)
++ trace_preempt_on(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
++ preempt_count() -= val;
++}
++EXPORT_SYMBOL(sub_preempt_count);
++
++#endif
++
++/*
++ * Print scheduling while atomic bug:
++ */
++static noinline void __schedule_bug(struct task_struct *prev)
++{
++ struct pt_regs *regs = get_irq_regs();
++
++ printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n",
++ prev->comm, prev->pid, preempt_count());
++
++ debug_show_held_locks(prev);
++ print_modules();
++ if (irqs_disabled())
++ print_irqtrace_events(prev);
++
++ if (regs)
++ show_regs(regs);
++ else
++ dump_stack();
++}
++
++/*
++ * Various schedule()-time debugging checks and statistics:
++ */
++static inline void schedule_debug(struct task_struct *prev)
++{
++ /*
++ * Test if we are atomic. Since do_exit() needs to call into
++ * schedule() atomically, we ignore that path for now.
++ * Otherwise, whine if we are scheduling when we should not be.
++ */
++ if (unlikely(in_atomic_preempt_off() && !prev->exit_state))
++ __schedule_bug(prev);
++
++ profile_hit(SCHED_PROFILING, __builtin_return_address(0));
++
++ schedstat_inc(this_rq(), sched_count);
++#ifdef CONFIG_SCHEDSTATS
++ if (unlikely(prev->lock_depth >= 0)) {
++ schedstat_inc(this_rq(), bkl_count);
++ schedstat_inc(prev, sched_info.bkl_count);
++ }
++#endif
++}
++
++/*
++ * Pick up the highest-prio task:
++ */
++static inline struct task_struct *
++pick_next_task(struct rq *rq, struct task_struct *prev)
++{
++ const struct sched_class *class;
++ struct task_struct *p;
++
++ /*
++ * Optimization: we know that if all tasks are in
++ * the fair class we can call that function directly:
++ */
++ if (likely(rq->nr_running == rq->cfs.nr_running)) {
++ p = fair_sched_class.pick_next_task(rq);
++ if (likely(p))
++ return p;
++ }
++
++ class = sched_class_highest;
++ for ( ; ; ) {
++ p = class->pick_next_task(rq);
++ if (p)
++ return p;
++ /*
++ * Will never be NULL as the idle class always
++ * returns a non-NULL p:
++ */
++ class = class->next;
++ }
++}
++
++/*
++ * schedule() is the main scheduler function.
++ */
++asmlinkage void __sched schedule(void)
++{
++ struct task_struct *prev, *next;
++ unsigned long *switch_count;
++ struct rq *rq;
++ int cpu;
++
++need_resched:
++ preempt_disable();
++ cpu = smp_processor_id();
++ rq = cpu_rq(cpu);
++ rcu_qsctr_inc(cpu);
++ prev = rq->curr;
++ switch_count = &prev->nivcsw;
++
++ release_kernel_lock(prev);
++need_resched_nonpreemptible:
++
++ schedule_debug(prev);
++
++ if (sched_feat(HRTICK))
++ hrtick_clear(rq);
++
++ /*
++ * Do the rq-clock update outside the rq lock:
++ */
++ local_irq_disable();
++ update_rq_clock(rq);
++ spin_lock(&rq->lock);
++ clear_tsk_need_resched(prev);
++
++ if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
++ if (unlikely(signal_pending_state(prev->state, prev)))
++ prev->state = TASK_RUNNING;
++ else
++ deactivate_task(rq, prev, 1);
++ switch_count = &prev->nvcsw;
++ }
++
++#ifdef CONFIG_SMP
++ if (prev->sched_class->pre_schedule)
++ prev->sched_class->pre_schedule(rq, prev);
++#endif
++
++ if (unlikely(!rq->nr_running))
++ idle_balance(cpu, rq);
++
++ prev->sched_class->put_prev_task(rq, prev);
++ next = pick_next_task(rq, prev);
++
++ if (likely(prev != next)) {
++ sched_info_switch(prev, next);
++
++ rq->nr_switches++;
++ rq->curr = next;
++ ++*switch_count;
++
++ context_switch(rq, prev, next); /* unlocks the rq */
++ /*
++ * the context switch might have flipped the stack from under
++ * us, hence refresh the local variables.
++ */
++ cpu = smp_processor_id();
++ rq = cpu_rq(cpu);
++ } else
++ spin_unlock_irq(&rq->lock);
++
++ if (unlikely(reacquire_kernel_lock(current) < 0))
++ goto need_resched_nonpreemptible;
++
++ preempt_enable_no_resched();
++ if (unlikely(test_thread_flag(TIF_NEED_RESCHED)))
++ goto need_resched;
++}
++EXPORT_SYMBOL(schedule);
++
++#ifdef CONFIG_PREEMPT
++/*
++ * This is the entry point to schedule() for in-kernel preemption
++ * coming from preempt_enable(). Kernel preemptions off of a return
++ * from interrupt are handled by preempt_schedule_irq() below, which
++ * calls schedule() directly.
++ */
++asmlinkage void __sched preempt_schedule(void)
++{
++ struct thread_info *ti = current_thread_info();
++
++ /*
++ * If there is a non-zero preempt_count or interrupts are disabled,
++ * we do not want to preempt the current task. Just return.
++ */
++ if (likely(ti->preempt_count || irqs_disabled()))
++ return;
++
++ do {
++ add_preempt_count(PREEMPT_ACTIVE);
++ schedule();
++ sub_preempt_count(PREEMPT_ACTIVE);
++
++ /*
++ * Check again in case we missed a preemption opportunity
++ * between schedule and now.
++ */
++ barrier();
++ } while (unlikely(test_thread_flag(TIF_NEED_RESCHED)));
++}
++EXPORT_SYMBOL(preempt_schedule);
++
++/*
++ * This is the entry point to schedule() for kernel preemption
++ * off of irq context.
++ * Note that this is called with, and returns with, irqs disabled. This
++ * protects us against recursive calling from irq context.
++ */
++asmlinkage void __sched preempt_schedule_irq(void)
++{
++ struct thread_info *ti = current_thread_info();
++
++ /* Catch callers which need to be fixed */
++ BUG_ON(ti->preempt_count || !irqs_disabled());
++
++ do {
++ add_preempt_count(PREEMPT_ACTIVE);
++ local_irq_enable();
++ schedule();
++ local_irq_disable();
++ sub_preempt_count(PREEMPT_ACTIVE);
++
++ /*
++ * Check again in case we missed a preemption opportunity
++ * between schedule and now.
++ */
++ barrier();
++ } while (unlikely(test_thread_flag(TIF_NEED_RESCHED)));
++}
++
++#endif /* CONFIG_PREEMPT */
++
++int default_wake_function(wait_queue_t *curr, unsigned mode, int sync,
++ void *key)
++{
++ return try_to_wake_up(curr->private, mode, sync);
++}
++EXPORT_SYMBOL(default_wake_function);
++
++/*
++ * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just
++ * wake everything up. If it's an exclusive wakeup (nr_exclusive == small +ve
++ * number) then we wake all the non-exclusive tasks and one exclusive task.
++ *
++ * There are circumstances in which we can try to wake a task which has already
++ * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
++ * zero in this (rare) case, and we handle it by continuing to scan the queue.
++ */
++static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
++ int nr_exclusive, int sync, void *key)
++{
++ wait_queue_t *curr, *next;
++
++ list_for_each_entry_safe(curr, next, &q->task_list, task_list) {
++ unsigned flags = curr->flags;
++
++ if (curr->func(curr, mode, sync, key) &&
++ (flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
++ break;
++ }
++}
++
++/**
++ * __wake_up - wake up threads blocked on a waitqueue.
++ * @q: the waitqueue
++ * @mode: which threads
++ * @nr_exclusive: how many wake-one or wake-many threads to wake up
++ * @key: is directly passed to the wakeup function
++ */
++void __wake_up(wait_queue_head_t *q, unsigned int mode,
++ int nr_exclusive, void *key)
++{
++ unsigned long flags;
++
++ spin_lock_irqsave(&q->lock, flags);
++ __wake_up_common(q, mode, nr_exclusive, 0, key);
++ spin_unlock_irqrestore(&q->lock, flags);
++}
++EXPORT_SYMBOL(__wake_up);
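++/*
++ * Illustrative example (not part of the original code): with three
++ * exclusive waiters queued, __wake_up(&q, TASK_NORMAL, 1, NULL) normally
++ * wakes exactly one of them, whereas nr_exclusive == 0 wakes all of them;
++ * non-exclusive waiters on the queue are woken in either case.
++ */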
++
++/*
++ * Same as __wake_up but called with the spinlock in wait_queue_head_t held.
++ */
++void __wake_up_locked(wait_queue_head_t *q, unsigned int mode)
++{
++ __wake_up_common(q, mode, 1, 0, NULL);
++}
++
++/**
++ * __wake_up_sync - wake up threads blocked on a waitqueue.
++ * @q: the waitqueue
++ * @mode: which threads
++ * @nr_exclusive: how many wake-one or wake-many threads to wake up
++ *
++ * The sync wakeup differs in that the waker knows that it will schedule
++ * away soon, so while the target thread will be woken up, it will not
++ * be migrated to another CPU - ie. the two threads are 'synchronized'
++ * with each other. This can prevent needless bouncing between CPUs.
++ *
++ * On UP it can prevent extra preemption.
++ */
++void
++__wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
++{
++ unsigned long flags;
++ int sync = 1;
++
++ if (unlikely(!q))
++ return;
++
++ if (unlikely(!nr_exclusive))
++ sync = 0;
++
++ spin_lock_irqsave(&q->lock, flags);
++ __wake_up_common(q, mode, nr_exclusive, sync, NULL);
++ spin_unlock_irqrestore(&q->lock, flags);
++}
++EXPORT_SYMBOL_GPL(__wake_up_sync); /* For internal use only */
++
++void complete(struct completion *x)
++{
++ unsigned long flags;
++
++ spin_lock_irqsave(&x->wait.lock, flags);
++ x->done++;
++ __wake_up_common(&x->wait, TASK_NORMAL, 1, 0, NULL);
++ spin_unlock_irqrestore(&x->wait.lock, flags);
++}
++EXPORT_SYMBOL(complete);
++
++void complete_all(struct completion *x)
++{
++ unsigned long flags;
++
++ spin_lock_irqsave(&x->wait.lock, flags);
++ x->done += UINT_MAX/2;
++ __wake_up_common(&x->wait, TASK_NORMAL, 0, 0, NULL);
++ spin_unlock_irqrestore(&x->wait.lock, flags);
++}
++EXPORT_SYMBOL(complete_all);
++
++static inline long __sched
++do_wait_for_common(struct completion *x, long timeout, int state)
++{
++ if (!x->done) {
++ DECLARE_WAITQUEUE(wait, current);
++
++ wait.flags |= WQ_FLAG_EXCLUSIVE;
++ __add_wait_queue_tail(&x->wait, &wait);
++ do {
++ if ((state == TASK_INTERRUPTIBLE &&
++ signal_pending(current)) ||
++ (state == TASK_KILLABLE &&
++ fatal_signal_pending(current))) {
++ timeout = -ERESTARTSYS;
++ break;
++ }
++ __set_current_state(state);
++ spin_unlock_irq(&x->wait.lock);
++ timeout = schedule_timeout(timeout);
++ spin_lock_irq(&x->wait.lock);
++ } while (!x->done && timeout);
++ __remove_wait_queue(&x->wait, &wait);
++ if (!x->done)
++ return timeout;
++ }
++ x->done--;
++ return timeout ?: 1;
++}
++
++static long __sched
++wait_for_common(struct completion *x, long timeout, int state)
++{
++ might_sleep();
++
++ spin_lock_irq(&x->wait.lock);
++ timeout = do_wait_for_common(x, timeout, state);
++ spin_unlock_irq(&x->wait.lock);
++ return timeout;
++}
++
++void __sched wait_for_completion(struct completion *x)
++{
++ wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_UNINTERRUPTIBLE);
++}
++EXPORT_SYMBOL(wait_for_completion);
++
++unsigned long __sched
++wait_for_completion_timeout(struct completion *x, unsigned long timeout)
++{
++ return wait_for_common(x, timeout, TASK_UNINTERRUPTIBLE);
++}
++EXPORT_SYMBOL(wait_for_completion_timeout);
++
++int __sched wait_for_completion_interruptible(struct completion *x)
++{
++ long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_INTERRUPTIBLE);
++ if (t == -ERESTARTSYS)
++ return t;
++ return 0;
++}
++EXPORT_SYMBOL(wait_for_completion_interruptible);
++
++unsigned long __sched
++wait_for_completion_interruptible_timeout(struct completion *x,
++ unsigned long timeout)
++{
++ return wait_for_common(x, timeout, TASK_INTERRUPTIBLE);
++}
++EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
++
++int __sched wait_for_completion_killable(struct completion *x)
++{
++ long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_KILLABLE);
++ if (t == -ERESTARTSYS)
++ return t;
++ return 0;
++}
++EXPORT_SYMBOL(wait_for_completion_killable);
++
++/**
++ * try_wait_for_completion - try to decrement a completion without blocking
++ * @x: completion structure
++ *
++ * Returns: 0 if a decrement cannot be done without blocking
++ * 1 if a decrement succeeded.
++ *
++ * If a completion is being used as a counting completion,
++ * attempt to decrement the counter without blocking. This
++ * enables us to avoid waiting if the resource the completion
++ * is protecting is not available.
++ */
++bool try_wait_for_completion(struct completion *x)
++{
++ int ret = 1;
++
++ spin_lock_irq(&x->wait.lock);
++ if (!x->done)
++ ret = 0;
++ else
++ x->done--;
++ spin_unlock_irq(&x->wait.lock);
++ return ret;
++}
++EXPORT_SYMBOL(try_wait_for_completion);
++
++/**
++ * completion_done - Test to see if a completion has any waiters
++ * @x: completion structure
++ *
++ * Returns: 0 if there are waiters (wait_for_completion() in progress)
++ * 1 if there are no waiters.
++ *
++ */
++bool completion_done(struct completion *x)
++{
++ int ret = 1;
++
++ spin_lock_irq(&x->wait.lock);
++ if (!x->done)
++ ret = 0;
++ spin_unlock_irq(&x->wait.lock);
++ return ret;
++}
++EXPORT_SYMBOL(completion_done);
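++/*
++ * Hypothetical usage sketch (illustrative only, not part of this patch),
++ * using the standard completion pattern:
++ *
++ * DECLARE_COMPLETION_ONSTACK(done);
++ * ...hand &done to another context that will call complete(&done)...
++ * wait_for_completion(&done);
++ *
++ * try_wait_for_completion() is the non-blocking variant, and
++ * completion_done() only tests the counter without consuming it.
++ */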
++
++static long __sched
++sleep_on_common(wait_queue_head_t *q, int state, long timeout)
++{
++ unsigned long flags;
++ wait_queue_t wait;
++
++ init_waitqueue_entry(&wait, current);
++
++ __set_current_state(state);
++
++ spin_lock_irqsave(&q->lock, flags);
++ __add_wait_queue(q, &wait);
++ spin_unlock(&q->lock);
++ timeout = schedule_timeout(timeout);
++ spin_lock_irq(&q->lock);
++ __remove_wait_queue(q, &wait);
++ spin_unlock_irqrestore(&q->lock, flags);
++
++ return timeout;
++}
++
++void __sched interruptible_sleep_on(wait_queue_head_t *q)
++{
++ sleep_on_common(q, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
++}
++EXPORT_SYMBOL(interruptible_sleep_on);
++
++long __sched
++interruptible_sleep_on_timeout(wait_queue_head_t *q, long timeout)
++{
++ return sleep_on_common(q, TASK_INTERRUPTIBLE, timeout);
++}
++EXPORT_SYMBOL(interruptible_sleep_on_timeout);
++
++void __sched sleep_on(wait_queue_head_t *q)
++{
++ sleep_on_common(q, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
++}
++EXPORT_SYMBOL(sleep_on);
++
++long __sched sleep_on_timeout(wait_queue_head_t *q, long timeout)
++{
++ return sleep_on_common(q, TASK_UNINTERRUPTIBLE, timeout);
++}
++EXPORT_SYMBOL(sleep_on_timeout);
++
++#ifdef CONFIG_RT_MUTEXES
++
++/*
++ * rt_mutex_setprio - set the current priority of a task
++ * @p: task
++ * @prio: prio value (kernel-internal form)
++ *
++ * This function changes the 'effective' priority of a task. It does
++ * not touch ->normal_prio like __setscheduler().
++ *
++ * Used by the rt_mutex code to implement priority inheritance logic.
++ */
++void rt_mutex_setprio(struct task_struct *p, int prio)
++{
++ unsigned long flags;
++ int oldprio, on_rq, running;
++ struct rq *rq;
++ const struct sched_class *prev_class = p->sched_class;
++
++ BUG_ON(prio < 0 || prio > MAX_PRIO);
++
++ rq = task_rq_lock(p, &flags);
++ update_rq_clock(rq);
++
++ oldprio = p->prio;
++ on_rq = p->se.on_rq;
++ running = task_current(rq, p);
++ if (on_rq)
++ dequeue_task(rq, p, 0);
++ if (running)
++ p->sched_class->put_prev_task(rq, p);
++
++ if (rt_prio(prio))
++ p->sched_class = &rt_sched_class;
++ else
++ p->sched_class = &fair_sched_class;
++
++ p->prio = prio;
++
++ if (running)
++ p->sched_class->set_curr_task(rq);
++ if (on_rq) {
++ enqueue_task(rq, p, 0);
++
++ check_class_changed(rq, p, prev_class, oldprio, running);
++ }
++ task_rq_unlock(rq, &flags);
++}
++
++#endif
++
++void set_user_nice(struct task_struct *p, long nice)
++{
++ int old_prio, delta, on_rq;
++ unsigned long flags;
++ struct rq *rq;
++
++ if (TASK_NICE(p) == nice || nice < -20 || nice > 19)
++ return;
++ /*
++ * We have to be careful, if called from sys_setpriority(),
++ * the task might be in the middle of scheduling on another CPU.
++ */
++ rq = task_rq_lock(p, &flags);
++ update_rq_clock(rq);
++ /*
++ * The RT priorities are set via sched_setscheduler(), but we still
++ * allow the 'normal' nice value to be set - but as expected
++ * it won't have any effect on scheduling until the task is
++ * SCHED_FIFO/SCHED_RR:
++ */
++ if (task_has_rt_policy(p)) {
++ p->static_prio = NICE_TO_PRIO(nice);
++ goto out_unlock;
++ }
++ on_rq = p->se.on_rq;
++ if (on_rq)
++ dequeue_task(rq, p, 0);
++
++ p->static_prio = NICE_TO_PRIO(nice);
++ set_load_weight(p);
++ old_prio = p->prio;
++ p->prio = effective_prio(p);
++ delta = p->prio - old_prio;
++
++ if (on_rq) {
++ enqueue_task(rq, p, 0);
++ /*
++ * If the task increased its priority or is running and
++ * lowered its priority, then reschedule its CPU:
++ */
++ if (delta < 0 || (delta > 0 && task_running(rq, p)))
++ resched_task(rq->curr);
++ }
++out_unlock:
++ task_rq_unlock(rq, &flags);
++}
++EXPORT_SYMBOL(set_user_nice);
++
++/*
++ * can_nice - check if a task can reduce its nice value
++ * @p: task
++ * @nice: nice value
++ */
++int can_nice(const struct task_struct *p, const int nice)
++{
++ /* convert nice value [19,-20] to rlimit style value [1,40] */
++ int nice_rlim = 20 - nice;
++
++ return (nice_rlim <= p->signal->rlim[RLIMIT_NICE].rlim_cur ||
++ capable(CAP_SYS_NICE));
++}
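++/*
++ * Illustrative example (not part of the original code): lowering nice to
++ * -5 gives nice_rlim = 20 - (-5) = 25, so the request is allowed only if
++ * the RLIMIT_NICE soft limit is at least 25 or the task has CAP_SYS_NICE.
++ */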
++
++#ifdef __ARCH_WANT_SYS_NICE
++
++/*
++ * sys_nice - change the priority of the current process.
++ * @increment: priority increment
++ *
++ * sys_setpriority is a more generic, but much slower function that
++ * does similar things.
++ */
++SYSCALL_DEFINE1(nice, int, increment)
++{
++ long nice, retval;
++
++ /*
++ * Setpriority might change our priority at the same moment.
++ * We don't have to worry. Conceptually one call occurs first
++ * and we have a single winner.
++ */
++ if (increment < -40)
++ increment = -40;
++ if (increment > 40)
++ increment = 40;
++
++ nice = PRIO_TO_NICE(current->static_prio) + increment;
++ if (nice < -20)
++ nice = -20;
++ if (nice > 19)
++ nice = 19;
++
++ if (increment < 0 && !can_nice(current, nice))
++ return vx_flags(VXF_IGNEG_NICE, 0) ? 0 : -EPERM;
++
++ retval = security_task_setnice(current, nice);
++ if (retval)
++ return retval;
++
++ set_user_nice(current, nice);
++ return 0;
++}
++
++#endif
++
++/**
++ * task_prio - return the priority value of a given task.
++ * @p: the task in question.
++ *
++ * This is the priority value as seen by users in /proc.
++ * RT tasks are offset by -200. Normal tasks are centered
++ * around 0, value goes from -16 to +15.
++ */
++int task_prio(const struct task_struct *p)
++{
++ return p->prio - MAX_RT_PRIO;
++}
++
++/**
++ * task_nice - return the nice value of a given task.
++ * @p: the task in question.
++ */
++int task_nice(const struct task_struct *p)
++{
++ return TASK_NICE(p);
++}
++EXPORT_SYMBOL(task_nice);
++
++/**
++ * idle_cpu - is a given cpu idle currently?
++ * @cpu: the processor in question.
++ */
++int idle_cpu(int cpu)
++{
++ return cpu_curr(cpu) == cpu_rq(cpu)->idle;
++}
++
++/**
++ * idle_task - return the idle task for a given cpu.
++ * @cpu: the processor in question.
++ */
++struct task_struct *idle_task(int cpu)
++{
++ return cpu_rq(cpu)->idle;
++}
++
++/**
++ * find_process_by_pid - find a process with a matching PID value.
++ * @pid: the pid in question.
++ */
++static struct task_struct *find_process_by_pid(pid_t pid)
++{
++ return pid ? find_task_by_vpid(pid) : current;
++}
++
++/* Actually do priority change: must hold rq lock. */
++static void
++__setscheduler(struct rq *rq, struct task_struct *p, int policy, int prio)
++{
++ BUG_ON(p->se.on_rq);
++
++ p->policy = policy;
++ switch (p->policy) {
++ case SCHED_NORMAL:
++ case SCHED_BATCH:
++ case SCHED_IDLE:
++ p->sched_class = &fair_sched_class;
++ break;
++ case SCHED_FIFO:
++ case SCHED_RR:
++ p->sched_class = &rt_sched_class;
++ break;
++ }
++
++ p->rt_priority = prio;
++ p->normal_prio = normal_prio(p);
++ /* we are holding p->pi_lock already */
++ p->prio = rt_mutex_getprio(p);
++ set_load_weight(p);
++}
++
++static int __sched_setscheduler(struct task_struct *p, int policy,
++ struct sched_param *param, bool user)
++{
++ int retval, oldprio, oldpolicy = -1, on_rq, running;
++ unsigned long flags;
++ const struct sched_class *prev_class = p->sched_class;
++ struct rq *rq;
++
++ /* may grab non-irq protected spin_locks */
++ BUG_ON(in_interrupt());
++recheck:
++ /* double check policy once rq lock held */
++ if (policy < 0)
++ policy = oldpolicy = p->policy;
++ else if (policy != SCHED_FIFO && policy != SCHED_RR &&
++ policy != SCHED_NORMAL && policy != SCHED_BATCH &&
++ policy != SCHED_IDLE)
++ return -EINVAL;
++ /*
++ * Valid priorities for SCHED_FIFO and SCHED_RR are
++ * 1..MAX_USER_RT_PRIO-1, valid priority for SCHED_NORMAL,
++ * SCHED_BATCH and SCHED_IDLE is 0.
++ */
++ if (param->sched_priority < 0 ||
++ (p->mm && param->sched_priority > MAX_USER_RT_PRIO-1) ||
++ (!p->mm && param->sched_priority > MAX_RT_PRIO-1))
++ return -EINVAL;
++ if (rt_policy(policy) != (param->sched_priority != 0))
++ return -EINVAL;
++
++ /*
++ * Allow unprivileged RT tasks to decrease priority:
++ */
++ if (user && !capable(CAP_SYS_NICE)) {
++ if (rt_policy(policy)) {
++ unsigned long rlim_rtprio;
++
++ if (!lock_task_sighand(p, &flags))
++ return -ESRCH;
++ rlim_rtprio = p->signal->rlim[RLIMIT_RTPRIO].rlim_cur;
++ unlock_task_sighand(p, &flags);
++
++ /* can't set/change the rt policy */
++ if (policy != p->policy && !rlim_rtprio)
++ return -EPERM;
++
++ /* can't increase priority */
++ if (param->sched_priority > p->rt_priority &&
++ param->sched_priority > rlim_rtprio)
++ return -EPERM;
++ }
++ /*
++ * Like positive nice levels, don't allow tasks to
++ * move out of SCHED_IDLE either:
++ */
++ if (p->policy == SCHED_IDLE && policy != SCHED_IDLE)
++ return -EPERM;
++
++ /* can't change other user's priorities */
++ if ((current->euid != p->euid) &&
++ (current->euid != p->uid))
++ return -EPERM;
++ }
++
++ if (user) {
++#ifdef CONFIG_RT_GROUP_SCHED
++ /*
++ * Do not allow realtime tasks into groups that have no runtime
++ * assigned.
++ */
++ if (rt_policy(policy) && task_group(p)->rt_bandwidth.rt_runtime == 0)
++ return -EPERM;
++#endif
++
++ retval = security_task_setscheduler(p, policy, param);
++ if (retval)
++ return retval;
++ }
++
++ /*
++ * make sure no PI-waiters arrive (or leave) while we are
++ * changing the priority of the task:
++ */
++ spin_lock_irqsave(&p->pi_lock, flags);
++ /*
++ * To be able to change p->policy safely, the appropriate
++ * runqueue lock must be held.
++ */
++ rq = __task_rq_lock(p);
++ /* recheck policy now with rq lock held */
++ if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
++ policy = oldpolicy = -1;
++ __task_rq_unlock(rq);
++ spin_unlock_irqrestore(&p->pi_lock, flags);
++ goto recheck;
++ }
++ update_rq_clock(rq);
++ on_rq = p->se.on_rq;
++ running = task_current(rq, p);
++ if (on_rq)
++ deactivate_task(rq, p, 0);
++ if (running)
++ p->sched_class->put_prev_task(rq, p);
++
++ oldprio = p->prio;
++ __setscheduler(rq, p, policy, param->sched_priority);
++
++ if (running)
++ p->sched_class->set_curr_task(rq);
++ if (on_rq) {
++ activate_task(rq, p, 0);
++
++ check_class_changed(rq, p, prev_class, oldprio, running);
++ }
++ __task_rq_unlock(rq);
++ spin_unlock_irqrestore(&p->pi_lock, flags);
++
++ rt_mutex_adjust_pi(p);
++
++ return 0;
++}
++
++/**
++ * sched_setscheduler - change the scheduling policy and/or RT priority of a thread.
++ * @p: the task in question.
++ * @policy: new policy.
++ * @param: structure containing the new RT priority.
++ *
++ * NOTE that the task may already be dead.
++ */
++int sched_setscheduler(struct task_struct *p, int policy,
++ struct sched_param *param)
++{
++ return __sched_setscheduler(p, policy, param, true);
++}
++EXPORT_SYMBOL_GPL(sched_setscheduler);
++
++/**
++ * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace.
++ * @p: the task in question.
++ * @policy: new policy.
++ * @param: structure containing the new RT priority.
++ *
++ * Just like sched_setscheduler, only don't bother checking if the
++ * current context has permission. For example, this is needed in
++ * stop_machine(): we create temporary high priority worker threads,
++ * but our caller might not have that capability.
++ */
++int sched_setscheduler_nocheck(struct task_struct *p, int policy,
++ struct sched_param *param)
++{
++ return __sched_setscheduler(p, policy, param, false);
++}
++
++static int
++do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
++{
++ struct sched_param lparam;
++ struct task_struct *p;
++ int retval;
++
++ if (!param || pid < 0)
++ return -EINVAL;
++ if (copy_from_user(&lparam, param, sizeof(struct sched_param)))
++ return -EFAULT;
++
++ rcu_read_lock();
++ retval = -ESRCH;
++ p = find_process_by_pid(pid);
++ if (p != NULL)
++ retval = sched_setscheduler(p, policy, &lparam);
++ rcu_read_unlock();
++
++ return retval;
++}
++
++/**
++ * sys_sched_setscheduler - set/change the scheduler policy and RT priority
++ * @pid: the pid in question.
++ * @policy: new policy.
++ * @param: structure containing the new RT priority.
++ */
++SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy,
++ struct sched_param __user *, param)
++{
++ /* negative values for policy are not valid */
++ if (policy < 0)
++ return -EINVAL;
++
++ return do_sched_setscheduler(pid, policy, param);
++}
++
++/**
++ * sys_sched_setparam - set/change the RT priority of a thread
++ * @pid: the pid in question.
++ * @param: structure containing the new RT priority.
++ */
++SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
++{
++ return do_sched_setscheduler(pid, -1, param);
++}
++
++/**
++ * sys_sched_getscheduler - get the policy (scheduling class) of a thread
++ * @pid: the pid in question.
++ */
++SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
++{
++ struct task_struct *p;
++ int retval;
++
++ if (pid < 0)
++ return -EINVAL;
++
++ retval = -ESRCH;
++ read_lock(&tasklist_lock);
++ p = find_process_by_pid(pid);
++ if (p) {
++ retval = security_task_getscheduler(p);
++ if (!retval)
++ retval = p->policy;
++ }
++ read_unlock(&tasklist_lock);
++ return retval;
++}
++
++/**
++ * sys_sched_getparam - get the RT priority of a thread
++ * @pid: the pid in question.
++ * @param: structure containing the RT priority.
++ */
++SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
++{
++ struct sched_param lp;
++ struct task_struct *p;
++ int retval;
++
++ if (!param || pid < 0)
++ return -EINVAL;
++
++ read_lock(&tasklist_lock);
++ p = find_process_by_pid(pid);
++ retval = -ESRCH;
++ if (!p)
++ goto out_unlock;
++
++ retval = security_task_getscheduler(p);
++ if (retval)
++ goto out_unlock;
++
++ lp.sched_priority = p->rt_priority;
++ read_unlock(&tasklist_lock);
++
++ /*
++ * This one might sleep, we cannot do it with a spinlock held ...
++ */
++ retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;
++
++ return retval;
++
++out_unlock:
++ read_unlock(&tasklist_lock);
++ return retval;
++}
++
++long sched_setaffinity(pid_t pid, const cpumask_t *in_mask)
++{
++ cpumask_t cpus_allowed;
++ cpumask_t new_mask = *in_mask;
++ struct task_struct *p;
++ int retval;
++
++ get_online_cpus();
++ read_lock(&tasklist_lock);
++
++ p = find_process_by_pid(pid);
++ if (!p) {
++ read_unlock(&tasklist_lock);
++ put_online_cpus();
++ return -ESRCH;
++ }
++
++ /*
++ * It is not safe to call set_cpus_allowed with the
++ * tasklist_lock held. We will bump the task_struct's
++ * usage count and then drop tasklist_lock.
++ */
++ get_task_struct(p);
++ read_unlock(&tasklist_lock);
++
++ retval = -EPERM;
++ if ((current->euid != p->euid) && (current->euid != p->uid) &&
++ !capable(CAP_SYS_NICE))
++ goto out_unlock;
++
++ retval = security_task_setscheduler(p, 0, NULL);
++ if (retval)
++ goto out_unlock;
++
++ cpuset_cpus_allowed(p, &cpus_allowed);
++ cpus_and(new_mask, new_mask, cpus_allowed);
++ again:
++ retval = set_cpus_allowed_ptr(p, &new_mask);
++
++ if (!retval) {
++ cpuset_cpus_allowed(p, &cpus_allowed);
++ if (!cpus_subset(new_mask, cpus_allowed)) {
++ /*
++ * We must have raced with a concurrent cpuset
++ * update. Just reset the cpus_allowed to the
++ * cpuset's cpus_allowed
++ */
++ new_mask = cpus_allowed;
++ goto again;
++ }
++ }
++out_unlock:
++ put_task_struct(p);
++ put_online_cpus();
++ return retval;
++}
++
++static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len,
++ cpumask_t *new_mask)
++{
++ if (len < sizeof(cpumask_t)) {
++ memset(new_mask, 0, sizeof(cpumask_t));
++ } else if (len > sizeof(cpumask_t)) {
++ len = sizeof(cpumask_t);
++ }
++ return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0;
++}
++
++/**
++ * sys_sched_setaffinity - set the cpu affinity of a process
++ * @pid: pid of the process
++ * @len: length in bytes of the bitmask pointed to by user_mask_ptr
++ * @user_mask_ptr: user-space pointer to the new cpu mask
++ */
++SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len,
++ unsigned long __user *, user_mask_ptr)
++{
++ cpumask_t new_mask;
++ int retval;
++
++ retval = get_user_cpu_mask(user_mask_ptr, len, &new_mask);
++ if (retval)
++ return retval;
++
++ return sched_setaffinity(pid, &new_mask);
++}
++
++long sched_getaffinity(pid_t pid, cpumask_t *mask)
++{
++ struct task_struct *p;
++ int retval;
++
++ get_online_cpus();
++ read_lock(&tasklist_lock);
++
++ retval = -ESRCH;
++ p = find_process_by_pid(pid);
++ if (!p)
++ goto out_unlock;
++
++ retval = security_task_getscheduler(p);
++ if (retval)
++ goto out_unlock;
++
++ cpus_and(*mask, p->cpus_allowed, cpu_online_map);
++
++out_unlock:
++ read_unlock(&tasklist_lock);
++ put_online_cpus();
++
++ return retval;
++}
++
++/**
++ * sys_sched_getaffinity - get the cpu affinity of a process
++ * @pid: pid of the process
++ * @len: length in bytes of the bitmask pointed to by user_mask_ptr
++ * @user_mask_ptr: user-space pointer to hold the current cpu mask
++ */
++SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
++ unsigned long __user *, user_mask_ptr)
++{
++ int ret;
++ cpumask_t mask;
++
++ if (len < sizeof(cpumask_t))
++ return -EINVAL;
++
++ ret = sched_getaffinity(pid, &mask);
++ if (ret < 0)
++ return ret;
++
++ if (copy_to_user(user_mask_ptr, &mask, sizeof(cpumask_t)))
++ return -EFAULT;
++
++ return sizeof(cpumask_t);
++}
++
++/**
++ * sys_sched_yield - yield the current processor to other threads.
++ *
++ * This function yields the current CPU to other tasks. If there are no
++ * other threads running on this CPU then this function will return.
++ */
++SYSCALL_DEFINE0(sched_yield)
++{
++ struct rq *rq = this_rq_lock();
++
++ schedstat_inc(rq, yld_count);
++ current->sched_class->yield_task(rq);
++
++ /*
++ * Since we are going to call schedule() anyway, there's
++ * no need to preempt or enable interrupts:
++ */
++ __release(rq->lock);
++ spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
++ _raw_spin_unlock(&rq->lock);
++ preempt_enable_no_resched();
++
++ schedule();
++
++ return 0;
++}
++
++static void __cond_resched(void)
++{
++#ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
++ __might_sleep(__FILE__, __LINE__);
++#endif
++ /*
++ * The BKS might be reacquired before we have dropped
++ * PREEMPT_ACTIVE, which could trigger a second
++ * cond_resched() call.
++ */
++ do {
++ add_preempt_count(PREEMPT_ACTIVE);
++ schedule();
++ sub_preempt_count(PREEMPT_ACTIVE);
++ } while (need_resched());
++}
++
++int __sched _cond_resched(void)
++{
++ if (need_resched() && !(preempt_count() & PREEMPT_ACTIVE) &&
++ system_state == SYSTEM_RUNNING) {
++ __cond_resched();
++ return 1;
++ }
++ return 0;
++}
++EXPORT_SYMBOL(_cond_resched);
++
++/*
++ * cond_resched_lock() - if a reschedule is pending, drop the given lock,
++ * call schedule, and on return reacquire the lock.
++ *
++ * This works OK both with and without CONFIG_PREEMPT. We do strange low-level
++ * operations here to prevent schedule() from being called twice (once via
++ * spin_unlock(), once by hand).
++ */
++int cond_resched_lock(spinlock_t *lock)
++{
++ int resched = need_resched() && system_state == SYSTEM_RUNNING;
++ int ret = 0;
++
++ if (spin_needbreak(lock) || resched) {
++ spin_unlock(lock);
++ if (resched && need_resched())
++ __cond_resched();
++ else
++ cpu_relax();
++ ret = 1;
++ spin_lock(lock);
++ }
++ return ret;
++}
++EXPORT_SYMBOL(cond_resched_lock);
++
++int __sched cond_resched_softirq(void)
++{
++ BUG_ON(!in_softirq());
++
++ if (need_resched() && system_state == SYSTEM_RUNNING) {
++ local_bh_enable();
++ __cond_resched();
++ local_bh_disable();
++ return 1;
++ }
++ return 0;
++}
++EXPORT_SYMBOL(cond_resched_softirq);
++
++/**
++ * yield - yield the current processor to other threads.
++ *
++ * This is a shortcut for kernel-space yielding - it marks the
++ * thread runnable and calls sys_sched_yield().
++ */
++void __sched yield(void)
++{
++ set_current_state(TASK_RUNNING);
++ sys_sched_yield();
++}
++EXPORT_SYMBOL(yield);
++
++/*
++ * This task is about to go to sleep on IO. Increment rq->nr_iowait so
++ * that process accounting knows that this is a task in IO wait state.
++ *
++ * But don't do that if it is a deliberate, throttling IO wait (this task
++ * has set its backing_dev_info: the queue against which it should throttle)
++ */
++void __sched io_schedule(void)
++{
++ struct rq *rq = &__raw_get_cpu_var(runqueues);
++
++ delayacct_blkio_start();
++ atomic_inc(&rq->nr_iowait);
++ schedule();
++ atomic_dec(&rq->nr_iowait);
++ delayacct_blkio_end();
++}
++EXPORT_SYMBOL(io_schedule);
++
++long __sched io_schedule_timeout(long timeout)
++{
++ struct rq *rq = &__raw_get_cpu_var(runqueues);
++ long ret;
++
++ delayacct_blkio_start();
++ atomic_inc(&rq->nr_iowait);
++ ret = schedule_timeout(timeout);
++ atomic_dec(&rq->nr_iowait);
++ delayacct_blkio_end();
++ return ret;
++}
++
++/**
++ * sys_sched_get_priority_max - return maximum RT priority.
++ * @policy: scheduling class.
++ *
++ * this syscall returns the maximum rt_priority that can be used
++ * by a given scheduling class.
++ */
++SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
++{
++ int ret = -EINVAL;
++
++ switch (policy) {
++ case SCHED_FIFO:
++ case SCHED_RR:
++ ret = MAX_USER_RT_PRIO-1;
++ break;
++ case SCHED_NORMAL:
++ case SCHED_BATCH:
++ case SCHED_IDLE:
++ ret = 0;
++ break;
++ }
++ return ret;
++}
++
++/**
++ * sys_sched_get_priority_min - return minimum RT priority.
++ * @policy: scheduling class.
++ *
++ * this syscall returns the minimum rt_priority that can be used
++ * by a given scheduling class.
++ */
++SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
++{
++ int ret = -EINVAL;
++
++ switch (policy) {
++ case SCHED_FIFO:
++ case SCHED_RR:
++ ret = 1;
++ break;
++ case SCHED_NORMAL:
++ case SCHED_BATCH:
++ case SCHED_IDLE:
++ ret = 0;
++ }
++ return ret;
++}
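++/*
++ * Illustrative values (assuming the stock MAX_USER_RT_PRIO of 100; not
++ * part of the original code): SCHED_FIFO and SCHED_RR report a priority
++ * range of 1..99, while SCHED_NORMAL, SCHED_BATCH and SCHED_IDLE report
++ * a fixed 0..0 range.
++ */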
++
++/**
++ * sys_sched_rr_get_interval - return the default timeslice of a process.
++ * @pid: pid of the process.
++ * @interval: userspace pointer to the timeslice value.
++ *
++ * this syscall writes the default timeslice value of a given process
++ * into the user-space timespec buffer. A value of '0' means infinity.
++ */
++SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
++ struct timespec __user *, interval)
++{
++ struct task_struct *p;
++ unsigned int time_slice;
++ int retval;
++ struct timespec t;
++
++ if (pid < 0)
++ return -EINVAL;
++
++ retval = -ESRCH;
++ read_lock(&tasklist_lock);
++ p = find_process_by_pid(pid);
++ if (!p)
++ goto out_unlock;
++
++ retval = security_task_getscheduler(p);
++ if (retval)
++ goto out_unlock;
++
++ /*
++ * Time slice is 0 for SCHED_FIFO tasks and for SCHED_OTHER
++ * tasks that are on an otherwise idle runqueue:
++ */
++ time_slice = 0;
++ if (p->policy == SCHED_RR) {
++ time_slice = DEF_TIMESLICE;
++ } else if (p->policy != SCHED_FIFO) {
++ struct sched_entity *se = &p->se;
++ unsigned long flags;
++ struct rq *rq;
++
++ rq = task_rq_lock(p, &flags);
++ if (rq->cfs.load.weight)
++ time_slice = NS_TO_JIFFIES(sched_slice(&rq->cfs, se));
++ task_rq_unlock(rq, &flags);
++ }
++ read_unlock(&tasklist_lock);
++ jiffies_to_timespec(time_slice, &t);
++ retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0;
++ return retval;
++
++out_unlock:
++ read_unlock(&tasklist_lock);
++ return retval;
++}
++
++static const char stat_nam[] = TASK_STATE_TO_CHAR_STR;
++
++void sched_show_task(struct task_struct *p)
++{
++ unsigned long free = 0;
++ unsigned state;
++
++ state = p->state ? __ffs(p->state) + 1 : 0;
++ printk(KERN_INFO "%-13.13s %c", p->comm,
++ state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?');
++#if BITS_PER_LONG == 32
++ if (state == TASK_RUNNING)
++ printk(KERN_CONT " running ");
++ else
++ printk(KERN_CONT " %08lx ", thread_saved_pc(p));
++#else
++ if (state == TASK_RUNNING)
++ printk(KERN_CONT " running task ");
++ else
++ printk(KERN_CONT " %016lx ", thread_saved_pc(p));
++#endif
++#ifdef CONFIG_DEBUG_STACK_USAGE
++ {
++ unsigned long *n = end_of_stack(p);
++ while (!*n)
++ n++;
++ free = (unsigned long)n - (unsigned long)end_of_stack(p);
++ }
++#endif
++ printk(KERN_CONT "%5lu %5d %6d\n", free,
++ task_pid_nr(p), task_pid_nr(p->real_parent));
++
++ show_stack(p, NULL);
++}
++
++void show_state_filter(unsigned long state_filter)
++{
++ struct task_struct *g, *p;
++
++#if BITS_PER_LONG == 32
++ printk(KERN_INFO
++ " task PC stack pid father\n");
++#else
++ printk(KERN_INFO
++ " task PC stack pid father\n");
++#endif
++ read_lock(&tasklist_lock);
++ do_each_thread(g, p) {
++ /*
++ * reset the NMI-timeout, listing all tasks on a slow
++ * console might take a lot of time:
++ */
++ touch_nmi_watchdog();
++ if (!state_filter || (p->state & state_filter))
++ sched_show_task(p);
++ } while_each_thread(g, p);
++
++ touch_all_softlockup_watchdogs();
++
++#ifdef CONFIG_SCHED_DEBUG
++ sysrq_sched_debug_show();
++#endif
++ read_unlock(&tasklist_lock);
++ /*
++ * Only show locks if all tasks are dumped:
++ */
++ if (state_filter == -1)
++ debug_show_all_locks();
++}
++
++void __cpuinit init_idle_bootup_task(struct task_struct *idle)
++{
++ idle->sched_class = &idle_sched_class;
++}
++
++/**
++ * init_idle - set up an idle thread for a given CPU
++ * @idle: task in question
++ * @cpu: cpu the idle task belongs to
++ *
++ * NOTE: this function does not set the idle thread's NEED_RESCHED
++ * flag, to make booting more robust.
++ */
++void __cpuinit init_idle(struct task_struct *idle, int cpu)
++{
++ struct rq *rq = cpu_rq(cpu);
++ unsigned long flags;
++
++ __sched_fork(idle);
++ idle->se.exec_start = sched_clock();
++
++ idle->prio = idle->normal_prio = MAX_PRIO;
++ idle->cpus_allowed = cpumask_of_cpu(cpu);
++ __set_task_cpu(idle, cpu);
++
++ spin_lock_irqsave(&rq->lock, flags);
++ rq->curr = rq->idle = idle;
++#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
++ idle->oncpu = 1;
++#endif
++ spin_unlock_irqrestore(&rq->lock, flags);
++
++ /* Set the preempt count _outside_ the spinlocks! */
++#if defined(CONFIG_PREEMPT)
++ task_thread_info(idle)->preempt_count = (idle->lock_depth >= 0);
++#else
++ task_thread_info(idle)->preempt_count = 0;
++#endif
++ /*
++ * The idle tasks have their own, simple scheduling class:
++ */
++ idle->sched_class = &idle_sched_class;
++}
++
++/*
++ * In a system that switches off the HZ timer, nohz_cpu_mask
++ * indicates which cpus entered this state. This is used
++ * in the rcu update to wait only for active cpus. For systems
++ * which do not switch off the HZ timer, nohz_cpu_mask should
++ * always be CPU_MASK_NONE.
++ */
++cpumask_t nohz_cpu_mask = CPU_MASK_NONE;
++
++/*
++ * Increase the granularity value when there are more CPUs,
++ * because with more CPUs the 'effective latency' as visible
++ * to users decreases. But the relationship is not linear,
++ * so pick a second-best guess by going with the log2 of the
++ * number of CPUs.
++ *
++ * This idea comes from the SD scheduler of Con Kolivas:
++ */
++static inline void sched_init_granularity(void)
++{
++ unsigned int factor = 1 + ilog2(num_online_cpus());
++ const unsigned long limit = 200000000;
++
++ sysctl_sched_min_granularity *= factor;
++ if (sysctl_sched_min_granularity > limit)
++ sysctl_sched_min_granularity = limit;
++
++ sysctl_sched_latency *= factor;
++ if (sysctl_sched_latency > limit)
++ sysctl_sched_latency = limit;
++
++ sysctl_sched_wakeup_granularity *= factor;
++
++ sysctl_sched_shares_ratelimit *= factor;
++}
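++/*
++ * Worked example (illustrative, not part of the original code): with 4
++ * online cpus, factor = 1 + ilog2(4) = 3, so the default minimum
++ * granularity, latency and wakeup granularity are tripled, with the
++ * first two capped at the 200 ms (200000000 ns) limit above.
++ */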
++
++#ifdef CONFIG_SMP
++/*
++ * This is how migration works:
++ *
++ * 1) we queue a struct migration_req structure in the source CPU's
++ * runqueue and wake up that CPU's migration thread.
++ * 2) we down() the locked semaphore => thread blocks.
++ * 3) migration thread wakes up (implicitly it forces the migrated
++ * thread off the CPU)
++ * 4) it gets the migration request and checks whether the migrated
++ * task is still in the wrong runqueue.
++ * 5) if it's in the wrong runqueue then the migration thread removes
++ * it and puts it into the right queue.
++ * 6) migration thread up()s the semaphore.
++ * 7) we wake up and the migration is done.
++ */
++
++/*
++ * Change a given task's CPU affinity. Migrate the thread to a
++ * proper CPU and schedule it away if the CPU it's executing on
++ * is removed from the allowed bitmask.
++ *
++ * NOTE: the caller must have a valid reference to the task, the
++ * task must not exit() & deallocate itself prematurely. The
++ * call is not atomic; no spinlocks may be held.
++ */
++int set_cpus_allowed_ptr(struct task_struct *p, const cpumask_t *new_mask)
++{
++ struct migration_req req;
++ unsigned long flags;
++ struct rq *rq;
++ int ret = 0;
++
++ rq = task_rq_lock(p, &flags);
++ if (!cpus_intersects(*new_mask, cpu_online_map)) {
++ ret = -EINVAL;
++ goto out;
++ }
++
++ if (unlikely((p->flags & PF_THREAD_BOUND) && p != current &&
++ !cpus_equal(p->cpus_allowed, *new_mask))) {
++ ret = -EINVAL;
++ goto out;
++ }
++
++ if (p->sched_class->set_cpus_allowed)
++ p->sched_class->set_cpus_allowed(p, new_mask);
++ else {
++ p->cpus_allowed = *new_mask;
++ p->rt.nr_cpus_allowed = cpus_weight(*new_mask);
++ }
++
++ /* Can the task run on the task's current CPU? If so, we're done */
++ if (cpu_isset(task_cpu(p), *new_mask))
++ goto out;
++
++ if (migrate_task(p, any_online_cpu(*new_mask), &req)) {
++ /* Need help from migration thread: drop lock and wait. */
++ task_rq_unlock(rq, &flags);
++ wake_up_process(rq->migration_thread);
++ wait_for_completion(&req.done);
++ tlb_migrate_finish(p->mm);
++ return 0;
++ }
++out:
++ task_rq_unlock(rq, &flags);
++
++ return ret;
++}
++EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
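
As the comment above notes, set_cpus_allowed_ptr() returns 0 on success and -EINVAL when the requested mask contains no online CPU. A minimal sketch of a hypothetical in-kernel caller pinning a task to CPU 2 with the 2.6.27-era cpumask_t helpers (pin_task_to_cpu2 is an illustrative name, not part of the patch):

#include <linux/cpumask.h>
#include <linux/sched.h>

/* Illustrative helper: restrict @p to CPU 2 only. */
static int pin_task_to_cpu2(struct task_struct *p)
{
        cpumask_t mask = CPU_MASK_NONE;

        cpu_set(2, mask);                      /* old-style cpumask accessor */
        return set_cpus_allowed_ptr(p, &mask); /* 0 on success, -EINVAL otherwise */
}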
++
++/*
++ * Move a (non-current) task off this cpu, onto the dest cpu. We're doing
++ * this either because it can't run here any more (it was set_cpus_allowed()
++ * away from this CPU, or its CPU is going down), or because we're
++ * attempting to rebalance the task on exec (sched_exec).
++ *
++ * So we race with normal scheduler movements, but that's OK, as long
++ * as the task is no longer on this CPU.
++ *
++ * Returns non-zero if task was successfully migrated.
++ */
++static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
++{
++ struct rq *rq_dest, *rq_src;
++ int ret = 0, on_rq;
++
++ if (unlikely(!cpu_active(dest_cpu)))
++ return ret;
++
++ rq_src = cpu_rq(src_cpu);
++ rq_dest = cpu_rq(dest_cpu);
++
++ double_rq_lock(rq_src, rq_dest);
++ /* Already moved. */
++ if (task_cpu(p) != src_cpu)
++ goto done;
++ /* Affinity changed (again). */
++ if (!cpu_isset(dest_cpu, p->cpus_allowed))
++ goto fail;
++
++ on_rq = p->se.on_rq;
++ if (on_rq)
++ deactivate_task(rq_src, p, 0);
++
++ set_task_cpu(p, dest_cpu);
++ if (on_rq) {
++ activate_task(rq_dest, p, 0);
++ check_preempt_curr(rq_dest, p);
++ }
++done:
++ ret = 1;
++fail:
++ double_rq_unlock(rq_src, rq_dest);
++ return ret;
++}
++
++/*
++ * migration_thread - a high-priority system thread that performs
++ * thread migration by bumping the thread off its CPU and then 'pushing'
++ * it onto another runqueue.
++ */
++static int migration_thread(void *data)
++{
++ int cpu = (long)data;
++ struct rq *rq;
++
++ rq = cpu_rq(cpu);
++ BUG_ON(rq->migration_thread != current);
++
++ set_current_state(TASK_INTERRUPTIBLE);
++ while (!kthread_should_stop()) {
++ struct migration_req *req;
++ struct list_head *head;
++
++ spin_lock_irq(&rq->lock);
++
++ if (cpu_is_offline(cpu)) {
++ spin_unlock_irq(&rq->lock);
++ goto wait_to_die;
++ }
++
++ if (rq->active_balance) {
++ active_load_balance(rq, cpu);
++ rq->active_balance = 0;
++ }
++
++ head = &rq->migration_queue;
++
++ if (list_empty(head)) {
++ spin_unlock_irq(&rq->lock);
++ schedule();
++ set_current_state(TASK_INTERRUPTIBLE);
++ continue;
++ }
++ req = list_entry(head->next, struct migration_req, list);
++ list_del_init(head->next);
++
++ spin_unlock(&rq->lock);
++ __migrate_task(req->task, cpu, req->dest_cpu);
++ local_irq_enable();
++
++ complete(&req->done);
++ }
++ __set_current_state(TASK_RUNNING);
++ return 0;
++
++wait_to_die:
++ /* Wait for kthread_stop */
++ set_current_state(TASK_INTERRUPTIBLE);
++ while (!kthread_should_stop()) {
++ schedule();
++ set_current_state(TASK_INTERRUPTIBLE);
++ }
++ __set_current_state(TASK_RUNNING);
++ return 0;
++}
++
++#ifdef CONFIG_HOTPLUG_CPU
++
++static int __migrate_task_irq(struct task_struct *p, int src_cpu, int dest_cpu)
++{
++ int ret;
++
++ local_irq_disable();
++ ret = __migrate_task(p, src_cpu, dest_cpu);
++ local_irq_enable();
++ return ret;
++}
++
++/*
++ * Figure out where a task on the dead CPU should go; use force if necessary.
++ * NOTE: interrupts should be disabled by the caller
++ */
++static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p)
++{
++ unsigned long flags;
++ cpumask_t mask;
++ struct rq *rq;
++ int dest_cpu;
++
++ do {
++ /* On same node? */
++ mask = node_to_cpumask(cpu_to_node(dead_cpu));
++ cpus_and(mask, mask, p->cpus_allowed);
++ dest_cpu = any_online_cpu(mask);
++
++ /* On any allowed CPU? */
++ if (dest_cpu >= nr_cpu_ids)
++ dest_cpu = any_online_cpu(p->cpus_allowed);
++
++ /* No more Mr. Nice Guy. */
++ if (dest_cpu >= nr_cpu_ids) {
++ cpumask_t cpus_allowed;
++
++ cpuset_cpus_allowed_locked(p, &cpus_allowed);
++ /*
++ * Try to stay on the same cpuset, where the
++ * current cpuset may be a subset of all cpus.
++ * The cpuset_cpus_allowed_locked() variant of
++ * cpuset_cpus_allowed() will not block. It must be
++ * called within calls to cpuset_lock/cpuset_unlock.
++ */
++ rq = task_rq_lock(p, &flags);
++ p->cpus_allowed = cpus_allowed;
++ dest_cpu = any_online_cpu(p->cpus_allowed);
++ task_rq_unlock(rq, &flags);
++
++ /*
++ * Don't tell them about moving exiting tasks or
++ * kernel threads (both mm NULL), since they never
++ * leave kernel.
++ */
++ if (p->mm && printk_ratelimit()) {
++ printk(KERN_INFO "process %d (%s) no "
++ "longer affine to cpu%d\n",
++ task_pid_nr(p), p->comm, dead_cpu);
++ }
++ }
++ } while (!__migrate_task_irq(p, dead_cpu, dest_cpu));
++}
++
++/*
++ * While a dead CPU has no uninterruptible tasks queued at this point,
++ * it might still have a nonzero ->nr_uninterruptible counter, because
++ * for performance reasons the counter is not strictly tracking tasks to
++ * their home CPUs. So we just add the counter to another CPU's counter,
++ * to keep the global sum constant after CPU-down:
++ */
++static void migrate_nr_uninterruptible(struct rq *rq_src)
++{
++ struct rq *rq_dest = cpu_rq(any_online_cpu(*CPU_MASK_ALL_PTR));
++ unsigned long flags;
++
++ local_irq_save(flags);
++ double_rq_lock(rq_src, rq_dest);
++ rq_dest->nr_uninterruptible += rq_src->nr_uninterruptible;
++ rq_src->nr_uninterruptible = 0;
++ double_rq_unlock(rq_src, rq_dest);
++ local_irq_restore(flags);
++}
++
++/* Run through task list and migrate tasks from the dead cpu. */
++static void migrate_live_tasks(int src_cpu)
++{
++ struct task_struct *p, *t;
++
++ read_lock(&tasklist_lock);
++
++ do_each_thread(t, p) {
++ if (p == current)
++ continue;
++
++ if (task_cpu(p) == src_cpu)
++ move_task_off_dead_cpu(src_cpu, p);
++ } while_each_thread(t, p);
++
++ read_unlock(&tasklist_lock);
++}
++
++/*
++ * Schedules the idle task to be the next runnable task on the current CPU.
++ * It does so by boosting its priority to the highest possible.
++ * Used by the CPU offline code.
++ */
++void sched_idle_next(void)
++{
++ int this_cpu = smp_processor_id();
++ struct rq *rq = cpu_rq(this_cpu);
++ struct task_struct *p = rq->idle;
++ unsigned long flags;
++
++ /* cpu has to be offline */
++ BUG_ON(cpu_online(this_cpu));
++
++ /*
++ * Strictly not necessary, since the rest of the CPUs are stopped by now
++ * and interrupts are disabled on the current cpu.
++ */
++ spin_lock_irqsave(&rq->lock, flags);
++
++ __setscheduler(rq, p, SCHED_FIFO, MAX_RT_PRIO-1);
++
++ update_rq_clock(rq);
++ activate_task(rq, p, 0);
++
++ spin_unlock_irqrestore(&rq->lock, flags);
++}
++
++/*
++ * Ensures that the idle task is using init_mm right before its cpu goes
++ * offline.
++ */
++void idle_task_exit(void)
++{
++ struct mm_struct *mm = current->active_mm;
++
++ BUG_ON(cpu_online(smp_processor_id()));
++
++ if (mm != &init_mm)
++ switch_mm(mm, &init_mm, current);
++ mmdrop(mm);
++}
++
++/* called under rq->lock with disabled interrupts */
++static void migrate_dead(unsigned int dead_cpu, struct task_struct *p)
++{
++ struct rq *rq = cpu_rq(dead_cpu);
++
++ /* Must be exiting, otherwise would be on tasklist. */
++ BUG_ON(!p->exit_state);
++
++ /* Cannot have done final schedule yet: would have vanished. */
++ BUG_ON(p->state == TASK_DEAD);
++
++ get_task_struct(p);
++
++ /*
++ * Drop lock around migration; if someone else moves it,
++ * that's OK. No task can be added to this CPU, so iteration is
++ * fine.
++ */
++ spin_unlock_irq(&rq->lock);
++ move_task_off_dead_cpu(dead_cpu, p);
++ spin_lock_irq(&rq->lock);
++
++ put_task_struct(p);
++}
++
++/* release_task() removes task from tasklist, so we won't find dead tasks. */
++static void migrate_dead_tasks(unsigned int dead_cpu)
++{
++ struct rq *rq = cpu_rq(dead_cpu);
++ struct task_struct *next;
++
++ for ( ; ; ) {
++ if (!rq->nr_running)
++ break;
++ update_rq_clock(rq);
++ next = pick_next_task(rq, rq->curr);
++ if (!next)
++ break;
++ next->sched_class->put_prev_task(rq, next);
++ migrate_dead(dead_cpu, next);
++ }
++}
++#endif /* CONFIG_HOTPLUG_CPU */
++
++#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
++
++static struct ctl_table sd_ctl_dir[] = {
++ {
++ .procname = "sched_domain",
++ .mode = 0555,
++ },
++ {0, },
++};
++
++static struct ctl_table sd_ctl_root[] = {
++ {
++ .ctl_name = CTL_KERN,
++ .procname = "kernel",
++ .mode = 0555,
++ .child = sd_ctl_dir,
++ },
++ {0, },
++};
++
++static struct ctl_table *sd_alloc_ctl_entry(int n)
++{
++ struct ctl_table *entry =
++ kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);
++
++ return entry;
++}
++
++static void sd_free_ctl_entry(struct ctl_table **tablep)
++{
++ struct ctl_table *entry;
++
++ /*
++ * In the intermediate directories, both the child directory and
++ * procname are dynamically allocated and could fail but the mode
++ * will always be set. In the lowest directory the names are
++ * static strings and all have proc handlers.
++ */
++ for (entry = *tablep; entry->mode; entry++) {
++ if (entry->child)
++ sd_free_ctl_entry(&entry->child);
++ if (entry->proc_handler == NULL)
++ kfree(entry->procname);
++ }
++
++ kfree(*tablep);
++ *tablep = NULL;
++}
++
++static void
++set_table_entry(struct ctl_table *entry,
++ const char *procname, void *data, int maxlen,
++ mode_t mode, proc_handler *proc_handler)
++{
++ entry->procname = procname;
++ entry->data = data;
++ entry->maxlen = maxlen;
++ entry->mode = mode;
++ entry->proc_handler = proc_handler;
++}
++
++static struct ctl_table *
++sd_alloc_ctl_domain_table(struct sched_domain *sd)
++{
++ struct ctl_table *table = sd_alloc_ctl_entry(12);
++
++ if (table == NULL)
++ return NULL;
++
++ set_table_entry(&table[0], "min_interval", &sd->min_interval,
++ sizeof(long), 0644, proc_doulongvec_minmax);
++ set_table_entry(&table[1], "max_interval", &sd->max_interval,
++ sizeof(long), 0644, proc_doulongvec_minmax);
++ set_table_entry(&table[2], "busy_idx", &sd->busy_idx,
++ sizeof(int), 0644, proc_dointvec_minmax);
++ set_table_entry(&table[3], "idle_idx", &sd->idle_idx,
++ sizeof(int), 0644, proc_dointvec_minmax);
++ set_table_entry(&table[4], "newidle_idx", &sd->newidle_idx,
++ sizeof(int), 0644, proc_dointvec_minmax);
++ set_table_entry(&table[5], "wake_idx", &sd->wake_idx,
++ sizeof(int), 0644, proc_dointvec_minmax);
++ set_table_entry(&table[6], "forkexec_idx", &sd->forkexec_idx,
++ sizeof(int), 0644, proc_dointvec_minmax);
++ set_table_entry(&table[7], "busy_factor", &sd->busy_factor,
++ sizeof(int), 0644, proc_dointvec_minmax);
++ set_table_entry(&table[8], "imbalance_pct", &sd->imbalance_pct,
++ sizeof(int), 0644, proc_dointvec_minmax);
++ set_table_entry(&table[9], "cache_nice_tries",
++ &sd->cache_nice_tries,
++ sizeof(int), 0644, proc_dointvec_minmax);
++ set_table_entry(&table[10], "flags", &sd->flags,
++ sizeof(int), 0644, proc_dointvec_minmax);
++ /* &table[11] is terminator */
++
++ return table;
++}
++
++static ctl_table *sd_alloc_ctl_cpu_table(int cpu)
++{
++ struct ctl_table *entry, *table;
++ struct sched_domain *sd;
++ int domain_num = 0, i;
++ char buf[32];
++
++ for_each_domain(cpu, sd)
++ domain_num++;
++ entry = table = sd_alloc_ctl_entry(domain_num + 1);
++ if (table == NULL)
++ return NULL;
++
++ i = 0;
++ for_each_domain(cpu, sd) {
++ snprintf(buf, 32, "domain%d", i);
++ entry->procname = kstrdup(buf, GFP_KERNEL);
++ entry->mode = 0555;
++ entry->child = sd_alloc_ctl_domain_table(sd);
++ entry++;
++ i++;
++ }
++ return table;
++}
++
++static struct ctl_table_header *sd_sysctl_header;
++static void register_sched_domain_sysctl(void)
++{
++ int i, cpu_num = num_online_cpus();
++ struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1);
++ char buf[32];
++
++ WARN_ON(sd_ctl_dir[0].child);
++ sd_ctl_dir[0].child = entry;
++
++ if (entry == NULL)
++ return;
++
++ for_each_online_cpu(i) {
++ snprintf(buf, 32, "cpu%d", i);
++ entry->procname = kstrdup(buf, GFP_KERNEL);
++ entry->mode = 0555;
++ entry->child = sd_alloc_ctl_cpu_table(i);
++ entry++;
++ }
++
++ WARN_ON(sd_sysctl_header);
++ sd_sysctl_header = register_sysctl_table(sd_ctl_root);
++}
++
++/* may be called multiple times per register */
++static void unregister_sched_domain_sysctl(void)
++{
++ if (sd_sysctl_header)
++ unregister_sysctl_table(sd_sysctl_header);
++ sd_sysctl_header = NULL;
++ if (sd_ctl_dir[0].child)
++ sd_free_ctl_entry(&sd_ctl_dir[0].child);
++}
++#else
++static void register_sched_domain_sysctl(void)
++{
++}
++static void unregister_sched_domain_sysctl(void)
++{
++}
++#endif
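
Taken together, sd_ctl_root, sd_ctl_dir and the per-CPU/per-domain tables built above expose the sched_domain tunables under /proc/sys/kernel/. A sketch of the resulting layout for a machine with two domain levels per CPU (paths inferred from the table names above, not captured output):

/*
 * /proc/sys/kernel/sched_domain/
 *         cpu0/
 *                 domain0/    min_interval max_interval busy_idx idle_idx
 *                             newidle_idx wake_idx forkexec_idx busy_factor
 *                             imbalance_pct cache_nice_tries flags
 *                 domain1/    ...same entries...
 *         cpu1/
 *                 ...
 */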
++
++static void set_rq_online(struct rq *rq)
++{
++ if (!rq->online) {
++ const struct sched_class *class;
++
++ cpu_set(rq->cpu, rq->rd->online);
++ rq->online = 1;
++
++ for_each_class(class) {
++ if (class->rq_online)
++ class->rq_online(rq);
++ }
++ }
++}
++
++static void set_rq_offline(struct rq *rq)
++{
++ if (rq->online) {
++ const struct sched_class *class;
++
++ for_each_class(class) {
++ if (class->rq_offline)
++ class->rq_offline(rq);
++ }
++
++ cpu_clear(rq->cpu, rq->rd->online);
++ rq->online = 0;
++ }
++}
++
++/*
++ * migration_call - callback that gets triggered when a CPU is added.
++ * Here we can start up the necessary migration thread for the new CPU.
++ */
++static int __cpuinit
++migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
++{
++ struct task_struct *p;
++ int cpu = (long)hcpu;
++ unsigned long flags;
++ struct rq *rq;
++
++ switch (action) {
++
++ case CPU_UP_PREPARE:
++ case CPU_UP_PREPARE_FROZEN:
++ p = kthread_create(migration_thread, hcpu, "migration/%d", cpu);
++ if (IS_ERR(p))
++ return NOTIFY_BAD;
++ kthread_bind(p, cpu);
++ /* Must be high prio: stop_machine expects to yield to it. */
++ rq = task_rq_lock(p, &flags);
++ __setscheduler(rq, p, SCHED_FIFO, MAX_RT_PRIO-1);
++ task_rq_unlock(rq, &flags);
++ cpu_rq(cpu)->migration_thread = p;
++ break;
++
++ case CPU_ONLINE:
++ case CPU_ONLINE_FROZEN:
++ /* Strictly unnecessary, as first user will wake it. */
++ wake_up_process(cpu_rq(cpu)->migration_thread);
++
++ /* Update our root-domain */
++ rq = cpu_rq(cpu);
++ spin_lock_irqsave(&rq->lock, flags);
++ if (rq->rd) {
++ BUG_ON(!cpu_isset(cpu, rq->rd->span));
++
++ set_rq_online(rq);
++ }
++ spin_unlock_irqrestore(&rq->lock, flags);
++ break;
++
++#ifdef CONFIG_HOTPLUG_CPU
++ case CPU_UP_CANCELED:
++ case CPU_UP_CANCELED_FROZEN:
++ if (!cpu_rq(cpu)->migration_thread)
++ break;
++ /* Unbind it from offline cpu so it can run. Fall thru. */
++ kthread_bind(cpu_rq(cpu)->migration_thread,
++ any_online_cpu(cpu_online_map));
++ kthread_stop(cpu_rq(cpu)->migration_thread);
++ cpu_rq(cpu)->migration_thread = NULL;
++ break;
++
++ case CPU_DEAD:
++ case CPU_DEAD_FROZEN:
++ cpuset_lock(); /* around calls to cpuset_cpus_allowed_lock() */
++ migrate_live_tasks(cpu);
++ rq = cpu_rq(cpu);
++ kthread_stop(rq->migration_thread);
++ rq->migration_thread = NULL;
++ /* Idle task back to normal (off runqueue, low prio) */
++ spin_lock_irq(&rq->lock);
++ update_rq_clock(rq);
++ deactivate_task(rq, rq->idle, 0);
++ rq->idle->static_prio = MAX_PRIO;
++ __setscheduler(rq, rq->idle, SCHED_NORMAL, 0);
++ rq->idle->sched_class = &idle_sched_class;
++ migrate_dead_tasks(cpu);
++ spin_unlock_irq(&rq->lock);
++ cpuset_unlock();
++ migrate_nr_uninterruptible(rq);
++ BUG_ON(rq->nr_running != 0);
++
++ /*
++ * No need to migrate the tasks: it was best-effort if
++ * they didn't take sched_hotcpu_mutex. Just wake up
++ * the requestors.
++ */
++ spin_lock_irq(&rq->lock);
++ while (!list_empty(&rq->migration_queue)) {
++ struct migration_req *req;
++
++ req = list_entry(rq->migration_queue.next,
++ struct migration_req, list);
++ list_del_init(&req->list);
++ spin_unlock_irq(&rq->lock);
++ complete(&req->done);
++ spin_lock_irq(&rq->lock);
++ }
++ spin_unlock_irq(&rq->lock);
++ break;
++
++ case CPU_DYING:
++ case CPU_DYING_FROZEN:
++ /* Update our root-domain */
++ rq = cpu_rq(cpu);
++ spin_lock_irqsave(&rq->lock, flags);
++ if (rq->rd) {
++ BUG_ON(!cpu_isset(cpu, rq->rd->span));
++ set_rq_offline(rq);
++ }
++ spin_unlock_irqrestore(&rq->lock, flags);
++ break;
++#endif
++ }
++ return NOTIFY_OK;
++}
++
++/* Register at highest priority so that task migration (migrate_all_tasks)
++ * happens before everything else.
++ */
++static struct notifier_block __cpuinitdata migration_notifier = {
++ .notifier_call = migration_call,
++ .priority = 10
++};
++
++static int __init migration_init(void)
++{
++ void *cpu = (void *)(long)smp_processor_id();
++ int err;
++
++ /* Start one for the boot CPU: */
++ err = migration_call(&migration_notifier, CPU_UP_PREPARE, cpu);
++ BUG_ON(err == NOTIFY_BAD);
++ migration_call(&migration_notifier, CPU_ONLINE, cpu);
++ register_cpu_notifier(&migration_notifier);
++
++ return err;
++}
++early_initcall(migration_init);
++#endif
++
++#ifdef CONFIG_SMP
++
++#ifdef CONFIG_SCHED_DEBUG
++
++static inline const char *sd_level_to_string(enum sched_domain_level lvl)
++{
++ switch (lvl) {
++ case SD_LV_NONE:
++ return "NONE";
++ case SD_LV_SIBLING:
++ return "SIBLING";
++ case SD_LV_MC:
++ return "MC";
++ case SD_LV_CPU:
++ return "CPU";
++ case SD_LV_NODE:
++ return "NODE";
++ case SD_LV_ALLNODES:
++ return "ALLNODES";
++ case SD_LV_MAX:
++ return "MAX";
++
++ }
++ return "MAX";
++}
++
++static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
++ cpumask_t *groupmask)
++{
++ struct sched_group *group = sd->groups;
++ char str[256];
++
++ cpulist_scnprintf(str, sizeof(str), sd->span);
++ cpus_clear(*groupmask);
++
++ printk(KERN_DEBUG "%*s domain %d: ", level, "", level);
++
++ if (!(sd->flags & SD_LOAD_BALANCE)) {
++ printk("does not load-balance\n");
++ if (sd->parent)
++ printk(KERN_ERR "ERROR: !SD_LOAD_BALANCE domain"
++ " has parent");
++ return -1;
++ }
++
++ printk(KERN_CONT "span %s level %s\n",
++ str, sd_level_to_string(sd->level));
++
++ if (!cpu_isset(cpu, sd->span)) {
++ printk(KERN_ERR "ERROR: domain->span does not contain "
++ "CPU%d\n", cpu);
++ }
++ if (!cpu_isset(cpu, group->cpumask)) {
++ printk(KERN_ERR "ERROR: domain->groups does not contain"
++ " CPU%d\n", cpu);
++ }
++
++ printk(KERN_DEBUG "%*s groups:", level + 1, "");
++ do {
++ if (!group) {
++ printk("\n");
++ printk(KERN_ERR "ERROR: group is NULL\n");
++ break;
++ }
++
++ if (!group->__cpu_power) {
++ printk(KERN_CONT "\n");
++ printk(KERN_ERR "ERROR: domain->cpu_power not "
++ "set\n");
++ break;
++ }
++
++ if (!cpus_weight(group->cpumask)) {
++ printk(KERN_CONT "\n");
++ printk(KERN_ERR "ERROR: empty group\n");
++ break;
++ }
++
++ if (cpus_intersects(*groupmask, group->cpumask)) {
++ printk(KERN_CONT "\n");
++ printk(KERN_ERR "ERROR: repeated CPUs\n");
++ break;
++ }
++
++ cpus_or(*groupmask, *groupmask, group->cpumask);
++
++ cpulist_scnprintf(str, sizeof(str), group->cpumask);
++ printk(KERN_CONT " %s", str);
++
++ group = group->next;
++ } while (group != sd->groups);
++ printk(KERN_CONT "\n");
++
++ if (!cpus_equal(sd->span, *groupmask))
++ printk(KERN_ERR "ERROR: groups don't span domain->span\n");
++
++ if (sd->parent && !cpus_subset(*groupmask, sd->parent->span))
++ printk(KERN_ERR "ERROR: parent span is not a superset "
++ "of domain->span\n");
++ return 0;
++}
++
++static void sched_domain_debug(struct sched_domain *sd, int cpu)
++{
++ cpumask_t *groupmask;
++ int level = 0;
++
++ if (!sd) {
++ printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu);
++ return;
++ }
++
++ printk(KERN_DEBUG "CPU%d attaching sched-domain:\n", cpu);
++
++ groupmask = kmalloc(sizeof(cpumask_t), GFP_KERNEL);
++ if (!groupmask) {
++ printk(KERN_DEBUG "Cannot load-balance (out of memory)\n");
++ return;
++ }
++
++ for (;;) {
++ if (sched_domain_debug_one(sd, cpu, level, groupmask))
++ break;
++ level++;
++ sd = sd->parent;
++ if (!sd)
++ break;
++ }
++ kfree(groupmask);
++}
++#else /* !CONFIG_SCHED_DEBUG */
++# define sched_domain_debug(sd, cpu) do { } while (0)
++#endif /* CONFIG_SCHED_DEBUG */
++
++static int sd_degenerate(struct sched_domain *sd)
++{
++ if (cpus_weight(sd->span) == 1)
++ return 1;
++
++ /* Following flags need at least 2 groups */
++ if (sd->flags & (SD_LOAD_BALANCE |
++ SD_BALANCE_NEWIDLE |
++ SD_BALANCE_FORK |
++ SD_BALANCE_EXEC |
++ SD_SHARE_CPUPOWER |
++ SD_SHARE_PKG_RESOURCES)) {
++ if (sd->groups != sd->groups->next)
++ return 0;
++ }
++
++ /* Following flags don't use groups */
++ if (sd->flags & (SD_WAKE_IDLE |
++ SD_WAKE_AFFINE |
++ SD_WAKE_BALANCE))
++ return 0;
++
++ return 1;
++}
++
++static int
++sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)
++{
++ unsigned long cflags = sd->flags, pflags = parent->flags;
++
++ if (sd_degenerate(parent))
++ return 1;
++
++ if (!cpus_equal(sd->span, parent->span))
++ return 0;
++
++ /* Does parent contain flags not in child? */
++ /* WAKE_BALANCE is a subset of WAKE_AFFINE */
++ if (cflags & SD_WAKE_AFFINE)
++ pflags &= ~SD_WAKE_BALANCE;
++ /* Flags needing groups don't count if only 1 group in parent */
++ if (parent->groups == parent->groups->next) {
++ pflags &= ~(SD_LOAD_BALANCE |
++ SD_BALANCE_NEWIDLE |
++ SD_BALANCE_FORK |
++ SD_BALANCE_EXEC |
++ SD_SHARE_CPUPOWER |
++ SD_SHARE_PKG_RESOURCES);
++ }
++ if (~cflags & pflags)
++ return 0;
++
++ return 1;
++}
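
The final "~cflags & pflags" test above is a plain bitmask subset check: the parent is only considered redundant if every flag it sets is also set in the child. A small stand-alone sketch of that test (the flag values are illustrative, not the real SD_* constants):

#include <stdio.h>

int main(void)
{
        unsigned long cflags = 0x5;   /* child:  flags A and C (illustrative) */
        unsigned long pflags = 0x7;   /* parent: flags A, B and C             */

        /* The parent carries flag B, which the child lacks, so it must stay. */
        printf("parent collapsible: %s\n", (~cflags & pflags) ? "no" : "yes");
        return 0;
}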
++
++static void rq_attach_root(struct rq *rq, struct root_domain *rd)
++{
++ unsigned long flags;
++
++ spin_lock_irqsave(&rq->lock, flags);
++
++ if (rq->rd) {
++ struct root_domain *old_rd = rq->rd;
++
++ if (cpu_isset(rq->cpu, old_rd->online))
++ set_rq_offline(rq);
++
++ cpu_clear(rq->cpu, old_rd->span);
++
++ if (atomic_dec_and_test(&old_rd->refcount))
++ kfree(old_rd);
++ }
++
++ atomic_inc(&rd->refcount);
++ rq->rd = rd;
++
++ cpu_set(rq->cpu, rd->span);
++ if (cpu_isset(rq->cpu, cpu_online_map))
++ set_rq_online(rq);
++
++ spin_unlock_irqrestore(&rq->lock, flags);
++}
++
++static void init_rootdomain(struct root_domain *rd)
++{
++ memset(rd, 0, sizeof(*rd));
++
++ cpus_clear(rd->span);
++ cpus_clear(rd->online);
++
++ cpupri_init(&rd->cpupri);
++}
++
++static void init_defrootdomain(void)
++{
++ init_rootdomain(&def_root_domain);
++ atomic_set(&def_root_domain.refcount, 1);
++}
++
++static struct root_domain *alloc_rootdomain(void)
++{
++ struct root_domain *rd;
++
++ rd = kmalloc(sizeof(*rd), GFP_KERNEL);
++ if (!rd)
++ return NULL;
++
++ init_rootdomain(rd);
++
++ return rd;
++}
++
++/*
++ * Attach the domain 'sd' to 'cpu' as its base domain. Callers must
++ * hold the hotplug lock.
++ */
++static void
++cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
++{
++ struct rq *rq = cpu_rq(cpu);
++ struct sched_domain *tmp;
++
++ /* Remove the sched domains which do not contribute to scheduling. */
++ for (tmp = sd; tmp; ) {
++ struct sched_domain *parent = tmp->parent;
++ if (!parent)
++ break;
++
++ if (sd_parent_degenerate(tmp, parent)) {
++ tmp->parent = parent->parent;
++ if (parent->parent)
++ parent->parent->child = tmp;
++ } else
++ tmp = tmp->parent;
++ }
++
++ if (sd && sd_degenerate(sd)) {
++ sd = sd->parent;
++ if (sd)
++ sd->child = NULL;
++ }
++
++ sched_domain_debug(sd, cpu);
++
++ rq_attach_root(rq, rd);
++ rcu_assign_pointer(rq->sd, sd);
++}
++
++/* cpus with isolated domains */
++static cpumask_t cpu_isolated_map = CPU_MASK_NONE;
++
++/* Setup the mask of cpus configured for isolated domains */
++static int __init isolated_cpu_setup(char *str)
++{
++ static int __initdata ints[NR_CPUS];
++ int i;
++
++ str = get_options(str, ARRAY_SIZE(ints), ints);
++ cpus_clear(cpu_isolated_map);
++ for (i = 1; i <= ints[0]; i++)
++ if (ints[i] < NR_CPUS)
++ cpu_set(ints[i], cpu_isolated_map);
++ return 1;
++}
++
++__setup("isolcpus=", isolated_cpu_setup);
++
++/*
++ * init_sched_build_groups takes the cpumask we wish to span, and a pointer
++ * to a function which identifies what group(along with sched group) a CPU
++ * belongs to. The return value of group_fn must be a >= 0 and < NR_CPUS
++ * (due to the fact that we keep track of groups covered with a cpumask_t).
++ *
++ * init_sched_build_groups will build a circular linked list of the groups
++ * covered by the given span, and will set each group's ->cpumask correctly,
++ * and ->cpu_power to 0.
++ */
++static void
++init_sched_build_groups(const cpumask_t *span, const cpumask_t *cpu_map,
++ int (*group_fn)(int cpu, const cpumask_t *cpu_map,
++ struct sched_group **sg,
++ cpumask_t *tmpmask),
++ cpumask_t *covered, cpumask_t *tmpmask)
++{
++ struct sched_group *first = NULL, *last = NULL;
++ int i;
++
++ cpus_clear(*covered);
++
++ for_each_cpu_mask_nr(i, *span) {
++ struct sched_group *sg;
++ int group = group_fn(i, cpu_map, &sg, tmpmask);
++ int j;
++
++ if (cpu_isset(i, *covered))
++ continue;
++
++ cpus_clear(sg->cpumask);
++ sg->__cpu_power = 0;
++
++ for_each_cpu_mask_nr(j, *span) {
++ if (group_fn(j, cpu_map, NULL, tmpmask) != group)
++ continue;
++
++ cpu_set(j, *covered);
++ cpu_set(j, sg->cpumask);
++ }
++ if (!first)
++ first = sg;
++ if (last)
++ last->next = sg;
++ last = sg;
++ }
++ last->next = first;
++}
++
++#define SD_NODES_PER_DOMAIN 16
++
++#ifdef CONFIG_NUMA
++
++/**
++ * find_next_best_node - find the next node to include in a sched_domain
++ * @node: node whose sched_domain we're building
++ * @used_nodes: nodes already in the sched_domain
++ *
++ * Find the next node to include in a given scheduling domain. Simply
++ * finds the closest node not already in the @used_nodes map.
++ *
++ * Should use nodemask_t.
++ */
++static int find_next_best_node(int node, nodemask_t *used_nodes)
++{
++ int i, n, val, min_val, best_node = 0;
++
++ min_val = INT_MAX;
++
++ for (i = 0; i < nr_node_ids; i++) {
++ /* Start at @node */
++ n = (node + i) % nr_node_ids;
++
++ if (!nr_cpus_node(n))
++ continue;
++
++ /* Skip already used nodes */
++ if (node_isset(n, *used_nodes))
++ continue;
++
++ /* Simple min distance search */
++ val = node_distance(node, n);
++
++ if (val < min_val) {
++ min_val = val;
++ best_node = n;
++ }
++ }
++
++ node_set(best_node, *used_nodes);
++ return best_node;
++}
++
++/**
++ * sched_domain_node_span - get a cpumask for a node's sched_domain
++ * @node: node whose cpumask we're constructing
++ * @span: resulting cpumask
++ *
++ * Given a node, construct a good cpumask for its sched_domain to span. It
++ * should be one that prevents unnecessary balancing, but also spreads tasks
++ * out optimally.
++ */
++static void sched_domain_node_span(int node, cpumask_t *span)
++{
++ nodemask_t used_nodes;
++ node_to_cpumask_ptr(nodemask, node);
++ int i;
++
++ cpus_clear(*span);
++ nodes_clear(used_nodes);
++
++ cpus_or(*span, *span, *nodemask);
++ node_set(node, used_nodes);
++
++ for (i = 1; i < SD_NODES_PER_DOMAIN; i++) {
++ int next_node = find_next_best_node(node, &used_nodes);
++
++ node_to_cpumask_ptr_next(nodemask, next_node);
++ cpus_or(*span, *span, *nodemask);
++ }
++}
++#endif /* CONFIG_NUMA */
++
++int sched_smt_power_savings = 0, sched_mc_power_savings = 0;
++
++/*
++ * SMT sched-domains:
++ */
++#ifdef CONFIG_SCHED_SMT
++static DEFINE_PER_CPU(struct sched_domain, cpu_domains);
++static DEFINE_PER_CPU(struct sched_group, sched_group_cpus);
++
++static int
++cpu_to_cpu_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg,
++ cpumask_t *unused)
++{
++ if (sg)
++ *sg = &per_cpu(sched_group_cpus, cpu);
++ return cpu;
++}
++#endif /* CONFIG_SCHED_SMT */
++
++/*
++ * multi-core sched-domains:
++ */
++#ifdef CONFIG_SCHED_MC
++static DEFINE_PER_CPU(struct sched_domain, core_domains);
++static DEFINE_PER_CPU(struct sched_group, sched_group_core);
++#endif /* CONFIG_SCHED_MC */
++
++#if defined(CONFIG_SCHED_MC) && defined(CONFIG_SCHED_SMT)
++static int
++cpu_to_core_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg,
++ cpumask_t *mask)
++{
++ int group;
++
++ *mask = per_cpu(cpu_sibling_map, cpu);
++ cpus_and(*mask, *mask, *cpu_map);
++ group = first_cpu(*mask);
++ if (sg)
++ *sg = &per_cpu(sched_group_core, group);
++ return group;
++}
++#elif defined(CONFIG_SCHED_MC)
++static int
++cpu_to_core_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg,
++ cpumask_t *unused)
++{
++ if (sg)
++ *sg = &per_cpu(sched_group_core, cpu);
++ return cpu;
++}
++#endif
++
++static DEFINE_PER_CPU(struct sched_domain, phys_domains);
++static DEFINE_PER_CPU(struct sched_group, sched_group_phys);
++
++static int
++cpu_to_phys_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg,
++ cpumask_t *mask)
++{
++ int group;
++#ifdef CONFIG_SCHED_MC
++ *mask = cpu_coregroup_map(cpu);
++ cpus_and(*mask, *mask, *cpu_map);
++ group = first_cpu(*mask);
++#elif defined(CONFIG_SCHED_SMT)
++ *mask = per_cpu(cpu_sibling_map, cpu);
++ cpus_and(*mask, *mask, *cpu_map);
++ group = first_cpu(*mask);
++#else
++ group = cpu;
++#endif
++ if (sg)
++ *sg = &per_cpu(sched_group_phys, group);
++ return group;
++}
++
++#ifdef CONFIG_NUMA
++/*
++ * init_sched_build_groups() can't handle what we want to do with node
++ * groups, so roll our own. Now each node has its own list of groups which
++ * gets dynamically allocated.
++ */
++static DEFINE_PER_CPU(struct sched_domain, node_domains);
++static struct sched_group ***sched_group_nodes_bycpu;
++
++static DEFINE_PER_CPU(struct sched_domain, allnodes_domains);
++static DEFINE_PER_CPU(struct sched_group, sched_group_allnodes);
++
++static int cpu_to_allnodes_group(int cpu, const cpumask_t *cpu_map,
++ struct sched_group **sg, cpumask_t *nodemask)
++{
++ int group;
++
++ *nodemask = node_to_cpumask(cpu_to_node(cpu));
++ cpus_and(*nodemask, *nodemask, *cpu_map);
++ group = first_cpu(*nodemask);
++
++ if (sg)
++ *sg = &per_cpu(sched_group_allnodes, group);
++ return group;
++}
++
++static void init_numa_sched_groups_power(struct sched_group *group_head)
++{
++ struct sched_group *sg = group_head;
++ int j;
++
++ if (!sg)
++ return;
++ do {
++ for_each_cpu_mask_nr(j, sg->cpumask) {
++ struct sched_domain *sd;
++
++ sd = &per_cpu(phys_domains, j);
++ if (j != first_cpu(sd->groups->cpumask)) {
++ /*
++ * Only add "power" once for each
++ * physical package.
++ */
++ continue;
++ }
++
++ sg_inc_cpu_power(sg, sd->groups->__cpu_power);
++ }
++ sg = sg->next;
++ } while (sg != group_head);
++}
++#endif /* CONFIG_NUMA */
++
++#ifdef CONFIG_NUMA
++/* Free memory allocated for various sched_group structures */
++static void free_sched_groups(const cpumask_t *cpu_map, cpumask_t *nodemask)
++{
++ int cpu, i;
++
++ for_each_cpu_mask_nr(cpu, *cpu_map) {
++ struct sched_group **sched_group_nodes
++ = sched_group_nodes_bycpu[cpu];
++
++ if (!sched_group_nodes)
++ continue;
++
++ for (i = 0; i < nr_node_ids; i++) {
++ struct sched_group *oldsg, *sg = sched_group_nodes[i];
++
++ *nodemask = node_to_cpumask(i);
++ cpus_and(*nodemask, *nodemask, *cpu_map);
++ if (cpus_empty(*nodemask))
++ continue;
++
++ if (sg == NULL)
++ continue;
++ sg = sg->next;
++next_sg:
++ oldsg = sg;
++ sg = sg->next;
++ kfree(oldsg);
++ if (oldsg != sched_group_nodes[i])
++ goto next_sg;
++ }
++ kfree(sched_group_nodes);
++ sched_group_nodes_bycpu[cpu] = NULL;
++ }
++}
++#else /* !CONFIG_NUMA */
++static void free_sched_groups(const cpumask_t *cpu_map, cpumask_t *nodemask)
++{
++}
++#endif /* CONFIG_NUMA */
++
++/*
++ * Initialize sched groups cpu_power.
++ *
++ * cpu_power indicates the capacity of a sched group, which is used while
++ * distributing the load between different sched groups in a sched domain.
++ * Typically cpu_power for all the groups in a sched domain will be the same,
++ * unless there are asymmetries in the topology. If there are asymmetries,
++ * a group with more cpu_power will pick up more load than a group with
++ * less cpu_power.
++ *
++ * cpu_power will be a multiple of SCHED_LOAD_SCALE. This multiple represents
++ * the maximum number of tasks a group can handle in the presence of other idle
++ * or lightly loaded groups in the same sched domain.
++ */
++static void init_sched_groups_power(int cpu, struct sched_domain *sd)
++{
++ struct sched_domain *child;
++ struct sched_group *group;
++
++ WARN_ON(!sd || !sd->groups);
++
++ if (cpu != first_cpu(sd->groups->cpumask))
++ return;
++
++ child = sd->child;
++
++ sd->groups->__cpu_power = 0;
++
++ /*
++ * For the performance policy, if the groups in the child domain share
++ * resources (for example cores sharing some portions of the cache
++ * hierarchy, or SMT), then set this domain's groups' cpu_power such that
++ * each group can handle only one task when there are other idle groups
++ * in the same sched domain.
++ */
++ if (!child || (!(sd->flags & SD_POWERSAVINGS_BALANCE) &&
++ (child->flags &
++ (SD_SHARE_CPUPOWER | SD_SHARE_PKG_RESOURCES)))) {
++ sg_inc_cpu_power(sd->groups, SCHED_LOAD_SCALE);
++ return;
++ }
++
++ /*
++ * Add the cpu_power of each child group to this group's cpu_power.
++ */
++ group = child->groups;
++ do {
++ sg_inc_cpu_power(sd->groups, group->__cpu_power);
++ group = group->next;
++ } while (group != child->groups);
++}
++
++/*
++ * Initializers for schedule domains
++ * Non-inlined to reduce accumulated stack pressure in build_sched_domains()
++ */
++
++#define SD_INIT(sd, type) sd_init_##type(sd)
++#define SD_INIT_FUNC(type) \
++static noinline void sd_init_##type(struct sched_domain *sd) \
++{ \
++ memset(sd, 0, sizeof(*sd)); \
++ *sd = SD_##type##_INIT; \
++ sd->level = SD_LV_##type; \
++}
++
++SD_INIT_FUNC(CPU)
++#ifdef CONFIG_NUMA
++ SD_INIT_FUNC(ALLNODES)
++ SD_INIT_FUNC(NODE)
++#endif
++#ifdef CONFIG_SCHED_SMT
++ SD_INIT_FUNC(SIBLING)
++#endif
++#ifdef CONFIG_SCHED_MC
++ SD_INIT_FUNC(MC)
++#endif
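
For reference, expanding SD_INIT_FUNC(CPU) by hand yields roughly the following, so SD_INIT(sd, CPU) zeroes the domain, copies in the architecture's SD_CPU_INIT template and stamps the level (expansion shown for illustration only):

static noinline void sd_init_CPU(struct sched_domain *sd)
{
        memset(sd, 0, sizeof(*sd));
        *sd = SD_CPU_INIT;
        sd->level = SD_LV_CPU;
}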
++
++/*
++ * To minimize stack usage, kmalloc room for the cpumasks and share the
++ * space as usage in build_sched_domains() dictates. Used only
++ * if the amount of space is significant.
++ */
++struct allmasks {
++ cpumask_t tmpmask; /* make this one first */
++ union {
++ cpumask_t nodemask;
++ cpumask_t this_sibling_map;
++ cpumask_t this_core_map;
++ };
++ cpumask_t send_covered;
++
++#ifdef CONFIG_NUMA
++ cpumask_t domainspan;
++ cpumask_t covered;
++ cpumask_t notcovered;
++#endif
++};
++
++#if NR_CPUS > 128
++#define SCHED_CPUMASK_ALLOC 1
++#define SCHED_CPUMASK_FREE(v) kfree(v)
++#define SCHED_CPUMASK_DECLARE(v) struct allmasks *v
++#else
++#define SCHED_CPUMASK_ALLOC 0
++#define SCHED_CPUMASK_FREE(v)
++#define SCHED_CPUMASK_DECLARE(v) struct allmasks _v, *v = &_v
++#endif
++
++#define SCHED_CPUMASK_VAR(v, a) cpumask_t *v = (cpumask_t *) \
++ ((unsigned long)(a) + offsetof(struct allmasks, v))
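
SCHED_CPUMASK_VAR(v, a) declares a pointer into the shared scratch block rather than a separate stack variable. For example, SCHED_CPUMASK_VAR(nodemask, allmasks) expands to roughly the following, so every "local" mask aliases a field of the single struct allmasks:

cpumask_t *nodemask = (cpumask_t *)((unsigned long)(allmasks) +
                                    offsetof(struct allmasks, nodemask));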
++
++static int default_relax_domain_level = -1;
++
++static int __init setup_relax_domain_level(char *str)
++{
++ unsigned long val;
++
++ val = simple_strtoul(str, NULL, 0);
++ if (val < SD_LV_MAX)
++ default_relax_domain_level = val;
++
++ return 1;
++}
++__setup("relax_domain_level=", setup_relax_domain_level);
++
++static void set_domain_attribute(struct sched_domain *sd,
++ struct sched_domain_attr *attr)
++{
++ int request;
++
++ if (!attr || attr->relax_domain_level < 0) {
++ if (default_relax_domain_level < 0)
++ return;
++ else
++ request = default_relax_domain_level;
++ } else
++ request = attr->relax_domain_level;
++ if (request < sd->level) {
++ /* turn off idle balance on this domain */
++ sd->flags &= ~(SD_WAKE_IDLE|SD_BALANCE_NEWIDLE);
++ } else {
++ /* turn on idle balance on this domain */
++ sd->flags |= (SD_WAKE_IDLE_FAR|SD_BALANCE_NEWIDLE);
++ }
++}
++
++/*
++ * Build sched domains for a given set of cpus and attach the sched domains
++ * to the individual cpus
++ */
++static int __build_sched_domains(const cpumask_t *cpu_map,
++ struct sched_domain_attr *attr)
++{
++ int i;
++ struct root_domain *rd;
++ SCHED_CPUMASK_DECLARE(allmasks);
++ cpumask_t *tmpmask;
++#ifdef CONFIG_NUMA
++ struct sched_group **sched_group_nodes = NULL;
++ int sd_allnodes = 0;
++
++ /*
++ * Allocate the per-node list of sched groups
++ */
++ sched_group_nodes = kcalloc(nr_node_ids, sizeof(struct sched_group *),
++ GFP_KERNEL);
++ if (!sched_group_nodes) {
++ printk(KERN_WARNING "Can not alloc sched group node list\n");
++ return -ENOMEM;
++ }
++#endif
++
++ rd = alloc_rootdomain();
++ if (!rd) {
++ printk(KERN_WARNING "Cannot alloc root domain\n");
++#ifdef CONFIG_NUMA
++ kfree(sched_group_nodes);
++#endif
++ return -ENOMEM;
++ }
++
++#if SCHED_CPUMASK_ALLOC
++ /* get space for all scratch cpumask variables */
++ allmasks = kmalloc(sizeof(*allmasks), GFP_KERNEL);
++ if (!allmasks) {
++ printk(KERN_WARNING "Cannot alloc cpumask array\n");
++ kfree(rd);
++#ifdef CONFIG_NUMA
++ kfree(sched_group_nodes);
++#endif
++ return -ENOMEM;
++ }
++#endif
++ tmpmask = (cpumask_t *)allmasks;
++
++#ifdef CONFIG_NUMA
++ sched_group_nodes_bycpu[first_cpu(*cpu_map)] = sched_group_nodes;
++#endif
++
++ /*
++ * Set up domains for cpus specified by the cpu_map.
++ */
++ for_each_cpu_mask_nr(i, *cpu_map) {
++ struct sched_domain *sd = NULL, *p;
++ SCHED_CPUMASK_VAR(nodemask, allmasks);
++
++ *nodemask = node_to_cpumask(cpu_to_node(i));
++ cpus_and(*nodemask, *nodemask, *cpu_map);
++
++#ifdef CONFIG_NUMA
++ if (cpus_weight(*cpu_map) >
++ SD_NODES_PER_DOMAIN*cpus_weight(*nodemask)) {
++ sd = &per_cpu(allnodes_domains, i);
++ SD_INIT(sd, ALLNODES);
++ set_domain_attribute(sd, attr);
++ sd->span = *cpu_map;
++ cpu_to_allnodes_group(i, cpu_map, &sd->groups, tmpmask);
++ p = sd;
++ sd_allnodes = 1;
++ } else
++ p = NULL;
++
++ sd = &per_cpu(node_domains, i);
++ SD_INIT(sd, NODE);
++ set_domain_attribute(sd, attr);
++ sched_domain_node_span(cpu_to_node(i), &sd->span);
++ sd->parent = p;
++ if (p)
++ p->child = sd;
++ cpus_and(sd->span, sd->span, *cpu_map);
++#endif
++
++ p = sd;
++ sd = &per_cpu(phys_domains, i);
++ SD_INIT(sd, CPU);
++ set_domain_attribute(sd, attr);
++ sd->span = *nodemask;
++ sd->parent = p;
++ if (p)
++ p->child = sd;
++ cpu_to_phys_group(i, cpu_map, &sd->groups, tmpmask);
++
++#ifdef CONFIG_SCHED_MC
++ p = sd;
++ sd = &per_cpu(core_domains, i);
++ SD_INIT(sd, MC);
++ set_domain_attribute(sd, attr);
++ sd->span = cpu_coregroup_map(i);
++ cpus_and(sd->span, sd->span, *cpu_map);
++ sd->parent = p;
++ p->child = sd;
++ cpu_to_core_group(i, cpu_map, &sd->groups, tmpmask);
++#endif
++
++#ifdef CONFIG_SCHED_SMT
++ p = sd;
++ sd = &per_cpu(cpu_domains, i);
++ SD_INIT(sd, SIBLING);
++ set_domain_attribute(sd, attr);
++ sd->span = per_cpu(cpu_sibling_map, i);
++ cpus_and(sd->span, sd->span, *cpu_map);
++ sd->parent = p;
++ p->child = sd;
++ cpu_to_cpu_group(i, cpu_map, &sd->groups, tmpmask);
++#endif
++ }
++
++#ifdef CONFIG_SCHED_SMT
++ /* Set up CPU (sibling) groups */
++ for_each_cpu_mask_nr(i, *cpu_map) {
++ SCHED_CPUMASK_VAR(this_sibling_map, allmasks);
++ SCHED_CPUMASK_VAR(send_covered, allmasks);
++
++ *this_sibling_map = per_cpu(cpu_sibling_map, i);
++ cpus_and(*this_sibling_map, *this_sibling_map, *cpu_map);
++ if (i != first_cpu(*this_sibling_map))
++ continue;
++
++ init_sched_build_groups(this_sibling_map, cpu_map,
++ &cpu_to_cpu_group,
++ send_covered, tmpmask);
++ }
++#endif
++
++#ifdef CONFIG_SCHED_MC
++ /* Set up multi-core groups */
++ for_each_cpu_mask_nr(i, *cpu_map) {
++ SCHED_CPUMASK_VAR(this_core_map, allmasks);
++ SCHED_CPUMASK_VAR(send_covered, allmasks);
++
++ *this_core_map = cpu_coregroup_map(i);
++ cpus_and(*this_core_map, *this_core_map, *cpu_map);
++ if (i != first_cpu(*this_core_map))
++ continue;
++
++ init_sched_build_groups(this_core_map, cpu_map,
++ &cpu_to_core_group,
++ send_covered, tmpmask);
++ }
++#endif
++
++ /* Set up physical groups */
++ for (i = 0; i < nr_node_ids; i++) {
++ SCHED_CPUMASK_VAR(nodemask, allmasks);
++ SCHED_CPUMASK_VAR(send_covered, allmasks);
++
++ *nodemask = node_to_cpumask(i);
++ cpus_and(*nodemask, *nodemask, *cpu_map);
++ if (cpus_empty(*nodemask))
++ continue;
++
++ init_sched_build_groups(nodemask, cpu_map,
++ &cpu_to_phys_group,
++ send_covered, tmpmask);
++ }
++
++#ifdef CONFIG_NUMA
++ /* Set up node groups */
++ if (sd_allnodes) {
++ SCHED_CPUMASK_VAR(send_covered, allmasks);
++
++ init_sched_build_groups(cpu_map, cpu_map,
++ &cpu_to_allnodes_group,
++ send_covered, tmpmask);
++ }
++
++ for (i = 0; i < nr_node_ids; i++) {
++ /* Set up node groups */
++ struct sched_group *sg, *prev;
++ SCHED_CPUMASK_VAR(nodemask, allmasks);
++ SCHED_CPUMASK_VAR(domainspan, allmasks);
++ SCHED_CPUMASK_VAR(covered, allmasks);
++ int j;
++
++ *nodemask = node_to_cpumask(i);
++ cpus_clear(*covered);
++
++ cpus_and(*nodemask, *nodemask, *cpu_map);
++ if (cpus_empty(*nodemask)) {
++ sched_group_nodes[i] = NULL;
++ continue;
++ }
++
++ sched_domain_node_span(i, domainspan);
++ cpus_and(*domainspan, *domainspan, *cpu_map);
++
++ sg = kmalloc_node(sizeof(struct sched_group), GFP_KERNEL, i);
++ if (!sg) {
++ printk(KERN_WARNING "Can not alloc domain group for "
++ "node %d\n", i);
++ goto error;
++ }
++ sched_group_nodes[i] = sg;
++ for_each_cpu_mask_nr(j, *nodemask) {
++ struct sched_domain *sd;
++
++ sd = &per_cpu(node_domains, j);
++ sd->groups = sg;
++ }
++ sg->__cpu_power = 0;
++ sg->cpumask = *nodemask;
++ sg->next = sg;
++ cpus_or(*covered, *covered, *nodemask);
++ prev = sg;
++
++ for (j = 0; j < nr_node_ids; j++) {
++ SCHED_CPUMASK_VAR(notcovered, allmasks);
++ int n = (i + j) % nr_node_ids;
++ node_to_cpumask_ptr(pnodemask, n);
++
++ cpus_complement(*notcovered, *covered);
++ cpus_and(*tmpmask, *notcovered, *cpu_map);
++ cpus_and(*tmpmask, *tmpmask, *domainspan);
++ if (cpus_empty(*tmpmask))
++ break;
++
++ cpus_and(*tmpmask, *tmpmask, *pnodemask);
++ if (cpus_empty(*tmpmask))
++ continue;
++
++ sg = kmalloc_node(sizeof(struct sched_group),
++ GFP_KERNEL, i);
++ if (!sg) {
++ printk(KERN_WARNING
++ "Can not alloc domain group for node %d\n", j);
++ goto error;
++ }
++ sg->__cpu_power = 0;
++ sg->cpumask = *tmpmask;
++ sg->next = prev->next;
++ cpus_or(*covered, *covered, *tmpmask);
++ prev->next = sg;
++ prev = sg;
++ }
++ }
++#endif
++
++ /* Calculate CPU power for physical packages and nodes */
++#ifdef CONFIG_SCHED_SMT
++ for_each_cpu_mask_nr(i, *cpu_map) {
++ struct sched_domain *sd = &per_cpu(cpu_domains, i);
++
++ init_sched_groups_power(i, sd);
++ }
++#endif
++#ifdef CONFIG_SCHED_MC
++ for_each_cpu_mask_nr(i, *cpu_map) {
++ struct sched_domain *sd = &per_cpu(core_domains, i);
++
++ init_sched_groups_power(i, sd);
++ }
++#endif
++
++ for_each_cpu_mask_nr(i, *cpu_map) {
++ struct sched_domain *sd = &per_cpu(phys_domains, i);
++
++ init_sched_groups_power(i, sd);
++ }
++
++#ifdef CONFIG_NUMA
++ for (i = 0; i < nr_node_ids; i++)
++ init_numa_sched_groups_power(sched_group_nodes[i]);
++
++ if (sd_allnodes) {
++ struct sched_group *sg;
++
++ cpu_to_allnodes_group(first_cpu(*cpu_map), cpu_map, &sg,
++ tmpmask);
++ init_numa_sched_groups_power(sg);
++ }
++#endif
++
++ /* Attach the domains */
++ for_each_cpu_mask_nr(i, *cpu_map) {
++ struct sched_domain *sd;
++#ifdef CONFIG_SCHED_SMT
++ sd = &per_cpu(cpu_domains, i);
++#elif defined(CONFIG_SCHED_MC)
++ sd = &per_cpu(core_domains, i);
++#else
++ sd = &per_cpu(phys_domains, i);
++#endif
++ cpu_attach_domain(sd, rd, i);
++ }
++
++ SCHED_CPUMASK_FREE((void *)allmasks);
++ return 0;
++
++#ifdef CONFIG_NUMA
++error:
++ free_sched_groups(cpu_map, tmpmask);
++ SCHED_CPUMASK_FREE((void *)allmasks);
++ return -ENOMEM;
++#endif
++}
++
++static int build_sched_domains(const cpumask_t *cpu_map)
++{
++ return __build_sched_domains(cpu_map, NULL);
++}
++
++static cpumask_t *doms_cur; /* current sched domains */
++static int ndoms_cur; /* number of sched domains in 'doms_cur' */
++static struct sched_domain_attr *dattr_cur;
++ /* attributes of custom domains in 'doms_cur' */
++
++/*
++ * Special case: If a kmalloc of a doms_cur partition (array of
++ * cpumask_t) fails, then fall back to a single sched domain,
++ * as determined by the single cpumask_t fallback_doms.
++ */
++static cpumask_t fallback_doms;
++
++void __attribute__((weak)) arch_update_cpu_topology(void)
++{
++}
++
++/*
++ * Set up scheduler domains and groups. Callers must hold the hotplug lock.
++ * For now this just excludes isolated cpus, but could be used to
++ * exclude other special cases in the future.
++ */
++static int arch_init_sched_domains(const cpumask_t *cpu_map)
++{
++ int err;
++
++ arch_update_cpu_topology();
++ ndoms_cur = 1;
++ doms_cur = kmalloc(sizeof(cpumask_t), GFP_KERNEL);
++ if (!doms_cur)
++ doms_cur = &fallback_doms;
++ cpus_andnot(*doms_cur, *cpu_map, cpu_isolated_map);
++ dattr_cur = NULL;
++ err = build_sched_domains(doms_cur);
++ register_sched_domain_sysctl();
++
++ return err;
++}
++
++static void arch_destroy_sched_domains(const cpumask_t *cpu_map,
++ cpumask_t *tmpmask)
++{
++ free_sched_groups(cpu_map, tmpmask);
++}
++
++/*
++ * Detach sched domains from a group of cpus specified in cpu_map.
++ * These cpus will now be attached to the NULL domain.
++ */
++static void detach_destroy_domains(const cpumask_t *cpu_map)
++{
++ cpumask_t tmpmask;
++ int i;
++
++ unregister_sched_domain_sysctl();
++
++ for_each_cpu_mask_nr(i, *cpu_map)
++ cpu_attach_domain(NULL, &def_root_domain, i);
++ synchronize_sched();
++ arch_destroy_sched_domains(cpu_map, &tmpmask);
++}
++
++/* handle null as "default" */
++static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur,
++ struct sched_domain_attr *new, int idx_new)
++{
++ struct sched_domain_attr tmp;
++
++ /* fast path */
++ if (!new && !cur)
++ return 1;
++
++ tmp = SD_ATTR_INIT;
++ return !memcmp(cur ? (cur + idx_cur) : &tmp,
++ new ? (new + idx_new) : &tmp,
++ sizeof(struct sched_domain_attr));
++}
++
++/*
++ * Partition sched domains as specified by the 'ndoms_new'
++ * cpumasks in the array doms_new[] of cpumasks. This compares
++ * doms_new[] to the current sched domain partitioning, doms_cur[].
++ * It destroys each deleted domain and builds each new domain.
++ *
++ * 'doms_new' is an array of cpumask_t's of length 'ndoms_new'.
++ * The masks don't intersect (don't overlap). We should set up one
++ * sched domain for each mask. CPUs not in any of the cpumasks will
++ * not be load balanced. If the same cpumask appears both in the
++ * current 'doms_cur' domains and in the new 'doms_new', we can leave
++ * it as it is.
++ *
++ * The passed in 'doms_new' should be kmalloc'd. This routine takes
++ * ownership of it and will kfree it when done with it. If the caller
++ * failed the kmalloc call, then it can pass in doms_new == NULL &&
++ * ndoms_new == 1, and partition_sched_domains() will fall back to
++ * the single partition 'fallback_doms'; this also forces the domains
++ * to be rebuilt.
++ *
++ * If doms_new == NULL it will be replaced with cpu_online_map.
++ * ndoms_new == 0 is a special case for destroying existing domains,
++ * and it will not create the default domain.
++ *
++ * Call with hotplug lock held
++ */
++void partition_sched_domains(int ndoms_new, cpumask_t *doms_new,
++ struct sched_domain_attr *dattr_new)
++{
++ int i, j, n;
++
++ mutex_lock(&sched_domains_mutex);
++
++ /* always unregister in case we don't destroy any domains */
++ unregister_sched_domain_sysctl();
++
++ n = doms_new ? ndoms_new : 0;
++
++ /* Destroy deleted domains */
++ for (i = 0; i < ndoms_cur; i++) {
++ for (j = 0; j < n; j++) {
++ if (cpus_equal(doms_cur[i], doms_new[j])
++ && dattrs_equal(dattr_cur, i, dattr_new, j))
++ goto match1;
++ }
++ /* no match - a current sched domain not in new doms_new[] */
++ detach_destroy_domains(doms_cur + i);
++match1:
++ ;
++ }
++
++ if (doms_new == NULL) {
++ ndoms_cur = 0;
++ doms_new = &fallback_doms;
++ cpus_andnot(doms_new[0], cpu_online_map, cpu_isolated_map);
++ dattr_new = NULL;
++ }
++
++ /* Build new domains */
++ for (i = 0; i < ndoms_new; i++) {
++ for (j = 0; j < ndoms_cur; j++) {
++ if (cpus_equal(doms_new[i], doms_cur[j])
++ && dattrs_equal(dattr_new, i, dattr_cur, j))
++ goto match2;
++ }
++ /* no match - add a new doms_new */
++ __build_sched_domains(doms_new + i,
++ dattr_new ? dattr_new + i : NULL);
++match2:
++ ;
++ }
++
++ /* Remember the new sched domains */
++ if (doms_cur != &fallback_doms)
++ kfree(doms_cur);
++ kfree(dattr_cur); /* kfree(NULL) is safe */
++ doms_cur = doms_new;
++ dattr_cur = dattr_new;
++ ndoms_cur = ndoms_new;
++
++ register_sched_domain_sysctl();
++
++ mutex_unlock(&sched_domains_mutex);
++}
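
A sketch of how a hypothetical caller could hand two new partitions to partition_sched_domains(), following the ownership rules spelled out in the comment above (the array must be kmalloc'd because the scheduler kfree()s it on the next repartition; example_repartition is an illustrative name, not part of the patch):

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/sched.h>
#include <linux/slab.h>

static void example_repartition(void)
{
        cpumask_t *doms = kmalloc(2 * sizeof(cpumask_t), GFP_KERNEL);

        get_online_cpus();                      /* hold the hotplug lock, per the comment */
        if (!doms) {
                /* allocation failed: fall back to the single default domain */
                partition_sched_domains(1, NULL, NULL);
        } else {
                cpus_clear(doms[0]);
                cpus_clear(doms[1]);
                cpu_set(0, doms[0]);            /* partition 0: CPU 0 */
                cpu_set(1, doms[1]);            /* partition 1: CPU 1 */
                partition_sched_domains(2, doms, NULL); /* scheduler now owns doms */
        }
        put_online_cpus();
}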
++
++#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
++int arch_reinit_sched_domains(void)
++{
++ get_online_cpus();
++
++ /* Destroy domains first to force the rebuild */
++ partition_sched_domains(0, NULL, NULL);
++
++ rebuild_sched_domains();
++ put_online_cpus();
++
++ return 0;
++}
++
++static ssize_t sched_power_savings_store(const char *buf, size_t count, int smt)
++{
++ int ret;
++
++ if (buf[0] != '0' && buf[0] != '1')
++ return -EINVAL;
++
++ if (smt)
++ sched_smt_power_savings = (buf[0] == '1');
++ else
++ sched_mc_power_savings = (buf[0] == '1');
++
++ ret = arch_reinit_sched_domains();
++
++ return ret ? ret : count;
++}
++
++#ifdef CONFIG_SCHED_MC
++static ssize_t sched_mc_power_savings_show(struct sysdev_class *class,
++ char *page)
++{
++ return sprintf(page, "%u\n", sched_mc_power_savings);
++}
++static ssize_t sched_mc_power_savings_store(struct sysdev_class *class,
++ const char *buf, size_t count)
++{
++ return sched_power_savings_store(buf, count, 0);
++}
++static SYSDEV_CLASS_ATTR(sched_mc_power_savings, 0644,
++ sched_mc_power_savings_show,
++ sched_mc_power_savings_store);
++#endif
++
++#ifdef CONFIG_SCHED_SMT
++static ssize_t sched_smt_power_savings_show(struct sysdev_class *dev,
++ char *page)
++{
++ return sprintf(page, "%u\n", sched_smt_power_savings);
++}
++static ssize_t sched_smt_power_savings_store(struct sysdev_class *dev,
++ const char *buf, size_t count)
++{
++ return sched_power_savings_store(buf, count, 1);
++}
++static SYSDEV_CLASS_ATTR(sched_smt_power_savings, 0644,
++ sched_smt_power_savings_show,
++ sched_smt_power_savings_store);
++#endif
++
++int sched_create_sysfs_power_savings_entries(struct sysdev_class *cls)
++{
++ int err = 0;
++
++#ifdef CONFIG_SCHED_SMT
++ if (smt_capable())
++ err = sysfs_create_file(&cls->kset.kobj,
++ &attr_sched_smt_power_savings.attr);
++#endif
++#ifdef CONFIG_SCHED_MC
++ if (!err && mc_capable())
++ err = sysfs_create_file(&cls->kset.kobj,
++ &attr_sched_mc_power_savings.attr);
++#endif
++ return err;
++}
++#endif /* CONFIG_SCHED_MC || CONFIG_SCHED_SMT */
++
++#ifndef CONFIG_CPUSETS
++/*
++ * Add online and remove offline CPUs from the scheduler domains.
++ * When cpusets are enabled they take over this function.
++ */
++static int update_sched_domains(struct notifier_block *nfb,
++ unsigned long action, void *hcpu)
++{
++ switch (action) {
++ case CPU_ONLINE:
++ case CPU_ONLINE_FROZEN:
++ case CPU_DEAD:
++ case CPU_DEAD_FROZEN:
++ partition_sched_domains(1, NULL, NULL);
++ return NOTIFY_OK;
++
++ default:
++ return NOTIFY_DONE;
++ }
++}
++#endif
++
++static int update_runtime(struct notifier_block *nfb,
++ unsigned long action, void *hcpu)
++{
++ int cpu = (int)(long)hcpu;
++
++ switch (action) {
++ case CPU_DOWN_PREPARE:
++ case CPU_DOWN_PREPARE_FROZEN:
++ disable_runtime(cpu_rq(cpu));
++ return NOTIFY_OK;
++
++ case CPU_DOWN_FAILED:
++ case CPU_DOWN_FAILED_FROZEN:
++ case CPU_ONLINE:
++ case CPU_ONLINE_FROZEN:
++ enable_runtime(cpu_rq(cpu));
++ return NOTIFY_OK;
++
++ default:
++ return NOTIFY_DONE;
++ }
++}
++
++void __init sched_init_smp(void)
++{
++ cpumask_t non_isolated_cpus;
++
++#if defined(CONFIG_NUMA)
++ sched_group_nodes_bycpu = kzalloc(nr_cpu_ids * sizeof(void **),
++ GFP_KERNEL);
++ BUG_ON(sched_group_nodes_bycpu == NULL);
++#endif
++ get_online_cpus();
++ mutex_lock(&sched_domains_mutex);
++ arch_init_sched_domains(&cpu_online_map);
++ cpus_andnot(non_isolated_cpus, cpu_possible_map, cpu_isolated_map);
++ if (cpus_empty(non_isolated_cpus))
++ cpu_set(smp_processor_id(), non_isolated_cpus);
++ mutex_unlock(&sched_domains_mutex);
++ put_online_cpus();
++
++#ifndef CONFIG_CPUSETS
++ /* XXX: Theoretical race here - CPU may be hotplugged now */
++ hotcpu_notifier(update_sched_domains, 0);
++#endif
++
++ /* RT runtime code needs to handle some hotplug events */
++ hotcpu_notifier(update_runtime, 0);
++
++ init_hrtick();
++
++ /* Move init over to a non-isolated CPU */
++ if (set_cpus_allowed_ptr(current, &non_isolated_cpus) < 0)
++ BUG();
++ sched_init_granularity();
++}
++#else
++void __init sched_init_smp(void)
++{
++ sched_init_granularity();
++}
++#endif /* CONFIG_SMP */
++
++int in_sched_functions(unsigned long addr)
++{
++ return in_lock_functions(addr) ||
++ (addr >= (unsigned long)__sched_text_start
++ && addr < (unsigned long)__sched_text_end);
++}
++
++static void init_cfs_rq(struct cfs_rq *cfs_rq, struct rq *rq)
++{
++ cfs_rq->tasks_timeline = RB_ROOT;
++ INIT_LIST_HEAD(&cfs_rq->tasks);
++#ifdef CONFIG_FAIR_GROUP_SCHED
++ cfs_rq->rq = rq;
++#endif
++ cfs_rq->min_vruntime = (u64)(-(1LL << 20));
++}
++
++static void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
++{
++ struct rt_prio_array *array;
++ int i;
++
++ array = &rt_rq->active;
++ for (i = 0; i < MAX_RT_PRIO; i++) {
++ INIT_LIST_HEAD(array->queue + i);
++ __clear_bit(i, array->bitmap);
++ }
++ /* delimiter for bitsearch: */
++ __set_bit(MAX_RT_PRIO, array->bitmap);
++
++#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
++ rt_rq->highest_prio = MAX_RT_PRIO;
++#endif
++#ifdef CONFIG_SMP
++ rt_rq->rt_nr_migratory = 0;
++ rt_rq->overloaded = 0;
++#endif
++
++ rt_rq->rt_time = 0;
++ rt_rq->rt_throttled = 0;
++ rt_rq->rt_runtime = 0;
++ spin_lock_init(&rt_rq->rt_runtime_lock);
++
++#ifdef CONFIG_RT_GROUP_SCHED
++ rt_rq->rt_nr_boosted = 0;
++ rt_rq->rq = rq;
++#endif
++}
++
++#ifdef CONFIG_FAIR_GROUP_SCHED
++static void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
++ struct sched_entity *se, int cpu, int add,
++ struct sched_entity *parent)
++{
++ struct rq *rq = cpu_rq(cpu);
++ tg->cfs_rq[cpu] = cfs_rq;
++ init_cfs_rq(cfs_rq, rq);
++ cfs_rq->tg = tg;
++ if (add)
++ list_add(&cfs_rq->leaf_cfs_rq_list, &rq->leaf_cfs_rq_list);
++
++ tg->se[cpu] = se;
++ /* se could be NULL for init_task_group */
++ if (!se)
++ return;
++
++ if (!parent)
++ se->cfs_rq = &rq->cfs;
++ else
++ se->cfs_rq = parent->my_q;
++
++ se->my_q = cfs_rq;
++ se->load.weight = tg->shares;
++ se->load.inv_weight = 0;
++ se->parent = parent;
++}
++#endif
++
++#ifdef CONFIG_RT_GROUP_SCHED
++static void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
++ struct sched_rt_entity *rt_se, int cpu, int add,
++ struct sched_rt_entity *parent)
++{
++ struct rq *rq = cpu_rq(cpu);
++
++ tg->rt_rq[cpu] = rt_rq;
++ init_rt_rq(rt_rq, rq);
++ rt_rq->tg = tg;
++ rt_rq->rt_se = rt_se;
++ rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime;
++ if (add)
++ list_add(&rt_rq->leaf_rt_rq_list, &rq->leaf_rt_rq_list);
++
++ tg->rt_se[cpu] = rt_se;
++ if (!rt_se)
++ return;
++
++ if (!parent)
++ rt_se->rt_rq = &rq->rt;
++ else
++ rt_se->rt_rq = parent->my_q;
++
++ rt_se->my_q = rt_rq;
++ rt_se->parent = parent;
++ INIT_LIST_HEAD(&rt_se->run_list);
++}
++#endif
++
++void __init sched_init(void)
++{
++ int i, j;
++ unsigned long alloc_size = 0, ptr;
++
++#ifdef CONFIG_FAIR_GROUP_SCHED
++ alloc_size += 2 * nr_cpu_ids * sizeof(void **);
++#endif
++#ifdef CONFIG_RT_GROUP_SCHED
++ alloc_size += 2 * nr_cpu_ids * sizeof(void **);
++#endif
++#ifdef CONFIG_USER_SCHED
++ alloc_size *= 2;
++#endif
++ /*
++ * As sched_init() is called before page_alloc is set up,
++ * we use alloc_bootmem().
++ */
++ if (alloc_size) {
++ ptr = (unsigned long)alloc_bootmem(alloc_size);
++
++#ifdef CONFIG_FAIR_GROUP_SCHED
++ init_task_group.se = (struct sched_entity **)ptr;
++ ptr += nr_cpu_ids * sizeof(void **);
++
++ init_task_group.cfs_rq = (struct cfs_rq **)ptr;
++ ptr += nr_cpu_ids * sizeof(void **);
++
++#ifdef CONFIG_USER_SCHED
++ root_task_group.se = (struct sched_entity **)ptr;
++ ptr += nr_cpu_ids * sizeof(void **);
++
++ root_task_group.cfs_rq = (struct cfs_rq **)ptr;
++ ptr += nr_cpu_ids * sizeof(void **);
++#endif /* CONFIG_USER_SCHED */
++#endif /* CONFIG_FAIR_GROUP_SCHED */
++#ifdef CONFIG_RT_GROUP_SCHED
++ init_task_group.rt_se = (struct sched_rt_entity **)ptr;
++ ptr += nr_cpu_ids * sizeof(void **);
++
++ init_task_group.rt_rq = (struct rt_rq **)ptr;
++ ptr += nr_cpu_ids * sizeof(void **);
++
++#ifdef CONFIG_USER_SCHED
++ root_task_group.rt_se = (struct sched_rt_entity **)ptr;
++ ptr += nr_cpu_ids * sizeof(void **);
++
++ root_task_group.rt_rq = (struct rt_rq **)ptr;
++ ptr += nr_cpu_ids * sizeof(void **);
++#endif /* CONFIG_USER_SCHED */
++#endif /* CONFIG_RT_GROUP_SCHED */
++ }
++
++#ifdef CONFIG_SMP
++ init_defrootdomain();
++#endif
++
++ init_rt_bandwidth(&def_rt_bandwidth,
++ global_rt_period(), global_rt_runtime());
++
++#ifdef CONFIG_RT_GROUP_SCHED
++ init_rt_bandwidth(&init_task_group.rt_bandwidth,
++ global_rt_period(), global_rt_runtime());
++#ifdef CONFIG_USER_SCHED
++ init_rt_bandwidth(&root_task_group.rt_bandwidth,
++ global_rt_period(), RUNTIME_INF);
++#endif /* CONFIG_USER_SCHED */
++#endif /* CONFIG_RT_GROUP_SCHED */
++
++#ifdef CONFIG_GROUP_SCHED
++ list_add(&init_task_group.list, &task_groups);
++ INIT_LIST_HEAD(&init_task_group.children);
++
++#ifdef CONFIG_USER_SCHED
++ INIT_LIST_HEAD(&root_task_group.children);
++ init_task_group.parent = &root_task_group;
++ list_add(&init_task_group.siblings, &root_task_group.children);
++#endif /* CONFIG_USER_SCHED */
++#endif /* CONFIG_GROUP_SCHED */
++
++ for_each_possible_cpu(i) {
++ struct rq *rq;
++
++ rq = cpu_rq(i);
++ spin_lock_init(&rq->lock);
++ rq->nr_running = 0;
++ init_cfs_rq(&rq->cfs, rq);
++ init_rt_rq(&rq->rt, rq);
++#ifdef CONFIG_FAIR_GROUP_SCHED
++ init_task_group.shares = init_task_group_load;
++ INIT_LIST_HEAD(&rq->leaf_cfs_rq_list);
++#ifdef CONFIG_CGROUP_SCHED
++ /*
++ * How much cpu bandwidth does init_task_group get?
++ *
++ * In case of task-groups formed through the cgroup filesystem, it
++ * gets 100% of the cpu resources in the system. This overall
++ * system cpu resource is divided among the tasks of
++ * init_task_group and its child task-groups in a fair manner,
++ * based on each entity's (task or task-group's) weight
++ * (se->load.weight).
++ *
++ * In other words, if init_task_group has 10 tasks (each of weight
++ * 1024) and two child groups A0 and A1 (of weight 1024 each),
++ * then A0's share of the cpu resource is:
++ *
++ * A0's bandwidth = 1024 / (10*1024 + 1024 + 1024) = 8.33%
++ *
++ * We achieve this by letting init_task_group's tasks sit
++ * directly in rq->cfs (i.e init_task_group->se[] = NULL).
++ */
++ init_tg_cfs_entry(&init_task_group, &rq->cfs, NULL, i, 1, NULL);
++#elif defined CONFIG_USER_SCHED
++ root_task_group.shares = NICE_0_LOAD;
++ init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, 0, NULL);
++ /*
++ * In case of task-groups formed through the user id of tasks,
++ * init_task_group represents tasks belonging to root user.
++ * Hence it forms a sibling of all subsequent groups formed.
++ * In this case, init_task_group gets only a fraction of overall
++ * system cpu resource, based on the weight assigned to root
++ * user's cpu share (INIT_TASK_GROUP_LOAD). This is accomplished
++ * by letting tasks of init_task_group sit in a separate cfs_rq
++ * (init_cfs_rq) and having one entity represent this group of
++ * tasks in rq->cfs (i.e init_task_group->se[] != NULL).
++ */
++ init_tg_cfs_entry(&init_task_group,
++ &per_cpu(init_cfs_rq, i),
++ &per_cpu(init_sched_entity, i), i, 1,
++ root_task_group.se[i]);
++
++#endif
++#endif /* CONFIG_FAIR_GROUP_SCHED */
++
++ rq->rt.rt_runtime = def_rt_bandwidth.rt_runtime;
++#ifdef CONFIG_RT_GROUP_SCHED
++ INIT_LIST_HEAD(&rq->leaf_rt_rq_list);
++#ifdef CONFIG_CGROUP_SCHED
++ init_tg_rt_entry(&init_task_group, &rq->rt, NULL, i, 1, NULL);
++#elif defined CONFIG_USER_SCHED
++ init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, 0, NULL);
++ init_tg_rt_entry(&init_task_group,
++ &per_cpu(init_rt_rq, i),
++ &per_cpu(init_sched_rt_entity, i), i, 1,
++ root_task_group.rt_se[i]);
++#endif
++#endif
++
++ for (j = 0; j < CPU_LOAD_IDX_MAX; j++)
++ rq->cpu_load[j] = 0;
++#ifdef CONFIG_SMP
++ rq->sd = NULL;
++ rq->rd = NULL;
++ rq->active_balance = 0;
++ rq->next_balance = jiffies;
++ rq->push_cpu = 0;
++ rq->cpu = i;
++ rq->online = 0;
++ rq->migration_thread = NULL;
++ INIT_LIST_HEAD(&rq->migration_queue);
++ rq_attach_root(rq, &def_root_domain);
++#endif
++ init_rq_hrtick(rq);
++ atomic_set(&rq->nr_iowait, 0);
++ }
++
++ set_load_weight(&init_task);
++
++#ifdef CONFIG_PREEMPT_NOTIFIERS
++ INIT_HLIST_HEAD(&init_task.preempt_notifiers);
++#endif
++
++#ifdef CONFIG_SMP
++ open_softirq(SCHED_SOFTIRQ, run_rebalance_domains);
++#endif
++
++#ifdef CONFIG_RT_MUTEXES
++ plist_head_init(&init_task.pi_waiters, &init_task.pi_lock);
++#endif
++
++ /*
++ * The boot idle thread does lazy MMU switching as well:
++ */
++ atomic_inc(&init_mm.mm_count);
++ enter_lazy_tlb(&init_mm, current);
++
++ /*
++ * Make us the idle thread. Technically, schedule() should not be
++ * called from this thread; however, somewhere below it might be.
++ * Because we are the idle thread, we just pick up running again
++ * when this runqueue becomes "idle".
++ */
++ init_idle(current, smp_processor_id());
++ /*
++ * During early bootup we pretend to be a normal task:
++ */
++ current->sched_class = &fair_sched_class;
++
++ scheduler_running = 1;
++}
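
The bandwidth comment inside sched_init() above reduces to ordinary CFS weight arithmetic: an entity's share at a given level is its weight divided by the sum of all weights at that level (1024 being the weight of a nice-0 task). A minimal standalone check of the numbers used in that comment; plain user-space C, not part of the patch, with the example weights taken from the comment:

#include <stdio.h>

/* Share of one entity among its siblings: weight / sum of weights at that level. */
static double share_pct(unsigned long weight, unsigned long total)
{
	return 100.0 * (double)weight / (double)total;
}

int main(void)
{
	/* 10 tasks of weight 1024 in init_task_group plus groups A0 and A1, 1024 each */
	unsigned long total = 10 * 1024 + 1024 + 1024;

	printf("A0's bandwidth: %.2f%%\n", share_pct(1024, total)); /* prints 8.33% */
	return 0;
}
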
++
++#ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
++void __might_sleep(char *file, int line)
++{
++#ifdef in_atomic
++ static unsigned long prev_jiffy; /* ratelimiting */
++
++ if ((in_atomic() || irqs_disabled()) &&
++ system_state == SYSTEM_RUNNING && !oops_in_progress) {
++ if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
++ return;
++ prev_jiffy = jiffies;
++ printk(KERN_ERR "BUG: sleeping function called from invalid"
++ " context at %s:%d\n", file, line);
++ printk("in_atomic():%d, irqs_disabled():%d\n",
++ in_atomic(), irqs_disabled());
++ debug_show_held_locks(current);
++ if (irqs_disabled())
++ print_irqtrace_events(current);
++ dump_stack();
++ }
++#endif
++}
++EXPORT_SYMBOL(__might_sleep);
++#endif
++
++#ifdef CONFIG_MAGIC_SYSRQ
++static void normalize_task(struct rq *rq, struct task_struct *p)
++{
++ int on_rq;
++
++ update_rq_clock(rq);
++ on_rq = p->se.on_rq;
++ if (on_rq)
++ deactivate_task(rq, p, 0);
++ __setscheduler(rq, p, SCHED_NORMAL, 0);
++ if (on_rq) {
++ activate_task(rq, p, 0);
++ resched_task(rq->curr);
++ }
++}
++
++void normalize_rt_tasks(void)
++{
++ struct task_struct *g, *p;
++ unsigned long flags;
++ struct rq *rq;
++
++ read_lock_irqsave(&tasklist_lock, flags);
++ do_each_thread(g, p) {
++ /*
++ * Only normalize user tasks:
++ */
++ if (!p->mm)
++ continue;
++
++ p->se.exec_start = 0;
++#ifdef CONFIG_SCHEDSTATS
++ p->se.wait_start = 0;
++ p->se.sleep_start = 0;
++ p->se.block_start = 0;
++#endif
++
++ if (!rt_task(p)) {
++ /*
++ * Renice negative nice level userspace
++ * tasks back to 0:
++ */
++ if (TASK_NICE(p) < 0 && p->mm)
++ set_user_nice(p, 0);
++ continue;
++ }
++
++ spin_lock(&p->pi_lock);
++ rq = __task_rq_lock(p);
++
++ normalize_task(rq, p);
++
++ __task_rq_unlock(rq);
++ spin_unlock(&p->pi_lock);
++ } while_each_thread(g, p);
++
++ read_unlock_irqrestore(&tasklist_lock, flags);
++}
++
++#endif /* CONFIG_MAGIC_SYSRQ */
++
++#ifdef CONFIG_IA64
++/*
++ * These functions are only useful for the IA64 MCA handling.
++ *
++ * They can only be called when the whole system has been
++ * stopped - every CPU needs to be quiescent, and no scheduling
++ * activity can take place. Using them for anything else would
++ * be a serious bug, and as a result, they aren't even visible
++ * under any other configuration.
++ */
++
++/**
++ * curr_task - return the current task for a given cpu.
++ * @cpu: the processor in question.
++ *
++ * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
++ */
++struct task_struct *curr_task(int cpu)
++{
++ return cpu_curr(cpu);
++}
++
++/**
++ * set_curr_task - set the current task for a given cpu.
++ * @cpu: the processor in question.
++ * @p: the task pointer to set.
++ *
++ * Description: This function must only be used when non-maskable interrupts
++ * are serviced on a separate stack. It allows the architecture to switch the
++ * notion of the current task on a cpu in a non-blocking manner. This function
++ * must be called with all CPUs synchronized and interrupts disabled; the
++ * caller must save the original value of the current task (see
++ * curr_task() above) and restore that value before reenabling interrupts and
++ * re-starting the system.
++ *
++ * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
++ */
++void set_curr_task(int cpu, struct task_struct *p)
++{
++ cpu_curr(cpu) = p;
++}
++
++#endif
++
++#ifdef CONFIG_FAIR_GROUP_SCHED
++static void free_fair_sched_group(struct task_group *tg)
++{
++ int i;
++
++ for_each_possible_cpu(i) {
++ if (tg->cfs_rq)
++ kfree(tg->cfs_rq[i]);
++ if (tg->se)
++ kfree(tg->se[i]);
++ }
++
++ kfree(tg->cfs_rq);
++ kfree(tg->se);
++}
++
++static
++int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
++{
++ struct cfs_rq *cfs_rq;
++ struct sched_entity *se, *parent_se;
++ struct rq *rq;
++ int i;
++
++ tg->cfs_rq = kzalloc(sizeof(cfs_rq) * nr_cpu_ids, GFP_KERNEL);
++ if (!tg->cfs_rq)
++ goto err;
++ tg->se = kzalloc(sizeof(se) * nr_cpu_ids, GFP_KERNEL);
++ if (!tg->se)
++ goto err;
++
++ tg->shares = NICE_0_LOAD;
++
++ for_each_possible_cpu(i) {
++ rq = cpu_rq(i);
++
++ cfs_rq = kmalloc_node(sizeof(struct cfs_rq),
++ GFP_KERNEL|__GFP_ZERO, cpu_to_node(i));
++ if (!cfs_rq)
++ goto err;
++
++ se = kmalloc_node(sizeof(struct sched_entity),
++ GFP_KERNEL|__GFP_ZERO, cpu_to_node(i));
++ if (!se)
++ goto err;
++
++ parent_se = parent ? parent->se[i] : NULL;
++ init_tg_cfs_entry(tg, cfs_rq, se, i, 0, parent_se);
++ }
++
++ return 1;
++
++ err:
++ return 0;
++}
++
++static inline void register_fair_sched_group(struct task_group *tg, int cpu)
++{
++ list_add_rcu(&tg->cfs_rq[cpu]->leaf_cfs_rq_list,
++ &cpu_rq(cpu)->leaf_cfs_rq_list);
++}
++
++static inline void unregister_fair_sched_group(struct task_group *tg, int cpu)
++{
++ list_del_rcu(&tg->cfs_rq[cpu]->leaf_cfs_rq_list);
++}
++#else /* !CONFIG_FAIR_GROUP_SCHED */
++static inline void free_fair_sched_group(struct task_group *tg)
++{
++}
++
++static inline
++int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
++{
++ return 1;
++}
++
++static inline void register_fair_sched_group(struct task_group *tg, int cpu)
++{
++}
++
++static inline void unregister_fair_sched_group(struct task_group *tg, int cpu)
++{
++}
++#endif /* CONFIG_FAIR_GROUP_SCHED */
++
++#ifdef CONFIG_RT_GROUP_SCHED
++static void free_rt_sched_group(struct task_group *tg)
++{
++ int i;
++
++ destroy_rt_bandwidth(&tg->rt_bandwidth);
++
++ for_each_possible_cpu(i) {
++ if (tg->rt_rq)
++ kfree(tg->rt_rq[i]);
++ if (tg->rt_se)
++ kfree(tg->rt_se[i]);
++ }
++
++ kfree(tg->rt_rq);
++ kfree(tg->rt_se);
++}
++
++static
++int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
++{
++ struct rt_rq *rt_rq;
++ struct sched_rt_entity *rt_se, *parent_se;
++ struct rq *rq;
++ int i;
++
++ tg->rt_rq = kzalloc(sizeof(rt_rq) * nr_cpu_ids, GFP_KERNEL);
++ if (!tg->rt_rq)
++ goto err;
++ tg->rt_se = kzalloc(sizeof(rt_se) * nr_cpu_ids, GFP_KERNEL);
++ if (!tg->rt_se)
++ goto err;
++
++ init_rt_bandwidth(&tg->rt_bandwidth,
++ ktime_to_ns(def_rt_bandwidth.rt_period), 0);
++
++ for_each_possible_cpu(i) {
++ rq = cpu_rq(i);
++
++ rt_rq = kmalloc_node(sizeof(struct rt_rq),
++ GFP_KERNEL|__GFP_ZERO, cpu_to_node(i));
++ if (!rt_rq)
++ goto err;
++
++ rt_se = kmalloc_node(sizeof(struct sched_rt_entity),
++ GFP_KERNEL|__GFP_ZERO, cpu_to_node(i));
++ if (!rt_se)
++ goto err;
++
++ parent_se = parent ? parent->rt_se[i] : NULL;
++ init_tg_rt_entry(tg, rt_rq, rt_se, i, 0, parent_se);
++ }
++
++ return 1;
++
++ err:
++ return 0;
++}
++
++static inline void register_rt_sched_group(struct task_group *tg, int cpu)
++{
++ list_add_rcu(&tg->rt_rq[cpu]->leaf_rt_rq_list,
++ &cpu_rq(cpu)->leaf_rt_rq_list);
++}
++
++static inline void unregister_rt_sched_group(struct task_group *tg, int cpu)
++{
++ list_del_rcu(&tg->rt_rq[cpu]->leaf_rt_rq_list);
++}
++#else /* !CONFIG_RT_GROUP_SCHED */
++static inline void free_rt_sched_group(struct task_group *tg)
++{
++}
++
++static inline
++int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
++{
++ return 1;
++}
++
++static inline void register_rt_sched_group(struct task_group *tg, int cpu)
++{
++}
++
++static inline void unregister_rt_sched_group(struct task_group *tg, int cpu)
++{
++}
++#endif /* CONFIG_RT_GROUP_SCHED */
++
++#ifdef CONFIG_GROUP_SCHED
++static void free_sched_group(struct task_group *tg)
++{
++ free_fair_sched_group(tg);
++ free_rt_sched_group(tg);
++ kfree(tg);
++}
++
++/* allocate runqueue etc for a new task group */
++struct task_group *sched_create_group(struct task_group *parent)
++{
++ struct task_group *tg;
++ unsigned long flags;
++ int i;
++
++ tg = kzalloc(sizeof(*tg), GFP_KERNEL);
++ if (!tg)
++ return ERR_PTR(-ENOMEM);
++
++ if (!alloc_fair_sched_group(tg, parent))
++ goto err;
++
++ if (!alloc_rt_sched_group(tg, parent))
++ goto err;
++
++ spin_lock_irqsave(&task_group_lock, flags);
++ for_each_possible_cpu(i) {
++ register_fair_sched_group(tg, i);
++ register_rt_sched_group(tg, i);
++ }
++ list_add_rcu(&tg->list, &task_groups);
++
++ WARN_ON(!parent); /* root should already exist */
++
++ tg->parent = parent;
++ INIT_LIST_HEAD(&tg->children);
++ list_add_rcu(&tg->siblings, &parent->children);
++ spin_unlock_irqrestore(&task_group_lock, flags);
++
++ return tg;
++
++err:
++ free_sched_group(tg);
++ return ERR_PTR(-ENOMEM);
++}
++
++/* rcu callback to free various structures associated with a task group */
++static void free_sched_group_rcu(struct rcu_head *rhp)
++{
++ /* now it should be safe to free those cfs_rqs */
++ free_sched_group(container_of(rhp, struct task_group, rcu));
++}
++
++/* Destroy runqueue etc associated with a task group */
++void sched_destroy_group(struct task_group *tg)
++{
++ unsigned long flags;
++ int i;
++
++ spin_lock_irqsave(&task_group_lock, flags);
++ for_each_possible_cpu(i) {
++ unregister_fair_sched_group(tg, i);
++ unregister_rt_sched_group(tg, i);
++ }
++ list_del_rcu(&tg->list);
++ list_del_rcu(&tg->siblings);
++ spin_unlock_irqrestore(&task_group_lock, flags);
++
++ /* wait for possible concurrent references to cfs_rqs to complete */
++ call_rcu(&tg->rcu, free_sched_group_rcu);
++}
++
++/* Change a task's runqueue when it moves between groups.
++ * The caller of this function should have put the task in its new group
++ * by now. This function just updates tsk->se.cfs_rq and tsk->se.parent to
++ * reflect its new group.
++ */
++void sched_move_task(struct task_struct *tsk)
++{
++ int on_rq, running;
++ unsigned long flags;
++ struct rq *rq;
++
++ rq = task_rq_lock(tsk, &flags);
++
++ update_rq_clock(rq);
++
++ running = task_current(rq, tsk);
++ on_rq = tsk->se.on_rq;
++
++ if (on_rq)
++ dequeue_task(rq, tsk, 0);
++ if (unlikely(running))
++ tsk->sched_class->put_prev_task(rq, tsk);
++
++ set_task_rq(tsk, task_cpu(tsk));
++
++#ifdef CONFIG_FAIR_GROUP_SCHED
++ if (tsk->sched_class->moved_group)
++ tsk->sched_class->moved_group(tsk);
++#endif
++
++ if (unlikely(running))
++ tsk->sched_class->set_curr_task(rq);
++ if (on_rq)
++ enqueue_task(rq, tsk, 0);
++
++ task_rq_unlock(rq, &flags);
++}
++#endif /* CONFIG_GROUP_SCHED */
++
++#ifdef CONFIG_FAIR_GROUP_SCHED
++static void __set_se_shares(struct sched_entity *se, unsigned long shares)
++{
++ struct cfs_rq *cfs_rq = se->cfs_rq;
++ int on_rq;
++
++ on_rq = se->on_rq;
++ if (on_rq)
++ dequeue_entity(cfs_rq, se, 0);
++
++ se->load.weight = shares;
++ se->load.inv_weight = 0;
++
++ if (on_rq)
++ enqueue_entity(cfs_rq, se, 0);
++}
++
++static void set_se_shares(struct sched_entity *se, unsigned long shares)
++{
++ struct cfs_rq *cfs_rq = se->cfs_rq;
++ struct rq *rq = cfs_rq->rq;
++ unsigned long flags;
++
++ spin_lock_irqsave(&rq->lock, flags);
++ __set_se_shares(se, shares);
++ spin_unlock_irqrestore(&rq->lock, flags);
++}
++
++static DEFINE_MUTEX(shares_mutex);
++
++int sched_group_set_shares(struct task_group *tg, unsigned long shares)
++{
++ int i;
++ unsigned long flags;
++
++ /*
++ * We can't change the weight of the root cgroup.
++ */
++ if (!tg->se[0])
++ return -EINVAL;
++
++ if (shares < MIN_SHARES)
++ shares = MIN_SHARES;
++ else if (shares > MAX_SHARES)
++ shares = MAX_SHARES;
++
++ mutex_lock(&shares_mutex);
++ if (tg->shares == shares)
++ goto done;
++
++ spin_lock_irqsave(&task_group_lock, flags);
++ for_each_possible_cpu(i)
++ unregister_fair_sched_group(tg, i);
++ list_del_rcu(&tg->siblings);
++ spin_unlock_irqrestore(&task_group_lock, flags);
++
++ /* wait for any ongoing reference to this group to finish */
++ synchronize_sched();
++
++ /*
++ * Now we are free to modify the group's share on each cpu
++ * w/o tripping rebalance_share or load_balance_fair.
++ */
++ tg->shares = shares;
++ for_each_possible_cpu(i) {
++ /*
++ * force a rebalance
++ */
++ cfs_rq_set_shares(tg->cfs_rq[i], 0);
++ set_se_shares(tg->se[i], shares);
++ }
++
++ /*
++ * Enable load balance activity on this group, by inserting it back on
++ * each cpu's rq->leaf_cfs_rq_list.
++ */
++ spin_lock_irqsave(&task_group_lock, flags);
++ for_each_possible_cpu(i)
++ register_fair_sched_group(tg, i);
++ list_add_rcu(&tg->siblings, &tg->parent->children);
++ spin_unlock_irqrestore(&task_group_lock, flags);
++done:
++ mutex_unlock(&shares_mutex);
++ return 0;
++}
++
++unsigned long sched_group_shares(struct task_group *tg)
++{
++ return tg->shares;
++}
++#endif
++
++#ifdef CONFIG_RT_GROUP_SCHED
++/*
++ * Ensure that the real time constraints are schedulable.
++ */
++static DEFINE_MUTEX(rt_constraints_mutex);
++
++static unsigned long to_ratio(u64 period, u64 runtime)
++{
++ if (runtime == RUNTIME_INF)
++ return 1ULL << 16;
++
++ return div64_u64(runtime << 16, period);
++}
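
to_ratio() above encodes a runtime/period pair as a 16-bit fixed-point fraction: runtime shifted left by 16 and divided by the period, with RUNTIME_INF mapping to the full 1 << 16 so an unthrottled group always outweighs any finite budget. A small standalone illustration of the encoding; user-space C, not part of the patch, and the 1 s / 0.95 s pair is only an example:

#include <stdint.h>
#include <stdio.h>

/* Same fixed-point encoding as to_ratio(): runtime/period scaled by 2^16. */
static uint64_t ratio16(uint64_t period_ns, uint64_t runtime_ns)
{
	return (runtime_ns << 16) / period_ns;
}

int main(void)
{
	/* e.g. 0.95 s of RT runtime per 1 s period */
	printf("%llu / 65536\n",
	       (unsigned long long)ratio16(1000000000ULL, 950000000ULL)); /* 62259 */
	return 0;
}
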
++
++#ifdef CONFIG_CGROUP_SCHED
++static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
++{
++ struct task_group *tgi, *parent = tg->parent;
++ unsigned long total = 0;
++
++ if (!parent) {
++ if (global_rt_period() < period)
++ return 0;
++
++ return to_ratio(period, runtime) <
++ to_ratio(global_rt_period(), global_rt_runtime());
++ }
++
++ if (ktime_to_ns(parent->rt_bandwidth.rt_period) < period)
++ return 0;
++
++ rcu_read_lock();
++ list_for_each_entry_rcu(tgi, &parent->children, siblings) {
++ if (tgi == tg)
++ continue;
++
++ total += to_ratio(ktime_to_ns(tgi->rt_bandwidth.rt_period),
++ tgi->rt_bandwidth.rt_runtime);
++ }
++ rcu_read_unlock();
++
++ return total + to_ratio(period, runtime) <=
++ to_ratio(ktime_to_ns(parent->rt_bandwidth.rt_period),
++ parent->rt_bandwidth.rt_runtime);
++}
++#elif defined CONFIG_USER_SCHED
++static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
++{
++ struct task_group *tgi;
++ unsigned long total = 0;
++ unsigned long global_ratio =
++ to_ratio(global_rt_period(), global_rt_runtime());
++
++ rcu_read_lock();
++ list_for_each_entry_rcu(tgi, &task_groups, list) {
++ if (tgi == tg)
++ continue;
++
++ total += to_ratio(ktime_to_ns(tgi->rt_bandwidth.rt_period),
++ tgi->rt_bandwidth.rt_runtime);
++ }
++ rcu_read_unlock();
++
++ return total + to_ratio(period, runtime) < global_ratio;
++}
++#endif
++
++/* Must be called with tasklist_lock held */
++static inline int tg_has_rt_tasks(struct task_group *tg)
++{
++ struct task_struct *g, *p;
++ do_each_thread(g, p) {
++ if (rt_task(p) && rt_rq_of_se(&p->rt)->tg == tg)
++ return 1;
++ } while_each_thread(g, p);
++ return 0;
++}
++
++static int tg_set_bandwidth(struct task_group *tg,
++ u64 rt_period, u64 rt_runtime)
++{
++ int i, err = 0;
++
++ mutex_lock(&rt_constraints_mutex);
++ read_lock(&tasklist_lock);
++ if (rt_runtime == 0 && tg_has_rt_tasks(tg)) {
++ err = -EBUSY;
++ goto unlock;
++ }
++ if (!__rt_schedulable(tg, rt_period, rt_runtime)) {
++ err = -EINVAL;
++ goto unlock;
++ }
++
++ spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock);
++ tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period);
++ tg->rt_bandwidth.rt_runtime = rt_runtime;
++
++ for_each_possible_cpu(i) {
++ struct rt_rq *rt_rq = tg->rt_rq[i];
++
++ spin_lock(&rt_rq->rt_runtime_lock);
++ rt_rq->rt_runtime = rt_runtime;
++ spin_unlock(&rt_rq->rt_runtime_lock);
++ }
++ spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock);
++ unlock:
++ read_unlock(&tasklist_lock);
++ mutex_unlock(&rt_constraints_mutex);
++
++ return err;
++}
++
++int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us)
++{
++ u64 rt_runtime, rt_period;
++
++ rt_period = ktime_to_ns(tg->rt_bandwidth.rt_period);
++ rt_runtime = (u64)rt_runtime_us * NSEC_PER_USEC;
++ if (rt_runtime_us < 0)
++ rt_runtime = RUNTIME_INF;
++
++ return tg_set_bandwidth(tg, rt_period, rt_runtime);
++}
++
++long sched_group_rt_runtime(struct task_group *tg)
++{
++ u64 rt_runtime_us;
++
++ if (tg->rt_bandwidth.rt_runtime == RUNTIME_INF)
++ return -1;
++
++ rt_runtime_us = tg->rt_bandwidth.rt_runtime;
++ do_div(rt_runtime_us, NSEC_PER_USEC);
++ return rt_runtime_us;
++}
++
++int sched_group_set_rt_period(struct task_group *tg, long rt_period_us)
++{
++ u64 rt_runtime, rt_period;
++
++ rt_period = (u64)rt_period_us * NSEC_PER_USEC;
++ rt_runtime = tg->rt_bandwidth.rt_runtime;
++
++ if (rt_period == 0)
++ return -EINVAL;
++
++ return tg_set_bandwidth(tg, rt_period, rt_runtime);
++}
++
++long sched_group_rt_period(struct task_group *tg)
++{
++ u64 rt_period_us;
++
++ rt_period_us = ktime_to_ns(tg->rt_bandwidth.rt_period);
++ do_div(rt_period_us, NSEC_PER_USEC);
++ return rt_period_us;
++}
++
++static int sched_rt_global_constraints(void)
++{
++ struct task_group *tg = &root_task_group;
++ u64 rt_runtime, rt_period;
++ int ret = 0;
++
++ if (sysctl_sched_rt_period <= 0)
++ return -EINVAL;
++
++ rt_period = ktime_to_ns(tg->rt_bandwidth.rt_period);
++ rt_runtime = tg->rt_bandwidth.rt_runtime;
++
++ mutex_lock(&rt_constraints_mutex);
++ if (!__rt_schedulable(tg, rt_period, rt_runtime))
++ ret = -EINVAL;
++ mutex_unlock(&rt_constraints_mutex);
++
++ return ret;
++}
++#else /* !CONFIG_RT_GROUP_SCHED */
++static int sched_rt_global_constraints(void)
++{
++ unsigned long flags;
++ int i;
++
++ if (sysctl_sched_rt_period <= 0)
++ return -EINVAL;
++
++ spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
++ for_each_possible_cpu(i) {
++ struct rt_rq *rt_rq = &cpu_rq(i)->rt;
++
++ spin_lock(&rt_rq->rt_runtime_lock);
++ rt_rq->rt_runtime = global_rt_runtime();
++ spin_unlock(&rt_rq->rt_runtime_lock);
++ }
++ spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);
++
++ return 0;
++}
++#endif /* CONFIG_RT_GROUP_SCHED */
++
++int sched_rt_handler(struct ctl_table *table, int write,
++ struct file *filp, void __user *buffer, size_t *lenp,
++ loff_t *ppos)
++{
++ int ret;
++ int old_period, old_runtime;
++ static DEFINE_MUTEX(mutex);
++
++ mutex_lock(&mutex);
++ old_period = sysctl_sched_rt_period;
++ old_runtime = sysctl_sched_rt_runtime;
++
++ ret = proc_dointvec(table, write, filp, buffer, lenp, ppos);
++
++ if (!ret && write) {
++ ret = sched_rt_global_constraints();
++ if (ret) {
++ sysctl_sched_rt_period = old_period;
++ sysctl_sched_rt_runtime = old_runtime;
++ } else {
++ def_rt_bandwidth.rt_runtime = global_rt_runtime();
++ def_rt_bandwidth.rt_period =
++ ns_to_ktime(global_rt_period());
++ }
++ }
++ mutex_unlock(&mutex);
++
++ return ret;
++}
++
++#ifdef CONFIG_CGROUP_SCHED
++
++/* return corresponding task_group object of a cgroup */
++static inline struct task_group *cgroup_tg(struct cgroup *cgrp)
++{
++ return container_of(cgroup_subsys_state(cgrp, cpu_cgroup_subsys_id),
++ struct task_group, css);
++}
++
++static struct cgroup_subsys_state *
++cpu_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cgrp)
++{
++ struct task_group *tg, *parent;
++
++ if (!cgrp->parent) {
++ /* This is early initialization for the top cgroup */
++ init_task_group.css.cgroup = cgrp;
++ return &init_task_group.css;
++ }
++
++ parent = cgroup_tg(cgrp->parent);
++ tg = sched_create_group(parent);
++ if (IS_ERR(tg))
++ return ERR_PTR(-ENOMEM);
++
++ /* Bind the cgroup to task_group object we just created */
++ tg->css.cgroup = cgrp;
++
++ return &tg->css;
++}
++
++static void
++cpu_cgroup_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp)
++{
++ struct task_group *tg = cgroup_tg(cgrp);
++
++ sched_destroy_group(tg);
++}
++
++static int
++cpu_cgroup_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
++ struct task_struct *tsk)
++{
++#ifdef CONFIG_RT_GROUP_SCHED
++ /* Don't accept realtime tasks when there is no way for them to run */
++ if (rt_task(tsk) && cgroup_tg(cgrp)->rt_bandwidth.rt_runtime == 0)
++ return -EINVAL;
++#else
++ /* We don't support RT-tasks being in separate groups */
++ if (tsk->sched_class != &fair_sched_class)
++ return -EINVAL;
++#endif
++
++ return 0;
++}
++
++static void
++cpu_cgroup_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
++ struct cgroup *old_cont, struct task_struct *tsk)
++{
++ sched_move_task(tsk);
++}
++
++#ifdef CONFIG_FAIR_GROUP_SCHED
++static int cpu_shares_write_u64(struct cgroup *cgrp, struct cftype *cftype,
++ u64 shareval)
++{
++ return sched_group_set_shares(cgroup_tg(cgrp), shareval);
++}
++
++static u64 cpu_shares_read_u64(struct cgroup *cgrp, struct cftype *cft)
++{
++ struct task_group *tg = cgroup_tg(cgrp);
++
++ return (u64) tg->shares;
++}
++#endif /* CONFIG_FAIR_GROUP_SCHED */
++
++#ifdef CONFIG_RT_GROUP_SCHED
++static int cpu_rt_runtime_write(struct cgroup *cgrp, struct cftype *cft,
++ s64 val)
++{
++ return sched_group_set_rt_runtime(cgroup_tg(cgrp), val);
++}
++
++static s64 cpu_rt_runtime_read(struct cgroup *cgrp, struct cftype *cft)
++{
++ return sched_group_rt_runtime(cgroup_tg(cgrp));
++}
++
++static int cpu_rt_period_write_uint(struct cgroup *cgrp, struct cftype *cftype,
++ u64 rt_period_us)
++{
++ return sched_group_set_rt_period(cgroup_tg(cgrp), rt_period_us);
++}
++
++static u64 cpu_rt_period_read_uint(struct cgroup *cgrp, struct cftype *cft)
++{
++ return sched_group_rt_period(cgroup_tg(cgrp));
++}
++#endif /* CONFIG_RT_GROUP_SCHED */
++
++static struct cftype cpu_files[] = {
++#ifdef CONFIG_FAIR_GROUP_SCHED
++ {
++ .name = "shares",
++ .read_u64 = cpu_shares_read_u64,
++ .write_u64 = cpu_shares_write_u64,
++ },
++#endif
++#ifdef CONFIG_RT_GROUP_SCHED
++ {
++ .name = "rt_runtime_us",
++ .read_s64 = cpu_rt_runtime_read,
++ .write_s64 = cpu_rt_runtime_write,
++ },
++ {
++ .name = "rt_period_us",
++ .read_u64 = cpu_rt_period_read_uint,
++ .write_u64 = cpu_rt_period_write_uint,
++ },
++#endif
++};
++
++static int cpu_cgroup_populate(struct cgroup_subsys *ss, struct cgroup *cont)
++{
++ return cgroup_add_files(cont, ss, cpu_files, ARRAY_SIZE(cpu_files));
++}
++
++struct cgroup_subsys cpu_cgroup_subsys = {
++ .name = "cpu",
++ .create = cpu_cgroup_create,
++ .destroy = cpu_cgroup_destroy,
++ .can_attach = cpu_cgroup_can_attach,
++ .attach = cpu_cgroup_attach,
++ .populate = cpu_cgroup_populate,
++ .subsys_id = cpu_cgroup_subsys_id,
++ .early_init = 1,
++};
++
++#endif /* CONFIG_CGROUP_SCHED */
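
The cpu_files[] table and the cpu_cgroup_subsys registration above are what expose these knobs through the cgroup filesystem: each group directory gets cpu.shares (the group's CFS weight, clamped to MIN_SHARES..MAX_SHARES by sched_group_set_shares()) and, with CONFIG_RT_GROUP_SCHED, cpu.rt_runtime_us and cpu.rt_period_us, where a negative runtime means unlimited. A minimal sketch of driving the shares knob from user space; the /dev/cgroup mount point and the "batch" group directory are assumptions, not part of the patch:

#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	/* Halve the group's weight relative to the nice-0 default of 1024. */
	int fd = open("/dev/cgroup/batch/cpu.shares", O_WRONLY);

	if (fd < 0)
		return 1;
	if (write(fd, "512", 3) != 3) {
		close(fd);
		return 1;
	}
	close(fd);
	return 0;
}
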
++
++#ifdef CONFIG_CGROUP_CPUACCT
++
++/*
++ * CPU accounting code for task groups.
++ *
++ * Based on the work by Paul Menage (menage@google.com) and Balbir Singh
++ * (balbir@in.ibm.com).
++ */
++
++/* track cpu usage of a group of tasks */
++struct cpuacct {
++ struct cgroup_subsys_state css;
++ /* cpuusage holds pointer to a u64-type object on every cpu */
++ u64 *cpuusage;
++};
++
++struct cgroup_subsys cpuacct_subsys;
++
++/* return cpu accounting group corresponding to this container */
++static inline struct cpuacct *cgroup_ca(struct cgroup *cgrp)
++{
++ return container_of(cgroup_subsys_state(cgrp, cpuacct_subsys_id),
++ struct cpuacct, css);
++}
++
++/* return cpu accounting group to which this task belongs */
++static inline struct cpuacct *task_ca(struct task_struct *tsk)
++{
++ return container_of(task_subsys_state(tsk, cpuacct_subsys_id),
++ struct cpuacct, css);
++}
++
++/* create a new cpu accounting group */
++static struct cgroup_subsys_state *cpuacct_create(
++ struct cgroup_subsys *ss, struct cgroup *cgrp)
++{
++ struct cpuacct *ca = kzalloc(sizeof(*ca), GFP_KERNEL);
++
++ if (!ca)
++ return ERR_PTR(-ENOMEM);
++
++ ca->cpuusage = alloc_percpu(u64);
++ if (!ca->cpuusage) {
++ kfree(ca);
++ return ERR_PTR(-ENOMEM);
++ }
++
++ return &ca->css;
++}
++
++/* destroy an existing cpu accounting group */
++static void
++cpuacct_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp)
++{
++ struct cpuacct *ca = cgroup_ca(cgrp);
++
++ free_percpu(ca->cpuusage);
++ kfree(ca);
++}
++
++/* return total cpu usage (in nanoseconds) of a group */
++static u64 cpuusage_read(struct cgroup *cgrp, struct cftype *cft)
++{
++ struct cpuacct *ca = cgroup_ca(cgrp);
++ u64 totalcpuusage = 0;
++ int i;
++
++ for_each_possible_cpu(i) {
++ u64 *cpuusage = percpu_ptr(ca->cpuusage, i);
++
++ /*
++ * Take rq->lock to make 64-bit addition safe on 32-bit
++ * platforms.
++ */
++ spin_lock_irq(&cpu_rq(i)->lock);
++ totalcpuusage += *cpuusage;
++ spin_unlock_irq(&cpu_rq(i)->lock);
++ }
++
++ return totalcpuusage;
++}
++
++static int cpuusage_write(struct cgroup *cgrp, struct cftype *cftype,
++ u64 reset)
++{
++ struct cpuacct *ca = cgroup_ca(cgrp);
++ int err = 0;
++ int i;
++
++ if (reset) {
++ err = -EINVAL;
++ goto out;
++ }
++
++ for_each_possible_cpu(i) {
++ u64 *cpuusage = percpu_ptr(ca->cpuusage, i);
++
++ spin_lock_irq(&cpu_rq(i)->lock);
++ *cpuusage = 0;
++ spin_unlock_irq(&cpu_rq(i)->lock);
++ }
++out:
++ return err;
++}
++
++static struct cftype files[] = {
++ {
++ .name = "usage",
++ .read_u64 = cpuusage_read,
++ .write_u64 = cpuusage_write,
++ },
++};
++
++static int cpuacct_populate(struct cgroup_subsys *ss, struct cgroup *cgrp)
++{
++ return cgroup_add_files(cgrp, ss, files, ARRAY_SIZE(files));
++}
++
++/*
++ * charge this task's execution time to its accounting group.
++ *
++ * called with rq->lock held.
++ */
++static void cpuacct_charge(struct task_struct *tsk, u64 cputime)
++{
++ struct cpuacct *ca;
++
++ if (!cpuacct_subsys.active)
++ return;
++
++ ca = task_ca(tsk);
++ if (ca) {
++ u64 *cpuusage = percpu_ptr(ca->cpuusage, task_cpu(tsk));
++
++ *cpuusage += cputime;
++ }
++}
++
++struct cgroup_subsys cpuacct_subsys = {
++ .name = "cpuacct",
++ .create = cpuacct_create,
++ .destroy = cpuacct_destroy,
++ .populate = cpuacct_populate,
++ .subsys_id = cpuacct_subsys_id,
++};
++#endif /* CONFIG_CGROUP_CPUACCT */
+diff -Nurb linux-2.6.27-590/kernel/sched.c.rej linux-2.6.27-591/kernel/sched.c.rej
+--- linux-2.6.27-590/kernel/sched.c.rej 1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.27-591/kernel/sched.c.rej 2010-01-29 15:43:46.000000000 -0500
+@@ -0,0 +1,258 @@
++***************
++*** 23,28 ****
++ #include <linux/nmi.h>
++ #include <linux/init.h>
++ #include <asm/uaccess.h>
++ #include <linux/highmem.h>
++ #include <linux/smp_lock.h>
++ #include <asm/mmu_context.h>
++--- 23,29 ----
++ #include <linux/nmi.h>
++ #include <linux/init.h>
++ #include <asm/uaccess.h>
+++ #include <linux/arrays.h>
++ #include <linux/highmem.h>
++ #include <linux/smp_lock.h>
++ #include <asm/mmu_context.h>
++***************
++*** 451,456 ****
++
++ repeat_lock_task:
++ rq = task_rq(p);
++ spin_lock(&rq->lock);
++ if (unlikely(rq != task_rq(p))) {
++ spin_unlock(&rq->lock);
++--- 455,461 ----
++
++ repeat_lock_task:
++ rq = task_rq(p);
+++
++ spin_lock(&rq->lock);
++ if (unlikely(rq != task_rq(p))) {
++ spin_unlock(&rq->lock);
++***************
++*** 1761,1766 ****
++ * event cannot wake it up and insert it on the runqueue either.
++ */
++ p->state = TASK_RUNNING;
++
++ /*
++ * Make sure we do not leak PI boosting priority to the child:
++--- 1766,1786 ----
++ * event cannot wake it up and insert it on the runqueue either.
++ */
++ p->state = TASK_RUNNING;
+++ #ifdef CONFIG_CHOPSTIX
+++ /* The jiffy of last interruption */
+++ if (p->state & TASK_UNINTERRUPTIBLE) {
+++ p->last_interrupted=jiffies;
+++ }
+++ else
+++ if (p->state & TASK_INTERRUPTIBLE) {
+++ p->last_interrupted=INTERRUPTIBLE;
+++ }
+++ else
+++ p->last_interrupted=RUNNING;
+++
+++ /* The jiffy of last execution */
+++ p->last_ran_j=jiffies;
+++ #endif
++
++ /*
++ * Make sure we do not leak PI boosting priority to the child:
++***************
++*** 3628,3633 ****
++
++ #endif
++
++ static inline int interactive_sleep(enum sleep_type sleep_type)
++ {
++ return (sleep_type == SLEEP_INTERACTIVE ||
++--- 3648,3654 ----
++
++ #endif
++
+++
++ static inline int interactive_sleep(enum sleep_type sleep_type)
++ {
++ return (sleep_type == SLEEP_INTERACTIVE ||
++***************
++*** 3637,3652 ****
++ /*
++ * schedule() is the main scheduler function.
++ */
++ asmlinkage void __sched schedule(void)
++ {
++ struct task_struct *prev, *next;
++ struct prio_array *array;
++ struct list_head *queue;
++ unsigned long long now;
++- unsigned long run_time;
++ int cpu, idx, new_prio;
++ long *switch_count;
++ struct rq *rq;
++
++ /*
++ * Test if we are atomic. Since do_exit() needs to call into
++--- 3658,3685 ----
++ /*
++ * schedule() is the main scheduler function.
++ */
+++
+++ #ifdef CONFIG_CHOPSTIX
+++ extern void (*rec_event)(void *,unsigned int);
+++ struct event_spec {
+++ unsigned long pc;
+++ unsigned long dcookie;
+++ unsigned int count;
+++ unsigned int reason;
+++ };
+++ #endif
+++
++ asmlinkage void __sched schedule(void)
++ {
++ struct task_struct *prev, *next;
++ struct prio_array *array;
++ struct list_head *queue;
++ unsigned long long now;
+++ unsigned long run_time, diff;
++ int cpu, idx, new_prio;
++ long *switch_count;
++ struct rq *rq;
+++ int sampling_reason;
++
++ /*
++ * Test if we are atomic. Since do_exit() needs to call into
++***************
++*** 3700,3705 ****
++ switch_count = &prev->nivcsw;
++ if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
++ switch_count = &prev->nvcsw;
++ if (unlikely((prev->state & TASK_INTERRUPTIBLE) &&
++ unlikely(signal_pending(prev))))
++ prev->state = TASK_RUNNING;
++--- 3733,3739 ----
++ switch_count = &prev->nivcsw;
++ if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
++ switch_count = &prev->nvcsw;
+++
++ if (unlikely((prev->state & TASK_INTERRUPTIBLE) &&
++ unlikely(signal_pending(prev))))
++ prev->state = TASK_RUNNING;
++***************
++*** 3709,3714 ****
++ vx_uninterruptible_inc(prev);
++ }
++ deactivate_task(prev, rq);
++ }
++ }
++
++--- 3743,3759 ----
++ vx_uninterruptible_inc(prev);
++ }
++ deactivate_task(prev, rq);
+++ #ifdef CONFIG_CHOPSTIX
+++ /* An uninterruptible process just yielded. Record the current jiffie */
+++ if (prev->state & TASK_UNINTERRUPTIBLE) {
+++ prev->last_interrupted=jiffies;
+++ }
+++ /* An interruptible process just yielded, or it got preempted.
+++ * Mark it as interruptible */
+++ else if (prev->state & TASK_INTERRUPTIBLE) {
+++ prev->last_interrupted=INTERRUPTIBLE;
+++ }
+++ #endif
++ }
++ }
++
++***************
++*** 3785,3790 ****
++ prev->sleep_avg = 0;
++ prev->timestamp = prev->last_ran = now;
++
++ sched_info_switch(prev, next);
++ if (likely(prev != next)) {
++ next->timestamp = next->last_ran = now;
++--- 3830,3869 ----
++ prev->sleep_avg = 0;
++ prev->timestamp = prev->last_ran = now;
++
+++ #ifdef CONFIG_CHOPSTIX
+++ /* Run only if the Chopstix module so decrees it */
+++ if (rec_event) {
+++ prev->last_ran_j = jiffies;
+++ if (next->last_interrupted!=INTERRUPTIBLE) {
+++ if (next->last_interrupted!=RUNNING) {
+++ diff = (jiffies-next->last_interrupted);
+++ sampling_reason = 0;/* BLOCKING */
+++ }
+++ else {
+++ diff = jiffies-next->last_ran_j;
+++ sampling_reason = 1;/* PREEMPTION */
+++ }
+++
+++ if (diff >= HZ/10) {
+++ struct event event;
+++ struct event_spec espec;
+++ struct pt_regs *regs;
+++ regs = task_pt_regs(current);
+++
+++ espec.reason = sampling_reason;
+++ event.event_data=&espec;
+++ event.task=next;
+++ espec.pc=regs->eip;
+++ event.event_type=2;
+++ /* index in the event array currently set up */
+++ /* make sure the counters are loaded in the order we want them to show up*/
+++ (*rec_event)(&event, diff);
+++ }
+++ }
+++ /* next has been elected to run */
+++ next->last_interrupted=0;
+++ }
+++ #endif
++ sched_info_switch(prev, next);
++ if (likely(prev != next)) {
++ next->timestamp = next->last_ran = now;
++***************
++*** 5737,5742 ****
++ jiffies_to_timespec(p->policy == SCHED_FIFO ?
++ 0 : task_timeslice(p), &t);
++ read_unlock(&tasklist_lock);
++ retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0;
++ out_nounlock:
++ return retval;
++--- 5817,5823 ----
++ jiffies_to_timespec(p->policy == SCHED_FIFO ?
++ 0 : task_timeslice(p), &t);
++ read_unlock(&tasklist_lock);
+++
++ retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0;
++ out_nounlock:
++ return retval;
++***************
++*** 7980,7982 ****
++ }
++
++ #endif
++--- 8061,8080 ----
++ }
++
++ #endif
+++
+++ #ifdef CONFIG_CHOPSTIX
+++ void (*rec_event)(void *,unsigned int) = NULL;
+++
+++ /* To support safe calling from asm */
+++ asmlinkage void rec_event_asm (struct event *event_signature_in, unsigned int count) {
+++ struct pt_regs *regs;
+++ struct event_spec *es = event_signature_in->event_data;
+++ regs = task_pt_regs(current);
+++ event_signature_in->task=current;
+++ es->pc=regs->eip;
+++ event_signature_in->count=1;
+++ (*rec_event)(event_signature_in, count);
+++ }
+++ EXPORT_SYMBOL(rec_event);
+++ EXPORT_SYMBOL(in_sched_functions);
+++ #endif
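
The rejected schedule() hunk above carries the core Chopstix sampling decision: when a task is elected to run, the gap since it last blocked (or, if it was only preempted, since it last ran) is measured in jiffies, and an event with the matching reason code is recorded only when that gap reaches HZ/10, i.e. roughly 100 ms at any HZ. A standalone restatement of just that decision; plain C, not part of the patch, and only jiffies, HZ and the 0/1 reason codes come from the hunk, the rest is illustrative:

#include <stdio.h>

/* reason 0 = the task had been blocked, reason 1 = it had only been preempted */
static int should_sample(unsigned long now, unsigned long hz, int was_blocked,
			 unsigned long last_blocked, unsigned long last_ran,
			 unsigned long *gap, int *reason)
{
	if (was_blocked) {
		*gap = now - last_blocked;
		*reason = 0;			/* BLOCKING */
	} else {
		*gap = now - last_ran;
		*reason = 1;			/* PREEMPTION */
	}
	return *gap >= hz / 10;			/* about 100 ms worth of jiffies */
}

int main(void)
{
	unsigned long gap;
	int reason;

	/* HZ = 250: a task that blocked 30 jiffies (120 ms) ago gets sampled */
	if (should_sample(1030, 250, 1, 1000, 1020, &gap, &reason))
		printf("gap=%lu jiffies, reason=%d\n", gap, reason);
	return 0;
}
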
+diff -Nurb linux-2.6.27-590/mm/memory.c linux-2.6.27-591/mm/memory.c
+--- linux-2.6.27-590/mm/memory.c 2010-01-26 17:49:20.000000000 -0500
++++ linux-2.6.27-591/mm/memory.c 2010-01-29 15:43:46.000000000 -0500
+@@ -61,6 +61,7 @@
+
+ #include <linux/swapops.h>
+ #include <linux/elf.h>
++#include <linux/arrays.h>
+
+ #include "internal.h"
+
+@@ -2690,6 +2691,15 @@
+ return ret;
+ }
+
++extern void (*rec_event)(void *,unsigned int);
++struct event_spec {
++ unsigned long pc;
++ unsigned long dcookie;
++ unsigned count;
++ unsigned char reason;
++};
++
++
+ /*
+ * By the time we get here, we already hold the mm semaphore
+ */
+@@ -2719,6 +2729,24 @@
+ if (!pte)
+ return VM_FAULT_OOM;
+
++#ifdef CONFIG_CHOPSTIX
++ if (rec_event) {
++ struct event event;
++ struct event_spec espec;
++ struct pt_regs *regs;
++ unsigned int pc;
++ regs = task_pt_regs(current);
++ pc = regs->eip & (unsigned int) ~4095;
++
++ espec.reason = 0; /* alloc */
++ event.event_data=&espec;
++ event.task = current;
++ espec.pc=pc;
++ event.event_type=5;
++ (*rec_event)(&event, 1);
++ }
++#endif
++
+ return handle_pte_fault(mm, vma, address, pte, pmd, write_access);
+ }
+
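
In the handle_mm_fault() hook just above, the faulting instruction pointer is masked with ~4095 before being recorded, i.e. rounded down to the start of its 4 KiB page, so fault samples group by code page rather than by exact instruction. A quick standalone check of that masking; the address is an arbitrary example, not taken from the patch:

#include <stdio.h>

int main(void)
{
	unsigned int pc = 0xc01a2b34;	/* example faulting EIP */

	/* ~4095 == 0xfffff000: clear the low 12 bits, keeping the 4 KiB page base */
	printf("0x%08x -> 0x%08x\n", pc, pc & (unsigned int)~4095);
	return 0;
}
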
+diff -Nurb linux-2.6.27-590/mm/memory.c.orig linux-2.6.27-591/mm/memory.c.orig
+--- linux-2.6.27-590/mm/memory.c.orig 1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.27-591/mm/memory.c.orig 2010-01-26 17:49:20.000000000 -0500
+@@ -0,0 +1,3035 @@
++/*
++ * linux/mm/memory.c
++ *
++ * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
++ */
++
++/*
++ * demand-loading started 01.12.91 - seems it is high on the list of
++ * things wanted, and it should be easy to implement. - Linus
++ */
++
++/*
++ * Ok, demand-loading was easy, shared pages a little bit trickier. Shared
++ * pages started 02.12.91, seems to work. - Linus.
++ *
++ * Tested sharing by executing about 30 /bin/sh: under the old kernel it
++ * would have taken more than the 6M I have free, but it worked well as
++ * far as I could see.
++ *
++ * Also corrected some "invalidate()"s - I wasn't doing enough of them.
++ */
++
++/*
++ * Real VM (paging to/from disk) started 18.12.91. Much more work and
++ * thought has to go into this. Oh, well..
++ * 19.12.91 - works, somewhat. Sometimes I get faults, don't know why.
++ * Found it. Everything seems to work now.
++ * 20.12.91 - Ok, making the swap-device changeable like the root.
++ */
++
++/*
++ * 05.04.94 - Multi-page memory management added for v1.1.
++ * Idea by Alex Bligh (alex@cconcepts.co.uk)
++ *
++ * 16.07.99 - Support of BIGMEM added by Gerhard Wichert, Siemens AG
++ * (Gerhard.Wichert@pdb.siemens.de)
++ *
++ * Aug/Sep 2004 Changed to four level page tables (Andi Kleen)
++ */
++
++#include <linux/kernel_stat.h>
++#include <linux/mm.h>
++#include <linux/hugetlb.h>
++#include <linux/mman.h>
++#include <linux/swap.h>
++#include <linux/highmem.h>
++#include <linux/pagemap.h>
++#include <linux/rmap.h>
++#include <linux/module.h>
++#include <linux/delayacct.h>
++#include <linux/init.h>
++#include <linux/writeback.h>
++#include <linux/memcontrol.h>
++#include <linux/mmu_notifier.h>
++
++#include <asm/pgalloc.h>
++#include <asm/uaccess.h>
++#include <asm/tlb.h>
++#include <asm/tlbflush.h>
++#include <asm/pgtable.h>
++
++#include <linux/swapops.h>
++#include <linux/elf.h>
++
++#include "internal.h"
++
++#ifndef CONFIG_NEED_MULTIPLE_NODES
++/* use the per-pgdat data instead for discontigmem - mbligh */
++unsigned long max_mapnr;
++struct page *mem_map;
++
++EXPORT_SYMBOL(max_mapnr);
++EXPORT_SYMBOL(mem_map);
++#endif
++
++unsigned long num_physpages;
++/*
++ * A number of key systems in x86 including ioremap() rely on the assumption
++ * that high_memory defines the upper bound on direct map memory, the end
++ * of ZONE_NORMAL. Under CONFIG_DISCONTIG this means that max_low_pfn and
++ * highstart_pfn must be the same; there must be no gap between ZONE_NORMAL
++ * and ZONE_HIGHMEM.
++ */
++void * high_memory;
++
++EXPORT_SYMBOL(num_physpages);
++EXPORT_SYMBOL(high_memory);
++
++/*
++ * Randomize the address space (stacks, mmaps, brk, etc.).
++ *
++ * ( When CONFIG_COMPAT_BRK=y we exclude brk from randomization,
++ * as ancient (libc5 based) binaries can segfault. )
++ */
++int randomize_va_space __read_mostly =
++#ifdef CONFIG_COMPAT_BRK
++ 1;
++#else
++ 2;
++#endif
++
++static int __init disable_randmaps(char *s)
++{
++ randomize_va_space = 0;
++ return 1;
++}
++__setup("norandmaps", disable_randmaps);
++
++
++/*
++ * If a p?d_bad entry is found while walking page tables, report
++ * the error, before resetting entry to p?d_none. Usually (but
++ * very seldom) called out from the p?d_none_or_clear_bad macros.
++ */
++
++void pgd_clear_bad(pgd_t *pgd)
++{
++ pgd_ERROR(*pgd);
++ pgd_clear(pgd);
++}
++
++void pud_clear_bad(pud_t *pud)
++{
++ pud_ERROR(*pud);
++ pud_clear(pud);
++}
++
++void pmd_clear_bad(pmd_t *pmd)
++{
++ pmd_ERROR(*pmd);
++ pmd_clear(pmd);
++}
++
++/*
++ * Note: this doesn't free the actual pages themselves. That
++ * has been handled earlier when unmapping all the memory regions.
++ */
++static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd)
++{
++ pgtable_t token = pmd_pgtable(*pmd);
++ pmd_clear(pmd);
++ pte_free_tlb(tlb, token);
++ tlb->mm->nr_ptes--;
++}
++
++static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
++ unsigned long addr, unsigned long end,
++ unsigned long floor, unsigned long ceiling)
++{
++ pmd_t *pmd;
++ unsigned long next;
++ unsigned long start;
++
++ start = addr;
++ pmd = pmd_offset(pud, addr);
++ do {
++ next = pmd_addr_end(addr, end);
++ if (pmd_none_or_clear_bad(pmd))
++ continue;
++ free_pte_range(tlb, pmd);
++ } while (pmd++, addr = next, addr != end);
++
++ start &= PUD_MASK;
++ if (start < floor)
++ return;
++ if (ceiling) {
++ ceiling &= PUD_MASK;
++ if (!ceiling)
++ return;
++ }
++ if (end - 1 > ceiling - 1)
++ return;
++
++ pmd = pmd_offset(pud, start);
++ pud_clear(pud);
++ pmd_free_tlb(tlb, pmd);
++}
++
++static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
++ unsigned long addr, unsigned long end,
++ unsigned long floor, unsigned long ceiling)
++{
++ pud_t *pud;
++ unsigned long next;
++ unsigned long start;
++
++ start = addr;
++ pud = pud_offset(pgd, addr);
++ do {
++ next = pud_addr_end(addr, end);
++ if (pud_none_or_clear_bad(pud))
++ continue;
++ free_pmd_range(tlb, pud, addr, next, floor, ceiling);
++ } while (pud++, addr = next, addr != end);
++
++ start &= PGDIR_MASK;
++ if (start < floor)
++ return;
++ if (ceiling) {
++ ceiling &= PGDIR_MASK;
++ if (!ceiling)
++ return;
++ }
++ if (end - 1 > ceiling - 1)
++ return;
++
++ pud = pud_offset(pgd, start);
++ pgd_clear(pgd);
++ pud_free_tlb(tlb, pud);
++}
++
++/*
++ * This function frees user-level page tables of a process.
++ *
++ * Must be called with pagetable lock held.
++ */
++void free_pgd_range(struct mmu_gather *tlb,
++ unsigned long addr, unsigned long end,
++ unsigned long floor, unsigned long ceiling)
++{
++ pgd_t *pgd;
++ unsigned long next;
++ unsigned long start;
++
++ /*
++ * The next few lines have given us lots of grief...
++ *
++ * Why are we testing PMD* at this top level? Because often
++ * there will be no work to do at all, and we'd prefer not to
++ * go all the way down to the bottom just to discover that.
++ *
++ * Why all these "- 1"s? Because 0 represents both the bottom
++ * of the address space and the top of it (using -1 for the
++ * top wouldn't help much: the masks would do the wrong thing).
++ * The rule is that addr 0 and floor 0 refer to the bottom of
++ * the address space, but end 0 and ceiling 0 refer to the top
++ * Comparisons need to use "end - 1" and "ceiling - 1" (though
++ * that end 0 case should be mythical).
++ *
++ * Wherever addr is brought up or ceiling brought down, we must
++ * be careful to reject "the opposite 0" before it confuses the
++ * subsequent tests. But what about where end is brought down
++ * by PMD_SIZE below? no, end can't go down to 0 there.
++ *
++ * Whereas we round start (addr) and ceiling down, by different
++ * masks at different levels, in order to test whether a table
++ * now has no other vmas using it, so can be freed, we don't
++ * bother to round floor or end up - the tests don't need that.
++ */
++
++ addr &= PMD_MASK;
++ if (addr < floor) {
++ addr += PMD_SIZE;
++ if (!addr)
++ return;
++ }
++ if (ceiling) {
++ ceiling &= PMD_MASK;
++ if (!ceiling)
++ return;
++ }
++ if (end - 1 > ceiling - 1)
++ end -= PMD_SIZE;
++ if (addr > end - 1)
++ return;
++
++ start = addr;
++ pgd = pgd_offset(tlb->mm, addr);
++ do {
++ next = pgd_addr_end(addr, end);
++ if (pgd_none_or_clear_bad(pgd))
++ continue;
++ free_pud_range(tlb, pgd, addr, next, floor, ceiling);
++ } while (pgd++, addr = next, addr != end);
++}
++
++void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma,
++ unsigned long floor, unsigned long ceiling)
++{
++ while (vma) {
++ struct vm_area_struct *next = vma->vm_next;
++ unsigned long addr = vma->vm_start;
++
++ /*
++ * Hide vma from rmap and vmtruncate before freeing pgtables
++ */
++ anon_vma_unlink(vma);
++ unlink_file_vma(vma);
++
++ if (is_vm_hugetlb_page(vma)) {
++ hugetlb_free_pgd_range(tlb, addr, vma->vm_end,
++ floor, next? next->vm_start: ceiling);
++ } else {
++ /*
++ * Optimization: gather nearby vmas into one call down
++ */
++ while (next && next->vm_start <= vma->vm_end + PMD_SIZE
++ && !is_vm_hugetlb_page(next)) {
++ vma = next;
++ next = vma->vm_next;
++ anon_vma_unlink(vma);
++ unlink_file_vma(vma);
++ }
++ free_pgd_range(tlb, addr, vma->vm_end,
++ floor, next? next->vm_start: ceiling);
++ }
++ vma = next;
++ }
++}
++
++int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
++{
++ pgtable_t new = pte_alloc_one(mm, address);
++ if (!new)
++ return -ENOMEM;
++
++ /*
++ * Ensure all pte setup (eg. pte page lock and page clearing) are
++ * visible before the pte is made visible to other CPUs by being
++ * put into page tables.
++ *
++ * The other side of the story is the pointer chasing in the page
++ * table walking code (when walking the page table without locking;
++ * ie. most of the time). Fortunately, these data accesses consist
++ * of a chain of data-dependent loads, meaning most CPUs (alpha
++ * being the notable exception) will already guarantee loads are
++ * seen in-order. See the alpha page table accessors for the
++ * smp_read_barrier_depends() barriers in page table walking code.
++ */
++ smp_wmb(); /* Could be smp_wmb__xxx(before|after)_spin_lock */
++
++ spin_lock(&mm->page_table_lock);
++ if (!pmd_present(*pmd)) { /* Has another populated it ? */
++ mm->nr_ptes++;
++ pmd_populate(mm, pmd, new);
++ new = NULL;
++ }
++ spin_unlock(&mm->page_table_lock);
++ if (new)
++ pte_free(mm, new);
++ return 0;
++}
++
++int __pte_alloc_kernel(pmd_t *pmd, unsigned long address)
++{
++ pte_t *new = pte_alloc_one_kernel(&init_mm, address);
++ if (!new)
++ return -ENOMEM;
++
++ smp_wmb(); /* See comment in __pte_alloc */
++
++ spin_lock(&init_mm.page_table_lock);
++ if (!pmd_present(*pmd)) { /* Has another populated it ? */
++ pmd_populate_kernel(&init_mm, pmd, new);
++ new = NULL;
++ }
++ spin_unlock(&init_mm.page_table_lock);
++ if (new)
++ pte_free_kernel(&init_mm, new);
++ return 0;
++}
++
++static inline void add_mm_rss(struct mm_struct *mm, int file_rss, int anon_rss)
++{
++ if (file_rss)
++ add_mm_counter(mm, file_rss, file_rss);
++ if (anon_rss)
++ add_mm_counter(mm, anon_rss, anon_rss);
++}
++
++/*
++ * This function is called to print an error when a bad pte
++ * is found. For example, we might have a PFN-mapped pte in
++ * a region that doesn't allow it.
++ *
++ * The calling function must still handle the error.
++ */
++static void print_bad_pte(struct vm_area_struct *vma, pte_t pte,
++ unsigned long vaddr)
++{
++ printk(KERN_ERR "Bad pte = %08llx, process = %s, "
++ "vm_flags = %lx, vaddr = %lx\n",
++ (long long)pte_val(pte),
++ (vma->vm_mm == current->mm ? current->comm : "???"),
++ vma->vm_flags, vaddr);
++ dump_stack();
++}
++
++static inline int is_cow_mapping(unsigned int flags)
++{
++ return (flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
++}
++
++/*
++ * vm_normal_page -- This function gets the "struct page" associated with a pte.
++ *
++ * "Special" mappings do not wish to be associated with a "struct page" (either
++ * it doesn't exist, or it exists but they don't want to touch it). In this
++ * case, NULL is returned here. "Normal" mappings do have a struct page.
++ *
++ * There are 2 broad cases. Firstly, an architecture may define a pte_special()
++ * pte bit, in which case this function is trivial. Secondly, an architecture
++ * may not have a spare pte bit, which requires a more complicated scheme,
++ * described below.
++ *
++ * A raw VM_PFNMAP mapping (ie. one that is not COWed) is always considered a
++ * special mapping (even if there are underlying and valid "struct pages").
++ * COWed pages of a VM_PFNMAP are always normal.
++ *
++ * The way we recognize COWed pages within VM_PFNMAP mappings is through the
++ * rules set up by "remap_pfn_range()": the vma will have the VM_PFNMAP bit
++ * set, and the vm_pgoff will point to the first PFN mapped: thus every special
++ * mapping will always honor the rule
++ *
++ * pfn_of_page == vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT)
++ *
++ * And for normal mappings this is false.
++ *
++ * This restricts such mappings to be a linear translation from virtual address
++ * to pfn. To get around this restriction, we allow arbitrary mappings so long
++ * as the vma is not a COW mapping; in that case, we know that all ptes are
++ * special (because none can have been COWed).
++ *
++ *
++ * In order to support COW of arbitrary special mappings, we have VM_MIXEDMAP.
++ *
++ * VM_MIXEDMAP mappings can likewise contain memory with or without "struct
++ * page" backing, however the difference is that _all_ pages with a struct
++ * page (that is, those where pfn_valid is true) are refcounted and considered
++ * normal pages by the VM. The disadvantage is that pages are refcounted
++ * (which can be slower and simply not an option for some PFNMAP users). The
++ * advantage is that we don't have to follow the strict linearity rule of
++ * PFNMAP mappings in order to support COWable mappings.
++ *
++ */
++#ifdef __HAVE_ARCH_PTE_SPECIAL
++# define HAVE_PTE_SPECIAL 1
++#else
++# define HAVE_PTE_SPECIAL 0
++#endif
++struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
++ pte_t pte)
++{
++ unsigned long pfn;
++
++ if (HAVE_PTE_SPECIAL) {
++ if (likely(!pte_special(pte))) {
++ VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
++ return pte_page(pte);
++ }
++ VM_BUG_ON(!(vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP)));
++ return NULL;
++ }
++
++ /* !HAVE_PTE_SPECIAL case follows: */
++
++ pfn = pte_pfn(pte);
++
++ if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
++ if (vma->vm_flags & VM_MIXEDMAP) {
++ if (!pfn_valid(pfn))
++ return NULL;
++ goto out;
++ } else {
++ unsigned long off;
++ off = (addr - vma->vm_start) >> PAGE_SHIFT;
++ if (pfn == vma->vm_pgoff + off)
++ return NULL;
++ if (!is_cow_mapping(vma->vm_flags))
++ return NULL;
++ }
++ }
++
++ VM_BUG_ON(!pfn_valid(pfn));
++
++ /*
++ * NOTE! We still have PageReserved() pages in the page tables.
++ *
++ * eg. VDSO mappings can cause them to exist.
++ */
++out:
++ return pfn_to_page(pfn);
++}
++
++/*
++ * copy one vm_area from one task to the other. Assumes the page tables
++ * already present in the new task to be cleared in the whole range
++ * covered by this vma.
++ */
++
++static inline void
++copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
++ pte_t *dst_pte, pte_t *src_pte, struct vm_area_struct *vma,
++ unsigned long addr, int *rss)
++{
++ unsigned long vm_flags = vma->vm_flags;
++ pte_t pte = *src_pte;
++ struct page *page;
++
++ /* pte contains position in swap or file, so copy. */
++ if (unlikely(!pte_present(pte))) {
++ if (!pte_file(pte)) {
++ swp_entry_t entry = pte_to_swp_entry(pte);
++
++ swap_duplicate(entry);
++ /* make sure dst_mm is on swapoff's mmlist. */
++ if (unlikely(list_empty(&dst_mm->mmlist))) {
++ spin_lock(&mmlist_lock);
++ if (list_empty(&dst_mm->mmlist))
++ list_add(&dst_mm->mmlist,
++ &src_mm->mmlist);
++ spin_unlock(&mmlist_lock);
++ }
++ if (is_write_migration_entry(entry) &&
++ is_cow_mapping(vm_flags)) {
++ /*
++ * COW mappings require pages in both parent
++ * and child to be set to read.
++ */
++ make_migration_entry_read(&entry);
++ pte = swp_entry_to_pte(entry);
++ set_pte_at(src_mm, addr, src_pte, pte);
++ }
++ }
++ goto out_set_pte;
++ }
++
++ /*
++ * If it's a COW mapping, write protect it both
++ * in the parent and the child
++ */
++ if (is_cow_mapping(vm_flags)) {
++ ptep_set_wrprotect(src_mm, addr, src_pte);
++ pte = pte_wrprotect(pte);
++ }
++
++ /*
++ * If it's a shared mapping, mark it clean in
++ * the child
++ */
++ if (vm_flags & VM_SHARED)
++ pte = pte_mkclean(pte);
++ pte = pte_mkold(pte);
++
++ page = vm_normal_page(vma, addr, pte);
++ if (page) {
++ get_page(page);
++ page_dup_rmap(page, vma, addr);
++ rss[!!PageAnon(page)]++;
++ }
++
++out_set_pte:
++ set_pte_at(dst_mm, addr, dst_pte, pte);
++}
++
++static int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
++ pmd_t *dst_pmd, pmd_t *src_pmd, struct vm_area_struct *vma,
++ unsigned long addr, unsigned long end)
++{
++ pte_t *src_pte, *dst_pte;
++ spinlock_t *src_ptl, *dst_ptl;
++ int progress = 0;
++ int rss[2];
++
++ if (!vx_rss_avail(dst_mm, ((end - addr)/PAGE_SIZE + 1)))
++ return -ENOMEM;
++
++again:
++ rss[1] = rss[0] = 0;
++ dst_pte = pte_alloc_map_lock(dst_mm, dst_pmd, addr, &dst_ptl);
++ if (!dst_pte)
++ return -ENOMEM;
++ src_pte = pte_offset_map_nested(src_pmd, addr);
++ src_ptl = pte_lockptr(src_mm, src_pmd);
++ spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
++ arch_enter_lazy_mmu_mode();
++
++ do {
++ /*
++ * We are holding two locks at this point - either of them
++ * could generate latencies in another task on another CPU.
++ */
++ if (progress >= 32) {
++ progress = 0;
++ if (need_resched() ||
++ spin_needbreak(src_ptl) || spin_needbreak(dst_ptl))
++ break;
++ }
++ if (pte_none(*src_pte)) {
++ progress++;
++ continue;
++ }
++ copy_one_pte(dst_mm, src_mm, dst_pte, src_pte, vma, addr, rss);
++ progress += 8;
++ } while (dst_pte++, src_pte++, addr += PAGE_SIZE, addr != end);
++
++ arch_leave_lazy_mmu_mode();
++ spin_unlock(src_ptl);
++ pte_unmap_nested(src_pte - 1);
++ add_mm_rss(dst_mm, rss[0], rss[1]);
++ pte_unmap_unlock(dst_pte - 1, dst_ptl);
++ cond_resched();
++ if (addr != end)
++ goto again;
++ return 0;
++}
++
++static inline int copy_pmd_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
++ pud_t *dst_pud, pud_t *src_pud, struct vm_area_struct *vma,
++ unsigned long addr, unsigned long end)
++{
++ pmd_t *src_pmd, *dst_pmd;
++ unsigned long next;
++
++ dst_pmd = pmd_alloc(dst_mm, dst_pud, addr);
++ if (!dst_pmd)
++ return -ENOMEM;
++ src_pmd = pmd_offset(src_pud, addr);
++ do {
++ next = pmd_addr_end(addr, end);
++ if (pmd_none_or_clear_bad(src_pmd))
++ continue;
++ if (copy_pte_range(dst_mm, src_mm, dst_pmd, src_pmd,
++ vma, addr, next))
++ return -ENOMEM;
++ } while (dst_pmd++, src_pmd++, addr = next, addr != end);
++ return 0;
++}
++
++static inline int copy_pud_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
++ pgd_t *dst_pgd, pgd_t *src_pgd, struct vm_area_struct *vma,
++ unsigned long addr, unsigned long end)
++{
++ pud_t *src_pud, *dst_pud;
++ unsigned long next;
++
++ dst_pud = pud_alloc(dst_mm, dst_pgd, addr);
++ if (!dst_pud)
++ return -ENOMEM;
++ src_pud = pud_offset(src_pgd, addr);
++ do {
++ next = pud_addr_end(addr, end);
++ if (pud_none_or_clear_bad(src_pud))
++ continue;
++ if (copy_pmd_range(dst_mm, src_mm, dst_pud, src_pud,
++ vma, addr, next))
++ return -ENOMEM;
++ } while (dst_pud++, src_pud++, addr = next, addr != end);
++ return 0;
++}
++
++int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
++ struct vm_area_struct *vma)
++{
++ pgd_t *src_pgd, *dst_pgd;
++ unsigned long next;
++ unsigned long addr = vma->vm_start;
++ unsigned long end = vma->vm_end;
++ int ret;
++
++ /*
++ * Don't copy ptes where a page fault will fill them correctly.
++ * Fork becomes much lighter when there are big shared or private
++ * readonly mappings. The tradeoff is that copy_page_range is more
++ * efficient than faulting.
++ */
++ if (!(vma->vm_flags & (VM_HUGETLB|VM_NONLINEAR|VM_PFNMAP|VM_INSERTPAGE))) {
++ if (!vma->anon_vma)
++ return 0;
++ }
++
++ if (is_vm_hugetlb_page(vma))
++ return copy_hugetlb_page_range(dst_mm, src_mm, vma);
++
++ /*
++ * We need to invalidate the secondary MMU mappings only when
++ * there could be a permission downgrade on the ptes of the
++ * parent mm. And a permission downgrade will only happen if
++ * is_cow_mapping() returns true.
++ */
++ if (is_cow_mapping(vma->vm_flags))
++ mmu_notifier_invalidate_range_start(src_mm, addr, end);
++
++ ret = 0;
++ dst_pgd = pgd_offset(dst_mm, addr);
++ src_pgd = pgd_offset(src_mm, addr);
++ do {
++ next = pgd_addr_end(addr, end);
++ if (pgd_none_or_clear_bad(src_pgd))
++ continue;
++ if (unlikely(copy_pud_range(dst_mm, src_mm, dst_pgd, src_pgd,
++ vma, addr, next))) {
++ ret = -ENOMEM;
++ break;
++ }
++ } while (dst_pgd++, src_pgd++, addr = next, addr != end);
++
++ if (is_cow_mapping(vma->vm_flags))
++ mmu_notifier_invalidate_range_end(src_mm,
++ vma->vm_start, end);
++ return ret;
++}
++
++static unsigned long zap_pte_range(struct mmu_gather *tlb,
++ struct vm_area_struct *vma, pmd_t *pmd,
++ unsigned long addr, unsigned long end,
++ long *zap_work, struct zap_details *details)
++{
++ struct mm_struct *mm = tlb->mm;
++ pte_t *pte;
++ spinlock_t *ptl;
++ int file_rss = 0;
++ int anon_rss = 0;
++
++ pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
++ arch_enter_lazy_mmu_mode();
++ do {
++ pte_t ptent = *pte;
++ if (pte_none(ptent)) {
++ (*zap_work)--;
++ continue;
++ }
++
++ (*zap_work) -= PAGE_SIZE;
++
++ if (pte_present(ptent)) {
++ struct page *page;
++
++ page = vm_normal_page(vma, addr, ptent);
++ if (unlikely(details) && page) {
++ /*
++ * unmap_shared_mapping_pages() wants to
++ * invalidate cache without truncating:
++ * unmap shared but keep private pages.
++ */
++ if (details->check_mapping &&
++ details->check_mapping != page->mapping)
++ continue;
++ /*
++ * Each page->index must be checked when
++ * invalidating or truncating nonlinear.
++ */
++ if (details->nonlinear_vma &&
++ (page->index < details->first_index ||
++ page->index > details->last_index))
++ continue;
++ }
++ ptent = ptep_get_and_clear_full(mm, addr, pte,
++ tlb->fullmm);
++ tlb_remove_tlb_entry(tlb, pte, addr);
++ if (unlikely(!page))
++ continue;
++ if (unlikely(details) && details->nonlinear_vma
++ && linear_page_index(details->nonlinear_vma,
++ addr) != page->index)
++ set_pte_at(mm, addr, pte,
++ pgoff_to_pte(page->index));
++ if (PageAnon(page))
++ anon_rss--;
++ else {
++ if (pte_dirty(ptent))
++ set_page_dirty(page);
++ if (pte_young(ptent))
++ SetPageReferenced(page);
++ file_rss--;
++ }
++ page_remove_rmap(page, vma);
++ tlb_remove_page(tlb, page);
++ continue;
++ }
++ /*
++ * If details->check_mapping, we leave swap entries;
++ * if details->nonlinear_vma, we leave file entries.
++ */
++ if (unlikely(details))
++ continue;
++ if (!pte_file(ptent))
++ free_swap_and_cache(pte_to_swp_entry(ptent));
++ pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
++ } while (pte++, addr += PAGE_SIZE, (addr != end && *zap_work > 0));
++
++ add_mm_rss(mm, file_rss, anon_rss);
++ arch_leave_lazy_mmu_mode();
++ pte_unmap_unlock(pte - 1, ptl);
++
++ return addr;
++}
++
++static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
++ struct vm_area_struct *vma, pud_t *pud,
++ unsigned long addr, unsigned long end,
++ long *zap_work, struct zap_details *details)
++{
++ pmd_t *pmd;
++ unsigned long next;
++
++ pmd = pmd_offset(pud, addr);
++ do {
++ next = pmd_addr_end(addr, end);
++ if (pmd_none_or_clear_bad(pmd)) {
++ (*zap_work)--;
++ continue;
++ }
++ next = zap_pte_range(tlb, vma, pmd, addr, next,
++ zap_work, details);
++ } while (pmd++, addr = next, (addr != end && *zap_work > 0));
++
++ return addr;
++}
++
++static inline unsigned long zap_pud_range(struct mmu_gather *tlb,
++ struct vm_area_struct *vma, pgd_t *pgd,
++ unsigned long addr, unsigned long end,
++ long *zap_work, struct zap_details *details)
++{
++ pud_t *pud;
++ unsigned long next;
++
++ pud = pud_offset(pgd, addr);
++ do {
++ next = pud_addr_end(addr, end);
++ if (pud_none_or_clear_bad(pud)) {
++ (*zap_work)--;
++ continue;
++ }
++ next = zap_pmd_range(tlb, vma, pud, addr, next,
++ zap_work, details);
++ } while (pud++, addr = next, (addr != end && *zap_work > 0));
++
++ return addr;
++}
++
++static unsigned long unmap_page_range(struct mmu_gather *tlb,
++ struct vm_area_struct *vma,
++ unsigned long addr, unsigned long end,
++ long *zap_work, struct zap_details *details)
++{
++ pgd_t *pgd;
++ unsigned long next;
++
++ if (details && !details->check_mapping && !details->nonlinear_vma)
++ details = NULL;
++
++ BUG_ON(addr >= end);
++ tlb_start_vma(tlb, vma);
++ pgd = pgd_offset(vma->vm_mm, addr);
++ do {
++ next = pgd_addr_end(addr, end);
++ if (pgd_none_or_clear_bad(pgd)) {
++ (*zap_work)--;
++ continue;
++ }
++ next = zap_pud_range(tlb, vma, pgd, addr, next,
++ zap_work, details);
++ } while (pgd++, addr = next, (addr != end && *zap_work > 0));
++ tlb_end_vma(tlb, vma);
++
++ return addr;
++}
++
++#ifdef CONFIG_PREEMPT
++# define ZAP_BLOCK_SIZE (8 * PAGE_SIZE)
++#else
++/* No preempt: go for improved straight-line efficiency */
++# define ZAP_BLOCK_SIZE (1024 * PAGE_SIZE)
++#endif
++
++/**
++ * unmap_vmas - unmap a range of memory covered by a list of vma's
++ * @tlbp: address of the caller's struct mmu_gather
++ * @vma: the starting vma
++ * @start_addr: virtual address at which to start unmapping
++ * @end_addr: virtual address at which to end unmapping
++ * @nr_accounted: Place number of unmapped pages in vm-accountable vma's here
++ * @details: details of nonlinear truncation or shared cache invalidation
++ *
++ * Returns the end address of the unmapping (restart addr if interrupted).
++ *
++ * Unmap all pages in the vma list.
++ *
++ * We aim to not hold locks for too long (for scheduling latency reasons).
++ * So zap pages in ZAP_BLOCK_SIZE bytecounts. This means we need to
++ * return the ending mmu_gather to the caller.
++ *
++ * Only addresses between `start' and `end' will be unmapped.
++ *
++ * The VMA list must be sorted in ascending virtual address order.
++ *
++ * unmap_vmas() assumes that the caller will flush the whole unmapped address
++ * range after unmap_vmas() returns. So the only responsibility here is to
++ * ensure that any thus-far unmapped pages are flushed before unmap_vmas()
++ * drops the lock and schedules.
++ */
++unsigned long unmap_vmas(struct mmu_gather **tlbp,
++ struct vm_area_struct *vma, unsigned long start_addr,
++ unsigned long end_addr, unsigned long *nr_accounted,
++ struct zap_details *details)
++{
++ long zap_work = ZAP_BLOCK_SIZE;
++ unsigned long tlb_start = 0; /* For tlb_finish_mmu */
++ int tlb_start_valid = 0;
++ unsigned long start = start_addr;
++ spinlock_t *i_mmap_lock = details? details->i_mmap_lock: NULL;
++ int fullmm = (*tlbp)->fullmm;
++ struct mm_struct *mm = vma->vm_mm;
++
++ mmu_notifier_invalidate_range_start(mm, start_addr, end_addr);
++ for ( ; vma && vma->vm_start < end_addr; vma = vma->vm_next) {
++ unsigned long end;
++
++ start = max(vma->vm_start, start_addr);
++ if (start >= vma->vm_end)
++ continue;
++ end = min(vma->vm_end, end_addr);
++ if (end <= vma->vm_start)
++ continue;
++
++ if (vma->vm_flags & VM_ACCOUNT)
++ *nr_accounted += (end - start) >> PAGE_SHIFT;
++
++ while (start != end) {
++ if (!tlb_start_valid) {
++ tlb_start = start;
++ tlb_start_valid = 1;
++ }
++
++ if (unlikely(is_vm_hugetlb_page(vma))) {
++ /*
++ * It is undesirable to test vma->vm_file as it
++ * should be non-null for a valid hugetlb area.
++ * However, vm_file will be NULL in the error
++ * cleanup path of do_mmap_pgoff. When the
++ * hugetlbfs ->mmap method fails,
++ * do_mmap_pgoff() nullifies vma->vm_file
++ * before calling this function to clean up.
++ * Since no pte has actually been set up, it is
++ * safe to do nothing in this case.
++ */
++ if (vma->vm_file) {
++ unmap_hugepage_range(vma, start, end, NULL);
++ zap_work -= (end - start) /
++ pages_per_huge_page(hstate_vma(vma));
++ }
++
++ start = end;
++ } else
++ start = unmap_page_range(*tlbp, vma,
++ start, end, &zap_work, details);
++
++ if (zap_work > 0) {
++ BUG_ON(start != end);
++ break;
++ }
++
++ tlb_finish_mmu(*tlbp, tlb_start, start);
++
++ if (need_resched() ||
++ (i_mmap_lock && spin_needbreak(i_mmap_lock))) {
++ if (i_mmap_lock) {
++ *tlbp = NULL;
++ goto out;
++ }
++ cond_resched();
++ }
++
++ *tlbp = tlb_gather_mmu(vma->vm_mm, fullmm);
++ tlb_start_valid = 0;
++ zap_work = ZAP_BLOCK_SIZE;
++ }
++ }
++out:
++ mmu_notifier_invalidate_range_end(mm, start_addr, end_addr);
++ return start; /* which is now the end (or restart) address */
++}
++
++/**
++ * zap_page_range - remove user pages in a given range
++ * @vma: vm_area_struct holding the applicable pages
++ * @address: starting address of pages to zap
++ * @size: number of bytes to zap
++ * @details: details of nonlinear truncation or shared cache invalidation
++ */
++unsigned long zap_page_range(struct vm_area_struct *vma, unsigned long address,
++ unsigned long size, struct zap_details *details)
++{
++ struct mm_struct *mm = vma->vm_mm;
++ struct mmu_gather *tlb;
++ unsigned long end = address + size;
++ unsigned long nr_accounted = 0;
++
++ lru_add_drain();
++ tlb = tlb_gather_mmu(mm, 0);
++ update_hiwater_rss(mm);
++ end = unmap_vmas(&tlb, vma, address, end, &nr_accounted, details);
++ if (tlb)
++ tlb_finish_mmu(tlb, address, end);
++ return end;
++}
++
++/**
++ * zap_vma_ptes - remove ptes mapping the vma
++ * @vma: vm_area_struct holding ptes to be zapped
++ * @address: starting address of pages to zap
++ * @size: number of bytes to zap
++ *
++ * This function only unmaps ptes assigned to VM_PFNMAP vmas.
++ *
++ * The entire address range must be fully contained within the vma.
++ *
++ * Returns 0 if successful.
++ */
++int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
++ unsigned long size)
++{
++ if (address < vma->vm_start || address + size > vma->vm_end ||
++ !(vma->vm_flags & VM_PFNMAP))
++ return -1;
++ zap_page_range(vma, address, size, NULL);
++ return 0;
++}
++EXPORT_SYMBOL_GPL(zap_vma_ptes);
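++
++/*
++ * Usage sketch for zap_vma_ptes() (illustrative only; the vma is assumed
++ * to be a VM_PFNMAP mapping that the caller set up itself):
++ *
++ *	err = zap_vma_ptes(vma, vma->vm_start,
++ *			   vma->vm_end - vma->vm_start);
++ *
++ * A non-zero return means the range was not fully inside the vma or the
++ * vma was not VM_PFNMAP; on success the ptes for the range are gone.
++ */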
++
++/*
++ * Do a quick page-table lookup for a single page.
++ */
++struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
++ unsigned int flags)
++{
++ pgd_t *pgd;
++ pud_t *pud;
++ pmd_t *pmd;
++ pte_t *ptep, pte;
++ spinlock_t *ptl;
++ struct page *page;
++ struct mm_struct *mm = vma->vm_mm;
++
++ page = follow_huge_addr(mm, address, flags & FOLL_WRITE);
++ if (!IS_ERR(page)) {
++ BUG_ON(flags & FOLL_GET);
++ goto out;
++ }
++
++ page = NULL;
++ pgd = pgd_offset(mm, address);
++ if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
++ goto no_page_table;
++
++ pud = pud_offset(pgd, address);
++ if (pud_none(*pud))
++ goto no_page_table;
++ if (pud_huge(*pud)) {
++ BUG_ON(flags & FOLL_GET);
++ page = follow_huge_pud(mm, address, pud, flags & FOLL_WRITE);
++ goto out;
++ }
++ if (unlikely(pud_bad(*pud)))
++ goto no_page_table;
++
++ pmd = pmd_offset(pud, address);
++ if (pmd_none(*pmd))
++ goto no_page_table;
++ if (pmd_huge(*pmd)) {
++ BUG_ON(flags & FOLL_GET);
++ page = follow_huge_pmd(mm, address, pmd, flags & FOLL_WRITE);
++ goto out;
++ }
++ if (unlikely(pmd_bad(*pmd)))
++ goto no_page_table;
++
++ ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
++
++ pte = *ptep;
++ if (!pte_present(pte))
++ goto no_page;
++ if ((flags & FOLL_WRITE) && !pte_write(pte))
++ goto unlock;
++ page = vm_normal_page(vma, address, pte);
++ if (unlikely(!page))
++ goto bad_page;
++
++ if (flags & FOLL_GET)
++ get_page(page);
++ if (flags & FOLL_TOUCH) {
++ if ((flags & FOLL_WRITE) &&
++ !pte_dirty(pte) && !PageDirty(page))
++ set_page_dirty(page);
++ mark_page_accessed(page);
++ }
++unlock:
++ pte_unmap_unlock(ptep, ptl);
++out:
++ return page;
++
++bad_page:
++ pte_unmap_unlock(ptep, ptl);
++ return ERR_PTR(-EFAULT);
++
++no_page:
++ pte_unmap_unlock(ptep, ptl);
++ if (!pte_none(pte))
++ return page;
++ /* Fall through to ZERO_PAGE handling */
++no_page_table:
++ /*
++ * When core dumping an enormous anonymous area that nobody
++ * has touched so far, we don't want to allocate page tables.
++ */
++ if (flags & FOLL_ANON) {
++ page = ZERO_PAGE(0);
++ if (flags & FOLL_GET)
++ get_page(page);
++ BUG_ON(flags & FOLL_WRITE);
++ }
++ return page;
++}
++
++/* Can we do the FOLL_ANON optimization? */
++static inline int use_zero_page(struct vm_area_struct *vma)
++{
++ /*
++ * We don't want to optimize FOLL_ANON for make_pages_present()
++ * when it tries to page in a VM_LOCKED region. As to VM_SHARED,
++ * we want to get the page from the page tables to make sure
++ * that we serialize and update with any other user of that
++ * mapping.
++ */
++ if (vma->vm_flags & (VM_LOCKED | VM_SHARED))
++ return 0;
++ /*
++ * And if we have a fault routine, it's not an anonymous region.
++ */
++ return !vma->vm_ops || !vma->vm_ops->fault;
++}
++
++int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
++ unsigned long start, int len, int write, int force,
++ struct page **pages, struct vm_area_struct **vmas)
++{
++ int i;
++ unsigned int vm_flags;
++
++ if (len <= 0)
++ return 0;
++ /*
++ * Require read or write permissions.
++ * If 'force' is set, we only require the "MAY" flags.
++ */
++ vm_flags = write ? (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
++ vm_flags &= force ? (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
++ i = 0;
++
++ do {
++ struct vm_area_struct *vma;
++ unsigned int foll_flags;
++
++ vma = find_extend_vma(mm, start);
++ if (!vma && in_gate_area(tsk, start)) {
++ unsigned long pg = start & PAGE_MASK;
++ struct vm_area_struct *gate_vma = get_gate_vma(tsk);
++ pgd_t *pgd;
++ pud_t *pud;
++ pmd_t *pmd;
++ pte_t *pte;
++ if (write) /* user gate pages are read-only */
++ return i ? : -EFAULT;
++ if (pg > TASK_SIZE)
++ pgd = pgd_offset_k(pg);
++ else
++ pgd = pgd_offset_gate(mm, pg);
++ BUG_ON(pgd_none(*pgd));
++ pud = pud_offset(pgd, pg);
++ BUG_ON(pud_none(*pud));
++ pmd = pmd_offset(pud, pg);
++ if (pmd_none(*pmd))
++ return i ? : -EFAULT;
++ pte = pte_offset_map(pmd, pg);
++ if (pte_none(*pte)) {
++ pte_unmap(pte);
++ return i ? : -EFAULT;
++ }
++ if (pages) {
++ struct page *page = vm_normal_page(gate_vma, start, *pte);
++ pages[i] = page;
++ if (page)
++ get_page(page);
++ }
++ pte_unmap(pte);
++ if (vmas)
++ vmas[i] = gate_vma;
++ i++;
++ start += PAGE_SIZE;
++ len--;
++ continue;
++ }
++
++ if (!vma || (vma->vm_flags & (VM_IO | VM_PFNMAP))
++ || !(vm_flags & vma->vm_flags))
++ return i ? : -EFAULT;
++
++ if (is_vm_hugetlb_page(vma)) {
++ i = follow_hugetlb_page(mm, vma, pages, vmas,
++ &start, &len, i, write);
++ continue;
++ }
++
++ foll_flags = FOLL_TOUCH;
++ if (pages)
++ foll_flags |= FOLL_GET;
++ if (!write && use_zero_page(vma))
++ foll_flags |= FOLL_ANON;
++
++ do {
++ struct page *page;
++
++ /*
++ * If tsk is ooming, cut off its access to large memory
++ * allocations. It has a pending SIGKILL, but it can't
++ * be processed until returning to user space.
++ */
++ if (unlikely(test_tsk_thread_flag(tsk, TIF_MEMDIE)))
++ return i ? i : -ENOMEM;
++
++ if (write)
++ foll_flags |= FOLL_WRITE;
++
++ cond_resched();
++ while (!(page = follow_page(vma, start, foll_flags))) {
++ int ret;
++ ret = handle_mm_fault(mm, vma, start,
++ foll_flags & FOLL_WRITE);
++ if (ret & VM_FAULT_ERROR) {
++ if (ret & VM_FAULT_OOM)
++ return i ? i : -ENOMEM;
++ else if (ret & VM_FAULT_SIGBUS)
++ return i ? i : -EFAULT;
++ BUG();
++ }
++ if (ret & VM_FAULT_MAJOR)
++ tsk->maj_flt++;
++ else
++ tsk->min_flt++;
++
++ /*
++ * The VM_FAULT_WRITE bit tells us that
++ * do_wp_page has broken COW when necessary,
++ * even if maybe_mkwrite decided not to set
++ * pte_write. We can thus safely do subsequent
++ * page lookups as if they were reads.
++ */
++ if (ret & VM_FAULT_WRITE)
++ foll_flags &= ~FOLL_WRITE;
++
++ cond_resched();
++ }
++ if (IS_ERR(page))
++ return i ? i : PTR_ERR(page);
++ if (pages) {
++ pages[i] = page;
++
++ flush_anon_page(vma, page, start);
++ flush_dcache_page(page);
++ }
++ if (vmas)
++ vmas[i] = vma;
++ i++;
++ start += PAGE_SIZE;
++ len--;
++ } while (len && start < vma->vm_end);
++ } while (len);
++ return i;
++}
++EXPORT_SYMBOL(get_user_pages);
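++
++/*
++ * Usage sketch for get_user_pages() (illustrative; "uaddr", "npages" and
++ * "pages" are caller-supplied names, not identifiers used in this file).
++ * The caller must hold mm->mmap_sem around the call, typically for read:
++ *
++ *	down_read(&current->mm->mmap_sem);
++ *	ret = get_user_pages(current, current->mm, uaddr, npages,
++ *			     1, 0, pages, NULL);
++ *	up_read(&current->mm->mmap_sem);
++ *
++ * ret is the number of pages actually pinned (or a negative error if
++ * none were); each pinned pages[i] holds a reference that the caller
++ * must drop with put_page() when done.
++ */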
++
++pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr,
++ spinlock_t **ptl)
++{
++ pgd_t * pgd = pgd_offset(mm, addr);
++ pud_t * pud = pud_alloc(mm, pgd, addr);
++ if (pud) {
++ pmd_t * pmd = pmd_alloc(mm, pud, addr);
++ if (pmd)
++ return pte_alloc_map_lock(mm, pmd, addr, ptl);
++ }
++ return NULL;
++}
++
++/*
++ * This is the old fallback for page remapping.
++ *
++ * For historical reasons, it only allows reserved pages. Only
++ * old drivers should use this, and they needed to mark their
++ * pages reserved for the old functions anyway.
++ */
++static int insert_page(struct vm_area_struct *vma, unsigned long addr,
++ struct page *page, pgprot_t prot)
++{
++ struct mm_struct *mm = vma->vm_mm;
++ int retval;
++ pte_t *pte;
++ spinlock_t *ptl;
++
++ retval = mem_cgroup_charge(page, mm, GFP_KERNEL);
++ if (retval)
++ goto out;
++
++ retval = -EINVAL;
++ if (PageAnon(page))
++ goto out_uncharge;
++ retval = -ENOMEM;
++ flush_dcache_page(page);
++ pte = get_locked_pte(mm, addr, &ptl);
++ if (!pte)
++ goto out_uncharge;
++ retval = -EBUSY;
++ if (!pte_none(*pte))
++ goto out_unlock;
++
++ /* Ok, finally just insert the thing.. */
++ get_page(page);
++ inc_mm_counter(mm, file_rss);
++ page_add_file_rmap(page);
++ set_pte_at(mm, addr, pte, mk_pte(page, prot));
++
++ retval = 0;
++ pte_unmap_unlock(pte, ptl);
++ return retval;
++out_unlock:
++ pte_unmap_unlock(pte, ptl);
++out_uncharge:
++ mem_cgroup_uncharge_page(page);
++out:
++ return retval;
++}
++
++/**
++ * vm_insert_page - insert single page into user vma
++ * @vma: user vma to map to
++ * @addr: target user address of this page
++ * @page: source kernel page
++ *
++ * This allows drivers to insert individual pages they've allocated
++ * into a user vma.
++ *
++ * The page has to be a nice clean _individual_ kernel allocation.
++ * If you allocate a compound page, you need to have marked it as
++ * such (__GFP_COMP), or manually just split the page up yourself
++ * (see split_page()).
++ *
++ * NOTE! Traditionally this was done with "remap_pfn_range()" which
++ * took an arbitrary page protection parameter. This doesn't allow
++ * that. Your vma protection will have to be set up correctly, which
++ * means that if you want a shared writable mapping, you'd better
++ * ask for a shared writable mapping!
++ *
++ * The page does not need to be reserved.
++ */
++int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
++ struct page *page)
++{
++ if (addr < vma->vm_start || addr >= vma->vm_end)
++ return -EFAULT;
++ if (!page_count(page))
++ return -EINVAL;
++ vma->vm_flags |= VM_INSERTPAGE;
++ return insert_page(vma, addr, page, vma->vm_page_prot);
++}
++EXPORT_SYMBOL(vm_insert_page);
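++
++/*
++ * Usage sketch for vm_insert_page() (illustrative; "my_page" is a
++ * hypothetical page the driver allocated itself, e.g. with
++ * alloc_page(GFP_KERNEL)), called from a driver's ->mmap:
++ *
++ *	static int my_mmap(struct file *file, struct vm_area_struct *vma)
++ *	{
++ *		return vm_insert_page(vma, vma->vm_start, my_page);
++ *	}
++ *
++ * Protection comes from vma->vm_page_prot, as noted above.
++ */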
++
++static int insert_pfn(struct vm_area_struct *vma, unsigned long addr,
++ unsigned long pfn, pgprot_t prot)
++{
++ struct mm_struct *mm = vma->vm_mm;
++ int retval;
++ pte_t *pte, entry;
++ spinlock_t *ptl;
++
++ retval = -ENOMEM;
++ pte = get_locked_pte(mm, addr, &ptl);
++ if (!pte)
++ goto out;
++ retval = -EBUSY;
++ if (!pte_none(*pte))
++ goto out_unlock;
++
++ /* Ok, finally just insert the thing.. */
++ entry = pte_mkspecial(pfn_pte(pfn, prot));
++ set_pte_at(mm, addr, pte, entry);
++ update_mmu_cache(vma, addr, entry); /* XXX: why not for insert_page? */
++
++ retval = 0;
++out_unlock:
++ pte_unmap_unlock(pte, ptl);
++out:
++ return retval;
++}
++
++/**
++ * vm_insert_pfn - insert single pfn into user vma
++ * @vma: user vma to map to
++ * @addr: target user address of this page
++ * @pfn: source kernel pfn
++ *
++ * Similar to vm_insert_page, this allows drivers to insert individual pages
++ * they've allocated into a user vma. Same comments apply.
++ *
++ * This function should only be called from a vm_ops->fault handler, and
++ * in that case the handler should return NULL.
++ *
++ * vma cannot be a COW mapping.
++ *
++ * As this is called only for pages that do not currently exist, we
++ * do not need to flush old virtual caches or the TLB.
++ */
++int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
++ unsigned long pfn)
++{
++ /*
++ * Technically, architectures with pte_special can avoid all these
++ * restrictions (same for remap_pfn_range). However we would like
++ * consistency in testing and feature parity among all, so we should
++ * try to keep these invariants in place for everybody.
++ */
++ BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)));
++ BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
++ (VM_PFNMAP|VM_MIXEDMAP));
++ BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
++ BUG_ON((vma->vm_flags & VM_MIXEDMAP) && pfn_valid(pfn));
++
++ if (addr < vma->vm_start || addr >= vma->vm_end)
++ return -EFAULT;
++ return insert_pfn(vma, addr, pfn, vma->vm_page_prot);
++}
++EXPORT_SYMBOL(vm_insert_pfn);
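++
++/*
++ * Usage sketch for vm_insert_pfn() (illustrative; my_fault() and
++ * my_base_pfn are hypothetical driver names). Called from a ->fault
++ * handler on a VM_PFNMAP vma:
++ *
++ *	static int my_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
++ *	{
++ *		unsigned long uaddr = (unsigned long)vmf->virtual_address;
++ *
++ *		if (vm_insert_pfn(vma, uaddr, my_base_pfn + vmf->pgoff))
++ *			return VM_FAULT_SIGBUS;
++ *		return VM_FAULT_NOPAGE;
++ *	}
++ */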
++
++int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
++ unsigned long pfn)
++{
++ BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
++
++ if (addr < vma->vm_start || addr >= vma->vm_end)
++ return -EFAULT;
++
++ /*
++ * If we don't have pte special, then we have to use the pfn_valid()
++ * based VM_MIXEDMAP scheme (see vm_normal_page), and thus we *must*
++ * refcount the page if pfn_valid is true (hence insert_page rather
++ * than insert_pfn).
++ */
++ if (!HAVE_PTE_SPECIAL && pfn_valid(pfn)) {
++ struct page *page;
++
++ page = pfn_to_page(pfn);
++ return insert_page(vma, addr, page, vma->vm_page_prot);
++ }
++ return insert_pfn(vma, addr, pfn, vma->vm_page_prot);
++}
++EXPORT_SYMBOL(vm_insert_mixed);
++
++/*
++ * Maps a range of physical memory into the requested pages. The old
++ * mappings are removed. Any references to nonexistent pages result
++ * in null mappings (currently treated as "copy-on-access").
++ */
++static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd,
++ unsigned long addr, unsigned long end,
++ unsigned long pfn, pgprot_t prot)
++{
++ pte_t *pte;
++ spinlock_t *ptl;
++
++ pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
++ if (!pte)
++ return -ENOMEM;
++ arch_enter_lazy_mmu_mode();
++ do {
++ BUG_ON(!pte_none(*pte));
++ set_pte_at(mm, addr, pte, pte_mkspecial(pfn_pte(pfn, prot)));
++ pfn++;
++ } while (pte++, addr += PAGE_SIZE, addr != end);
++ arch_leave_lazy_mmu_mode();
++ pte_unmap_unlock(pte - 1, ptl);
++ return 0;
++}
++
++static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud,
++ unsigned long addr, unsigned long end,
++ unsigned long pfn, pgprot_t prot)
++{
++ pmd_t *pmd;
++ unsigned long next;
++
++ pfn -= addr >> PAGE_SHIFT;
++ pmd = pmd_alloc(mm, pud, addr);
++ if (!pmd)
++ return -ENOMEM;
++ do {
++ next = pmd_addr_end(addr, end);
++ if (remap_pte_range(mm, pmd, addr, next,
++ pfn + (addr >> PAGE_SHIFT), prot))
++ return -ENOMEM;
++ } while (pmd++, addr = next, addr != end);
++ return 0;
++}
++
++static inline int remap_pud_range(struct mm_struct *mm, pgd_t *pgd,
++ unsigned long addr, unsigned long end,
++ unsigned long pfn, pgprot_t prot)
++{
++ pud_t *pud;
++ unsigned long next;
++
++ pfn -= addr >> PAGE_SHIFT;
++ pud = pud_alloc(mm, pgd, addr);
++ if (!pud)
++ return -ENOMEM;
++ do {
++ next = pud_addr_end(addr, end);
++ if (remap_pmd_range(mm, pud, addr, next,
++ pfn + (addr >> PAGE_SHIFT), prot))
++ return -ENOMEM;
++ } while (pud++, addr = next, addr != end);
++ return 0;
++}
++
++/**
++ * remap_pfn_range - remap kernel memory to userspace
++ * @vma: user vma to map to
++ * @addr: target user address to start at
++ * @pfn: physical address of kernel memory
++ * @size: size of map area
++ * @prot: page protection flags for this mapping
++ *
++ * Note: this is only safe if the mm semaphore is held when called.
++ */
++int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
++ unsigned long pfn, unsigned long size, pgprot_t prot)
++{
++ pgd_t *pgd;
++ unsigned long next;
++ unsigned long end = addr + PAGE_ALIGN(size);
++ struct mm_struct *mm = vma->vm_mm;
++ int err;
++
++ /*
++ * Physically remapped pages are special. Tell the
++ * rest of the world about it:
++ * VM_IO tells people not to look at these pages
++ * (accesses can have side effects).
++ * VM_RESERVED is specified all over the place, because
++ * in 2.4 it kept swapout's vma scan off this vma; but
++ * in 2.6 the LRU scan won't even find its pages, so this
++ * flag means no more than count its pages in reserved_vm,
++ * and omit it from core dump, even when VM_IO turned off.
++ * VM_PFNMAP tells the core MM that the base pages are just
++ * raw PFN mappings, and do not have a "struct page" associated
++ * with them.
++ *
++ * There's a horrible special case to handle copy-on-write
++ * behaviour that some programs depend on. We mark the "original"
++ * un-COW'ed pages by matching them up with "vma->vm_pgoff".
++ */
++ if (is_cow_mapping(vma->vm_flags)) {
++ if (addr != vma->vm_start || end != vma->vm_end)
++ return -EINVAL;
++ vma->vm_pgoff = pfn;
++ }
++
++ vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;
++
++ BUG_ON(addr >= end);
++ pfn -= addr >> PAGE_SHIFT;
++ pgd = pgd_offset(mm, addr);
++ flush_cache_range(vma, addr, end);
++ do {
++ next = pgd_addr_end(addr, end);
++ err = remap_pud_range(mm, pgd, addr, next,
++ pfn + (addr >> PAGE_SHIFT), prot);
++ if (err)
++ break;
++ } while (pgd++, addr = next, addr != end);
++ return err;
++}
++EXPORT_SYMBOL(remap_pfn_range);
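++
++/*
++ * Usage sketch for remap_pfn_range() (illustrative; "my_phys" is a
++ * hypothetical physical base address owned by the driver), mapping a
++ * whole vma from a driver's ->mmap:
++ *
++ *	static int my_mmap(struct file *file, struct vm_area_struct *vma)
++ *	{
++ *		unsigned long size = vma->vm_end - vma->vm_start;
++ *
++ *		return remap_pfn_range(vma, vma->vm_start,
++ *				       my_phys >> PAGE_SHIFT, size,
++ *				       vma->vm_page_prot);
++ *	}
++ */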
++
++static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
++ unsigned long addr, unsigned long end,
++ pte_fn_t fn, void *data)
++{
++ pte_t *pte;
++ int err;
++ pgtable_t token;
++ spinlock_t *uninitialized_var(ptl);
++
++ pte = (mm == &init_mm) ?
++ pte_alloc_kernel(pmd, addr) :
++ pte_alloc_map_lock(mm, pmd, addr, &ptl);
++ if (!pte)
++ return -ENOMEM;
++
++ BUG_ON(pmd_huge(*pmd));
++
++ token = pmd_pgtable(*pmd);
++
++ do {
++ err = fn(pte, token, addr, data);
++ if (err)
++ break;
++ } while (pte++, addr += PAGE_SIZE, addr != end);
++
++ if (mm != &init_mm)
++ pte_unmap_unlock(pte-1, ptl);
++ return err;
++}
++
++static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
++ unsigned long addr, unsigned long end,
++ pte_fn_t fn, void *data)
++{
++ pmd_t *pmd;
++ unsigned long next;
++ int err;
++
++ BUG_ON(pud_huge(*pud));
++
++ pmd = pmd_alloc(mm, pud, addr);
++ if (!pmd)
++ return -ENOMEM;
++ do {
++ next = pmd_addr_end(addr, end);
++ err = apply_to_pte_range(mm, pmd, addr, next, fn, data);
++ if (err)
++ break;
++ } while (pmd++, addr = next, addr != end);
++ return err;
++}
++
++static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
++ unsigned long addr, unsigned long end,
++ pte_fn_t fn, void *data)
++{
++ pud_t *pud;
++ unsigned long next;
++ int err;
++
++ pud = pud_alloc(mm, pgd, addr);
++ if (!pud)
++ return -ENOMEM;
++ do {
++ next = pud_addr_end(addr, end);
++ err = apply_to_pmd_range(mm, pud, addr, next, fn, data);
++ if (err)
++ break;
++ } while (pud++, addr = next, addr != end);
++ return err;
++}
++
++/*
++ * Scan a region of virtual memory, filling in page tables as necessary
++ * and calling a provided function on each leaf page table.
++ */
++int apply_to_page_range(struct mm_struct *mm, unsigned long addr,
++ unsigned long size, pte_fn_t fn, void *data)
++{
++ pgd_t *pgd;
++ unsigned long next;
++ unsigned long start = addr, end = addr + size;
++ int err;
++
++ BUG_ON(addr >= end);
++ mmu_notifier_invalidate_range_start(mm, start, end);
++ pgd = pgd_offset(mm, addr);
++ do {
++ next = pgd_addr_end(addr, end);
++ err = apply_to_pud_range(mm, pgd, addr, next, fn, data);
++ if (err)
++ break;
++ } while (pgd++, addr = next, addr != end);
++ mmu_notifier_invalidate_range_end(mm, start, end);
++ return err;
++}
++EXPORT_SYMBOL_GPL(apply_to_page_range);
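++
++/*
++ * Usage sketch for apply_to_page_range() (illustrative; my_pte_fn(),
++ * struct my_ctx and "ctx" are hypothetical). The callback has the
++ * pte_fn_t signature and runs once for each pte in the range, with the
++ * pte lock held for user mms:
++ *
++ *	static int my_pte_fn(pte_t *pte, pgtable_t token,
++ *			     unsigned long addr, void *data)
++ *	{
++ *		struct my_ctx *ctx = data;
++ *
++ *		ctx->nr_ptes++;
++ *		return 0;
++ *	}
++ *
++ *	err = apply_to_page_range(mm, start, size, my_pte_fn, ctx);
++ *
++ * A non-zero return from the callback stops the walk and is propagated
++ * back to the caller.
++ */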
++
++/*
++ * handle_pte_fault chooses page fault handler according to an entry
++ * which was read non-atomically. Before making any commitment, on
++ * those architectures or configurations (e.g. i386 with PAE) which
++ * might give a mix of unmatched parts, do_swap_page and do_file_page
++ * must check under lock before unmapping the pte and proceeding
++ * (but do_wp_page is only called after already making such a check;
++ * and do_anonymous_page and do_no_page can safely check later on).
++ */
++static inline int pte_unmap_same(struct mm_struct *mm, pmd_t *pmd,
++ pte_t *page_table, pte_t orig_pte)
++{
++ int same = 1;
++#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
++ if (sizeof(pte_t) > sizeof(unsigned long)) {
++ spinlock_t *ptl = pte_lockptr(mm, pmd);
++ spin_lock(ptl);
++ same = pte_same(*page_table, orig_pte);
++ spin_unlock(ptl);
++ }
++#endif
++ pte_unmap(page_table);
++ return same;
++}
++
++/*
++ * Do pte_mkwrite, but only if the vma says VM_WRITE. We do this when
++ * servicing faults for write access. In the normal case we always want
++ * pte_mkwrite. But get_user_pages can cause write faults for mappings
++ * that do not have writing enabled, when used by access_process_vm.
++ */
++static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
++{
++ if (likely(vma->vm_flags & VM_WRITE))
++ pte = pte_mkwrite(pte);
++ return pte;
++}
++
++static inline void cow_user_page(struct page *dst, struct page *src, unsigned long va, struct vm_area_struct *vma)
++{
++ /*
++ * If the source page was a PFN mapping, we don't have
++ * a "struct page" for it. We do a best-effort copy by
++ * just copying from the original user address. If that
++ * fails, we just zero-fill it. Live with it.
++ */
++ if (unlikely(!src)) {
++ void *kaddr = kmap_atomic(dst, KM_USER0);
++ void __user *uaddr = (void __user *)(va & PAGE_MASK);
++
++ /*
++ * This really shouldn't fail, because the page is there
++ * in the page tables. But it might just be unreadable,
++ * in which case we just give up and fill the result with
++ * zeroes.
++ */
++ if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE))
++ memset(kaddr, 0, PAGE_SIZE);
++ kunmap_atomic(kaddr, KM_USER0);
++ flush_dcache_page(dst);
++ } else
++ copy_user_highpage(dst, src, va, vma);
++}
++
++/*
++ * This routine handles present pages, when users try to write
++ * to a shared page. It is done by copying the page to a new address
++ * and decrementing the shared-page counter for the old page.
++ *
++ * Note that this routine assumes that the protection checks have been
++ * done by the caller (the low-level page fault routine in most cases).
++ * Thus we can safely just mark it writable once we've done any necessary
++ * COW.
++ *
++ * We also mark the page dirty at this point even though the page will
++ * change only once the write actually happens. This avoids a few races,
++ * and potentially makes it more efficient.
++ *
++ * We enter with non-exclusive mmap_sem (to exclude vma changes,
++ * but allow concurrent faults), with pte both mapped and locked.
++ * We return with mmap_sem still held, but pte unmapped and unlocked.
++ */
++static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
++ unsigned long address, pte_t *page_table, pmd_t *pmd,
++ spinlock_t *ptl, pte_t orig_pte)
++{
++ struct page *old_page, *new_page;
++ pte_t entry;
++ int reuse = 0, ret = 0;
++ int page_mkwrite = 0;
++ struct page *dirty_page = NULL;
++
++ old_page = vm_normal_page(vma, address, orig_pte);
++ if (!old_page) {
++ /*
++ * VM_MIXEDMAP !pfn_valid() case
++ *
++ * We should not cow pages in a shared writeable mapping.
++ * Just mark the pages writable as we can't do any dirty
++ * accounting on raw pfn maps.
++ */
++ if ((vma->vm_flags & (VM_WRITE|VM_SHARED)) ==
++ (VM_WRITE|VM_SHARED))
++ goto reuse;
++ goto gotten;
++ }
++
++ /*
++ * Take out anonymous pages first, anonymous shared vmas are
++ * not dirty accountable.
++ */
++ if (PageAnon(old_page)) {
++ if (trylock_page(old_page)) {
++ reuse = can_share_swap_page(old_page);
++ unlock_page(old_page);
++ }
++ } else if (unlikely((vma->vm_flags & (VM_WRITE|VM_SHARED)) ==
++ (VM_WRITE|VM_SHARED))) {
++ /*
++ * Only catch write-faults on shared writable pages,
++ * read-only shared pages can get COWed by
++ * get_user_pages(.write=1, .force=1).
++ */
++ if (vma->vm_ops && vma->vm_ops->page_mkwrite) {
++ /*
++ * Notify the address space that the page is about to
++ * become writable so that it can prohibit this or wait
++ * for the page to get into an appropriate state.
++ *
++ * We do this without the lock held, so that it can
++ * sleep if it needs to.
++ */
++ page_cache_get(old_page);
++ pte_unmap_unlock(page_table, ptl);
++
++ if (vma->vm_ops->page_mkwrite(vma, old_page) < 0)
++ goto unwritable_page;
++
++ /*
++ * Since we dropped the lock we need to revalidate
++ * the PTE as someone else may have changed it. If
++ * they did, we just return, as we can count on the
++ * MMU to tell us if they didn't also make it writable.
++ */
++ page_table = pte_offset_map_lock(mm, pmd, address,
++ &ptl);
++ page_cache_release(old_page);
++ if (!pte_same(*page_table, orig_pte))
++ goto unlock;
++
++ page_mkwrite = 1;
++ }
++ dirty_page = old_page;
++ get_page(dirty_page);
++ reuse = 1;
++ }
++
++ if (reuse) {
++reuse:
++ flush_cache_page(vma, address, pte_pfn(orig_pte));
++ entry = pte_mkyoung(orig_pte);
++ entry = maybe_mkwrite(pte_mkdirty(entry), vma);
++ if (ptep_set_access_flags(vma, address, page_table, entry,1))
++ update_mmu_cache(vma, address, entry);
++ ret |= VM_FAULT_WRITE;
++ goto unlock;
++ }
++
++ /*
++ * Ok, we need to copy. Oh, well..
++ */
++ page_cache_get(old_page);
++gotten:
++ pte_unmap_unlock(page_table, ptl);
++
++ if (unlikely(anon_vma_prepare(vma)))
++ goto oom;
++ VM_BUG_ON(old_page == ZERO_PAGE(0));
++ new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
++ if (!new_page)
++ goto oom;
++ cow_user_page(new_page, old_page, address, vma);
++ __SetPageUptodate(new_page);
++
++ if (mem_cgroup_charge(new_page, mm, GFP_KERNEL))
++ goto oom_free_new;
++
++ /*
++ * Re-check the pte - we dropped the lock
++ */
++ page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
++ if (likely(pte_same(*page_table, orig_pte))) {
++ if (old_page) {
++ if (!PageAnon(old_page)) {
++ dec_mm_counter(mm, file_rss);
++ inc_mm_counter(mm, anon_rss);
++ }
++ } else
++ inc_mm_counter(mm, anon_rss);
++ flush_cache_page(vma, address, pte_pfn(orig_pte));
++ entry = mk_pte(new_page, vma->vm_page_prot);
++ entry = maybe_mkwrite(pte_mkdirty(entry), vma);
++ /*
++ * Clear the pte entry and flush it first, before updating the
++ * pte with the new entry. This will avoid a race condition
++ * seen in the presence of one thread doing SMC and another
++ * thread doing COW.
++ */
++ ptep_clear_flush_notify(vma, address, page_table);
++ set_pte_at(mm, address, page_table, entry);
++ update_mmu_cache(vma, address, entry);
++ lru_cache_add_active(new_page);
++ page_add_new_anon_rmap(new_page, vma, address);
++
++ if (old_page) {
++ /*
++ * Only after switching the pte to the new page may
++ * we remove the mapcount here. Otherwise another
++ * process may come and find the rmap count decremented
++ * before the pte is switched to the new page, and
++ * "reuse" the old page writing into it while our pte
++ * here still points into it and can be read by other
++ * threads.
++ *
++ * The critical issue is to order this
++ * page_remove_rmap with the ptep_clear_flush above.
++ * Those stores are ordered by (if nothing else,)
++ * the barrier present in the atomic_add_negative
++ * in page_remove_rmap.
++ *
++ * Then the TLB flush in ptep_clear_flush ensures that
++ * no process can access the old page before the
++ * decremented mapcount is visible. And the old page
++ * cannot be reused until after the decremented
++ * mapcount is visible. So transitively, TLBs to
++ * old page will be flushed before it can be reused.
++ */
++ page_remove_rmap(old_page, vma);
++ }
++
++ /* Free the old page.. */
++ new_page = old_page;
++ ret |= VM_FAULT_WRITE;
++ } else
++ mem_cgroup_uncharge_page(new_page);
++
++ if (new_page)
++ page_cache_release(new_page);
++ if (old_page)
++ page_cache_release(old_page);
++unlock:
++ pte_unmap_unlock(page_table, ptl);
++ if (dirty_page) {
++ if (vma->vm_file)
++ file_update_time(vma->vm_file);
++
++ /*
++ * Yes, Virginia, this is actually required to prevent a race
++ * with clear_page_dirty_for_io() from clearing the page dirty
++ * bit after it clears all dirty ptes, but before a racing
++ * do_wp_page installs a dirty pte.
++ *
++ * do_no_page is protected similarly.
++ */
++ wait_on_page_locked(dirty_page);
++ set_page_dirty_balance(dirty_page, page_mkwrite);
++ put_page(dirty_page);
++ }
++ return ret;
++oom_free_new:
++ page_cache_release(new_page);
++oom:
++ if (old_page)
++ page_cache_release(old_page);
++ return VM_FAULT_OOM;
++
++unwritable_page:
++ page_cache_release(old_page);
++ return VM_FAULT_SIGBUS;
++}
++
++/*
++ * Helper functions for unmap_mapping_range().
++ *
++ * __ Notes on dropping i_mmap_lock to reduce latency while unmapping __
++ *
++ * We have to restart searching the prio_tree whenever we drop the lock,
++ * since the iterator is only valid while the lock is held, and anyway
++ * a later vma might be split and reinserted earlier while lock dropped.
++ *
++ * The list of nonlinear vmas could be handled more efficiently, using
++ * a placeholder, but handle it in the same way until a need is shown.
++ * It is important to search the prio_tree before nonlinear list: a vma
++ * may become nonlinear and be shifted from prio_tree to nonlinear list
++ * while the lock is dropped; but never shifted from list to prio_tree.
++ *
++ * In order to make forward progress despite restarting the search,
++ * vm_truncate_count is used to mark a vma as now dealt with, so we can
++ * quickly skip it next time around. Since the prio_tree search only
++ * shows us those vmas affected by unmapping the range in question, we
++ * can't efficiently keep all vmas in step with mapping->truncate_count:
++ * so instead reset them all whenever it wraps back to 0 (then go to 1).
++ * mapping->truncate_count and vma->vm_truncate_count are protected by
++ * i_mmap_lock.
++ *
++ * In order to make forward progress despite repeatedly restarting some
++ * large vma, note the restart_addr from unmap_vmas when it breaks out:
++ * and restart from that address when we reach that vma again. It might
++ * have been split or merged, shrunk or extended, but never shifted: so
++ * restart_addr remains valid so long as it remains in the vma's range.
++ * unmap_mapping_range forces truncate_count to leap over page-aligned
++ * values so we can save vma's restart_addr in its truncate_count field.
++ */
++#define is_restart_addr(truncate_count) (!((truncate_count) & ~PAGE_MASK))
++
++static void reset_vma_truncate_counts(struct address_space *mapping)
++{
++ struct vm_area_struct *vma;
++ struct prio_tree_iter iter;
++
++ vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, 0, ULONG_MAX)
++ vma->vm_truncate_count = 0;
++ list_for_each_entry(vma, &mapping->i_mmap_nonlinear, shared.vm_set.list)
++ vma->vm_truncate_count = 0;
++}
++
++static int unmap_mapping_range_vma(struct vm_area_struct *vma,
++ unsigned long start_addr, unsigned long end_addr,
++ struct zap_details *details)
++{
++ unsigned long restart_addr;
++ int need_break;
++
++ /*
++ * Files that support invalidating or truncating portions of the
++ * file from under mmapped areas must have their ->fault function
++ * return a locked page (and set VM_FAULT_LOCKED in the return).
++ * This provides synchronisation against concurrent unmapping here.
++ */
++
++again:
++ restart_addr = vma->vm_truncate_count;
++ if (is_restart_addr(restart_addr) && start_addr < restart_addr) {
++ start_addr = restart_addr;
++ if (start_addr >= end_addr) {
++ /* Top of vma has been split off since last time */
++ vma->vm_truncate_count = details->truncate_count;
++ return 0;
++ }
++ }
++
++ restart_addr = zap_page_range(vma, start_addr,
++ end_addr - start_addr, details);
++ need_break = need_resched() || spin_needbreak(details->i_mmap_lock);
++
++ if (restart_addr >= end_addr) {
++ /* We have now completed this vma: mark it so */
++ vma->vm_truncate_count = details->truncate_count;
++ if (!need_break)
++ return 0;
++ } else {
++ /* Note restart_addr in vma's truncate_count field */
++ vma->vm_truncate_count = restart_addr;
++ if (!need_break)
++ goto again;
++ }
++
++ spin_unlock(details->i_mmap_lock);
++ cond_resched();
++ spin_lock(details->i_mmap_lock);
++ return -EINTR;
++}
++
++static inline void unmap_mapping_range_tree(struct prio_tree_root *root,
++ struct zap_details *details)
++{
++ struct vm_area_struct *vma;
++ struct prio_tree_iter iter;
++ pgoff_t vba, vea, zba, zea;
++
++restart:
++ vma_prio_tree_foreach(vma, &iter, root,
++ details->first_index, details->last_index) {
++ /* Skip quickly over those we have already dealt with */
++ if (vma->vm_truncate_count == details->truncate_count)
++ continue;
++
++ vba = vma->vm_pgoff;
++ vea = vba + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) - 1;
++ /* Assume for now that PAGE_CACHE_SHIFT == PAGE_SHIFT */
++ zba = details->first_index;
++ if (zba < vba)
++ zba = vba;
++ zea = details->last_index;
++ if (zea > vea)
++ zea = vea;
++
++ if (unmap_mapping_range_vma(vma,
++ ((zba - vba) << PAGE_SHIFT) + vma->vm_start,
++ ((zea - vba + 1) << PAGE_SHIFT) + vma->vm_start,
++ details) < 0)
++ goto restart;
++ }
++}
++
++static inline void unmap_mapping_range_list(struct list_head *head,
++ struct zap_details *details)
++{
++ struct vm_area_struct *vma;
++
++ /*
++ * In nonlinear VMAs there is no correspondence between virtual address
++ * offset and file offset. So we must perform an exhaustive search
++ * across *all* the pages in each nonlinear VMA, not just the pages
++ * whose virtual address lies outside the file truncation point.
++ */
++restart:
++ list_for_each_entry(vma, head, shared.vm_set.list) {
++ /* Skip quickly over those we have already dealt with */
++ if (vma->vm_truncate_count == details->truncate_count)
++ continue;
++ details->nonlinear_vma = vma;
++ if (unmap_mapping_range_vma(vma, vma->vm_start,
++ vma->vm_end, details) < 0)
++ goto restart;
++ }
++}
++
++/**
++ * unmap_mapping_range - unmap the portion of all mmaps in the specified address_space corresponding to the specified page range in the underlying file.
++ * @mapping: the address space containing mmaps to be unmapped.
++ * @holebegin: byte in first page to unmap, relative to the start of
++ * the underlying file. This will be rounded down to a PAGE_SIZE
++ * boundary. Note that this is different from vmtruncate(), which
++ * must keep the partial page. In contrast, we must get rid of
++ * partial pages.
++ * @holelen: size of prospective hole in bytes. This will be rounded
++ * up to a PAGE_SIZE boundary. A holelen of zero truncates to the
++ * end of the file.
++ * @even_cows: 1 when truncating a file, unmap even private COWed pages;
++ * but 0 when invalidating pagecache, don't throw away private data.
++ */
++void unmap_mapping_range(struct address_space *mapping,
++ loff_t const holebegin, loff_t const holelen, int even_cows)
++{
++ struct zap_details details;
++ pgoff_t hba = holebegin >> PAGE_SHIFT;
++ pgoff_t hlen = (holelen + PAGE_SIZE - 1) >> PAGE_SHIFT;
++
++ /* Check for overflow. */
++ if (sizeof(holelen) > sizeof(hlen)) {
++ long long holeend =
++ (holebegin + holelen + PAGE_SIZE - 1) >> PAGE_SHIFT;
++ if (holeend & ~(long long)ULONG_MAX)
++ hlen = ULONG_MAX - hba + 1;
++ }
++
++ details.check_mapping = even_cows? NULL: mapping;
++ details.nonlinear_vma = NULL;
++ details.first_index = hba;
++ details.last_index = hba + hlen - 1;
++ if (details.last_index < details.first_index)
++ details.last_index = ULONG_MAX;
++ details.i_mmap_lock = &mapping->i_mmap_lock;
++
++ spin_lock(&mapping->i_mmap_lock);
++
++ /* Protect against endless unmapping loops */
++ mapping->truncate_count++;
++ if (unlikely(is_restart_addr(mapping->truncate_count))) {
++ if (mapping->truncate_count == 0)
++ reset_vma_truncate_counts(mapping);
++ mapping->truncate_count++;
++ }
++ details.truncate_count = mapping->truncate_count;
++
++ if (unlikely(!prio_tree_empty(&mapping->i_mmap)))
++ unmap_mapping_range_tree(&mapping->i_mmap, &details);
++ if (unlikely(!list_empty(&mapping->i_mmap_nonlinear)))
++ unmap_mapping_range_list(&mapping->i_mmap_nonlinear, &details);
++ spin_unlock(&mapping->i_mmap_lock);
++}
++EXPORT_SYMBOL(unmap_mapping_range);
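++
++/*
++ * Usage sketch for unmap_mapping_range() (illustrative): a filesystem
++ * invalidating "len" bytes of pagecache starting at byte offset "off",
++ * while leaving private COWed copies alone, would use even_cows == 0:
++ *
++ *	unmap_mapping_range(inode->i_mapping, off, len, 0);
++ *
++ * vmtruncate() below is the even_cows == 1 case used for truncation.
++ */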
++
++/**
++ * vmtruncate - unmap mappings "freed" by truncate() syscall
++ * @inode: inode of the file used
++ * @offset: file offset to start truncating
++ *
++ * NOTE! We have to be ready to update the memory sharing
++ * between the file and the memory map for a potential last
++ * incomplete page. Ugly, but necessary.
++ */
++int vmtruncate(struct inode * inode, loff_t offset)
++{
++ if (inode->i_size < offset) {
++ unsigned long limit;
++
++ limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
++ if (limit != RLIM_INFINITY && offset > limit)
++ goto out_sig;
++ if (offset > inode->i_sb->s_maxbytes)
++ goto out_big;
++ i_size_write(inode, offset);
++ } else {
++ struct address_space *mapping = inode->i_mapping;
++
++ /*
++ * truncation of in-use swapfiles is disallowed - it would
++ * cause subsequent swapout to scribble on the now-freed
++ * blocks.
++ */
++ if (IS_SWAPFILE(inode))
++ return -ETXTBSY;
++ i_size_write(inode, offset);
++
++ /*
++ * unmap_mapping_range is called twice, first simply for
++ * efficiency so that truncate_inode_pages does fewer
++ * single-page unmaps. However after this first call, and
++ * before truncate_inode_pages finishes, it is possible for
++ * private pages to be COWed, which remain after
++ * truncate_inode_pages finishes, hence the second
++ * unmap_mapping_range call must be made for correctness.
++ */
++ unmap_mapping_range(mapping, offset + PAGE_SIZE - 1, 0, 1);
++ truncate_inode_pages(mapping, offset);
++ unmap_mapping_range(mapping, offset + PAGE_SIZE - 1, 0, 1);
++ }
++
++ if (inode->i_op && inode->i_op->truncate)
++ inode->i_op->truncate(inode);
++ return 0;
++
++out_sig:
++ send_sig(SIGXFSZ, current, 0);
++out_big:
++ return -EFBIG;
++}
++EXPORT_SYMBOL(vmtruncate);
++
++int vmtruncate_range(struct inode *inode, loff_t offset, loff_t end)
++{
++ struct address_space *mapping = inode->i_mapping;
++
++ /*
++ * If the underlying filesystem is not going to provide
++ * a way to truncate a range of blocks (punch a hole) -
++ * we should return failure right now.
++ */
++ if (!inode->i_op || !inode->i_op->truncate_range)
++ return -ENOSYS;
++
++ mutex_lock(&inode->i_mutex);
++ down_write(&inode->i_alloc_sem);
++ unmap_mapping_range(mapping, offset, (end - offset), 1);
++ truncate_inode_pages_range(mapping, offset, end);
++ unmap_mapping_range(mapping, offset, (end - offset), 1);
++ inode->i_op->truncate_range(inode, offset, end);
++ up_write(&inode->i_alloc_sem);
++ mutex_unlock(&inode->i_mutex);
++
++ return 0;
++}
++
++/*
++ * We enter with non-exclusive mmap_sem (to exclude vma changes,
++ * but allow concurrent faults), and pte mapped but not yet locked.
++ * We return with mmap_sem still held, but pte unmapped and unlocked.
++ */
++static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
++ unsigned long address, pte_t *page_table, pmd_t *pmd,
++ int write_access, pte_t orig_pte)
++{
++ spinlock_t *ptl;
++ struct page *page;
++ swp_entry_t entry;
++ pte_t pte;
++ int ret = 0;
++
++ if (!pte_unmap_same(mm, pmd, page_table, orig_pte))
++ goto out;
++
++ entry = pte_to_swp_entry(orig_pte);
++ if (is_migration_entry(entry)) {
++ migration_entry_wait(mm, pmd, address);
++ goto out;
++ }
++ delayacct_set_flag(DELAYACCT_PF_SWAPIN);
++ page = lookup_swap_cache(entry);
++ if (!page) {
++ grab_swap_token(); /* Contend for token _before_ read-in */
++ page = swapin_readahead(entry,
++ GFP_HIGHUSER_MOVABLE, vma, address);
++ if (!page) {
++ /*
++ * Back out if somebody else faulted in this pte
++ * while we released the pte lock.
++ */
++ page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
++ if (likely(pte_same(*page_table, orig_pte)))
++ ret = VM_FAULT_OOM;
++ delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
++ goto unlock;
++ }
++
++ /* Had to read the page from swap area: Major fault */
++ ret = VM_FAULT_MAJOR;
++ count_vm_event(PGMAJFAULT);
++ }
++
++ if (mem_cgroup_charge(page, mm, GFP_KERNEL)) {
++ delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
++ ret = VM_FAULT_OOM;
++ goto out;
++ }
++
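++ /* Linux-VServer: bail out if the context has no RSS headroom for one more page */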
++ if (!vx_rss_avail(mm, 1)) {
++ ret = VM_FAULT_OOM;
++ goto out;
++ }
++
++ mark_page_accessed(page);
++ lock_page(page);
++ delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
++
++ /*
++ * Back out if somebody else already faulted in this pte.
++ */
++ page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
++ if (unlikely(!pte_same(*page_table, orig_pte)))
++ goto out_nomap;
++
++ if (unlikely(!PageUptodate(page))) {
++ ret = VM_FAULT_SIGBUS;
++ goto out_nomap;
++ }
++
++ /* The page isn't present yet, go ahead with the fault. */
++
++ inc_mm_counter(mm, anon_rss);
++ pte = mk_pte(page, vma->vm_page_prot);
++ if (write_access && can_share_swap_page(page)) {
++ pte = maybe_mkwrite(pte_mkdirty(pte), vma);
++ write_access = 0;
++ }
++
++ flush_icache_page(vma, page);
++ set_pte_at(mm, address, page_table, pte);
++ page_add_anon_rmap(page, vma, address);
++
++ swap_free(entry);
++ if (vm_swap_full())
++ remove_exclusive_swap_page(page);
++ unlock_page(page);
++
++ if (write_access) {
++ ret |= do_wp_page(mm, vma, address, page_table, pmd, ptl, pte);
++ if (ret & VM_FAULT_ERROR)
++ ret &= VM_FAULT_ERROR;
++ goto out;
++ }
++
++ /* No need to invalidate - it was non-present before */
++ update_mmu_cache(vma, address, pte);
++unlock:
++ pte_unmap_unlock(page_table, ptl);
++out:
++ return ret;
++out_nomap:
++ mem_cgroup_uncharge_page(page);
++ pte_unmap_unlock(page_table, ptl);
++ unlock_page(page);
++ page_cache_release(page);
++ return ret;
++}
++
++/*
++ * We enter with non-exclusive mmap_sem (to exclude vma changes,
++ * but allow concurrent faults), and pte mapped but not yet locked.
++ * We return with mmap_sem still held, but pte unmapped and unlocked.
++ */
++static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
++ unsigned long address, pte_t *page_table, pmd_t *pmd,
++ int write_access)
++{
++ struct page *page;
++ spinlock_t *ptl;
++ pte_t entry;
++
++ /* Allocate our own private page. */
++ pte_unmap(page_table);
++
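++ /* Linux-VServer: refuse the new anonymous page if the context's RSS limit is exhausted */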
++ if (!vx_rss_avail(mm, 1))
++ goto oom;
++ if (unlikely(anon_vma_prepare(vma)))
++ goto oom;
++ page = alloc_zeroed_user_highpage_movable(vma, address);
++ if (!page)
++ goto oom;
++ __SetPageUptodate(page);
++
++ if (mem_cgroup_charge(page, mm, GFP_KERNEL))
++ goto oom_free_page;
++
++ entry = mk_pte(page, vma->vm_page_prot);
++ entry = maybe_mkwrite(pte_mkdirty(entry), vma);
++
++ page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
++ if (!pte_none(*page_table))
++ goto release;
++ inc_mm_counter(mm, anon_rss);
++ lru_cache_add_active(page);
++ page_add_new_anon_rmap(page, vma, address);
++ set_pte_at(mm, address, page_table, entry);
++
++ /* No need to invalidate - it was non-present before */
++ update_mmu_cache(vma, address, entry);
++unlock:
++ pte_unmap_unlock(page_table, ptl);
++ return 0;
++release:
++ mem_cgroup_uncharge_page(page);
++ page_cache_release(page);
++ goto unlock;
++oom_free_page:
++ page_cache_release(page);
++oom:
++ return VM_FAULT_OOM;
++}
++
++/*
++ * __do_fault() tries to create a new page mapping. It aggressively
++ * tries to share with existing pages, but makes a separate copy if
++ * the FAULT_FLAG_WRITE is set in the flags parameter in order to avoid
++ * the next page fault.
++ *
++ * As this is called only for pages that do not currently exist, we
++ * do not need to flush old virtual caches or the TLB.
++ *
++ * We enter with non-exclusive mmap_sem (to exclude vma changes,
++ * but allow concurrent faults), and pte neither mapped nor locked.
++ * We return with mmap_sem still held, but pte unmapped and unlocked.
++ */
++static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
++ unsigned long address, pmd_t *pmd,
++ pgoff_t pgoff, unsigned int flags, pte_t orig_pte)
++{
++ pte_t *page_table;
++ spinlock_t *ptl;
++ struct page *page;
++ pte_t entry;
++ int anon = 0;
++ struct page *dirty_page = NULL;
++ struct vm_fault vmf;
++ int ret;
++ int page_mkwrite = 0;
++
++ vmf.virtual_address = (void __user *)(address & PAGE_MASK);
++ vmf.pgoff = pgoff;
++ vmf.flags = flags;
++ vmf.page = NULL;
++
++ ret = vma->vm_ops->fault(vma, &vmf);
++ if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))
++ return ret;
++
++ /*
++ * For consistency in subsequent calls, make the faulted page always
++ * locked.
++ */
++ if (unlikely(!(ret & VM_FAULT_LOCKED)))
++ lock_page(vmf.page);
++ else
++ VM_BUG_ON(!PageLocked(vmf.page));
++
++ /*
++ * Should we do an early C-O-W break?
++ */
++ page = vmf.page;
++ if (flags & FAULT_FLAG_WRITE) {
++ if (!(vma->vm_flags & VM_SHARED)) {
++ anon = 1;
++ if (unlikely(anon_vma_prepare(vma))) {
++ ret = VM_FAULT_OOM;
++ goto out;
++ }
++ page = alloc_page_vma(GFP_HIGHUSER_MOVABLE,
++ vma, address);
++ if (!page) {
++ ret = VM_FAULT_OOM;
++ goto out;
++ }
++ copy_user_highpage(page, vmf.page, address, vma);
++ __SetPageUptodate(page);
++ } else {
++ /*
++ * If the page will be shareable, see if the backing
++ * address space wants to know that the page is about
++ * to become writable
++ */
++ if (vma->vm_ops->page_mkwrite) {
++ unlock_page(page);
++ if (vma->vm_ops->page_mkwrite(vma, page) < 0) {
++ ret = VM_FAULT_SIGBUS;
++ anon = 1; /* no anon but release vmf.page */
++ goto out_unlocked;
++ }
++ lock_page(page);
++ /*
++ * XXX: this is not quite right (racy vs
++ * invalidate) to unlock and relock the page
++ * like this, however a better fix requires
++ * reworking page_mkwrite locking API, which
++ * is better done later.
++ */
++ if (!page->mapping) {
++ ret = 0;
++ anon = 1; /* no anon but release vmf.page */
++ goto out;
++ }
++ page_mkwrite = 1;
++ }
++ }
++
++ }
++
++ if (mem_cgroup_charge(page, mm, GFP_KERNEL)) {
++ ret = VM_FAULT_OOM;
++ goto out;
++ }
++
++ page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
++
++ /*
++ * This silly early PAGE_DIRTY setting removes a race
++ * due to the bad i386 page protection. But it's valid
++ * for other architectures too.
++ *
++ * Note that if write_access is true, we either now have
++ * an exclusive copy of the page, or this is a shared mapping,
++ * so we can make it writable and dirty to avoid having to
++ * handle that later.
++ */
++ /* Only go through if we didn't race with anybody else... */
++ if (likely(pte_same(*page_table, orig_pte))) {
++ flush_icache_page(vma, page);
++ entry = mk_pte(page, vma->vm_page_prot);
++ if (flags & FAULT_FLAG_WRITE)
++ entry = maybe_mkwrite(pte_mkdirty(entry), vma);
++ set_pte_at(mm, address, page_table, entry);
++ if (anon) {
++ inc_mm_counter(mm, anon_rss);
++ lru_cache_add_active(page);
++ page_add_new_anon_rmap(page, vma, address);
++ } else {
++ inc_mm_counter(mm, file_rss);
++ page_add_file_rmap(page);
++ if (flags & FAULT_FLAG_WRITE) {
++ dirty_page = page;
++ get_page(dirty_page);
++ }
++ }
++
++ /* no need to invalidate: a not-present page won't be cached */
++ update_mmu_cache(vma, address, entry);
++ } else {
++ mem_cgroup_uncharge_page(page);
++ if (anon)
++ page_cache_release(page);
++ else
++ anon = 1; /* no anon but release faulted_page */
++ }
++
++ pte_unmap_unlock(page_table, ptl);
++
++out:
++ unlock_page(vmf.page);
++out_unlocked:
++ if (anon)
++ page_cache_release(vmf.page);
++ else if (dirty_page) {
++ if (vma->vm_file)
++ file_update_time(vma->vm_file);
++
++ set_page_dirty_balance(dirty_page, page_mkwrite);
++ put_page(dirty_page);
++ }
++
++ return ret;
++}
++
++static int do_linear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
++ unsigned long address, pte_t *page_table, pmd_t *pmd,
++ int write_access, pte_t orig_pte)
++{
++ pgoff_t pgoff = (((address & PAGE_MASK)
++ - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
++ unsigned int flags = (write_access ? FAULT_FLAG_WRITE : 0);
++
++ pte_unmap(page_table);
++ return __do_fault(mm, vma, address, pmd, pgoff, flags, orig_pte);
++}
++
++/*
++ * Fault of a previously existing named mapping. Repopulate the pte
++ * from the encoded file_pte if possible. This enables swappable
++ * nonlinear vmas.
++ *
++ * We enter with non-exclusive mmap_sem (to exclude vma changes,
++ * but allow concurrent faults), and pte mapped but not yet locked.
++ * We return with mmap_sem still held, but pte unmapped and unlocked.
++ */
++static int do_nonlinear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
++ unsigned long address, pte_t *page_table, pmd_t *pmd,
++ int write_access, pte_t orig_pte)
++{
++ unsigned int flags = FAULT_FLAG_NONLINEAR |
++ (write_access ? FAULT_FLAG_WRITE : 0);
++ pgoff_t pgoff;
++
++ if (!pte_unmap_same(mm, pmd, page_table, orig_pte))
++ return 0;
++
++ if (unlikely(!(vma->vm_flags & VM_NONLINEAR) ||
++ !(vma->vm_flags & VM_CAN_NONLINEAR))) {
++ /*
++ * Page table corrupted: show pte and kill process.
++ */
++ print_bad_pte(vma, orig_pte, address);
++ return VM_FAULT_OOM;
++ }
++
++ pgoff = pte_to_pgoff(orig_pte);
++ return __do_fault(mm, vma, address, pmd, pgoff, flags, orig_pte);
++}
++
++/*
++ * These routines also need to handle stuff like marking pages dirty
++ * and/or accessed for architectures that don't do it in hardware (most
++ * RISC architectures). The early dirtying is also good on the i386.
++ *
++ * There is also a hook called "update_mmu_cache()" that architectures
++ * with external mmu caches can use to update those (ie the Sparc or
++ * PowerPC hashed page tables that act as extended TLBs).
++ *
++ * We enter with non-exclusive mmap_sem (to exclude vma changes,
++ * but allow concurrent faults), and pte mapped but not yet locked.
++ * We return with mmap_sem still held, but pte unmapped and unlocked.
++ */
++static inline int handle_pte_fault(struct mm_struct *mm,
++ struct vm_area_struct *vma, unsigned long address,
++ pte_t *pte, pmd_t *pmd, int write_access)
++{
++ pte_t entry;
++ spinlock_t *ptl;
++ int ret = 0, type = VXPT_UNKNOWN;
++
++ entry = *pte;
++ if (!pte_present(entry)) {
++ if (pte_none(entry)) {
++ if (vma->vm_ops) {
++ if (likely(vma->vm_ops->fault))
++ return do_linear_fault(mm, vma, address,
++ pte, pmd, write_access, entry);
++ }
++ return do_anonymous_page(mm, vma, address,
++ pte, pmd, write_access);
++ }
++ if (pte_file(entry))
++ return do_nonlinear_fault(mm, vma, address,
++ pte, pmd, write_access, entry);
++ return do_swap_page(mm, vma, address,
++ pte, pmd, write_access, entry);
++ }
++
++ ptl = pte_lockptr(mm, pmd);
++ spin_lock(ptl);
++ if (unlikely(!pte_same(*pte, entry)))
++ goto unlock;
++ if (write_access) {
++ if (!pte_write(entry)) {
++ ret = do_wp_page(mm, vma, address,
++ pte, pmd, ptl, entry);
++ type = VXPT_WRITE;
++ goto out;
++ }
++ entry = pte_mkdirty(entry);
++ }
++ entry = pte_mkyoung(entry);
++ if (ptep_set_access_flags(vma, address, pte, entry, write_access)) {
++ update_mmu_cache(vma, address, entry);
++ } else {
++ /*
++ * This is needed only for protection faults but the arch code
++ * is not yet telling us if this is a protection fault or not.
++ * This still avoids useless tlb flushes for .text page faults
++ * with threads.
++ */
++ if (write_access)
++ flush_tlb_page(vma, address);
++ }
++unlock:
++ pte_unmap_unlock(pte, ptl);
++ ret = 0;
++out:
++ vx_page_fault(mm, vma, type, ret);
++ return ret;
++}
++
++/*
++ * By the time we get here, we already hold the mm semaphore
++ */
++int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
++ unsigned long address, int write_access)
++{
++ pgd_t *pgd;
++ pud_t *pud;
++ pmd_t *pmd;
++ pte_t *pte;
++
++ __set_current_state(TASK_RUNNING);
++
++ count_vm_event(PGFAULT);
++
++ if (unlikely(is_vm_hugetlb_page(vma)))
++ return hugetlb_fault(mm, vma, address, write_access);
++
++ pgd = pgd_offset(mm, address);
++ pud = pud_alloc(mm, pgd, address);
++ if (!pud)
++ return VM_FAULT_OOM;
++ pmd = pmd_alloc(mm, pud, address);
++ if (!pmd)
++ return VM_FAULT_OOM;
++ pte = pte_alloc_map(mm, pmd, address);
++ if (!pte)
++ return VM_FAULT_OOM;
++
++ return handle_pte_fault(mm, vma, address, pte, pmd, write_access);
++}
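For context, handle_mm_fault() is the function the per-architecture page-fault handlers call once the faulting VMA has been located. A condensed sketch of that caller side, loosely modelled on the x86 do_page_fault() path (stack expansion, signal delivery and accounting omitted):

	#include <linux/mm.h>
	#include <linux/sched.h>

	static void demand_fault_sketch(unsigned long address, int write)
	{
		struct mm_struct *mm = current->mm;
		struct vm_area_struct *vma;
		int fault;

		down_read(&mm->mmap_sem);
		vma = find_vma(mm, address);
		if (!vma || vma->vm_start > address) {
			up_read(&mm->mmap_sem);	/* real code would try expand_stack() or raise SIGSEGV */
			return;
		}

		fault = handle_mm_fault(mm, vma, address, write);
		up_read(&mm->mmap_sem);

		if (fault & VM_FAULT_OOM) {
			/* real code would fall back to the OOM killer here */
		}
	}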
++
++#ifndef __PAGETABLE_PUD_FOLDED
++/*
++ * Allocate page upper directory.
++ * We've already handled the fast-path in-line.
++ */
++int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
++{
++ pud_t *new = pud_alloc_one(mm, address);
++ if (!new)
++ return -ENOMEM;
++
++ smp_wmb(); /* See comment in __pte_alloc */
++
++ spin_lock(&mm->page_table_lock);
++ if (pgd_present(*pgd)) /* Another has populated it */
++ pud_free(mm, new);
++ else
++ pgd_populate(mm, pgd, new);
++ spin_unlock(&mm->page_table_lock);
++ return 0;
++}
++#endif /* __PAGETABLE_PUD_FOLDED */
++
++#ifndef __PAGETABLE_PMD_FOLDED
++/*
++ * Allocate page middle directory.
++ * We've already handled the fast-path in-line.
++ */
++int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
++{
++ pmd_t *new = pmd_alloc_one(mm, address);
++ if (!new)
++ return -ENOMEM;
++
++ smp_wmb(); /* See comment in __pte_alloc */
++
++ spin_lock(&mm->page_table_lock);
++#ifndef __ARCH_HAS_4LEVEL_HACK
++ if (pud_present(*pud)) /* Another has populated it */
++ pmd_free(mm, new);
++ else
++ pud_populate(mm, pud, new);
++#else
++ if (pgd_present(*pud)) /* Another has populated it */
++ pmd_free(mm, new);
++ else
++ pgd_populate(mm, pud, new);
++#endif /* __ARCH_HAS_4LEVEL_HACK */
++ spin_unlock(&mm->page_table_lock);
++ return 0;
++}
++#endif /* __PAGETABLE_PMD_FOLDED */
++
++int make_pages_present(unsigned long addr, unsigned long end)
++{
++ int ret, len, write;
++ struct vm_area_struct * vma;
++
++ vma = find_vma(current->mm, addr);
++ if (!vma)
++ return -ENOMEM;
++ write = (vma->vm_flags & VM_WRITE) != 0;
++ BUG_ON(addr >= end);
++ BUG_ON(end > vma->vm_end);
++ len = DIV_ROUND_UP(end, PAGE_SIZE) - addr/PAGE_SIZE;
++ ret = get_user_pages(current, current->mm, addr,
++ len, write, 0, NULL, NULL);
++ if (ret < 0) {
++ /*
++ SUS requires strange return values for mlock:
++ - an invalid addr generates ENOMEM.
++ - out of memory should generate EAGAIN.
++ */
++ if (ret == -EFAULT)
++ ret = -ENOMEM;
++ else if (ret == -ENOMEM)
++ ret = -EAGAIN;
++ return ret;
++ }
++ return ret == len ? 0 : -ENOMEM;
++}
++
++#if !defined(__HAVE_ARCH_GATE_AREA)
++
++#if defined(AT_SYSINFO_EHDR)
++static struct vm_area_struct gate_vma;
++
++static int __init gate_vma_init(void)
++{
++ gate_vma.vm_mm = NULL;
++ gate_vma.vm_start = FIXADDR_USER_START;
++ gate_vma.vm_end = FIXADDR_USER_END;
++ gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
++ gate_vma.vm_page_prot = __P101;
++ /*
++ * Make sure the vDSO gets into every core dump.
++ * Dumping its contents makes post-mortem fully interpretable later
++ * without matching up the same kernel and hardware config to see
++ * what PC values meant.
++ */
++ gate_vma.vm_flags |= VM_ALWAYSDUMP;
++ return 0;
++}
++__initcall(gate_vma_init);
++#endif
++
++struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
++{
++#ifdef AT_SYSINFO_EHDR
++ return &gate_vma;
++#else
++ return NULL;
++#endif
++}
++
++int in_gate_area_no_task(unsigned long addr)
++{
++#ifdef AT_SYSINFO_EHDR
++ if ((addr >= FIXADDR_USER_START) && (addr < FIXADDR_USER_END))
++ return 1;
++#endif
++ return 0;
++}
++
++#endif /* __HAVE_ARCH_GATE_AREA */
++
++#ifdef CONFIG_HAVE_IOREMAP_PROT
++static resource_size_t follow_phys(struct vm_area_struct *vma,
++ unsigned long address, unsigned int flags,
++ unsigned long *prot)
++{
++ pgd_t *pgd;
++ pud_t *pud;
++ pmd_t *pmd;
++ pte_t *ptep, pte;
++ spinlock_t *ptl;
++ resource_size_t phys_addr = 0;
++ struct mm_struct *mm = vma->vm_mm;
++
++ VM_BUG_ON(!(vma->vm_flags & (VM_IO | VM_PFNMAP)));
++
++ pgd = pgd_offset(mm, address);
++ if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
++ goto no_page_table;
++
++ pud = pud_offset(pgd, address);
++ if (pud_none(*pud) || unlikely(pud_bad(*pud)))
++ goto no_page_table;
++
++ pmd = pmd_offset(pud, address);
++ if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
++ goto no_page_table;
++
++ /* We cannot handle huge page PFN maps. Luckily they don't exist. */
++ if (pmd_huge(*pmd))
++ goto no_page_table;
++
++ ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
++ if (!ptep)
++ goto out;
++
++ pte = *ptep;
++ if (!pte_present(pte))
++ goto unlock;
++ if ((flags & FOLL_WRITE) && !pte_write(pte))
++ goto unlock;
++ phys_addr = pte_pfn(pte);
++ phys_addr <<= PAGE_SHIFT; /* Shift here to avoid overflow on PAE */
++
++ *prot = pgprot_val(pte_pgprot(pte));
++
++unlock:
++ pte_unmap_unlock(ptep, ptl);
++out:
++ return phys_addr;
++no_page_table:
++ return 0;
++}
++
++int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
++ void *buf, int len, int write)
++{
++ resource_size_t phys_addr;
++ unsigned long prot = 0;
++ void *maddr;
++ int offset = addr & (PAGE_SIZE-1);
++
++ if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
++ return -EINVAL;
++
++ phys_addr = follow_phys(vma, addr, write, &prot);
++
++ if (!phys_addr)
++ return -EINVAL;
++
++ maddr = ioremap_prot(phys_addr, PAGE_SIZE, prot);
++ if (write)
++ memcpy_toio(maddr + offset, buf, len);
++ else
++ memcpy_fromio(buf, maddr + offset, len);
++ iounmap(maddr);
++
++ return len;
++}
++#endif
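generic_access_phys() only takes effect once a driver publishes it through its vm_operations_struct. A minimal sketch of how a VM_IO/VM_PFNMAP mapping might wire it up so that access_process_vm() below (and therefore ptrace and /proc/<pid>/mem) can reach the device registers; mydrv_mmap and its ops table are illustrative names, not part of this patch:

	#include <linux/mm.h>
	#include <linux/fs.h>

	static struct vm_operations_struct mydrv_vm_ops = {
	#ifdef CONFIG_HAVE_IOREMAP_PROT
		.access = generic_access_phys,
	#endif
	};

	static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
	{
		size_t size = vma->vm_end - vma->vm_start;

		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
		vma->vm_ops = &mydrv_vm_ops;

		/* remap_pfn_range() marks the VMA VM_IO | VM_PFNMAP, which is the
		 * precondition follow_phys()/generic_access_phys() check above. */
		return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
				       size, vma->vm_page_prot);
	}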
++
++/*
++ * Access another process' address space.
++ * Source/target buffer must be in kernel space.
++ * Do not walk the page tables directly; use get_user_pages().
++ */
++int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
++{
++ struct mm_struct *mm;
++ struct vm_area_struct *vma;
++ void *old_buf = buf;
++
++ mm = get_task_mm(tsk);
++ if (!mm)
++ return 0;
++
++ down_read(&mm->mmap_sem);
++ /* ignore errors, just check how much was successfully transferred */
++ while (len) {
++ int bytes, ret, offset;
++ void *maddr;
++ struct page *page = NULL;
++
++ ret = get_user_pages(tsk, mm, addr, 1,
++ write, 1, &page, &vma);
++ if (ret <= 0) {
++ /*
++ * Check if this is a VM_IO | VM_PFNMAP VMA, which
++ * we can access using slightly different code.
++ */
++#ifdef CONFIG_HAVE_IOREMAP_PROT
++ vma = find_vma(mm, addr);
++ if (!vma)
++ break;
++ if (vma->vm_ops && vma->vm_ops->access)
++ ret = vma->vm_ops->access(vma, addr, buf,
++ len, write);
++ if (ret <= 0)
++#endif
++ break;
++ bytes = ret;
++ } else {
++ bytes = len;
++ offset = addr & (PAGE_SIZE-1);
++ if (bytes > PAGE_SIZE-offset)
++ bytes = PAGE_SIZE-offset;
++
++ maddr = kmap(page);
++ if (write) {
++ copy_to_user_page(vma, page, addr,
++ maddr + offset, buf, bytes);
++ set_page_dirty_lock(page);
++ } else {
++ copy_from_user_page(vma, page, addr,
++ buf, maddr + offset, bytes);
++ }
++ kunmap(page);
++ page_cache_release(page);
++ }
++ len -= bytes;
++ buf += bytes;
++ addr += bytes;
++ }
++ up_read(&mm->mmap_sem);
++ mmput(mm);
++
++ return buf - old_buf;
++}
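Typical callers of access_process_vm() are ptrace and /proc/<pid>/mem. A minimal sketch of the PTRACE_PEEKDATA-style usage (the helper name is illustrative):

	#include <linux/mm.h>
	#include <linux/sched.h>
	#include <linux/errno.h>

	static int peek_word_sketch(struct task_struct *child, unsigned long addr,
				    unsigned long *val)
	{
		int copied = access_process_vm(child, addr, val, sizeof(*val), 0);

		return copied == sizeof(*val) ? 0 : -EIO;
	}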
++
++/*
++ * Print the name of a VMA.
++ */
++void print_vma_addr(char *prefix, unsigned long ip)
++{
++ struct mm_struct *mm = current->mm;
++ struct vm_area_struct *vma;
++
++ /*
++ * Do not print if we are in atomic
++ * contexts (in exception stacks, etc.):
++ */
++ if (preempt_count())
++ return;
++
++ down_read(&mm->mmap_sem);
++ vma = find_vma(mm, ip);
++ if (vma && vma->vm_file) {
++ struct file *f = vma->vm_file;
++ char *buf = (char *)__get_free_page(GFP_KERNEL);
++ if (buf) {
++ char *p, *s;
++
++ p = d_path(&f->f_path, buf, PAGE_SIZE);
++ if (IS_ERR(p))
++ p = "?";
++ s = strrchr(p, '/');
++ if (s)
++ p = s+1;
++ printk("%s%s[%lx+%lx]", prefix, p,
++ vma->vm_start,
++ vma->vm_end - vma->vm_start);
++ free_page((unsigned long)buf);
++ }
++ }
++ up_read(&current->mm->mmap_sem);
++}
+diff -Nurb linux-2.6.27-590/mm/slab.c linux-2.6.27-591/mm/slab.c
+--- linux-2.6.27-590/mm/slab.c 2010-01-26 17:49:20.000000000 -0500
++++ linux-2.6.27-591/mm/slab.c 2010-01-29 16:09:09.000000000 -0500
+@@ -110,6 +110,7 @@
+ #include <linux/fault-inject.h>
+ #include <linux/rtmutex.h>
+ #include <linux/reciprocal_div.h>
++#include <linux/arrays.h>
+ #include <linux/debugobjects.h>
+
+ #include <asm/cacheflush.h>
+@@ -248,6 +249,14 @@
+ void *addr;
+ };
+
++extern void (*rec_event)(void *,unsigned int);
++struct event_spec {
++ unsigned long pc;
++ unsigned long dcookie;
++ unsigned count;
++ unsigned char reason;
++};
++
+ /*
+ * struct array_cache
+ *
+@@ -3469,6 +3478,19 @@
+ local_irq_restore(save_flags);
+ objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller);
+ prefetchw(objp);
++#ifdef CONFIG_CHOPSTIX
++ if (rec_event && objp) {
++ struct event event;
++ struct event_spec espec;
++
++ espec.reason = 0; /* alloc */
++ event.event_data=&espec;
++ event.task = current;
++ espec.pc=caller;
++ event.event_type=5;
++ (*rec_event)(&event, cachep->buffer_size);
++ }
++#endif
+
+ if (unlikely((flags & __GFP_ZERO) && objp))
+ memset(objp, 0, obj_size(cachep));
+@@ -3578,12 +3600,26 @@
+ * Release an obj back to its cache. If the obj has a constructed state, it must
+ * be in this state _before_ it is released. Called with disabled ints.
+ */
+-static inline void __cache_free(struct kmem_cache *cachep, void *objp)
++static inline void __cache_free(struct kmem_cache *cachep, void *objp, void *caller)
+ {
+ struct array_cache *ac = cpu_cache_get(cachep);
+
+ check_irq_off();
+- objp = cache_free_debugcheck(cachep, objp, __builtin_return_address(0));
++ objp = cache_free_debugcheck(cachep, objp, caller);
++ #ifdef CONFIG_CHOPSTIX
++ if (rec_event && objp) {
++ struct event event;
++ struct event_spec espec;
++
++ espec.reason = 1; /* free */
++ event.event_data=&espec;
++ event.task = current;
++ espec.pc=caller;
++ event.event_type=4;
++ (*rec_event)(&event, cachep->buffer_size);
++ }
++ #endif
++
+ vx_slab_free(cachep);
+
+ /*
+@@ -3714,6 +3750,7 @@
+ void *caller)
+ {
+ struct kmem_cache *cachep;
++ void *ret;
+
+ /* If you want to save a few bytes .text space: replace
+ * __ with kmem_.
+@@ -3741,10 +3778,17 @@
+ EXPORT_SYMBOL(__kmalloc_track_caller);
+
+ #else
++#ifdef CONFIG_CHOPSTIX
++void *__kmalloc(size_t size, gfp_t flags)
++{
++ return __do_kmalloc(size, flags, __builtin_return_address(0));
++}
++#else
+ void *__kmalloc(size_t size, gfp_t flags)
+ {
+ return __do_kmalloc(size, flags, NULL);
+ }
++#endif
+ EXPORT_SYMBOL(__kmalloc);
+ #endif
+
+@@ -3764,7 +3808,7 @@
+ debug_check_no_locks_freed(objp, obj_size(cachep));
+ if (!(cachep->flags & SLAB_DEBUG_OBJECTS))
+ debug_check_no_obj_freed(objp, obj_size(cachep));
+- __cache_free(cachep, objp);
++ __cache_free(cachep, objp,__builtin_return_address(0));
+ local_irq_restore(flags);
+ }
+ EXPORT_SYMBOL(kmem_cache_free);
+@@ -3790,7 +3834,7 @@
+ c = virt_to_cache(objp);
+ debug_check_no_locks_freed(objp, obj_size(c));
+ debug_check_no_obj_freed(objp, obj_size(c));
+- __cache_free(c, (void *)objp);
++ __cache_free(c, (void *)objp,__builtin_return_address(0));
+ local_irq_restore(flags);
+ }
+ EXPORT_SYMBOL(kfree);
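The hunks above only emit records through the rec_event function pointer; nothing in slab.c consumes them. A minimal sketch of a consumer module, assuming linux/arrays.h (added elsewhere in this patch) declares a struct event with the event_data, task and event_type fields used at the call sites; a real Chopstix probe would feed these records into its lossy summarizing data structures rather than printk them:

	#include <linux/module.h>
	#include <linux/kernel.h>
	#include <linux/sched.h>
	#include <linux/arrays.h>	/* assumed to declare struct event */

	/* Local mirror of the event_spec layout declared next to the hooks in
	 * slab.c; in a real probe this would live in a shared header. */
	struct event_spec {
		unsigned long pc;
		unsigned long dcookie;
		unsigned count;
		unsigned char reason;
	};

	extern void (*rec_event)(void *, unsigned int);

	static void slab_event_sketch(void *data, unsigned int size)
	{
		struct event *ev = data;
		struct event_spec *spec = ev->event_data;

		/* event_type 5 == allocation, 4 == free, as set in the hunks above */
		printk(KERN_DEBUG "chopstix: %s pc=%lx size=%u comm=%s\n",
		       spec->reason ? "free" : "alloc", spec->pc, size,
		       ev->task->comm);
	}

	static int __init slab_sketch_init(void)
	{
		rec_event = slab_event_sketch;
		return 0;
	}

	static void __exit slab_sketch_exit(void)
	{
		rec_event = NULL;
	}

	module_init(slab_sketch_init);
	module_exit(slab_sketch_exit);
	MODULE_LICENSE("GPL");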
+diff -Nurb linux-2.6.27-590/mm/slab.c.orig linux-2.6.27-591/mm/slab.c.orig
+--- linux-2.6.27-590/mm/slab.c.orig 1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.27-591/mm/slab.c.orig 2010-01-26 17:49:20.000000000 -0500
+@@ -0,0 +1,4479 @@
++/*
++ * linux/mm/slab.c
++ * Written by Mark Hemment, 1996/97.
++ * (markhe@nextd.demon.co.uk)
++ *
++ * kmem_cache_destroy() + some cleanup - 1999 Andrea Arcangeli
++ *
++ * Major cleanup, different bufctl logic, per-cpu arrays
++ * (c) 2000 Manfred Spraul
++ *
++ * Cleanup, make the head arrays unconditional, preparation for NUMA
++ * (c) 2002 Manfred Spraul
++ *
++ * An implementation of the Slab Allocator as described in outline in;
++ * UNIX Internals: The New Frontiers by Uresh Vahalia
++ * Pub: Prentice Hall ISBN 0-13-101908-2
++ * or with a little more detail in;
++ * The Slab Allocator: An Object-Caching Kernel Memory Allocator
++ * Jeff Bonwick (Sun Microsystems).
++ * Presented at: USENIX Summer 1994 Technical Conference
++ *
++ * The memory is organized in caches, one cache for each object type.
++ * (e.g. inode_cache, dentry_cache, buffer_head, vm_area_struct)
++ * Each cache consists out of many slabs (they are small (usually one
++ * page long) and always contiguous), and each slab contains multiple
++ * initialized objects.
++ *
++ * This means, that your constructor is used only for newly allocated
++ * slabs and you must pass objects with the same initializations to
++ * kmem_cache_free.
++ *
++ * Each cache can only support one memory type (GFP_DMA, GFP_HIGHMEM,
++ * normal). If you need a special memory type, then must create a new
++ * cache for that memory type.
++ *
++ * In order to reduce fragmentation, the slabs are sorted in 3 groups:
++ * full slabs with 0 free objects
++ * partial slabs
++ * empty slabs with no allocated objects
++ *
++ * If partial slabs exist, then new allocations come from these slabs,
++ * otherwise from empty slabs or new slabs are allocated.
++ *
++ * kmem_cache_destroy() CAN CRASH if you try to allocate from the cache
++ * during kmem_cache_destroy(). The caller must prevent concurrent allocs.
++ *
++ * Each cache has a short per-cpu head array, most allocs
++ * and frees go into that array, and if that array overflows, then 1/2
++ * of the entries in the array are given back into the global cache.
++ * The head array is strictly LIFO and should improve the cache hit rates.
++ * On SMP, it additionally reduces the spinlock operations.
++ *
++ * The c_cpuarray may not be read with enabled local interrupts -
++ * it's changed with a smp_call_function().
++ *
++ * SMP synchronization:
++ * constructors and destructors are called without any locking.
++ * Several members in struct kmem_cache and struct slab never change, they
++ * are accessed without any locking.
++ * The per-cpu arrays are never accessed from the wrong cpu, no locking,
++ * and local interrupts are disabled so slab code is preempt-safe.
++ * The non-constant members are protected with a per-cache irq spinlock.
++ *
++ * Many thanks to Mark Hemment, who wrote another per-cpu slab patch
++ * in 2000 - many ideas in the current implementation are derived from
++ * his patch.
++ *
++ * Further notes from the original documentation:
++ *
++ * 11 April '97. Started multi-threading - markhe
++ * The global cache-chain is protected by the mutex 'cache_chain_mutex'.
++ * The sem is only needed when accessing/extending the cache-chain, which
++ * can never happen inside an interrupt (kmem_cache_create(),
++ * kmem_cache_shrink() and kmem_cache_reap()).
++ *
++ * At present, each engine can be growing a cache. This should be blocked.
++ *
++ * 15 March 2005. NUMA slab allocator.
++ * Shai Fultheim <shai@scalex86.org>.
++ * Shobhit Dayal <shobhit@calsoftinc.com>
++ * Alok N Kataria <alokk@calsoftinc.com>
++ * Christoph Lameter <christoph@lameter.com>
++ *
++ * Modified the slab allocator to be node aware on NUMA systems.
++ * Each node has its own list of partial, free and full slabs.
++ * All object allocations for a node occur from node specific slab lists.
++ */
++
++#include <linux/slab.h>
++#include <linux/mm.h>
++#include <linux/poison.h>
++#include <linux/swap.h>
++#include <linux/cache.h>
++#include <linux/interrupt.h>
++#include <linux/init.h>
++#include <linux/compiler.h>
++#include <linux/cpuset.h>
++#include <linux/seq_file.h>
++#include <linux/notifier.h>
++#include <linux/kallsyms.h>
++#include <linux/cpu.h>
++#include <linux/sysctl.h>
++#include <linux/module.h>
++#include <linux/rcupdate.h>
++#include <linux/string.h>
++#include <linux/uaccess.h>
++#include <linux/nodemask.h>
++#include <linux/mempolicy.h>
++#include <linux/mutex.h>
++#include <linux/fault-inject.h>
++#include <linux/rtmutex.h>
++#include <linux/reciprocal_div.h>
++#include <linux/debugobjects.h>
++
++#include <asm/cacheflush.h>
++#include <asm/tlbflush.h>
++#include <asm/page.h>
++
++/*
++ * DEBUG - 1 for kmem_cache_create() to honour; SLAB_RED_ZONE & SLAB_POISON.
++ * 0 for faster, smaller code (especially in the critical paths).
++ *
++ * STATS - 1 to collect stats for /proc/slabinfo.
++ * 0 for faster, smaller code (especially in the critical paths).
++ *
++ * FORCED_DEBUG - 1 enables SLAB_RED_ZONE and SLAB_POISON (if possible)
++ */
++
++#ifdef CONFIG_DEBUG_SLAB
++#define DEBUG 1
++#define STATS 1
++#define FORCED_DEBUG 1
++#else
++#define DEBUG 0
++#define STATS 0
++#define FORCED_DEBUG 0
++#endif
++
++/* Shouldn't this be in a header file somewhere? */
++#define BYTES_PER_WORD sizeof(void *)
++#define REDZONE_ALIGN max(BYTES_PER_WORD, __alignof__(unsigned long long))
++
++#ifndef ARCH_KMALLOC_MINALIGN
++/*
++ * Enforce a minimum alignment for the kmalloc caches.
++ * Usually, the kmalloc caches are cache_line_size() aligned, except when
++ * DEBUG and FORCED_DEBUG are enabled, then they are BYTES_PER_WORD aligned.
++ * Some archs want to perform DMA into kmalloc caches and need a guaranteed
++ * alignment larger than the alignment of a 64-bit integer.
++ * ARCH_KMALLOC_MINALIGN allows that.
++ * Note that increasing this value may disable some debug features.
++ */
++#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
++#endif
++
++#ifndef ARCH_SLAB_MINALIGN
++/*
++ * Enforce a minimum alignment for all caches.
++ * Intended for archs that get misalignment faults even for BYTES_PER_WORD
++ * aligned buffers. Includes ARCH_KMALLOC_MINALIGN.
++ * If possible: Do not enable this flag for CONFIG_DEBUG_SLAB, it disables
++ * some debug features.
++ */
++#define ARCH_SLAB_MINALIGN 0
++#endif
++
++#ifndef ARCH_KMALLOC_FLAGS
++#define ARCH_KMALLOC_FLAGS SLAB_HWCACHE_ALIGN
++#endif
++
++/* Legal flag mask for kmem_cache_create(). */
++#if DEBUG
++# define CREATE_MASK (SLAB_RED_ZONE | \
++ SLAB_POISON | SLAB_HWCACHE_ALIGN | \
++ SLAB_CACHE_DMA | \
++ SLAB_STORE_USER | \
++ SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
++ SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
++ SLAB_DEBUG_OBJECTS)
++#else
++# define CREATE_MASK (SLAB_HWCACHE_ALIGN | \
++ SLAB_CACHE_DMA | \
++ SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
++ SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
++ SLAB_DEBUG_OBJECTS)
++#endif
++
++/*
++ * kmem_bufctl_t:
++ *
++ * Bufctl's are used for linking objs within a slab
++ * linked offsets.
++ *
++ * This implementation relies on "struct page" for locating the cache &
++ * slab an object belongs to.
++ * This allows the bufctl structure to be small (one int), but limits
++ * the number of objects a slab (not a cache) can contain when off-slab
++ * bufctls are used. The limit is the size of the largest general cache
++ * that does not use off-slab slabs.
++ * For 32bit archs with 4 kB pages, is this 56.
++ * This is not serious, as it is only for large objects, when it is unwise
++ * to have too many per slab.
++ * Note: This limit can be raised by introducing a general cache whose size
++ * is less than 512 (PAGE_SIZE<<3), but greater than 256.
++ */
++
++typedef unsigned int kmem_bufctl_t;
++#define BUFCTL_END (((kmem_bufctl_t)(~0U))-0)
++#define BUFCTL_FREE (((kmem_bufctl_t)(~0U))-1)
++#define BUFCTL_ACTIVE (((kmem_bufctl_t)(~0U))-2)
++#define SLAB_LIMIT (((kmem_bufctl_t)(~0U))-3)
++
++/*
++ * struct slab
++ *
++ * Manages the objs in a slab. Placed either at the beginning of mem allocated
++ * for a slab, or allocated from an general cache.
++ * Slabs are chained into three list: fully used, partial, fully free slabs.
++ */
++struct slab {
++ struct list_head list;
++ unsigned long colouroff;
++ void *s_mem; /* including colour offset */
++ unsigned int inuse; /* num of objs active in slab */
++ kmem_bufctl_t free;
++ unsigned short nodeid;
++};
++
++/*
++ * struct slab_rcu
++ *
++ * slab_destroy on a SLAB_DESTROY_BY_RCU cache uses this structure to
++ * arrange for kmem_freepages to be called via RCU. This is useful if
++ * we need to approach a kernel structure obliquely, from its address
++ * obtained without the usual locking. We can lock the structure to
++ * stabilize it and check it's still at the given address, only if we
++ * can be sure that the memory has not been meanwhile reused for some
++ * other kind of object (which our subsystem's lock might corrupt).
++ *
++ * rcu_read_lock before reading the address, then rcu_read_unlock after
++ * taking the spinlock within the structure expected at that address.
++ *
++ * We assume struct slab_rcu can overlay struct slab when destroying.
++ */
++struct slab_rcu {
++ struct rcu_head head;
++ struct kmem_cache *cachep;
++ void *addr;
++};
++
++/*
++ * struct array_cache
++ *
++ * Purpose:
++ * - LIFO ordering, to hand out cache-warm objects from _alloc
++ * - reduce the number of linked list operations
++ * - reduce spinlock operations
++ *
++ * The limit is stored in the per-cpu structure to reduce the data cache
++ * footprint.
++ *
++ */
++struct array_cache {
++ unsigned int avail;
++ unsigned int limit;
++ unsigned int batchcount;
++ unsigned int touched;
++ spinlock_t lock;
++ void *entry[]; /*
++ * Must have this definition in here for the proper
++ * alignment of array_cache. Also simplifies accessing
++ * the entries.
++ */
++};
++
++/*
++ * bootstrap: The caches do not work without cpuarrays anymore, but the
++ * cpuarrays are allocated from the generic caches...
++ */
++#define BOOT_CPUCACHE_ENTRIES 1
++struct arraycache_init {
++ struct array_cache cache;
++ void *entries[BOOT_CPUCACHE_ENTRIES];
++};
++
++/*
++ * The slab lists for all objects.
++ */
++struct kmem_list3 {
++ struct list_head slabs_partial; /* partial list first, better asm code */
++ struct list_head slabs_full;
++ struct list_head slabs_free;
++ unsigned long free_objects;
++ unsigned int free_limit;
++ unsigned int colour_next; /* Per-node cache coloring */
++ spinlock_t list_lock;
++ struct array_cache *shared; /* shared per node */
++ struct array_cache **alien; /* on other nodes */
++ unsigned long next_reap; /* updated without locking */
++ int free_touched; /* updated without locking */
++};
++
++/*
++ * Need this for bootstrapping a per node allocator.
++ */
++#define NUM_INIT_LISTS (3 * MAX_NUMNODES)
++struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
++#define CACHE_CACHE 0
++#define SIZE_AC MAX_NUMNODES
++#define SIZE_L3 (2 * MAX_NUMNODES)
++
++static int drain_freelist(struct kmem_cache *cache,
++ struct kmem_list3 *l3, int tofree);
++static void free_block(struct kmem_cache *cachep, void **objpp, int len,
++ int node);
++static int enable_cpucache(struct kmem_cache *cachep);
++static void cache_reap(struct work_struct *unused);
++
++/*
++ * This function must be completely optimized away if a constant is passed to
++ * it. Mostly the same as what is in linux/slab.h except it returns an index.
++ */
++static __always_inline int index_of(const size_t size)
++{
++ extern void __bad_size(void);
++
++ if (__builtin_constant_p(size)) {
++ int i = 0;
++
++#define CACHE(x) \
++ if (size <=x) \
++ return i; \
++ else \
++ i++;
++#include <linux/kmalloc_sizes.h>
++#undef CACHE
++ __bad_size();
++ } else
++ __bad_size();
++ return 0;
++}
++
++static int slab_early_init = 1;
++
++#define INDEX_AC index_of(sizeof(struct arraycache_init))
++#define INDEX_L3 index_of(sizeof(struct kmem_list3))
++
++static void kmem_list3_init(struct kmem_list3 *parent)
++{
++ INIT_LIST_HEAD(&parent->slabs_full);
++ INIT_LIST_HEAD(&parent->slabs_partial);
++ INIT_LIST_HEAD(&parent->slabs_free);
++ parent->shared = NULL;
++ parent->alien = NULL;
++ parent->colour_next = 0;
++ spin_lock_init(&parent->list_lock);
++ parent->free_objects = 0;
++ parent->free_touched = 0;
++}
++
++#define MAKE_LIST(cachep, listp, slab, nodeid) \
++ do { \
++ INIT_LIST_HEAD(listp); \
++ list_splice(&(cachep->nodelists[nodeid]->slab), listp); \
++ } while (0)
++
++#define MAKE_ALL_LISTS(cachep, ptr, nodeid) \
++ do { \
++ MAKE_LIST((cachep), (&(ptr)->slabs_full), slabs_full, nodeid); \
++ MAKE_LIST((cachep), (&(ptr)->slabs_partial), slabs_partial, nodeid); \
++ MAKE_LIST((cachep), (&(ptr)->slabs_free), slabs_free, nodeid); \
++ } while (0)
++
++/*
++ * struct kmem_cache
++ *
++ * manages a cache.
++ */
++
++struct kmem_cache {
++/* 1) per-cpu data, touched during every alloc/free */
++ struct array_cache *array[NR_CPUS];
++/* 2) Cache tunables. Protected by cache_chain_mutex */
++ unsigned int batchcount;
++ unsigned int limit;
++ unsigned int shared;
++
++ unsigned int buffer_size;
++ u32 reciprocal_buffer_size;
++/* 3) touched by every alloc & free from the backend */
++
++ unsigned int flags; /* constant flags */
++ unsigned int num; /* # of objs per slab */
++
++/* 4) cache_grow/shrink */
++ /* order of pgs per slab (2^n) */
++ unsigned int gfporder;
++
++ /* force GFP flags, e.g. GFP_DMA */
++ gfp_t gfpflags;
++
++ size_t colour; /* cache colouring range */
++ unsigned int colour_off; /* colour offset */
++ struct kmem_cache *slabp_cache;
++ unsigned int slab_size;
++ unsigned int dflags; /* dynamic flags */
++
++ /* constructor func */
++ void (*ctor)(void *obj);
++
++/* 5) cache creation/removal */
++ const char *name;
++ struct list_head next;
++
++/* 6) statistics */
++#if STATS
++ unsigned long num_active;
++ unsigned long num_allocations;
++ unsigned long high_mark;
++ unsigned long grown;
++ unsigned long reaped;
++ unsigned long errors;
++ unsigned long max_freeable;
++ unsigned long node_allocs;
++ unsigned long node_frees;
++ unsigned long node_overflow;
++ atomic_t allochit;
++ atomic_t allocmiss;
++ atomic_t freehit;
++ atomic_t freemiss;
++#endif
++#if DEBUG
++ /*
++ * If debugging is enabled, then the allocator can add additional
++ * fields and/or padding to every object. buffer_size contains the total
++ * object size including these internal fields, the following two
++ * variables contain the offset to the user object and its size.
++ */
++ int obj_offset;
++ int obj_size;
++#endif
++ /*
++ * We put nodelists[] at the end of kmem_cache, because we want to size
++ * this array to nr_node_ids slots instead of MAX_NUMNODES
++ * (see kmem_cache_init())
++ * We still use [MAX_NUMNODES] and not [1] or [0] because cache_cache
++ * is statically defined, so we reserve the max number of nodes.
++ */
++ struct kmem_list3 *nodelists[MAX_NUMNODES];
++ /*
++ * Do not add fields after nodelists[]
++ */
++};
++
++#define CFLGS_OFF_SLAB (0x80000000UL)
++#define OFF_SLAB(x) ((x)->flags & CFLGS_OFF_SLAB)
++
++#define BATCHREFILL_LIMIT 16
++/*
++ * Optimization question: fewer reaps means less probability for unnessary
++ * cpucache drain/refill cycles.
++ *
++ * OTOH the cpuarrays can contain lots of objects,
++ * which could lock up otherwise freeable slabs.
++ */
++#define REAPTIMEOUT_CPUC (2*HZ)
++#define REAPTIMEOUT_LIST3 (4*HZ)
++
++#if STATS
++#define STATS_INC_ACTIVE(x) ((x)->num_active++)
++#define STATS_DEC_ACTIVE(x) ((x)->num_active--)
++#define STATS_INC_ALLOCED(x) ((x)->num_allocations++)
++#define STATS_INC_GROWN(x) ((x)->grown++)
++#define STATS_ADD_REAPED(x,y) ((x)->reaped += (y))
++#define STATS_SET_HIGH(x) \
++ do { \
++ if ((x)->num_active > (x)->high_mark) \
++ (x)->high_mark = (x)->num_active; \
++ } while (0)
++#define STATS_INC_ERR(x) ((x)->errors++)
++#define STATS_INC_NODEALLOCS(x) ((x)->node_allocs++)
++#define STATS_INC_NODEFREES(x) ((x)->node_frees++)
++#define STATS_INC_ACOVERFLOW(x) ((x)->node_overflow++)
++#define STATS_SET_FREEABLE(x, i) \
++ do { \
++ if ((x)->max_freeable < i) \
++ (x)->max_freeable = i; \
++ } while (0)
++#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
++#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
++#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
++#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
++#else
++#define STATS_INC_ACTIVE(x) do { } while (0)
++#define STATS_DEC_ACTIVE(x) do { } while (0)
++#define STATS_INC_ALLOCED(x) do { } while (0)
++#define STATS_INC_GROWN(x) do { } while (0)
++#define STATS_ADD_REAPED(x,y) do { } while (0)
++#define STATS_SET_HIGH(x) do { } while (0)
++#define STATS_INC_ERR(x) do { } while (0)
++#define STATS_INC_NODEALLOCS(x) do { } while (0)
++#define STATS_INC_NODEFREES(x) do { } while (0)
++#define STATS_INC_ACOVERFLOW(x) do { } while (0)
++#define STATS_SET_FREEABLE(x, i) do { } while (0)
++#define STATS_INC_ALLOCHIT(x) do { } while (0)
++#define STATS_INC_ALLOCMISS(x) do { } while (0)
++#define STATS_INC_FREEHIT(x) do { } while (0)
++#define STATS_INC_FREEMISS(x) do { } while (0)
++#endif
++
++#include "slab_vs.h"
++
++#if DEBUG
++
++/*
++ * memory layout of objects:
++ * 0 : objp
++ * 0 .. cachep->obj_offset - BYTES_PER_WORD - 1: padding. This ensures that
++ * the end of an object is aligned with the end of the real
++ * allocation. Catches writes behind the end of the allocation.
++ * cachep->obj_offset - BYTES_PER_WORD .. cachep->obj_offset - 1:
++ * redzone word.
++ * cachep->obj_offset: The real object.
++ * cachep->buffer_size - 2* BYTES_PER_WORD: redzone word [BYTES_PER_WORD long]
++ * cachep->buffer_size - 1* BYTES_PER_WORD: last caller address
++ * [BYTES_PER_WORD long]
++ */
++static int obj_offset(struct kmem_cache *cachep)
++{
++ return cachep->obj_offset;
++}
++
++static int obj_size(struct kmem_cache *cachep)
++{
++ return cachep->obj_size;
++}
++
++static unsigned long long *dbg_redzone1(struct kmem_cache *cachep, void *objp)
++{
++ BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
++ return (unsigned long long*) (objp + obj_offset(cachep) -
++ sizeof(unsigned long long));
++}
++
++static unsigned long long *dbg_redzone2(struct kmem_cache *cachep, void *objp)
++{
++ BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
++ if (cachep->flags & SLAB_STORE_USER)
++ return (unsigned long long *)(objp + cachep->buffer_size -
++ sizeof(unsigned long long) -
++ REDZONE_ALIGN);
++ return (unsigned long long *) (objp + cachep->buffer_size -
++ sizeof(unsigned long long));
++}
++
++static void **dbg_userword(struct kmem_cache *cachep, void *objp)
++{
++ BUG_ON(!(cachep->flags & SLAB_STORE_USER));
++ return (void **)(objp + cachep->buffer_size - BYTES_PER_WORD);
++}
++
++#else
++
++#define obj_offset(x) 0
++#define obj_size(cachep) (cachep->buffer_size)
++#define dbg_redzone1(cachep, objp) ({BUG(); (unsigned long long *)NULL;})
++#define dbg_redzone2(cachep, objp) ({BUG(); (unsigned long long *)NULL;})
++#define dbg_userword(cachep, objp) ({BUG(); (void **)NULL;})
++
++#endif
++
++/*
++ * Do not go above this order unless 0 objects fit into the slab.
++ */
++#define BREAK_GFP_ORDER_HI 1
++#define BREAK_GFP_ORDER_LO 0
++static int slab_break_gfp_order = BREAK_GFP_ORDER_LO;
++
++/*
++ * Functions for storing/retrieving the cachep and or slab from the page
++ * allocator. These are used to find the slab an obj belongs to. With kfree(),
++ * these are used to find the cache which an obj belongs to.
++ */
++static inline void page_set_cache(struct page *page, struct kmem_cache *cache)
++{
++ page->lru.next = (struct list_head *)cache;
++}
++
++static inline struct kmem_cache *page_get_cache(struct page *page)
++{
++ page = compound_head(page);
++ BUG_ON(!PageSlab(page));
++ return (struct kmem_cache *)page->lru.next;
++}
++
++static inline void page_set_slab(struct page *page, struct slab *slab)
++{
++ page->lru.prev = (struct list_head *)slab;
++}
++
++static inline struct slab *page_get_slab(struct page *page)
++{
++ BUG_ON(!PageSlab(page));
++ return (struct slab *)page->lru.prev;
++}
++
++static inline struct kmem_cache *virt_to_cache(const void *obj)
++{
++ struct page *page = virt_to_head_page(obj);
++ return page_get_cache(page);
++}
++
++static inline struct slab *virt_to_slab(const void *obj)
++{
++ struct page *page = virt_to_head_page(obj);
++ return page_get_slab(page);
++}
++
++static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
++ unsigned int idx)
++{
++ return slab->s_mem + cache->buffer_size * idx;
++}
++
++/*
++ * We want to avoid an expensive divide : (offset / cache->buffer_size)
++ * Using the fact that buffer_size is a constant for a particular cache,
++ * we can replace (offset / cache->buffer_size) by
++ * reciprocal_divide(offset, cache->reciprocal_buffer_size)
++ */
++static inline unsigned int obj_to_index(const struct kmem_cache *cache,
++ const struct slab *slab, void *obj)
++{
++ u32 offset = (obj - slab->s_mem);
++ return reciprocal_divide(offset, cache->reciprocal_buffer_size);
++}
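The reciprocal trick above is plain fixed-point arithmetic. A small user-space sketch of the same computation (mirroring linux/reciprocal_div.h; the size-192 cache is just an example) shows that the multiply-and-shift reproduces offset / buffer_size exactly for every object offset that fits in a slab:

	#include <stdint.h>
	#include <stdio.h>

	/* Same arithmetic as linux/reciprocal_div.h of this era. */
	static uint32_t reciprocal_value(uint32_t k)
	{
		uint64_t val = (1ULL << 32) + (k - 1);
		return (uint32_t)(val / k);
	}

	static uint32_t reciprocal_divide(uint32_t a, uint32_t r)
	{
		return (uint32_t)(((uint64_t)a * r) >> 32);
	}

	int main(void)
	{
		uint32_t buffer_size = 192;	/* e.g. a size-192 kmalloc cache */
		uint32_t r = reciprocal_value(buffer_size);
		uint32_t offset;

		/* Every object offset inside one slab maps back to its index. */
		for (offset = 0; offset < 20 * buffer_size; offset += buffer_size)
			printf("offset %5u -> index %u (expect %u)\n",
			       offset, reciprocal_divide(offset, r),
			       offset / buffer_size);
		return 0;
	}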
++
++/*
++ * These are the default caches for kmalloc. Custom caches can have other sizes.
++ */
++struct cache_sizes malloc_sizes[] = {
++#define CACHE(x) { .cs_size = (x) },
++#include <linux/kmalloc_sizes.h>
++ CACHE(ULONG_MAX)
++#undef CACHE
++};
++EXPORT_SYMBOL(malloc_sizes);
++
++/* Must match cache_sizes above. Out of line to keep cache footprint low. */
++struct cache_names {
++ char *name;
++ char *name_dma;
++};
++
++static struct cache_names __initdata cache_names[] = {
++#define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" },
++#include <linux/kmalloc_sizes.h>
++ {NULL,}
++#undef CACHE
++};
++
++static struct arraycache_init initarray_cache __initdata =
++ { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} };
++static struct arraycache_init initarray_generic =
++ { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} };
++
++/* internal cache of cache description objs */
++static struct kmem_cache cache_cache = {
++ .batchcount = 1,
++ .limit = BOOT_CPUCACHE_ENTRIES,
++ .shared = 1,
++ .buffer_size = sizeof(struct kmem_cache),
++ .name = "kmem_cache",
++};
++
++#define BAD_ALIEN_MAGIC 0x01020304ul
++
++#ifdef CONFIG_LOCKDEP
++
++/*
++ * Slab sometimes uses the kmalloc slabs to store the slab headers
++ * for other slabs "off slab".
++ * The locking for this is tricky in that it nests within the locks
++ * of all other slabs in a few places; to deal with this special
++ * locking we put on-slab caches into a separate lock-class.
++ *
++ * We set lock class for alien array caches which are up during init.
++ * The lock annotation will be lost if all cpus of a node goes down and
++ * then comes back up during hotplug
++ */
++static struct lock_class_key on_slab_l3_key;
++static struct lock_class_key on_slab_alc_key;
++
++static inline void init_lock_keys(void)
++
++{
++ int q;
++ struct cache_sizes *s = malloc_sizes;
++
++ while (s->cs_size != ULONG_MAX) {
++ for_each_node(q) {
++ struct array_cache **alc;
++ int r;
++ struct kmem_list3 *l3 = s->cs_cachep->nodelists[q];
++ if (!l3 || OFF_SLAB(s->cs_cachep))
++ continue;
++ lockdep_set_class(&l3->list_lock, &on_slab_l3_key);
++ alc = l3->alien;
++ /*
++ * FIXME: This check for BAD_ALIEN_MAGIC
++ * should go away when common slab code is taught to
++ * work even without alien caches.
++ * Currently, non NUMA code returns BAD_ALIEN_MAGIC
++ * for alloc_alien_cache,
++ */
++ if (!alc || (unsigned long)alc == BAD_ALIEN_MAGIC)
++ continue;
++ for_each_node(r) {
++ if (alc[r])
++ lockdep_set_class(&alc[r]->lock,
++ &on_slab_alc_key);
++ }
++ }
++ s++;
++ }
++}
++#else
++static inline void init_lock_keys(void)
++{
++}
++#endif
++
++/*
++ * Guard access to the cache-chain.
++ */
++static DEFINE_MUTEX(cache_chain_mutex);
++static struct list_head cache_chain;
++
++/*
++ * chicken and egg problem: delay the per-cpu array allocation
++ * until the general caches are up.
++ */
++static enum {
++ NONE,
++ PARTIAL_AC,
++ PARTIAL_L3,
++ FULL
++} g_cpucache_up;
++
++/*
++ * used by boot code to determine if it can use slab based allocator
++ */
++int slab_is_available(void)
++{
++ return g_cpucache_up == FULL;
++}
++
++static DEFINE_PER_CPU(struct delayed_work, reap_work);
++
++static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
++{
++ return cachep->array[smp_processor_id()];
++}
++
++static inline struct kmem_cache *__find_general_cachep(size_t size,
++ gfp_t gfpflags)
++{
++ struct cache_sizes *csizep = malloc_sizes;
++
++#if DEBUG
++ /* This happens if someone tries to call
++ * kmem_cache_create(), or __kmalloc(), before
++ * the generic caches are initialized.
++ */
++ BUG_ON(malloc_sizes[INDEX_AC].cs_cachep == NULL);
++#endif
++ if (!size)
++ return ZERO_SIZE_PTR;
++
++ while (size > csizep->cs_size)
++ csizep++;
++
++ /*
++ * Really subtle: The last entry with cs->cs_size==ULONG_MAX
++ * has cs_{dma,}cachep==NULL. Thus no special case
++ * for large kmalloc calls required.
++ */
++#ifdef CONFIG_ZONE_DMA
++ if (unlikely(gfpflags & GFP_DMA))
++ return csizep->cs_dmacachep;
++#endif
++ return csizep->cs_cachep;
++}
++
++static struct kmem_cache *kmem_find_general_cachep(size_t size, gfp_t gfpflags)
++{
++ return __find_general_cachep(size, gfpflags);
++}
++
++static size_t slab_mgmt_size(size_t nr_objs, size_t align)
++{
++ return ALIGN(sizeof(struct slab)+nr_objs*sizeof(kmem_bufctl_t), align);
++}
++
++/*
++ * Calculate the number of objects and left-over bytes for a given buffer size.
++ */
++static void cache_estimate(unsigned long gfporder, size_t buffer_size,
++ size_t align, int flags, size_t *left_over,
++ unsigned int *num)
++{
++ int nr_objs;
++ size_t mgmt_size;
++ size_t slab_size = PAGE_SIZE << gfporder;
++
++ /*
++ * The slab management structure can be either off the slab or
++ * on it. For the latter case, the memory allocated for a
++ * slab is used for:
++ *
++ * - The struct slab
++ * - One kmem_bufctl_t for each object
++ * - Padding to respect alignment of @align
++ * - @buffer_size bytes for each object
++ *
++ * If the slab management structure is off the slab, then the
++ * alignment will already be calculated into the size. Because
++ * the slabs are all pages aligned, the objects will be at the
++ * correct alignment when allocated.
++ */
++ if (flags & CFLGS_OFF_SLAB) {
++ mgmt_size = 0;
++ nr_objs = slab_size / buffer_size;
++
++ if (nr_objs > SLAB_LIMIT)
++ nr_objs = SLAB_LIMIT;
++ } else {
++ /*
++ * Ignore padding for the initial guess. The padding
++ * is at most @align-1 bytes, and @buffer_size is at
++ * least @align. In the worst case, this result will
++ * be one greater than the number of objects that fit
++ * into the memory allocation when taking the padding
++ * into account.
++ */
++ nr_objs = (slab_size - sizeof(struct slab)) /
++ (buffer_size + sizeof(kmem_bufctl_t));
++
++ /*
++ * This calculated number will be either the right
++ * amount, or one greater than what we want.
++ */
++ if (slab_mgmt_size(nr_objs, align) + nr_objs*buffer_size
++ > slab_size)
++ nr_objs--;
++
++ if (nr_objs > SLAB_LIMIT)
++ nr_objs = SLAB_LIMIT;
++
++ mgmt_size = slab_mgmt_size(nr_objs, align);
++ }
++ *num = nr_objs;
++ *left_over = slab_size - nr_objs*buffer_size - mgmt_size;
++}
++
++#define slab_error(cachep, msg) __slab_error(__func__, cachep, msg)
++
++static void __slab_error(const char *function, struct kmem_cache *cachep,
++ char *msg)
++{
++ printk(KERN_ERR "slab error in %s(): cache `%s': %s\n",
++ function, cachep->name, msg);
++ dump_stack();
++}
++
++/*
++ * By default on NUMA we use alien caches to stage the freeing of
++ * objects allocated from other nodes. This causes massive memory
++ * inefficiencies when using fake NUMA setup to split memory into a
++ * large number of small nodes, so it can be disabled on the command
++ * line
++ */
++
++static int use_alien_caches __read_mostly = 1;
++static int numa_platform __read_mostly = 1;
++static int __init noaliencache_setup(char *s)
++{
++ use_alien_caches = 0;
++ return 1;
++}
++__setup("noaliencache", noaliencache_setup);
++
++#ifdef CONFIG_NUMA
++/*
++ * Special reaping functions for NUMA systems called from cache_reap().
++ * These take care of doing round robin flushing of alien caches (containing
++ * objects freed on different nodes from which they were allocated) and the
++ * flushing of remote pcps by calling drain_node_pages.
++ */
++static DEFINE_PER_CPU(unsigned long, reap_node);
++
++static void init_reap_node(int cpu)
++{
++ int node;
++
++ node = next_node(cpu_to_node(cpu), node_online_map);
++ if (node == MAX_NUMNODES)
++ node = first_node(node_online_map);
++
++ per_cpu(reap_node, cpu) = node;
++}
++
++static void next_reap_node(void)
++{
++ int node = __get_cpu_var(reap_node);
++
++ node = next_node(node, node_online_map);
++ if (unlikely(node >= MAX_NUMNODES))
++ node = first_node(node_online_map);
++ __get_cpu_var(reap_node) = node;
++}
++
++#else
++#define init_reap_node(cpu) do { } while (0)
++#define next_reap_node(void) do { } while (0)
++#endif
++
++/*
++ * Initiate the reap timer running on the target CPU. We run at around 1 to 2Hz
++ * via the workqueue/eventd.
++ * Add the CPU number into the expiration time to minimize the possibility of
++ * the CPUs getting into lockstep and contending for the global cache chain
++ * lock.
++ */
++static void __cpuinit start_cpu_timer(int cpu)
++{
++ struct delayed_work *reap_work = &per_cpu(reap_work, cpu);
++
++ /*
++ * When this gets called from do_initcalls via cpucache_init(),
++ * init_workqueues() has already run, so keventd will be setup
++ * at that time.
++ */
++ if (keventd_up() && reap_work->work.func == NULL) {
++ init_reap_node(cpu);
++ INIT_DELAYED_WORK(reap_work, cache_reap);
++ schedule_delayed_work_on(cpu, reap_work,
++ __round_jiffies_relative(HZ, cpu));
++ }
++}
++
++static struct array_cache *alloc_arraycache(int node, int entries,
++ int batchcount)
++{
++ int memsize = sizeof(void *) * entries + sizeof(struct array_cache);
++ struct array_cache *nc = NULL;
++
++ nc = kmalloc_node(memsize, GFP_KERNEL, node);
++ if (nc) {
++ nc->avail = 0;
++ nc->limit = entries;
++ nc->batchcount = batchcount;
++ nc->touched = 0;
++ spin_lock_init(&nc->lock);
++ }
++ return nc;
++}
++
++/*
++ * Transfer objects in one arraycache to another.
++ * Locking must be handled by the caller.
++ *
++ * Return the number of entries transferred.
++ */
++static int transfer_objects(struct array_cache *to,
++ struct array_cache *from, unsigned int max)
++{
++ /* Figure out how many entries to transfer */
++ int nr = min(min(from->avail, max), to->limit - to->avail);
++
++ if (!nr)
++ return 0;
++
++ memcpy(to->entry + to->avail, from->entry + from->avail -nr,
++ sizeof(void *) *nr);
++
++ from->avail -= nr;
++ to->avail += nr;
++ to->touched = 1;
++ return nr;
++}
++
++#ifndef CONFIG_NUMA
++
++#define drain_alien_cache(cachep, alien) do { } while (0)
++#define reap_alien(cachep, l3) do { } while (0)
++
++static inline struct array_cache **alloc_alien_cache(int node, int limit)
++{
++ return (struct array_cache **)BAD_ALIEN_MAGIC;
++}
++
++static inline void free_alien_cache(struct array_cache **ac_ptr)
++{
++}
++
++static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
++{
++ return 0;
++}
++
++static inline void *alternate_node_alloc(struct kmem_cache *cachep,
++ gfp_t flags)
++{
++ return NULL;
++}
++
++static inline void *____cache_alloc_node(struct kmem_cache *cachep,
++ gfp_t flags, int nodeid)
++{
++ return NULL;
++}
++
++#else /* CONFIG_NUMA */
++
++static void *____cache_alloc_node(struct kmem_cache *, gfp_t, int);
++static void *alternate_node_alloc(struct kmem_cache *, gfp_t);
++
++static struct array_cache **alloc_alien_cache(int node, int limit)
++{
++ struct array_cache **ac_ptr;
++ int memsize = sizeof(void *) * nr_node_ids;
++ int i;
++
++ if (limit > 1)
++ limit = 12;
++ ac_ptr = kmalloc_node(memsize, GFP_KERNEL, node);
++ if (ac_ptr) {
++ for_each_node(i) {
++ if (i == node || !node_online(i)) {
++ ac_ptr[i] = NULL;
++ continue;
++ }
++ ac_ptr[i] = alloc_arraycache(node, limit, 0xbaadf00d);
++ if (!ac_ptr[i]) {
++ for (i--; i >= 0; i--)
++ kfree(ac_ptr[i]);
++ kfree(ac_ptr);
++ return NULL;
++ }
++ }
++ }
++ return ac_ptr;
++}
++
++static void free_alien_cache(struct array_cache **ac_ptr)
++{
++ int i;
++
++ if (!ac_ptr)
++ return;
++ for_each_node(i)
++ kfree(ac_ptr[i]);
++ kfree(ac_ptr);
++}
++
++static void __drain_alien_cache(struct kmem_cache *cachep,
++ struct array_cache *ac, int node)
++{
++ struct kmem_list3 *rl3 = cachep->nodelists[node];
++
++ if (ac->avail) {
++ spin_lock(&rl3->list_lock);
++ /*
++ * Stuff objects into the remote nodes shared array first.
++ * That way we could avoid the overhead of putting the objects
++ * into the free lists and getting them back later.
++ */
++ if (rl3->shared)
++ transfer_objects(rl3->shared, ac, ac->limit);
++
++ free_block(cachep, ac->entry, ac->avail, node);
++ ac->avail = 0;
++ spin_unlock(&rl3->list_lock);
++ }
++}
++
++/*
++ * Called from cache_reap() to regularly drain alien caches round robin.
++ */
++static void reap_alien(struct kmem_cache *cachep, struct kmem_list3 *l3)
++{
++ int node = __get_cpu_var(reap_node);
++
++ if (l3->alien) {
++ struct array_cache *ac = l3->alien[node];
++
++ if (ac && ac->avail && spin_trylock_irq(&ac->lock)) {
++ __drain_alien_cache(cachep, ac, node);
++ spin_unlock_irq(&ac->lock);
++ }
++ }
++}
++
++static void drain_alien_cache(struct kmem_cache *cachep,
++ struct array_cache **alien)
++{
++ int i = 0;
++ struct array_cache *ac;
++ unsigned long flags;
++
++ for_each_online_node(i) {
++ ac = alien[i];
++ if (ac) {
++ spin_lock_irqsave(&ac->lock, flags);
++ __drain_alien_cache(cachep, ac, i);
++ spin_unlock_irqrestore(&ac->lock, flags);
++ }
++ }
++}
++
++static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
++{
++ struct slab *slabp = virt_to_slab(objp);
++ int nodeid = slabp->nodeid;
++ struct kmem_list3 *l3;
++ struct array_cache *alien = NULL;
++ int node;
++
++ node = numa_node_id();
++
++ /*
++ * Make sure we are not freeing a object from another node to the array
++ * cache on this cpu.
++ */
++ if (likely(slabp->nodeid == node))
++ return 0;
++
++ l3 = cachep->nodelists[node];
++ STATS_INC_NODEFREES(cachep);
++ if (l3->alien && l3->alien[nodeid]) {
++ alien = l3->alien[nodeid];
++ spin_lock(&alien->lock);
++ if (unlikely(alien->avail == alien->limit)) {
++ STATS_INC_ACOVERFLOW(cachep);
++ __drain_alien_cache(cachep, alien, nodeid);
++ }
++ alien->entry[alien->avail++] = objp;
++ spin_unlock(&alien->lock);
++ } else {
++ spin_lock(&(cachep->nodelists[nodeid])->list_lock);
++ free_block(cachep, &objp, 1, nodeid);
++ spin_unlock(&(cachep->nodelists[nodeid])->list_lock);
++ }
++ return 1;
++}
++#endif
++
++static void __cpuinit cpuup_canceled(long cpu)
++{
++ struct kmem_cache *cachep;
++ struct kmem_list3 *l3 = NULL;
++ int node = cpu_to_node(cpu);
++ node_to_cpumask_ptr(mask, node);
++
++ list_for_each_entry(cachep, &cache_chain, next) {
++ struct array_cache *nc;
++ struct array_cache *shared;
++ struct array_cache **alien;
++
++ /* cpu is dead; no one can alloc from it. */
++ nc = cachep->array[cpu];
++ cachep->array[cpu] = NULL;
++ l3 = cachep->nodelists[node];
++
++ if (!l3)
++ goto free_array_cache;
++
++ spin_lock_irq(&l3->list_lock);
++
++ /* Free limit for this kmem_list3 */
++ l3->free_limit -= cachep->batchcount;
++ if (nc)
++ free_block(cachep, nc->entry, nc->avail, node);
++
++ if (!cpus_empty(*mask)) {
++ spin_unlock_irq(&l3->list_lock);
++ goto free_array_cache;
++ }
++
++ shared = l3->shared;
++ if (shared) {
++ free_block(cachep, shared->entry,
++ shared->avail, node);
++ l3->shared = NULL;
++ }
++
++ alien = l3->alien;
++ l3->alien = NULL;
++
++ spin_unlock_irq(&l3->list_lock);
++
++ kfree(shared);
++ if (alien) {
++ drain_alien_cache(cachep, alien);
++ free_alien_cache(alien);
++ }
++free_array_cache:
++ kfree(nc);
++ }
++ /*
++ * In the previous loop, all the objects were freed to
++ * the respective cache's slabs, now we can go ahead and
++ * shrink each nodelist to its limit.
++ */
++ list_for_each_entry(cachep, &cache_chain, next) {
++ l3 = cachep->nodelists[node];
++ if (!l3)
++ continue;
++ drain_freelist(cachep, l3, l3->free_objects);
++ }
++}
++
++static int __cpuinit cpuup_prepare(long cpu)
++{
++ struct kmem_cache *cachep;
++ struct kmem_list3 *l3 = NULL;
++ int node = cpu_to_node(cpu);
++ const int memsize = sizeof(struct kmem_list3);
++
++ /*
++ * We need to do this right in the beginning since
++ * alloc_arraycache's are going to use this list.
++ * kmalloc_node allows us to add the slab to the right
++ * kmem_list3 and not this cpu's kmem_list3
++ */
++
++ list_for_each_entry(cachep, &cache_chain, next) {
++ /*
++ * Set up the size64 kmemlist for cpu before we can
++ * begin anything. Make sure some other cpu on this
++ * node has not already allocated this
++ */
++ if (!cachep->nodelists[node]) {
++ l3 = kmalloc_node(memsize, GFP_KERNEL, node);
++ if (!l3)
++ goto bad;
++ kmem_list3_init(l3);
++ l3->next_reap = jiffies + REAPTIMEOUT_LIST3 +
++ ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
++
++ /*
++ * The l3s don't come and go as CPUs come and
++ * go. cache_chain_mutex is sufficient
++ * protection here.
++ */
++ cachep->nodelists[node] = l3;
++ }
++
++ spin_lock_irq(&cachep->nodelists[node]->list_lock);
++ cachep->nodelists[node]->free_limit =
++ (1 + nr_cpus_node(node)) *
++ cachep->batchcount + cachep->num;
++ spin_unlock_irq(&cachep->nodelists[node]->list_lock);
++ }
++
++ /*
++ * Now we can go ahead with allocating the shared arrays and
++ * array caches
++ */
++ list_for_each_entry(cachep, &cache_chain, next) {
++ struct array_cache *nc;
++ struct array_cache *shared = NULL;
++ struct array_cache **alien = NULL;
++
++ nc = alloc_arraycache(node, cachep->limit,
++ cachep->batchcount);
++ if (!nc)
++ goto bad;
++ if (cachep->shared) {
++ shared = alloc_arraycache(node,
++ cachep->shared * cachep->batchcount,
++ 0xbaadf00d);
++ if (!shared) {
++ kfree(nc);
++ goto bad;
++ }
++ }
++ if (use_alien_caches) {
++ alien = alloc_alien_cache(node, cachep->limit);
++ if (!alien) {
++ kfree(shared);
++ kfree(nc);
++ goto bad;
++ }
++ }
++ cachep->array[cpu] = nc;
++ l3 = cachep->nodelists[node];
++ BUG_ON(!l3);
++
++ spin_lock_irq(&l3->list_lock);
++ if (!l3->shared) {
++ /*
++ * We are serialised from CPU_DEAD or
++ * CPU_UP_CANCELLED by the cpucontrol lock
++ */
++ l3->shared = shared;
++ shared = NULL;
++ }
++#ifdef CONFIG_NUMA
++ if (!l3->alien) {
++ l3->alien = alien;
++ alien = NULL;
++ }
++#endif
++ spin_unlock_irq(&l3->list_lock);
++ kfree(shared);
++ free_alien_cache(alien);
++ }
++ return 0;
++bad:
++ cpuup_canceled(cpu);
++ return -ENOMEM;
++}
++
++static int __cpuinit cpuup_callback(struct notifier_block *nfb,
++ unsigned long action, void *hcpu)
++{
++ long cpu = (long)hcpu;
++ int err = 0;
++
++ switch (action) {
++ case CPU_UP_PREPARE:
++ case CPU_UP_PREPARE_FROZEN:
++ mutex_lock(&cache_chain_mutex);
++ err = cpuup_prepare(cpu);
++ mutex_unlock(&cache_chain_mutex);
++ break;
++ case CPU_ONLINE:
++ case CPU_ONLINE_FROZEN:
++ start_cpu_timer(cpu);
++ break;
++#ifdef CONFIG_HOTPLUG_CPU
++ case CPU_DOWN_PREPARE:
++ case CPU_DOWN_PREPARE_FROZEN:
++ /*
++ * Shutdown cache reaper. Note that the cache_chain_mutex is
++ * held so that if cache_reap() is invoked it cannot do
++ * anything expensive but will only modify reap_work
++ * and reschedule the timer.
++ */
++ cancel_rearming_delayed_work(&per_cpu(reap_work, cpu));
++ /* Now the cache_reaper is guaranteed to be not running. */
++ per_cpu(reap_work, cpu).work.func = NULL;
++ break;
++ case CPU_DOWN_FAILED:
++ case CPU_DOWN_FAILED_FROZEN:
++ start_cpu_timer(cpu);
++ break;
++ case CPU_DEAD:
++ case CPU_DEAD_FROZEN:
++ /*
++ * Even if all the cpus of a node are down, we don't free the
++ * kmem_list3 of any cache. This to avoid a race between
++ * cpu_down, and a kmalloc allocation from another cpu for
++ * memory from the node of the cpu going down. The list3
++ * structure is usually allocated from kmem_cache_create() and
++ * gets destroyed at kmem_cache_destroy().
++ */
++ /* fall through */
++#endif
++ case CPU_UP_CANCELED:
++ case CPU_UP_CANCELED_FROZEN:
++ mutex_lock(&cache_chain_mutex);
++ cpuup_canceled(cpu);
++ mutex_unlock(&cache_chain_mutex);
++ break;
++ }
++ return err ? NOTIFY_BAD : NOTIFY_OK;
++}
++
++static struct notifier_block __cpuinitdata cpucache_notifier = {
++ &cpuup_callback, NULL, 0
++};
++
++/*
++ * swap the static kmem_list3 with kmalloced memory
++ */
++static void init_list(struct kmem_cache *cachep, struct kmem_list3 *list,
++ int nodeid)
++{
++ struct kmem_list3 *ptr;
++
++ ptr = kmalloc_node(sizeof(struct kmem_list3), GFP_KERNEL, nodeid);
++ BUG_ON(!ptr);
++
++ local_irq_disable();
++ memcpy(ptr, list, sizeof(struct kmem_list3));
++ /*
++ * Do not assume that spinlocks can be initialized via memcpy:
++ */
++ spin_lock_init(&ptr->list_lock);
++
++ MAKE_ALL_LISTS(cachep, ptr, nodeid);
++ cachep->nodelists[nodeid] = ptr;
++ local_irq_enable();
++}
++
++/*
++ * For setting up all the kmem_list3s for cache whose buffer_size is same as
++ * size of kmem_list3.
++ */
++static void __init set_up_list3s(struct kmem_cache *cachep, int index)
++{
++ int node;
++
++ for_each_online_node(node) {
++ cachep->nodelists[node] = &initkmem_list3[index + node];
++ cachep->nodelists[node]->next_reap = jiffies +
++ REAPTIMEOUT_LIST3 +
++ ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
++ }
++}
++
++/*
++ * Initialisation. Called after the page allocator have been initialised and
++ * before smp_init().
++ */
++void __init kmem_cache_init(void)
++{
++ size_t left_over;
++ struct cache_sizes *sizes;
++ struct cache_names *names;
++ int i;
++ int order;
++ int node;
++
++ if (num_possible_nodes() == 1) {
++ use_alien_caches = 0;
++ numa_platform = 0;
++ }
++
++ for (i = 0; i < NUM_INIT_LISTS; i++) {
++ kmem_list3_init(&initkmem_list3[i]);
++ if (i < MAX_NUMNODES)
++ cache_cache.nodelists[i] = NULL;
++ }
++ set_up_list3s(&cache_cache, CACHE_CACHE);
++
++ /*
++ * Fragmentation resistance on low memory - only use bigger
++ * page orders on machines with more than 32MB of memory.
++ */
++ if (num_physpages > (32 << 20) >> PAGE_SHIFT)
++ slab_break_gfp_order = BREAK_GFP_ORDER_HI;
++
++ /* Bootstrap is tricky, because several objects are allocated
++ * from caches that do not exist yet:
++ * 1) initialize the cache_cache cache: it contains the struct
++ * kmem_cache structures of all caches, except cache_cache itself:
++ * cache_cache is statically allocated.
++ * Initially an __init data area is used for the head array and the
++ * kmem_list3 structures, it's replaced with a kmalloc allocated
++ * array at the end of the bootstrap.
++ * 2) Create the first kmalloc cache.
++ * The struct kmem_cache for the new cache is allocated normally.
++ * An __init data area is used for the head array.
++ * 3) Create the remaining kmalloc caches, with minimally sized
++ * head arrays.
++ * 4) Replace the __init data head arrays for cache_cache and the first
++ * kmalloc cache with kmalloc allocated arrays.
++ * 5) Replace the __init data for kmem_list3 for cache_cache and
++ * the other caches with kmalloc allocated memory.
++ * 6) Resize the head arrays of the kmalloc caches to their final sizes.
++ */
++
++ node = numa_node_id();
++
++ /* 1) create the cache_cache */
++ INIT_LIST_HEAD(&cache_chain);
++ list_add(&cache_cache.next, &cache_chain);
++ cache_cache.colour_off = cache_line_size();
++ cache_cache.array[smp_processor_id()] = &initarray_cache.cache;
++ cache_cache.nodelists[node] = &initkmem_list3[CACHE_CACHE + node];
++
++ /*
++ * struct kmem_cache size depends on nr_node_ids, which
++ * can be less than MAX_NUMNODES.
++ */
++ cache_cache.buffer_size = offsetof(struct kmem_cache, nodelists) +
++ nr_node_ids * sizeof(struct kmem_list3 *);
++#if DEBUG
++ cache_cache.obj_size = cache_cache.buffer_size;
++#endif
++ cache_cache.buffer_size = ALIGN(cache_cache.buffer_size,
++ cache_line_size());
++ cache_cache.reciprocal_buffer_size =
++ reciprocal_value(cache_cache.buffer_size);
++
++ for (order = 0; order < MAX_ORDER; order++) {
++ cache_estimate(order, cache_cache.buffer_size,
++ cache_line_size(), 0, &left_over, &cache_cache.num);
++ if (cache_cache.num)
++ break;
++ }
++ BUG_ON(!cache_cache.num);
++ cache_cache.gfporder = order;
++ cache_cache.colour = left_over / cache_cache.colour_off;
++ cache_cache.slab_size = ALIGN(cache_cache.num * sizeof(kmem_bufctl_t) +
++ sizeof(struct slab), cache_line_size());
++
++ /* 2+3) create the kmalloc caches */
++ sizes = malloc_sizes;
++ names = cache_names;
++
++ /*
++ * Initialize the caches that provide memory for the array cache and the
++ * kmem_list3 structures first. Without this, further allocations will
++ * bug.
++ */
++
++ sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
++ sizes[INDEX_AC].cs_size,
++ ARCH_KMALLOC_MINALIGN,
++ ARCH_KMALLOC_FLAGS|SLAB_PANIC,
++ NULL);
++
++ if (INDEX_AC != INDEX_L3) {
++ sizes[INDEX_L3].cs_cachep =
++ kmem_cache_create(names[INDEX_L3].name,
++ sizes[INDEX_L3].cs_size,
++ ARCH_KMALLOC_MINALIGN,
++ ARCH_KMALLOC_FLAGS|SLAB_PANIC,
++ NULL);
++ }
++
++ slab_early_init = 0;
++
++ while (sizes->cs_size != ULONG_MAX) {
++ /*
++ * For performance, all the general caches are L1 aligned.
++ * This should be particularly beneficial on SMP boxes, as it
++ * eliminates "false sharing".
++ * Note: for systems short on memory, removing the alignment will
++ * allow tighter packing of the smaller caches.
++ */
++ if (!sizes->cs_cachep) {
++ sizes->cs_cachep = kmem_cache_create(names->name,
++ sizes->cs_size,
++ ARCH_KMALLOC_MINALIGN,
++ ARCH_KMALLOC_FLAGS|SLAB_PANIC,
++ NULL);
++ }
++#ifdef CONFIG_ZONE_DMA
++ sizes->cs_dmacachep = kmem_cache_create(
++ names->name_dma,
++ sizes->cs_size,
++ ARCH_KMALLOC_MINALIGN,
++ ARCH_KMALLOC_FLAGS|SLAB_CACHE_DMA|
++ SLAB_PANIC,
++ NULL);
++#endif
++ sizes++;
++ names++;
++ }
++ /* 4) Replace the bootstrap head arrays */
++ {
++ struct array_cache *ptr;
++
++ ptr = kmalloc(sizeof(struct arraycache_init), GFP_KERNEL);
++
++ local_irq_disable();
++ BUG_ON(cpu_cache_get(&cache_cache) != &initarray_cache.cache);
++ memcpy(ptr, cpu_cache_get(&cache_cache),
++ sizeof(struct arraycache_init));
++ /*
++ * Do not assume that spinlocks can be initialized via memcpy:
++ */
++ spin_lock_init(&ptr->lock);
++
++ cache_cache.array[smp_processor_id()] = ptr;
++ local_irq_enable();
++
++ ptr = kmalloc(sizeof(struct arraycache_init), GFP_KERNEL);
++
++ local_irq_disable();
++ BUG_ON(cpu_cache_get(malloc_sizes[INDEX_AC].cs_cachep)
++ != &initarray_generic.cache);
++ memcpy(ptr, cpu_cache_get(malloc_sizes[INDEX_AC].cs_cachep),
++ sizeof(struct arraycache_init));
++ /*
++ * Do not assume that spinlocks can be initialized via memcpy:
++ */
++ spin_lock_init(&ptr->lock);
++
++ malloc_sizes[INDEX_AC].cs_cachep->array[smp_processor_id()] =
++ ptr;
++ local_irq_enable();
++ }
++ /* 5) Replace the bootstrap kmem_list3's */
++ {
++ int nid;
++
++ for_each_online_node(nid) {
++ init_list(&cache_cache, &initkmem_list3[CACHE_CACHE + nid], nid);
++
++ init_list(malloc_sizes[INDEX_AC].cs_cachep,
++ &initkmem_list3[SIZE_AC + nid], nid);
++
++ if (INDEX_AC != INDEX_L3) {
++ init_list(malloc_sizes[INDEX_L3].cs_cachep,
++ &initkmem_list3[SIZE_L3 + nid], nid);
++ }
++ }
++ }
++
++ /* 6) resize the head arrays to their final sizes */
++ {
++ struct kmem_cache *cachep;
++ mutex_lock(&cache_chain_mutex);
++ list_for_each_entry(cachep, &cache_chain, next)
++ if (enable_cpucache(cachep))
++ BUG();
++ mutex_unlock(&cache_chain_mutex);
++ }
++
++ /* Annotate slab for lockdep -- annotate the malloc caches */
++ init_lock_keys();
++
++
++ /* Done! */
++ g_cpucache_up = FULL;
++
++ /*
++ * Register a cpu startup notifier callback that initializes
++ * cpu_cache_get for all new cpus
++ */
++ register_cpu_notifier(&cpucache_notifier);
++
++ /*
++ * The reap timers are started later, with a module init call: That part
++ * of the kernel is not yet operational.
++ */
++}
++
++static int __init cpucache_init(void)
++{
++ int cpu;
++
++ /*
++ * Register the timers that return unneeded pages to the page allocator
++ */
++ for_each_online_cpu(cpu)
++ start_cpu_timer(cpu);
++ return 0;
++}
++__initcall(cpucache_init);
++
++/*
++ * Interface to system's page allocator. No need to hold the cache-lock.
++ *
++ * If we requested dmaable memory, we will get it. Even if we
++ * did not request dmaable memory, we might get it, but that
++ * would be relatively rare and ignorable.
++ */
++static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid)
++{
++ struct page *page;
++ int nr_pages;
++ int i;
++
++#ifndef CONFIG_MMU
++ /*
++ * Nommu uses slabs for process anonymous memory allocations, and thus
++ * requires __GFP_COMP to properly refcount higher order allocations
++ */
++ flags |= __GFP_COMP;
++#endif
++
++ flags |= cachep->gfpflags;
++ if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
++ flags |= __GFP_RECLAIMABLE;
++
++ page = alloc_pages_node(nodeid, flags, cachep->gfporder);
++ if (!page)
++ return NULL;
++
++ nr_pages = (1 << cachep->gfporder);
++ if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
++ add_zone_page_state(page_zone(page),
++ NR_SLAB_RECLAIMABLE, nr_pages);
++ else
++ add_zone_page_state(page_zone(page),
++ NR_SLAB_UNRECLAIMABLE, nr_pages);
++ for (i = 0; i < nr_pages; i++)
++ __SetPageSlab(page + i);
++ return page_address(page);
++}
++
++/*
++ * Interface to system's page release.
++ */
++static void kmem_freepages(struct kmem_cache *cachep, void *addr)
++{
++ unsigned long i = (1 << cachep->gfporder);
++ struct page *page = virt_to_page(addr);
++ const unsigned long nr_freed = i;
++
++ if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
++ sub_zone_page_state(page_zone(page),
++ NR_SLAB_RECLAIMABLE, nr_freed);
++ else
++ sub_zone_page_state(page_zone(page),
++ NR_SLAB_UNRECLAIMABLE, nr_freed);
++ while (i--) {
++ BUG_ON(!PageSlab(page));
++ __ClearPageSlab(page);
++ page++;
++ }
++ if (current->reclaim_state)
++ current->reclaim_state->reclaimed_slab += nr_freed;
++ free_pages((unsigned long)addr, cachep->gfporder);
++}
++
++static void kmem_rcu_free(struct rcu_head *head)
++{
++ struct slab_rcu *slab_rcu = (struct slab_rcu *)head;
++ struct kmem_cache *cachep = slab_rcu->cachep;
++
++ kmem_freepages(cachep, slab_rcu->addr);
++ if (OFF_SLAB(cachep))
++ kmem_cache_free(cachep->slabp_cache, slab_rcu);
++}
++
++#if DEBUG
++
++#ifdef CONFIG_DEBUG_PAGEALLOC
++static void store_stackinfo(struct kmem_cache *cachep, unsigned long *addr,
++ unsigned long caller)
++{
++ int size = obj_size(cachep);
++
++ addr = (unsigned long *)&((char *)addr)[obj_offset(cachep)];
++
++ if (size < 5 * sizeof(unsigned long))
++ return;
++
++ *addr++ = 0x12345678;
++ *addr++ = caller;
++ *addr++ = smp_processor_id();
++ size -= 3 * sizeof(unsigned long);
++ {
++ unsigned long *sptr = &caller;
++ unsigned long svalue;
++
++ while (!kstack_end(sptr)) {
++ svalue = *sptr++;
++ if (kernel_text_address(svalue)) {
++ *addr++ = svalue;
++ size -= sizeof(unsigned long);
++ if (size <= sizeof(unsigned long))
++ break;
++ }
++ }
++
++ }
++ *addr++ = 0x87654321;
++}
++#endif
++
++static void poison_obj(struct kmem_cache *cachep, void *addr, unsigned char val)
++{
++ int size = obj_size(cachep);
++ addr = &((char *)addr)[obj_offset(cachep)];
++
++ memset(addr, val, size);
++ *(unsigned char *)(addr + size - 1) = POISON_END;
++}
++
++static void dump_line(char *data, int offset, int limit)
++{
++ int i;
++ unsigned char error = 0;
++ int bad_count = 0;
++
++ printk(KERN_ERR "%03x:", offset);
++ for (i = 0; i < limit; i++) {
++ if (data[offset + i] != POISON_FREE) {
++ error = data[offset + i];
++ bad_count++;
++ }
++ printk(" %02x", (unsigned char)data[offset + i]);
++ }
++ printk("\n");
++
++ if (bad_count == 1) {
++ error ^= POISON_FREE;
++ if (!(error & (error - 1))) {
++ printk(KERN_ERR "Single bit error detected. Probably "
++ "bad RAM.\n");
++#ifdef CONFIG_X86
++ printk(KERN_ERR "Run memtest86+ or a similar memory "
++ "test tool.\n");
++#else
++ printk(KERN_ERR "Run a memory test tool.\n");
++#endif
++ }
++ }
++}
++#endif
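++
++/*
++ * Sketch (illustrative, simplified): the single-bit test above XORs the
++ * bad byte with POISON_FREE and applies the usual power-of-two check,
++ *
++ *     error ^= POISON_FREE;
++ *     single_bit = error && !(error & (error - 1));
++ *
++ * since "x & (x - 1)" clears the lowest set bit, the expression is true
++ * exactly when a single bit differs from the poison pattern, which
++ * points at flaky RAM rather than a software overrun.
++ */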
++
++#if DEBUG
++
++static void print_objinfo(struct kmem_cache *cachep, void *objp, int lines)
++{
++ int i, size;
++ char *realobj;
++
++ if (cachep->flags & SLAB_RED_ZONE) {
++ printk(KERN_ERR "Redzone: 0x%llx/0x%llx.\n",
++ *dbg_redzone1(cachep, objp),
++ *dbg_redzone2(cachep, objp));
++ }
++
++ if (cachep->flags & SLAB_STORE_USER) {
++ printk(KERN_ERR "Last user: [<%p>]",
++ *dbg_userword(cachep, objp));
++ print_symbol("(%s)",
++ (unsigned long)*dbg_userword(cachep, objp));
++ printk("\n");
++ }
++ realobj = (char *)objp + obj_offset(cachep);
++ size = obj_size(cachep);
++ for (i = 0; i < size && lines; i += 16, lines--) {
++ int limit;
++ limit = 16;
++ if (i + limit > size)
++ limit = size - i;
++ dump_line(realobj, i, limit);
++ }
++}
++
++static void check_poison_obj(struct kmem_cache *cachep, void *objp)
++{
++ char *realobj;
++ int size, i;
++ int lines = 0;
++
++ realobj = (char *)objp + obj_offset(cachep);
++ size = obj_size(cachep);
++
++ for (i = 0; i < size; i++) {
++ char exp = POISON_FREE;
++ if (i == size - 1)
++ exp = POISON_END;
++ if (realobj[i] != exp) {
++ int limit;
++ /* Mismatch ! */
++ /* Print header */
++ if (lines == 0) {
++ printk(KERN_ERR
++ "Slab corruption: %s start=%p, len=%d\n",
++ cachep->name, realobj, size);
++ print_objinfo(cachep, objp, 0);
++ }
++ /* Hexdump the affected line */
++ i = (i / 16) * 16;
++ limit = 16;
++ if (i + limit > size)
++ limit = size - i;
++ dump_line(realobj, i, limit);
++ i += 16;
++ lines++;
++ /* Limit to 5 lines */
++ if (lines > 5)
++ break;
++ }
++ }
++ if (lines != 0) {
++ /* Print some data about the neighboring objects, if they
++ * exist:
++ */
++ struct slab *slabp = virt_to_slab(objp);
++ unsigned int objnr;
++
++ objnr = obj_to_index(cachep, slabp, objp);
++ if (objnr) {
++ objp = index_to_obj(cachep, slabp, objnr - 1);
++ realobj = (char *)objp + obj_offset(cachep);
++ printk(KERN_ERR "Prev obj: start=%p, len=%d\n",
++ realobj, size);
++ print_objinfo(cachep, objp, 2);
++ }
++ if (objnr + 1 < cachep->num) {
++ objp = index_to_obj(cachep, slabp, objnr + 1);
++ realobj = (char *)objp + obj_offset(cachep);
++ printk(KERN_ERR "Next obj: start=%p, len=%d\n",
++ realobj, size);
++ print_objinfo(cachep, objp, 2);
++ }
++ }
++}
++#endif
++
++#if DEBUG
++static void slab_destroy_debugcheck(struct kmem_cache *cachep, struct slab *slabp)
++{
++ int i;
++ for (i = 0; i < cachep->num; i++) {
++ void *objp = index_to_obj(cachep, slabp, i);
++
++ if (cachep->flags & SLAB_POISON) {
++#ifdef CONFIG_DEBUG_PAGEALLOC
++ if (cachep->buffer_size % PAGE_SIZE == 0 &&
++ OFF_SLAB(cachep))
++ kernel_map_pages(virt_to_page(objp),
++ cachep->buffer_size / PAGE_SIZE, 1);
++ else
++ check_poison_obj(cachep, objp);
++#else
++ check_poison_obj(cachep, objp);
++#endif
++ }
++ if (cachep->flags & SLAB_RED_ZONE) {
++ if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
++ slab_error(cachep, "start of a freed object "
++ "was overwritten");
++ if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
++ slab_error(cachep, "end of a freed object "
++ "was overwritten");
++ }
++ }
++}
++#else
++static void slab_destroy_debugcheck(struct kmem_cache *cachep, struct slab *slabp)
++{
++}
++#endif
++
++/**
++ * slab_destroy - destroy and release all objects in a slab
++ * @cachep: cache pointer being destroyed
++ * @slabp: slab pointer being destroyed
++ *
++ * Destroy all the objs in a slab, and release the mem back to the system.
++ * Before calling the slab must have been unlinked from the cache. The
++ * cache-lock is not held/needed.
++ */
++static void slab_destroy(struct kmem_cache *cachep, struct slab *slabp)
++{
++ void *addr = slabp->s_mem - slabp->colouroff;
++
++ slab_destroy_debugcheck(cachep, slabp);
++ if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU)) {
++ struct slab_rcu *slab_rcu;
++
++ slab_rcu = (struct slab_rcu *)slabp;
++ slab_rcu->cachep = cachep;
++ slab_rcu->addr = addr;
++ call_rcu(&slab_rcu->head, kmem_rcu_free);
++ } else {
++ kmem_freepages(cachep, addr);
++ if (OFF_SLAB(cachep))
++ kmem_cache_free(cachep->slabp_cache, slabp);
++ }
++}
++
++static void __kmem_cache_destroy(struct kmem_cache *cachep)
++{
++ int i;
++ struct kmem_list3 *l3;
++
++ for_each_online_cpu(i)
++ kfree(cachep->array[i]);
++
++ /* NUMA: free the list3 structures */
++ for_each_online_node(i) {
++ l3 = cachep->nodelists[i];
++ if (l3) {
++ kfree(l3->shared);
++ free_alien_cache(l3->alien);
++ kfree(l3);
++ }
++ }
++ kmem_cache_free(&cache_cache, cachep);
++}
++
++
++/**
++ * calculate_slab_order - calculate size (page order) of slabs
++ * @cachep: pointer to the cache that is being created
++ * @size: size of objects to be created in this cache.
++ * @align: required alignment for the objects.
++ * @flags: slab allocation flags
++ *
++ * Also calculates the number of objects per slab.
++ *
++ * This could be made much more intelligent. For now, try to avoid using
++ * high order pages for slabs. When the gfp() functions are more friendly
++ * towards high-order requests, this should be changed.
++ */
++static size_t calculate_slab_order(struct kmem_cache *cachep,
++ size_t size, size_t align, unsigned long flags)
++{
++ unsigned long offslab_limit;
++ size_t left_over = 0;
++ int gfporder;
++
++ for (gfporder = 0; gfporder <= KMALLOC_MAX_ORDER; gfporder++) {
++ unsigned int num;
++ size_t remainder;
++
++ cache_estimate(gfporder, size, align, flags, &remainder, &num);
++ if (!num)
++ continue;
++
++ if (flags & CFLGS_OFF_SLAB) {
++ /*
++ * Max number of objs-per-slab for caches which
++ * use off-slab slabs. Needed to avoid a possible
++ * looping condition in cache_grow().
++ */
++ offslab_limit = size - sizeof(struct slab);
++ offslab_limit /= sizeof(kmem_bufctl_t);
++
++ if (num > offslab_limit)
++ break;
++ }
++
++ /* Found something acceptable - save it away */
++ cachep->num = num;
++ cachep->gfporder = gfporder;
++ left_over = remainder;
++
++ /*
++ * A VFS-reclaimable slab tends to have most allocations
++ * as GFP_NOFS and we really don't want to have to be allocating
++ * higher-order pages when we are unable to shrink dcache.
++ */
++ if (flags & SLAB_RECLAIM_ACCOUNT)
++ break;
++
++ /*
++ * Large number of objects is good, but very large slabs are
++ * currently bad for the gfp()s.
++ */
++ if (gfporder >= slab_break_gfp_order)
++ break;
++
++ /*
++ * Acceptable internal fragmentation?
++ */
++ if (left_over * 8 <= (PAGE_SIZE << gfporder))
++ break;
++ }
++ return left_over;
++}
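++
++/*
++ * Worked example (illustrative, ignoring management and alignment
++ * overhead): "left_over * 8 <= (PAGE_SIZE << gfporder)" accepts an
++ * order once the waste is at most 1/8th of the slab.  With 4096-byte
++ * pages and 1500-byte objects, order 0 fits 2 objects and wastes 1096
++ * bytes (1096 * 8 > 4096, rejected), while order 1 fits 5 objects and
++ * wastes 692 bytes (692 * 8 = 5536 <= 8192, accepted).
++ */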
++
++static int __init_refok setup_cpu_cache(struct kmem_cache *cachep)
++{
++ if (g_cpucache_up == FULL)
++ return enable_cpucache(cachep);
++
++ if (g_cpucache_up == NONE) {
++ /*
++ * Note: the first kmem_cache_create must create the cache
++ * that's used by kmalloc(24), otherwise the creation of
++ * further caches will BUG().
++ */
++ cachep->array[smp_processor_id()] = &initarray_generic.cache;
++
++ /*
++ * If the cache that's used by kmalloc(sizeof(kmem_list3)) is
++ * the first cache, then we need to set up all its list3s,
++ * otherwise the creation of further caches will BUG().
++ */
++ set_up_list3s(cachep, SIZE_AC);
++ if (INDEX_AC == INDEX_L3)
++ g_cpucache_up = PARTIAL_L3;
++ else
++ g_cpucache_up = PARTIAL_AC;
++ } else {
++ cachep->array[smp_processor_id()] =
++ kmalloc(sizeof(struct arraycache_init), GFP_KERNEL);
++
++ if (g_cpucache_up == PARTIAL_AC) {
++ set_up_list3s(cachep, SIZE_L3);
++ g_cpucache_up = PARTIAL_L3;
++ } else {
++ int node;
++ for_each_online_node(node) {
++ cachep->nodelists[node] =
++ kmalloc_node(sizeof(struct kmem_list3),
++ GFP_KERNEL, node);
++ BUG_ON(!cachep->nodelists[node]);
++ kmem_list3_init(cachep->nodelists[node]);
++ }
++ }
++ }
++ cachep->nodelists[numa_node_id()]->next_reap =
++ jiffies + REAPTIMEOUT_LIST3 +
++ ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
++
++ cpu_cache_get(cachep)->avail = 0;
++ cpu_cache_get(cachep)->limit = BOOT_CPUCACHE_ENTRIES;
++ cpu_cache_get(cachep)->batchcount = 1;
++ cpu_cache_get(cachep)->touched = 0;
++ cachep->batchcount = 1;
++ cachep->limit = BOOT_CPUCACHE_ENTRIES;
++ return 0;
++}
++
++/**
++ * kmem_cache_create - Create a cache.
++ * @name: A string which is used in /proc/slabinfo to identify this cache.
++ * @size: The size of objects to be created in this cache.
++ * @align: The required alignment for the objects.
++ * @flags: SLAB flags
++ * @ctor: A constructor for the objects.
++ *
++ * Returns a ptr to the cache on success, NULL on failure.
++ * Cannot be called within an interrupt, but can be interrupted.
++ * The @ctor is run when new pages are allocated by the cache.
++ *
++ * @name must be valid until the cache is destroyed. This implies that
++ * the module calling this has to destroy the cache before getting unloaded.
++ *
++ * The flags are
++ *
++ * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
++ * to catch references to uninitialised memory.
++ *
++ * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check
++ * for buffer overruns.
++ *
++ * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
++ * cacheline. This can be beneficial if you're counting cycles as closely
++ * as davem.
++ */
++struct kmem_cache *
++kmem_cache_create (const char *name, size_t size, size_t align,
++ unsigned long flags, void (*ctor)(void *))
++{
++ size_t left_over, slab_size, ralign;
++ struct kmem_cache *cachep = NULL, *pc;
++
++ /*
++ * Sanity checks... these are all serious usage bugs.
++ */
++ if (!name || in_interrupt() || (size < BYTES_PER_WORD) ||
++ size > KMALLOC_MAX_SIZE) {
++ printk(KERN_ERR "%s: Early error in slab %s\n", __func__,
++ name);
++ BUG();
++ }
++
++ /*
++ * We use cache_chain_mutex to ensure a consistent view of
++ * cpu_online_map as well. Please see cpuup_callback
++ */
++ get_online_cpus();
++ mutex_lock(&cache_chain_mutex);
++
++ list_for_each_entry(pc, &cache_chain, next) {
++ char tmp;
++ int res;
++
++ /*
++ * This happens when the module gets unloaded and doesn't
++ * destroy its slab cache and no-one else reuses the vmalloc
++ * area of the module. Print a warning.
++ */
++ res = probe_kernel_address(pc->name, tmp);
++ if (res) {
++ printk(KERN_ERR
++ "SLAB: cache with size %d has lost its name\n",
++ pc->buffer_size);
++ continue;
++ }
++
++ if (!strcmp(pc->name, name)) {
++ printk(KERN_ERR
++ "kmem_cache_create: duplicate cache %s\n", name);
++ dump_stack();
++ goto oops;
++ }
++ }
++
++#if DEBUG
++ WARN_ON(strchr(name, ' ')); /* It confuses parsers */
++#if FORCED_DEBUG
++ /*
++ * Enable redzoning and last user accounting, except for caches with
++ * large objects, if the increased size would increase the object size
++ * above the next power of two: caches with object sizes just above a
++ * power of two have a significant amount of internal fragmentation.
++ */
++ if (size < 4096 || fls(size - 1) == fls(size-1 + REDZONE_ALIGN +
++ 2 * sizeof(unsigned long long)))
++ flags |= SLAB_RED_ZONE | SLAB_STORE_USER;
++ if (!(flags & SLAB_DESTROY_BY_RCU))
++ flags |= SLAB_POISON;
++#endif
++ if (flags & SLAB_DESTROY_BY_RCU)
++ BUG_ON(flags & SLAB_POISON);
++#endif
++ /*
++ * Always checks flags, a caller might be expecting debug support which
++ * isn't available.
++ */
++ BUG_ON(flags & ~CREATE_MASK);
++
++ /*
++ * Check that size is in terms of words. This is needed to avoid
++ * unaligned accesses for some archs when redzoning is used, and makes
++ * sure any on-slab bufctl's are also correctly aligned.
++ */
++ if (size & (BYTES_PER_WORD - 1)) {
++ size += (BYTES_PER_WORD - 1);
++ size &= ~(BYTES_PER_WORD - 1);
++ }
++
++ /* calculate the final buffer alignment: */
++
++ /* 1) arch recommendation: can be overridden for debug */
++ if (flags & SLAB_HWCACHE_ALIGN) {
++ /*
++ * Default alignment: as specified by the arch code. Except if
++ * an object is really small, then squeeze multiple objects into
++ * one cacheline.
++ */
++ ralign = cache_line_size();
++ while (size <= ralign / 2)
++ ralign /= 2;
++ } else {
++ ralign = BYTES_PER_WORD;
++ }
++
++ /*
++ * Redzoning and user store require word alignment or possibly larger.
++ * Note this will be overridden by architecture or caller mandated
++ * alignment if either is greater than BYTES_PER_WORD.
++ */
++ if (flags & SLAB_STORE_USER)
++ ralign = BYTES_PER_WORD;
++
++ if (flags & SLAB_RED_ZONE) {
++ ralign = REDZONE_ALIGN;
++ /* If redzoning, ensure that the second redzone is suitably
++ * aligned, by adjusting the object size accordingly. */
++ size += REDZONE_ALIGN - 1;
++ size &= ~(REDZONE_ALIGN - 1);
++ }
++
++ /* 2) arch mandated alignment */
++ if (ralign < ARCH_SLAB_MINALIGN) {
++ ralign = ARCH_SLAB_MINALIGN;
++ }
++ /* 3) caller mandated alignment */
++ if (ralign < align) {
++ ralign = align;
++ }
++ /* disable debug if necessary */
++ if (ralign > __alignof__(unsigned long long))
++ flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
++ /*
++ * 4) Store it.
++ */
++ align = ralign;
++
++ /* Get cache's description obj. */
++ cachep = kmem_cache_zalloc(&cache_cache, GFP_KERNEL);
++ if (!cachep)
++ goto oops;
++
++#if DEBUG
++ cachep->obj_size = size;
++
++ /*
++ * Both debugging options require word-alignment which is calculated
++ * into align above.
++ */
++ if (flags & SLAB_RED_ZONE) {
++ /* add space for red zone words */
++ cachep->obj_offset += sizeof(unsigned long long);
++ size += 2 * sizeof(unsigned long long);
++ }
++ if (flags & SLAB_STORE_USER) {
++ /* user store requires one word storage behind the end of
++ * the real object. But if the second red zone needs to be
++ * aligned to 64 bits, we must allow that much space.
++ */
++ if (flags & SLAB_RED_ZONE)
++ size += REDZONE_ALIGN;
++ else
++ size += BYTES_PER_WORD;
++ }
++#if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC)
++ if (size >= malloc_sizes[INDEX_L3 + 1].cs_size
++ && cachep->obj_size > cache_line_size() && size < PAGE_SIZE) {
++ cachep->obj_offset += PAGE_SIZE - size;
++ size = PAGE_SIZE;
++ }
++#endif
++#endif
++
++ /*
++ * Determine if the slab management is 'on' or 'off' slab.
++ * (bootstrapping cannot cope with offslab caches so don't do
++ * it too early on.)
++ */
++ if ((size >= (PAGE_SIZE >> 3)) && !slab_early_init)
++ /*
++ * Size is large, assume best to place the slab management obj
++ * off-slab (should allow better packing of objs).
++ */
++ flags |= CFLGS_OFF_SLAB;
++
++ size = ALIGN(size, align);
++
++ left_over = calculate_slab_order(cachep, size, align, flags);
++
++ if (!cachep->num) {
++ printk(KERN_ERR
++ "kmem_cache_create: couldn't create cache %s.\n", name);
++ kmem_cache_free(&cache_cache, cachep);
++ cachep = NULL;
++ goto oops;
++ }
++ slab_size = ALIGN(cachep->num * sizeof(kmem_bufctl_t)
++ + sizeof(struct slab), align);
++
++ /*
++ * If the slab has been placed off-slab, and we have enough space then
++ * move it on-slab. This is at the expense of any extra colouring.
++ */
++ if (flags & CFLGS_OFF_SLAB && left_over >= slab_size) {
++ flags &= ~CFLGS_OFF_SLAB;
++ left_over -= slab_size;
++ }
++
++ if (flags & CFLGS_OFF_SLAB) {
++ /* really off slab. No need for manual alignment */
++ slab_size =
++ cachep->num * sizeof(kmem_bufctl_t) + sizeof(struct slab);
++ }
++
++ cachep->colour_off = cache_line_size();
++ /* Offset must be a multiple of the alignment. */
++ if (cachep->colour_off < align)
++ cachep->colour_off = align;
++ cachep->colour = left_over / cachep->colour_off;
++ cachep->slab_size = slab_size;
++ cachep->flags = flags;
++ cachep->gfpflags = 0;
++ if (CONFIG_ZONE_DMA_FLAG && (flags & SLAB_CACHE_DMA))
++ cachep->gfpflags |= GFP_DMA;
++ cachep->buffer_size = size;
++ cachep->reciprocal_buffer_size = reciprocal_value(size);
++
++ if (flags & CFLGS_OFF_SLAB) {
++ cachep->slabp_cache = kmem_find_general_cachep(slab_size, 0u);
++ /*
++ * This is a possibility for one of the malloc_sizes caches.
++ * But since we go off slab only for object size greater than
++ * PAGE_SIZE/8, and malloc_sizes gets created in ascending order,
++ * this should not happen at all.
++ * But leave a BUG_ON for some lucky dude.
++ */
++ BUG_ON(ZERO_OR_NULL_PTR(cachep->slabp_cache));
++ }
++ cachep->ctor = ctor;
++ cachep->name = name;
++
++ if (setup_cpu_cache(cachep)) {
++ __kmem_cache_destroy(cachep);
++ cachep = NULL;
++ goto oops;
++ }
++
++ /* cache setup completed, link it into the list */
++ list_add(&cachep->next, &cache_chain);
++oops:
++ if (!cachep && (flags & SLAB_PANIC))
++ panic("kmem_cache_create(): failed to create slab `%s'\n",
++ name);
++ mutex_unlock(&cache_chain_mutex);
++ put_online_cpus();
++ return cachep;
++}
++EXPORT_SYMBOL(kmem_cache_create);
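++
++/*
++ * Worked example (illustrative): the word-alignment step above is
++ * equivalent to
++ *
++ *     size = (size + BYTES_PER_WORD - 1) & ~(BYTES_PER_WORD - 1);
++ *
++ * so on a 64-bit machine a 13-byte object becomes 16 bytes, and a
++ * SLAB_HWCACHE_ALIGN cache then halves ralign from cache_line_size()
++ * while at least two objects still fit in one line (e.g. 64 -> 32 -> 16
++ * for that 16-byte object).
++ */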
++
++#if DEBUG
++static void check_irq_off(void)
++{
++ BUG_ON(!irqs_disabled());
++}
++
++static void check_irq_on(void)
++{
++ BUG_ON(irqs_disabled());
++}
++
++static void check_spinlock_acquired(struct kmem_cache *cachep)
++{
++#ifdef CONFIG_SMP
++ check_irq_off();
++ assert_spin_locked(&cachep->nodelists[numa_node_id()]->list_lock);
++#endif
++}
++
++static void check_spinlock_acquired_node(struct kmem_cache *cachep, int node)
++{
++#ifdef CONFIG_SMP
++ check_irq_off();
++ assert_spin_locked(&cachep->nodelists[node]->list_lock);
++#endif
++}
++
++#else
++#define check_irq_off() do { } while(0)
++#define check_irq_on() do { } while(0)
++#define check_spinlock_acquired(x) do { } while(0)
++#define check_spinlock_acquired_node(x, y) do { } while(0)
++#endif
++
++static void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3,
++ struct array_cache *ac,
++ int force, int node);
++
++static void do_drain(void *arg)
++{
++ struct kmem_cache *cachep = arg;
++ struct array_cache *ac;
++ int node = numa_node_id();
++
++ check_irq_off();
++ ac = cpu_cache_get(cachep);
++ spin_lock(&cachep->nodelists[node]->list_lock);
++ free_block(cachep, ac->entry, ac->avail, node);
++ spin_unlock(&cachep->nodelists[node]->list_lock);
++ ac->avail = 0;
++}
++
++static void drain_cpu_caches(struct kmem_cache *cachep)
++{
++ struct kmem_list3 *l3;
++ int node;
++
++ on_each_cpu(do_drain, cachep, 1);
++ check_irq_on();
++ for_each_online_node(node) {
++ l3 = cachep->nodelists[node];
++ if (l3 && l3->alien)
++ drain_alien_cache(cachep, l3->alien);
++ }
++
++ for_each_online_node(node) {
++ l3 = cachep->nodelists[node];
++ if (l3)
++ drain_array(cachep, l3, l3->shared, 1, node);
++ }
++}
++
++/*
++ * Remove slabs from the list of free slabs.
++ * Specify the number of slabs to drain in tofree.
++ *
++ * Returns the actual number of slabs released.
++ */
++static int drain_freelist(struct kmem_cache *cache,
++ struct kmem_list3 *l3, int tofree)
++{
++ struct list_head *p;
++ int nr_freed;
++ struct slab *slabp;
++
++ nr_freed = 0;
++ while (nr_freed < tofree && !list_empty(&l3->slabs_free)) {
++
++ spin_lock_irq(&l3->list_lock);
++ p = l3->slabs_free.prev;
++ if (p == &l3->slabs_free) {
++ spin_unlock_irq(&l3->list_lock);
++ goto out;
++ }
++
++ slabp = list_entry(p, struct slab, list);
++#if DEBUG
++ BUG_ON(slabp->inuse);
++#endif
++ list_del(&slabp->list);
++ /*
++ * Safe to drop the lock. The slab is no longer linked
++ * to the cache.
++ */
++ l3->free_objects -= cache->num;
++ spin_unlock_irq(&l3->list_lock);
++ slab_destroy(cache, slabp);
++ nr_freed++;
++ }
++out:
++ return nr_freed;
++}
++
++/* Called with cache_chain_mutex held to protect against cpu hotplug */
++static int __cache_shrink(struct kmem_cache *cachep)
++{
++ int ret = 0, i = 0;
++ struct kmem_list3 *l3;
++
++ drain_cpu_caches(cachep);
++
++ check_irq_on();
++ for_each_online_node(i) {
++ l3 = cachep->nodelists[i];
++ if (!l3)
++ continue;
++
++ drain_freelist(cachep, l3, l3->free_objects);
++
++ ret += !list_empty(&l3->slabs_full) ||
++ !list_empty(&l3->slabs_partial);
++ }
++ return (ret ? 1 : 0);
++}
++
++/**
++ * kmem_cache_shrink - Shrink a cache.
++ * @cachep: The cache to shrink.
++ *
++ * Releases as many slabs as possible for a cache.
++ * To help debugging, a zero exit status indicates all slabs were released.
++ */
++int kmem_cache_shrink(struct kmem_cache *cachep)
++{
++ int ret;
++ BUG_ON(!cachep || in_interrupt());
++
++ get_online_cpus();
++ mutex_lock(&cache_chain_mutex);
++ ret = __cache_shrink(cachep);
++ mutex_unlock(&cache_chain_mutex);
++ put_online_cpus();
++ return ret;
++}
++EXPORT_SYMBOL(kmem_cache_shrink);
++
++/**
++ * kmem_cache_destroy - delete a cache
++ * @cachep: the cache to destroy
++ *
++ * Remove a &struct kmem_cache object from the slab cache.
++ *
++ * It is expected this function will be called by a module when it is
++ * unloaded. This will remove the cache completely, and avoid a duplicate
++ * cache being allocated each time a module is loaded and unloaded, if the
++ * module doesn't have persistent in-kernel storage across loads and unloads.
++ *
++ * The cache must be empty before calling this function.
++ *
++ * The caller must guarantee that no one will allocate memory from the cache
++ * during the kmem_cache_destroy().
++ */
++void kmem_cache_destroy(struct kmem_cache *cachep)
++{
++ BUG_ON(!cachep || in_interrupt());
++
++ /* Find the cache in the chain of caches. */
++ get_online_cpus();
++ mutex_lock(&cache_chain_mutex);
++ /*
++ * the chain is never empty, cache_cache is never destroyed
++ */
++ list_del(&cachep->next);
++ if (__cache_shrink(cachep)) {
++ slab_error(cachep, "Can't free all objects");
++ list_add(&cachep->next, &cache_chain);
++ mutex_unlock(&cache_chain_mutex);
++ put_online_cpus();
++ return;
++ }
++
++ if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU))
++ synchronize_rcu();
++
++ __kmem_cache_destroy(cachep);
++ mutex_unlock(&cache_chain_mutex);
++ put_online_cpus();
++}
++EXPORT_SYMBOL(kmem_cache_destroy);
++
++/*
++ * Get the memory for a slab management obj.
++ * For a slab cache when the slab descriptor is off-slab, slab descriptors
++ * always come from malloc_sizes caches. The slab descriptor cannot
++ * come from the same cache which is getting created because,
++ * when we are searching for an appropriate cache for these
++ * descriptors in kmem_cache_create, we search through the malloc_sizes array.
++ * If we are creating a malloc_sizes cache here it would not be visible to
++ * kmem_find_general_cachep till the initialization is complete.
++ * Hence we cannot have slabp_cache same as the original cache.
++ * Hence slabp_cache cannot be the same cache as the one being created.
++static struct slab *alloc_slabmgmt(struct kmem_cache *cachep, void *objp,
++ int colour_off, gfp_t local_flags,
++ int nodeid)
++{
++ struct slab *slabp;
++
++ if (OFF_SLAB(cachep)) {
++ /* Slab management obj is off-slab. */
++ slabp = kmem_cache_alloc_node(cachep->slabp_cache,
++ local_flags & ~GFP_THISNODE, nodeid);
++ if (!slabp)
++ return NULL;
++ } else {
++ slabp = objp + colour_off;
++ colour_off += cachep->slab_size;
++ }
++ slabp->inuse = 0;
++ slabp->colouroff = colour_off;
++ slabp->s_mem = objp + colour_off;
++ slabp->nodeid = nodeid;
++ slabp->free = 0;
++ return slabp;
++}
++
++static inline kmem_bufctl_t *slab_bufctl(struct slab *slabp)
++{
++ return (kmem_bufctl_t *) (slabp + 1);
++}
++
++static void cache_init_objs(struct kmem_cache *cachep,
++ struct slab *slabp)
++{
++ int i;
++
++ for (i = 0; i < cachep->num; i++) {
++ void *objp = index_to_obj(cachep, slabp, i);
++#if DEBUG
++ /* need to poison the objs? */
++ if (cachep->flags & SLAB_POISON)
++ poison_obj(cachep, objp, POISON_FREE);
++ if (cachep->flags & SLAB_STORE_USER)
++ *dbg_userword(cachep, objp) = NULL;
++
++ if (cachep->flags & SLAB_RED_ZONE) {
++ *dbg_redzone1(cachep, objp) = RED_INACTIVE;
++ *dbg_redzone2(cachep, objp) = RED_INACTIVE;
++ }
++ /*
++ * Constructors are not allowed to allocate memory from the same
++ * cache which they are a constructor for. Otherwise, deadlock.
++ * They must also be threaded.
++ */
++ if (cachep->ctor && !(cachep->flags & SLAB_POISON))
++ cachep->ctor(objp + obj_offset(cachep));
++
++ if (cachep->flags & SLAB_RED_ZONE) {
++ if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
++ slab_error(cachep, "constructor overwrote the"
++ " end of an object");
++ if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
++ slab_error(cachep, "constructor overwrote the"
++ " start of an object");
++ }
++ if ((cachep->buffer_size % PAGE_SIZE) == 0 &&
++ OFF_SLAB(cachep) && cachep->flags & SLAB_POISON)
++ kernel_map_pages(virt_to_page(objp),
++ cachep->buffer_size / PAGE_SIZE, 0);
++#else
++ if (cachep->ctor)
++ cachep->ctor(objp);
++#endif
++ slab_bufctl(slabp)[i] = i + 1;
++ }
++ slab_bufctl(slabp)[i - 1] = BUFCTL_END;
++}
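++
++/*
++ * Illustrative sketch: slab_bufctl() overlays an array of object
++ * indices right behind struct slab, and the loop above chains it into
++ * a free list.  For a freshly initialised 4-object slab:
++ *
++ *     slabp->free = 0
++ *     bufctl      = { 1, 2, 3, BUFCTL_END }
++ *
++ * slab_get_obj() pops index slabp->free and follows the chain;
++ * slab_put_obj() pushes the freed index back onto the head.
++ */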
++
++static void kmem_flagcheck(struct kmem_cache *cachep, gfp_t flags)
++{
++ if (CONFIG_ZONE_DMA_FLAG) {
++ if (flags & GFP_DMA)
++ BUG_ON(!(cachep->gfpflags & GFP_DMA));
++ else
++ BUG_ON(cachep->gfpflags & GFP_DMA);
++ }
++}
++
++static void *slab_get_obj(struct kmem_cache *cachep, struct slab *slabp,
++ int nodeid)
++{
++ void *objp = index_to_obj(cachep, slabp, slabp->free);
++ kmem_bufctl_t next;
++
++ slabp->inuse++;
++ next = slab_bufctl(slabp)[slabp->free];
++#if DEBUG
++ slab_bufctl(slabp)[slabp->free] = BUFCTL_FREE;
++ WARN_ON(slabp->nodeid != nodeid);
++#endif
++ slabp->free = next;
++
++ return objp;
++}
++
++static void slab_put_obj(struct kmem_cache *cachep, struct slab *slabp,
++ void *objp, int nodeid)
++{
++ unsigned int objnr = obj_to_index(cachep, slabp, objp);
++
++#if DEBUG
++ /* Verify that the slab belongs to the intended node */
++ WARN_ON(slabp->nodeid != nodeid);
++
++ if (slab_bufctl(slabp)[objnr] + 1 <= SLAB_LIMIT + 1) {
++ printk(KERN_ERR "slab: double free detected in cache "
++ "'%s', objp %p\n", cachep->name, objp);
++ BUG();
++ }
++#endif
++ slab_bufctl(slabp)[objnr] = slabp->free;
++ slabp->free = objnr;
++ slabp->inuse--;
++}
++
++/*
++ * Map pages beginning at addr to the given cache and slab. This is required
++ * for the slab allocator to be able to lookup the cache and slab of a
++ * virtual address for kfree, ksize, kmem_ptr_validate, and slab debugging.
++ */
++static void slab_map_pages(struct kmem_cache *cache, struct slab *slab,
++ void *addr)
++{
++ int nr_pages;
++ struct page *page;
++
++ page = virt_to_page(addr);
++
++ nr_pages = 1;
++ if (likely(!PageCompound(page)))
++ nr_pages <<= cache->gfporder;
++
++ do {
++ page_set_cache(page, cache);
++ page_set_slab(page, slab);
++ page++;
++ } while (--nr_pages);
++}
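++
++/*
++ * Illustrative note: once the pages are mapped this way, the owning
++ * cache and slab of any object can be recovered from its address alone,
++ * roughly
++ *
++ *     page  = virt_to_head_page(objp);
++ *     cache = page_get_cache(page);
++ *     slab  = page_get_slab(page);
++ *
++ * which is what kfree() and the debug checks below rely on.
++ */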
++
++/*
++ * Grow (by 1) the number of slabs within a cache. This is called by
++ * kmem_cache_alloc() when there are no active objs left in a cache.
++ */
++static int cache_grow(struct kmem_cache *cachep,
++ gfp_t flags, int nodeid, void *objp)
++{
++ struct slab *slabp;
++ size_t offset;
++ gfp_t local_flags;
++ struct kmem_list3 *l3;
++
++ /*
++ * Be lazy and only check for valid flags here, keeping it out of the
++ * critical path in kmem_cache_alloc().
++ */
++ BUG_ON(flags & GFP_SLAB_BUG_MASK);
++ local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK);
++
++ /* Take the l3 list lock to change the colour_next on this node */
++ check_irq_off();
++ l3 = cachep->nodelists[nodeid];
++ spin_lock(&l3->list_lock);
++
++ /* Get colour for the slab, and calculate the next value. */
++ offset = l3->colour_next;
++ l3->colour_next++;
++ if (l3->colour_next >= cachep->colour)
++ l3->colour_next = 0;
++ spin_unlock(&l3->list_lock);
++
++ offset *= cachep->colour_off;
++
++ if (local_flags & __GFP_WAIT)
++ local_irq_enable();
++
++ /*
++ * The test for missing atomic flag is performed here, rather than
++ * the more obvious place, simply to reduce the critical path length
++ * in kmem_cache_alloc(). If a caller is seriously mis-behaving they
++ * will eventually be caught here (where it matters).
++ */
++ kmem_flagcheck(cachep, flags);
++
++ /*
++ * Get mem for the objs. Attempt to allocate a physical page from
++ * 'nodeid'.
++ */
++ if (!objp)
++ objp = kmem_getpages(cachep, local_flags, nodeid);
++ if (!objp)
++ goto failed;
++
++ /* Get slab management. */
++ slabp = alloc_slabmgmt(cachep, objp, offset,
++ local_flags & ~GFP_CONSTRAINT_MASK, nodeid);
++ if (!slabp)
++ goto opps1;
++
++ slab_map_pages(cachep, slabp, objp);
++
++ cache_init_objs(cachep, slabp);
++
++ if (local_flags & __GFP_WAIT)
++ local_irq_disable();
++ check_irq_off();
++ spin_lock(&l3->list_lock);
++
++ /* Make slab active. */
++ list_add_tail(&slabp->list, &(l3->slabs_free));
++ STATS_INC_GROWN(cachep);
++ l3->free_objects += cachep->num;
++ spin_unlock(&l3->list_lock);
++ return 1;
++opps1:
++ kmem_freepages(cachep, objp);
++failed:
++ if (local_flags & __GFP_WAIT)
++ local_irq_disable();
++ return 0;
++}
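++
++/*
++ * Worked example (illustrative): with cachep->colour = 4 and
++ * colour_off = 64, successive cache_grow() calls on a node place the
++ * first object of consecutive slabs at offsets 0, 64, 128, 192, 0, ...
++ * so objects from different slabs do not all compete for the same
++ * cache lines.
++ */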
++
++#if DEBUG
++
++/*
++ * Perform extra freeing checks:
++ * - detect bad pointers.
++ * - POISON/RED_ZONE checking
++ */
++static void kfree_debugcheck(const void *objp)
++{
++ if (!virt_addr_valid(objp)) {
++ printk(KERN_ERR "kfree_debugcheck: out of range ptr %lxh.\n",
++ (unsigned long)objp);
++ BUG();
++ }
++}
++
++static inline void verify_redzone_free(struct kmem_cache *cache, void *obj)
++{
++ unsigned long long redzone1, redzone2;
++
++ redzone1 = *dbg_redzone1(cache, obj);
++ redzone2 = *dbg_redzone2(cache, obj);
++
++ /*
++ * Redzone is ok.
++ */
++ if (redzone1 == RED_ACTIVE && redzone2 == RED_ACTIVE)
++ return;
++
++ if (redzone1 == RED_INACTIVE && redzone2 == RED_INACTIVE)
++ slab_error(cache, "double free detected");
++ else
++ slab_error(cache, "memory outside object was overwritten");
++
++ printk(KERN_ERR "%p: redzone 1:0x%llx, redzone 2:0x%llx.\n",
++ obj, redzone1, redzone2);
++}
++
++static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
++ void *caller)
++{
++ struct page *page;
++ unsigned int objnr;
++ struct slab *slabp;
++
++ BUG_ON(virt_to_cache(objp) != cachep);
++
++ objp -= obj_offset(cachep);
++ kfree_debugcheck(objp);
++ page = virt_to_head_page(objp);
++
++ slabp = page_get_slab(page);
++
++ if (cachep->flags & SLAB_RED_ZONE) {
++ verify_redzone_free(cachep, objp);
++ *dbg_redzone1(cachep, objp) = RED_INACTIVE;
++ *dbg_redzone2(cachep, objp) = RED_INACTIVE;
++ }
++ if (cachep->flags & SLAB_STORE_USER)
++ *dbg_userword(cachep, objp) = caller;
++
++ objnr = obj_to_index(cachep, slabp, objp);
++
++ BUG_ON(objnr >= cachep->num);
++ BUG_ON(objp != index_to_obj(cachep, slabp, objnr));
++
++#ifdef CONFIG_DEBUG_SLAB_LEAK
++ slab_bufctl(slabp)[objnr] = BUFCTL_FREE;
++#endif
++ if (cachep->flags & SLAB_POISON) {
++#ifdef CONFIG_DEBUG_PAGEALLOC
++ if ((cachep->buffer_size % PAGE_SIZE)==0 && OFF_SLAB(cachep)) {
++ store_stackinfo(cachep, objp, (unsigned long)caller);
++ kernel_map_pages(virt_to_page(objp),
++ cachep->buffer_size / PAGE_SIZE, 0);
++ } else {
++ poison_obj(cachep, objp, POISON_FREE);
++ }
++#else
++ poison_obj(cachep, objp, POISON_FREE);
++#endif
++ }
++ return objp;
++}
++
++static void check_slabp(struct kmem_cache *cachep, struct slab *slabp)
++{
++ kmem_bufctl_t i;
++ int entries = 0;
++
++ /* Check slab's freelist to see if this obj is there. */
++ for (i = slabp->free; i != BUFCTL_END; i = slab_bufctl(slabp)[i]) {
++ entries++;
++ if (entries > cachep->num || i >= cachep->num)
++ goto bad;
++ }
++ if (entries != cachep->num - slabp->inuse) {
++bad:
++ printk(KERN_ERR "slab: Internal list corruption detected in "
++ "cache '%s'(%d), slabp %p(%d). Hexdump:\n",
++ cachep->name, cachep->num, slabp, slabp->inuse);
++ for (i = 0;
++ i < sizeof(*slabp) + cachep->num * sizeof(kmem_bufctl_t);
++ i++) {
++ if (i % 16 == 0)
++ printk("\n%03x:", i);
++ printk(" %02x", ((unsigned char *)slabp)[i]);
++ }
++ printk("\n");
++ BUG();
++ }
++}
++#else
++#define kfree_debugcheck(x) do { } while(0)
++#define cache_free_debugcheck(x,objp,z) (objp)
++#define check_slabp(x,y) do { } while(0)
++#endif
++
++static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags)
++{
++ int batchcount;
++ struct kmem_list3 *l3;
++ struct array_cache *ac;
++ int node;
++
++retry:
++ check_irq_off();
++ node = numa_node_id();
++ ac = cpu_cache_get(cachep);
++ batchcount = ac->batchcount;
++ if (!ac->touched && batchcount > BATCHREFILL_LIMIT) {
++ /*
++ * If there was little recent activity on this cache, then
++ * perform only a partial refill. Otherwise we could generate
++ * refill bouncing.
++ */
++ batchcount = BATCHREFILL_LIMIT;
++ }
++ l3 = cachep->nodelists[node];
++
++ BUG_ON(ac->avail > 0 || !l3);
++ spin_lock(&l3->list_lock);
++
++ /* See if we can refill from the shared array */
++ if (l3->shared && transfer_objects(ac, l3->shared, batchcount))
++ goto alloc_done;
++
++ while (batchcount > 0) {
++ struct list_head *entry;
++ struct slab *slabp;
++ /* Get the slab the allocation is to come from. */
++ entry = l3->slabs_partial.next;
++ if (entry == &l3->slabs_partial) {
++ l3->free_touched = 1;
++ entry = l3->slabs_free.next;
++ if (entry == &l3->slabs_free)
++ goto must_grow;
++ }
++
++ slabp = list_entry(entry, struct slab, list);
++ check_slabp(cachep, slabp);
++ check_spinlock_acquired(cachep);
++
++ /*
++ * The slab was either on partial or free list so
++ * there must be at least one object available for
++ * allocation.
++ */
++ BUG_ON(slabp->inuse < 0 || slabp->inuse >= cachep->num);
++
++ while (slabp->inuse < cachep->num && batchcount--) {
++ STATS_INC_ALLOCED(cachep);
++ STATS_INC_ACTIVE(cachep);
++ STATS_SET_HIGH(cachep);
++
++ ac->entry[ac->avail++] = slab_get_obj(cachep, slabp,
++ node);
++ }
++ check_slabp(cachep, slabp);
++
++ /* move slabp to correct slabp list: */
++ list_del(&slabp->list);
++ if (slabp->free == BUFCTL_END)
++ list_add(&slabp->list, &l3->slabs_full);
++ else
++ list_add(&slabp->list, &l3->slabs_partial);
++ }
++
++must_grow:
++ l3->free_objects -= ac->avail;
++alloc_done:
++ spin_unlock(&l3->list_lock);
++
++ if (unlikely(!ac->avail)) {
++ int x;
++ x = cache_grow(cachep, flags | GFP_THISNODE, node, NULL);
++
++ /* cache_grow can reenable interrupts, then ac could change. */
++ ac = cpu_cache_get(cachep);
++ if (!x && ac->avail == 0) /* no objects in sight? abort */
++ return NULL;
++
++ if (!ac->avail) /* objects refilled by interrupt? */
++ goto retry;
++ }
++ ac->touched = 1;
++ return ac->entry[--ac->avail];
++}
++
++static inline void cache_alloc_debugcheck_before(struct kmem_cache *cachep,
++ gfp_t flags)
++{
++ might_sleep_if(flags & __GFP_WAIT);
++#if DEBUG
++ kmem_flagcheck(cachep, flags);
++#endif
++}
++
++#if DEBUG
++static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
++ gfp_t flags, void *objp, void *caller)
++{
++ if (!objp)
++ return objp;
++ if (cachep->flags & SLAB_POISON) {
++#ifdef CONFIG_DEBUG_PAGEALLOC
++ if ((cachep->buffer_size % PAGE_SIZE) == 0 && OFF_SLAB(cachep))
++ kernel_map_pages(virt_to_page(objp),
++ cachep->buffer_size / PAGE_SIZE, 1);
++ else
++ check_poison_obj(cachep, objp);
++#else
++ check_poison_obj(cachep, objp);
++#endif
++ poison_obj(cachep, objp, POISON_INUSE);
++ }
++ if (cachep->flags & SLAB_STORE_USER)
++ *dbg_userword(cachep, objp) = caller;
++
++ if (cachep->flags & SLAB_RED_ZONE) {
++ if (*dbg_redzone1(cachep, objp) != RED_INACTIVE ||
++ *dbg_redzone2(cachep, objp) != RED_INACTIVE) {
++ slab_error(cachep, "double free, or memory outside"
++ " object was overwritten");
++ printk(KERN_ERR
++ "%p: redzone 1:0x%llx, redzone 2:0x%llx\n",
++ objp, *dbg_redzone1(cachep, objp),
++ *dbg_redzone2(cachep, objp));
++ }
++ *dbg_redzone1(cachep, objp) = RED_ACTIVE;
++ *dbg_redzone2(cachep, objp) = RED_ACTIVE;
++ }
++#ifdef CONFIG_DEBUG_SLAB_LEAK
++ {
++ struct slab *slabp;
++ unsigned objnr;
++
++ slabp = page_get_slab(virt_to_head_page(objp));
++ objnr = (unsigned)(objp - slabp->s_mem) / cachep->buffer_size;
++ slab_bufctl(slabp)[objnr] = BUFCTL_ACTIVE;
++ }
++#endif
++ objp += obj_offset(cachep);
++ if (cachep->ctor && cachep->flags & SLAB_POISON)
++ cachep->ctor(objp);
++#if ARCH_SLAB_MINALIGN
++ if ((u32)objp & (ARCH_SLAB_MINALIGN-1)) {
++ printk(KERN_ERR "0x%p: not aligned to ARCH_SLAB_MINALIGN=%d\n",
++ objp, ARCH_SLAB_MINALIGN);
++ }
++#endif
++ return objp;
++}
++#else
++#define cache_alloc_debugcheck_after(a,b,objp,d) (objp)
++#endif
++
++#ifdef CONFIG_FAILSLAB
++
++static struct failslab_attr {
++
++ struct fault_attr attr;
++
++ u32 ignore_gfp_wait;
++#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
++ struct dentry *ignore_gfp_wait_file;
++#endif
++
++} failslab = {
++ .attr = FAULT_ATTR_INITIALIZER,
++ .ignore_gfp_wait = 1,
++};
++
++static int __init setup_failslab(char *str)
++{
++ return setup_fault_attr(&failslab.attr, str);
++}
++__setup("failslab=", setup_failslab);
++
++static int should_failslab(struct kmem_cache *cachep, gfp_t flags)
++{
++ if (cachep == &cache_cache)
++ return 0;
++ if (flags & __GFP_NOFAIL)
++ return 0;
++ if (failslab.ignore_gfp_wait && (flags & __GFP_WAIT))
++ return 0;
++
++ return should_fail(&failslab.attr, obj_size(cachep));
++}
++
++#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
++
++static int __init failslab_debugfs(void)
++{
++ mode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
++ struct dentry *dir;
++ int err;
++
++ err = init_fault_attr_dentries(&failslab.attr, "failslab");
++ if (err)
++ return err;
++ dir = failslab.attr.dentries.dir;
++
++ failslab.ignore_gfp_wait_file =
++ debugfs_create_bool("ignore-gfp-wait", mode, dir,
++ &failslab.ignore_gfp_wait);
++
++ if (!failslab.ignore_gfp_wait_file) {
++ err = -ENOMEM;
++ debugfs_remove(failslab.ignore_gfp_wait_file);
++ cleanup_fault_attr_dentries(&failslab.attr);
++ }
++
++ return err;
++}
++
++late_initcall(failslab_debugfs);
++
++#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
++
++#else /* CONFIG_FAILSLAB */
++
++static inline int should_failslab(struct kmem_cache *cachep, gfp_t flags)
++{
++ return 0;
++}
++
++#endif /* CONFIG_FAILSLAB */
++
++static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags)
++{
++ void *objp;
++ struct array_cache *ac;
++
++ check_irq_off();
++
++ ac = cpu_cache_get(cachep);
++ if (likely(ac->avail)) {
++ STATS_INC_ALLOCHIT(cachep);
++ ac->touched = 1;
++ objp = ac->entry[--ac->avail];
++ } else {
++ STATS_INC_ALLOCMISS(cachep);
++ objp = cache_alloc_refill(cachep, flags);
++ }
++ return objp;
++}
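++
++/*
++ * Illustrative note: the hot path above is a plain LIFO pop from the
++ * per-cpu array,
++ *
++ *     objp = ac->entry[--ac->avail];
++ *
++ * which hands back the most recently freed object (likely still
++ * cache-hot) without taking any locks; only an empty array falls
++ * through to cache_alloc_refill().
++ */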
++
++#ifdef CONFIG_NUMA
++/*
++ * Try allocating on another node if PF_SPREAD_SLAB|PF_MEMPOLICY.
++ *
++ * If we are in_interrupt, then process context, including cpusets and
++ * mempolicy, may not apply and should not be used for allocation policy.
++ */
++static void *alternate_node_alloc(struct kmem_cache *cachep, gfp_t flags)
++{
++ int nid_alloc, nid_here;
++
++ if (in_interrupt() || (flags & __GFP_THISNODE))
++ return NULL;
++ nid_alloc = nid_here = numa_node_id();
++ if (cpuset_do_slab_mem_spread() && (cachep->flags & SLAB_MEM_SPREAD))
++ nid_alloc = cpuset_mem_spread_node();
++ else if (current->mempolicy)
++ nid_alloc = slab_node(current->mempolicy);
++ if (nid_alloc != nid_here)
++ return ____cache_alloc_node(cachep, flags, nid_alloc);
++ return NULL;
++}
++
++/*
++ * Fallback function if there was no memory available and no objects on a
++ * certain node and fallback is permitted. First we scan all the
++ * available nodelists for available objects. If that fails then we
++ * perform an allocation without specifying a node. This allows the page
++ * allocator to do its reclaim / fallback magic. We then insert the
++ * slab into the proper nodelist and then allocate from it.
++ */
++static void *fallback_alloc(struct kmem_cache *cache, gfp_t flags)
++{
++ struct zonelist *zonelist;
++ gfp_t local_flags;
++ struct zoneref *z;
++ struct zone *zone;
++ enum zone_type high_zoneidx = gfp_zone(flags);
++ void *obj = NULL;
++ int nid;
++
++ if (flags & __GFP_THISNODE)
++ return NULL;
++
++ zonelist = node_zonelist(slab_node(current->mempolicy), flags);
++ local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK);
++
++retry:
++ /*
++ * Look through allowed nodes for objects available
++ * from existing per node queues.
++ */
++ for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
++ nid = zone_to_nid(zone);
++
++ if (cpuset_zone_allowed_hardwall(zone, flags) &&
++ cache->nodelists[nid] &&
++ cache->nodelists[nid]->free_objects) {
++ obj = ____cache_alloc_node(cache,
++ flags | GFP_THISNODE, nid);
++ if (obj)
++ break;
++ }
++ }
++
++ if (!obj) {
++ /*
++ * This allocation will be performed within the constraints
++ * of the current cpuset / memory policy requirements.
++ * We may trigger various forms of reclaim on the allowed
++ * set and go into memory reserves if necessary.
++ */
++ if (local_flags & __GFP_WAIT)
++ local_irq_enable();
++ kmem_flagcheck(cache, flags);
++ obj = kmem_getpages(cache, local_flags, -1);
++ if (local_flags & __GFP_WAIT)
++ local_irq_disable();
++ if (obj) {
++ /*
++ * Insert into the appropriate per node queues
++ */
++ nid = page_to_nid(virt_to_page(obj));
++ if (cache_grow(cache, flags, nid, obj)) {
++ obj = ____cache_alloc_node(cache,
++ flags | GFP_THISNODE, nid);
++ if (!obj)
++ /*
++ * Another processor may allocate the
++ * objects in the slab since we are
++ * not holding any locks.
++ */
++ goto retry;
++ } else {
++ /* cache_grow already freed obj */
++ obj = NULL;
++ }
++ }
++ }
++ return obj;
++}
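++
++/*
++ * Illustrative summary: the fallback order is (1) any allowed node that
++ * already has free objects on its lists, then (2) a fresh page from the
++ * page allocator without a node constraint, which is grown into a slab
++ * on whatever node the page landed on and allocated from there.
++ */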
++
++/*
++ * An interface to enable slab creation on nodeid
++ */
++static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
++ int nodeid)
++{
++ struct list_head *entry;
++ struct slab *slabp;
++ struct kmem_list3 *l3;
++ void *obj;
++ int x;
++
++ l3 = cachep->nodelists[nodeid];
++ BUG_ON(!l3);
++
++retry:
++ check_irq_off();
++ spin_lock(&l3->list_lock);
++ entry = l3->slabs_partial.next;
++ if (entry == &l3->slabs_partial) {
++ l3->free_touched = 1;
++ entry = l3->slabs_free.next;
++ if (entry == &l3->slabs_free)
++ goto must_grow;
++ }
++
++ slabp = list_entry(entry, struct slab, list);
++ check_spinlock_acquired_node(cachep, nodeid);
++ check_slabp(cachep, slabp);
++
++ STATS_INC_NODEALLOCS(cachep);
++ STATS_INC_ACTIVE(cachep);
++ STATS_SET_HIGH(cachep);
++
++ BUG_ON(slabp->inuse == cachep->num);
++
++ obj = slab_get_obj(cachep, slabp, nodeid);
++ check_slabp(cachep, slabp);
++ vx_slab_alloc(cachep, flags);
++ l3->free_objects--;
++ /* move slabp to correct slabp list: */
++ list_del(&slabp->list);
++
++ if (slabp->free == BUFCTL_END)
++ list_add(&slabp->list, &l3->slabs_full);
++ else
++ list_add(&slabp->list, &l3->slabs_partial);
++
++ spin_unlock(&l3->list_lock);
++ goto done;
++
++must_grow:
++ spin_unlock(&l3->list_lock);
++ x = cache_grow(cachep, flags | GFP_THISNODE, nodeid, NULL);
++ if (x)
++ goto retry;
++
++ return fallback_alloc(cachep, flags);
++
++done:
++ return obj;
++}
++
++/**
++ * kmem_cache_alloc_node - Allocate an object on the specified node
++ * @cachep: The cache to allocate from.
++ * @flags: See kmalloc().
++ * @nodeid: node number of the target node.
++ * @caller: return address of caller, used for debug information
++ *
++ * Identical to kmem_cache_alloc but it will allocate memory on the given
++ * node, which can improve the performance for cpu bound structures.
++ *
++ * Fallback to other node is possible if __GFP_THISNODE is not set.
++ */
++static __always_inline void *
++__cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
++ void *caller)
++{
++ unsigned long save_flags;
++ void *ptr;
++
++ if (should_failslab(cachep, flags))
++ return NULL;
++
++ cache_alloc_debugcheck_before(cachep, flags);
++ local_irq_save(save_flags);
++
++ if (unlikely(nodeid == -1))
++ nodeid = numa_node_id();
++
++ if (unlikely(!cachep->nodelists[nodeid])) {
++ /* Node not bootstrapped yet */
++ ptr = fallback_alloc(cachep, flags);
++ goto out;
++ }
++
++ if (nodeid == numa_node_id()) {
++ /*
++ * Use the locally cached objects if possible.
++ * However ____cache_alloc does not allow fallback
++ * to other nodes. It may fail while we still have
++ * objects on other nodes available.
++ */
++ ptr = ____cache_alloc(cachep, flags);
++ if (ptr)
++ goto out;
++ }
++ /* ___cache_alloc_node can fall back to other nodes */
++ ptr = ____cache_alloc_node(cachep, flags, nodeid);
++ out:
++ vx_slab_alloc(cachep, flags);
++ local_irq_restore(save_flags);
++ ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller);
++
++ if (unlikely((flags & __GFP_ZERO) && ptr))
++ memset(ptr, 0, obj_size(cachep));
++
++ return ptr;
++}
++
++static __always_inline void *
++__do_cache_alloc(struct kmem_cache *cache, gfp_t flags)
++{
++ void *objp;
++
++ if (unlikely(current->flags & (PF_SPREAD_SLAB | PF_MEMPOLICY))) {
++ objp = alternate_node_alloc(cache, flags);
++ if (objp)
++ goto out;
++ }
++ objp = ____cache_alloc(cache, flags);
++
++ /*
++ * We may just have run out of memory on the local node.
++ * ____cache_alloc_node() knows how to locate memory on other nodes
++ */
++ if (!objp)
++ objp = ____cache_alloc_node(cache, flags, numa_node_id());
++
++ out:
++ return objp;
++}
++#else
++
++static __always_inline void *
++__do_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
++{
++ return ____cache_alloc(cachep, flags);
++}
++
++#endif /* CONFIG_NUMA */
++
++static __always_inline void *
++__cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller)
++{
++ unsigned long save_flags;
++ void *objp;
++
++ if (should_failslab(cachep, flags))
++ return NULL;
++
++ cache_alloc_debugcheck_before(cachep, flags);
++ local_irq_save(save_flags);
++ objp = __do_cache_alloc(cachep, flags);
++ local_irq_restore(save_flags);
++ objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller);
++ prefetchw(objp);
++
++ if (unlikely((flags & __GFP_ZERO) && objp))
++ memset(objp, 0, obj_size(cachep));
++
++ return objp;
++}
++
++/*
++ * Caller needs to acquire the correct kmem_list3's list_lock
++ */
++static void free_block(struct kmem_cache *cachep, void **objpp, int nr_objects,
++ int node)
++{
++ int i;
++ struct kmem_list3 *l3;
++
++ for (i = 0; i < nr_objects; i++) {
++ void *objp = objpp[i];
++ struct slab *slabp;
++
++ slabp = virt_to_slab(objp);
++ l3 = cachep->nodelists[node];
++ list_del(&slabp->list);
++ check_spinlock_acquired_node(cachep, node);
++ check_slabp(cachep, slabp);
++ slab_put_obj(cachep, slabp, objp, node);
++ STATS_DEC_ACTIVE(cachep);
++ l3->free_objects++;
++ check_slabp(cachep, slabp);
++
++ /* fixup slab chains */
++ if (slabp->inuse == 0) {
++ if (l3->free_objects > l3->free_limit) {
++ l3->free_objects -= cachep->num;
++ /* No need to drop any previously held
++ * lock here, even if we have a off-slab slab
++ * descriptor it is guaranteed to come from
++ * a different cache, refer to comments before
++ * alloc_slabmgmt.
++ */
++ slab_destroy(cachep, slabp);
++ } else {
++ list_add(&slabp->list, &l3->slabs_free);
++ }
++ } else {
++ /* Unconditionally move a slab to the end of the
++ * partial list on free - maximum time for the
++ * other objects to be freed, too.
++ */
++ list_add_tail(&slabp->list, &l3->slabs_partial);
++ }
++ }
++}
++
++static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
++{
++ int batchcount;
++ struct kmem_list3 *l3;
++ int node = numa_node_id();
++
++ batchcount = ac->batchcount;
++#if DEBUG
++ BUG_ON(!batchcount || batchcount > ac->avail);
++#endif
++ check_irq_off();
++ l3 = cachep->nodelists[node];
++ spin_lock(&l3->list_lock);
++ if (l3->shared) {
++ struct array_cache *shared_array = l3->shared;
++ int max = shared_array->limit - shared_array->avail;
++ if (max) {
++ if (batchcount > max)
++ batchcount = max;
++ memcpy(&(shared_array->entry[shared_array->avail]),
++ ac->entry, sizeof(void *) * batchcount);
++ shared_array->avail += batchcount;
++ goto free_done;
++ }
++ }
++
++ free_block(cachep, ac->entry, batchcount, node);
++free_done:
++#if STATS
++ {
++ int i = 0;
++ struct list_head *p;
++
++ p = l3->slabs_free.next;
++ while (p != &(l3->slabs_free)) {
++ struct slab *slabp;
++
++ slabp = list_entry(p, struct slab, list);
++ BUG_ON(slabp->inuse);
++
++ i++;
++ p = p->next;
++ }
++ STATS_SET_FREEABLE(cachep, i);
++ }
++#endif
++ spin_unlock(&l3->list_lock);
++ ac->avail -= batchcount;
++ memmove(ac->entry, &(ac->entry[batchcount]), sizeof(void *)*ac->avail);
++}
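++
++/*
++ * Illustrative note: when the per-cpu array overflows, up to batchcount
++ * pointers are parked in the node's shared array if it has room,
++ * otherwise the whole batch is returned to its slabs via free_block();
++ * the memmove() above then compacts the per-cpu array, so it is the
++ * oldest entries (index 0 upwards) that leave the cpu-local cache.
++ */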
++
++/*
++ * Release an obj back to its cache. If the obj has a constructed state, it must
++ * be in this state _before_ it is released. Called with disabled ints.
++ */
++static inline void __cache_free(struct kmem_cache *cachep, void *objp)
++{
++ struct array_cache *ac = cpu_cache_get(cachep);
++
++ check_irq_off();
++ objp = cache_free_debugcheck(cachep, objp, __builtin_return_address(0));
++ vx_slab_free(cachep);
++
++ /*
++ * Skip calling cache_free_alien() when the platform is not NUMA.
++ * This avoids the cache misses that happen while accessing slabp
++ * (a per-page memory reference) to get the nodeid. Instead, use a
++ * global variable to skip the call, which is most likely to be
++ * present in the cache.
++ */
++ if (numa_platform && cache_free_alien(cachep, objp))
++ return;
++
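++ /*
++ * Fast path: push the object onto the per-cpu array. If the array
++ * is full, flush a batch back to the slab lists first, then store
++ * the object.
++ */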
++ if (likely(ac->avail < ac->limit)) {
++ STATS_INC_FREEHIT(cachep);
++ ac->entry[ac->avail++] = objp;
++ return;
++ } else {
++ STATS_INC_FREEMISS(cachep);
++ cache_flusharray(cachep, ac);
++ ac->entry[ac->avail++] = objp;
++ }
++}
++
++/**
++ * kmem_cache_alloc - Allocate an object
++ * @cachep: The cache to allocate from.
++ * @flags: See kmalloc().
++ *
++ * Allocate an object from this cache. The flags are only relevant
++ * if the cache has no available objects.
++ */
++void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
++{
++ return __cache_alloc(cachep, flags, __builtin_return_address(0));
++}
++EXPORT_SYMBOL(kmem_cache_alloc);
++
++/**
++ * kmem_ptr_validate - check if an untrusted pointer might be a slab entry.
++ * @cachep: the cache we're checking against
++ * @ptr: pointer to validate
++ *
++ * This verifies that the untrusted pointer looks sane;
++ * it is _not_ a guarantee that the pointer is actually
++ * part of the slab cache in question, but it at least
++ * validates that the pointer can be dereferenced and
++ * looks half-way sane.
++ *
++ * Currently only used for dentry validation.
++ */
++int kmem_ptr_validate(struct kmem_cache *cachep, const void *ptr)
++{
++ unsigned long addr = (unsigned long)ptr;
++ unsigned long min_addr = PAGE_OFFSET;
++ unsigned long align_mask = BYTES_PER_WORD - 1;
++ unsigned long size = cachep->buffer_size;
++ struct page *page;
++
++ if (unlikely(addr < min_addr))
++ goto out;
++ if (unlikely(addr > (unsigned long)high_memory - size))
++ goto out;
++ if (unlikely(addr & align_mask))
++ goto out;
++ if (unlikely(!kern_addr_valid(addr)))
++ goto out;
++ if (unlikely(!kern_addr_valid(addr + size - 1)))
++ goto out;
++ page = virt_to_page(ptr);
++ if (unlikely(!PageSlab(page)))
++ goto out;
++ if (unlikely(page_get_cache(page) != cachep))
++ goto out;
++ return 1;
++out:
++ return 0;
++}
++
++#ifdef CONFIG_NUMA
++void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
++{
++ return __cache_alloc_node(cachep, flags, nodeid,
++ __builtin_return_address(0));
++}
++EXPORT_SYMBOL(kmem_cache_alloc_node);
++
++static __always_inline void *
++__do_kmalloc_node(size_t size, gfp_t flags, int node, void *caller)
++{
++ struct kmem_cache *cachep;
++
++ cachep = kmem_find_general_cachep(size, flags);
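++ /* A zero-size request yields ZERO_SIZE_PTR; hand it straight back. */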
++ if (unlikely(ZERO_OR_NULL_PTR(cachep)))
++ return cachep;
++ return kmem_cache_alloc_node(cachep, flags, node);
++}
++
++#ifdef CONFIG_DEBUG_SLAB
++void *__kmalloc_node(size_t size, gfp_t flags, int node)
++{
++ return __do_kmalloc_node(size, flags, node,
++ __builtin_return_address(0));
++}
++EXPORT_SYMBOL(__kmalloc_node);
++
++void *__kmalloc_node_track_caller(size_t size, gfp_t flags,
++ int node, void *caller)
++{
++ return __do_kmalloc_node(size, flags, node, caller);
++}
++EXPORT_SYMBOL(__kmalloc_node_track_caller);
++#else
++void *__kmalloc_node(size_t size, gfp_t flags, int node)
++{
++ return __do_kmalloc_node(size, flags, node, NULL);
++}
++EXPORT_SYMBOL(__kmalloc_node);
++#endif /* CONFIG_DEBUG_SLAB */
++#endif /* CONFIG_NUMA */
++
++/**
++ * __do_kmalloc - allocate memory
++ * @size: how many bytes of memory are required.
++ * @flags: the type of memory to allocate (see kmalloc).
++ * @caller: function caller for debug tracking of the caller
++ */
++static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
++ void *caller)
++{
++ struct kmem_cache *cachep;
++
++ /* If you want to save a few bytes of .text space: replace
++ * __ with kmem_.
++ * Then kmalloc uses the uninlined functions instead of the
++ * inlined ones.
++ */
++ cachep = __find_general_cachep(size, flags);
++ if (unlikely(ZERO_OR_NULL_PTR(cachep)))
++ return cachep;
++ return __cache_alloc(cachep, flags, caller);
++}
++
++
++#ifdef CONFIG_DEBUG_SLAB
++void *__kmalloc(size_t size, gfp_t flags)
++{
++ return __do_kmalloc(size, flags, __builtin_return_address(0));
++}
++EXPORT_SYMBOL(__kmalloc);
++
++void *__kmalloc_track_caller(size_t size, gfp_t flags, void *caller)
++{
++ return __do_kmalloc(size, flags, caller);
++}
++EXPORT_SYMBOL(__kmalloc_track_caller);
++
++#else
++void *__kmalloc(size_t size, gfp_t flags)
++{
++ return __do_kmalloc(size, flags, NULL);
++}
++EXPORT_SYMBOL(__kmalloc);
++#endif
++
++/**
++ * kmem_cache_free - Deallocate an object
++ * @cachep: The cache the allocation was from.
++ * @objp: The previously allocated object.
++ *
++ * Free an object which was previously allocated from this
++ * cache.
++ */
++void kmem_cache_free(struct kmem_cache *cachep, void *objp)
++{
++ unsigned long flags;
++
++ local_irq_save(flags);
++ debug_check_no_locks_freed(objp, obj_size(cachep));
++ if (!(cachep->flags & SLAB_DEBUG_OBJECTS))
++ debug_check_no_obj_freed(objp, obj_size(cachep));
++ __cache_free(cachep, objp);
++ local_irq_restore(flags);
++}
++EXPORT_SYMBOL(kmem_cache_free);
++
++/**
++ * kfree - free previously allocated memory
++ * @objp: pointer returned by kmalloc.
++ *
++ * If @objp is NULL, no operation is performed.
++ *
++ * Don't free memory not originally allocated by kmalloc()
++ * or you will run into trouble.
++ */
++void kfree(const void *objp)
++{
++ struct kmem_cache *c;
++ unsigned long flags;
++
++ if (unlikely(ZERO_OR_NULL_PTR(objp)))
++ return;
++ local_irq_save(flags);
++ kfree_debugcheck(objp);
++ c = virt_to_cache(objp);
++ debug_check_no_locks_freed(objp, obj_size(c));
++ debug_check_no_obj_freed(objp, obj_size(c));
++ __cache_free(c, (void *)objp);
++ local_irq_restore(flags);
++}
++EXPORT_SYMBOL(kfree);
++
++unsigned int kmem_cache_size(struct kmem_cache *cachep)
++{
++ return obj_size(cachep);
++}
++EXPORT_SYMBOL(kmem_cache_size);
++
++const char *kmem_cache_name(struct kmem_cache *cachep)
++{
++ return cachep->name;
++}
++EXPORT_SYMBOL_GPL(kmem_cache_name);
++
++/*
++ * This initializes kmem_list3 or resizes various caches for all nodes.
++ */
++static int alloc_kmemlist(struct kmem_cache *cachep)
++{
++ int node;
++ struct kmem_list3 *l3;
++ struct array_cache *new_shared;
++ struct array_cache **new_alien = NULL;
++
++ for_each_online_node(node) {
++
++ if (use_alien_caches) {
++ new_alien = alloc_alien_cache(node, cachep->limit);
++ if (!new_alien)
++ goto fail;
++ }
++
++ new_shared = NULL;
++ if (cachep->shared) {
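++ /*
++ * The batchcount is not used for the shared array;
++ * 0xbaadf00d is just an obvious poison value.
++ */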
++ new_shared = alloc_arraycache(node,
++ cachep->shared*cachep->batchcount,
++ 0xbaadf00d);
++ if (!new_shared) {
++ free_alien_cache(new_alien);
++ goto fail;
++ }
++ }
++
++ l3 = cachep->nodelists[node];
++ if (l3) {
++ struct array_cache *shared = l3->shared;
++
++ spin_lock_irq(&l3->list_lock);
++
++ if (shared)
++ free_block(cachep, shared->entry,
++ shared->avail, node);
++
++ l3->shared = new_shared;
++ if (!l3->alien) {
++ l3->alien = new_alien;
++ new_alien = NULL;
++ }
++ l3->free_limit = (1 + nr_cpus_node(node)) *
++ cachep->batchcount + cachep->num;
++ spin_unlock_irq(&l3->list_lock);
++ kfree(shared);
++ free_alien_cache(new_alien);
++ continue;
++ }
++ l3 = kmalloc_node(sizeof(struct kmem_list3), GFP_KERNEL, node);
++ if (!l3) {
++ free_alien_cache(new_alien);
++ kfree(new_shared);
++ goto fail;
++ }
++
++ kmem_list3_init(l3);
++ l3->next_reap = jiffies + REAPTIMEOUT_LIST3 +
++ ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
++ l3->shared = new_shared;
++ l3->alien = new_alien;
++ l3->free_limit = (1 + nr_cpus_node(node)) *
++ cachep->batchcount + cachep->num;
++ cachep->nodelists[node] = l3;
++ }
++ return 0;
++
++fail:
++ if (!cachep->next.next) {
++ /* Cache is not active yet. Roll back what we did */
++ node--;
++ while (node >= 0) {
++ if (cachep->nodelists[node]) {
++ l3 = cachep->nodelists[node];
++
++ kfree(l3->shared);
++ free_alien_cache(l3->alien);
++ kfree(l3);
++ cachep->nodelists[node] = NULL;
++ }
++ node--;
++ }
++ }
++ return -ENOMEM;
++}
++
++struct ccupdate_struct {
++ struct kmem_cache *cachep;
++ struct array_cache *new[NR_CPUS];
++};
++
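++/*
++ * Runs on each cpu with interrupts disabled: swap this cache's per-cpu
++ * array_cache pointer with the newly allocated one and store the old
++ * pointer back in the ccupdate_struct so the caller can drain and free it.
++ */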
++static void do_ccupdate_local(void *info)
++{
++ struct ccupdate_struct *new = info;
++ struct array_cache *old;
++
++ check_irq_off();
++ old = cpu_cache_get(new->cachep);
++
++ new->cachep->array[smp_processor_id()] = new->new[smp_processor_id()];
++ new->new[smp_processor_id()] = old;
++}
++
++/* Always called with the cache_chain_mutex held */
++static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
++ int batchcount, int shared)
++{
++ struct ccupdate_struct *new;
++ int i;
++
++ new = kzalloc(sizeof(*new), GFP_KERNEL);
++ if (!new)
++ return -ENOMEM;
++
++ for_each_online_cpu(i) {
++ new->new[i] = alloc_arraycache(cpu_to_node(i), limit,
++ batchcount);
++ if (!new->new[i]) {
++ for (i--; i >= 0; i--)
++ kfree(new->new[i]);
++ kfree(new);
++ return -ENOMEM;
++ }
++ }
++ new->cachep = cachep;
++
++ on_each_cpu(do_ccupdate_local, (void *)new, 1);
++
++ check_irq_on();
++ cachep->batchcount = batchcount;
++ cachep->limit = limit;
++ cachep->shared = shared;
++
++ for_each_online_cpu(i) {
++ struct array_cache *ccold = new->new[i];
++ if (!ccold)
++ continue;
++ spin_lock_irq(&cachep->nodelists[cpu_to_node(i)]->list_lock);
++ free_block(cachep, ccold->entry, ccold->avail, cpu_to_node(i));
++ spin_unlock_irq(&cachep->nodelists[cpu_to_node(i)]->list_lock);
++ kfree(ccold);
++ }
++ kfree(new);
++ return alloc_kmemlist(cachep);
++}
++
++/* Called with cache_chain_mutex held always */
++static int enable_cpucache(struct kmem_cache *cachep)
++{
++ int err;
++ int limit, shared;
++
++ /*
++ * The head array serves three purposes:
++ * - create a LIFO ordering, i.e. return objects that are cache-warm
++ * - reduce the number of spinlock operations.
++ * - reduce the number of linked list operations on the slab and
++ * bufctl chains: array operations are cheaper.
++ * The numbers are guesses; we should auto-tune as described by
++ * Bonwick.
++ */
++ if (cachep->buffer_size > 131072)
++ limit = 1;
++ else if (cachep->buffer_size > PAGE_SIZE)
++ limit = 8;
++ else if (cachep->buffer_size > 1024)
++ limit = 24;
++ else if (cachep->buffer_size > 256)
++ limit = 54;
++ else
++ limit = 120;
++
++ /*
++ * CPU-bound tasks (e.g. network routing) can exhibit CPU-bound
++ * allocation behaviour: most allocations on one CPU, most frees
++ * on another. For these cases, efficient object passing between
++ * CPUs is necessary. This is provided by a shared array, which
++ * replaces Bonwick's magazine layer.
++ * On a uniprocessor it is functionally equivalent (but less
++ * efficient) to a larger limit, so it is disabled by default.
++ */
++ shared = 0;
++ if (cachep->buffer_size <= PAGE_SIZE && num_possible_cpus() > 1)
++ shared = 8;
++
++#if DEBUG
++ /*
++ * With debugging enabled, a large batchcount leads to excessively long
++ * periods with local interrupts disabled. Limit the batchcount.
++ */
++ if (limit > 32)
++ limit = 32;
++#endif
++ err = do_tune_cpucache(cachep, limit, (limit + 1) / 2, shared);
++ if (err)
++ printk(KERN_ERR "enable_cpucache failed for %s, error %d.\n",
++ cachep->name, -err);
++ return err;
++}
++
++/*
++ * Drain an array if it contains any elements, taking the l3 lock only if
++ * necessary. Note that the l3 list_lock also protects the array_cache
++ * when drain_array() is used on the shared array.
++ */
++void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3,
++ struct array_cache *ac, int force, int node)
++{
++ int tofree;
++
++ if (!ac || !ac->avail)
++ return;
++ if (ac->touched && !force) {
++ ac->touched = 0;
++ } else {
++ spin_lock_irq(&l3->list_lock);
++ if (ac->avail) {
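++ /*
++ * Free about a fifth of the limit on a periodic drain
++ * (everything when forced), but never more than half of
++ * the objects currently available.
++ */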
++ tofree = force ? ac->avail : (ac->limit + 4) / 5;
++ if (tofree > ac->avail)
++ tofree = (ac->avail + 1) / 2;
++ free_block(cachep, ac->entry, tofree, node);
++ ac->avail -= tofree;
++ memmove(ac->entry, &(ac->entry[tofree]),
++ sizeof(void *) * ac->avail);
++ }
++ spin_unlock_irq(&l3->list_lock);
++ }
++}
++
++/**
++ * cache_reap - Reclaim memory from caches.
++ * @w: work descriptor
++ *
++ * Called from workqueue/eventd every few seconds.
++ * Purpose:
++ * - clear the per-cpu caches for this CPU.
++ * - return freeable pages to the main free memory pool.
++ *
++ * If we cannot acquire the cache chain mutex then just give up - we'll try
++ * again on the next iteration.
++ */
++static void cache_reap(struct work_struct *w)
++{
++ struct kmem_cache *searchp;
++ struct kmem_list3 *l3;
++ int node = numa_node_id();
++ struct delayed_work *work =
++ container_of(w, struct delayed_work, work);
++
++ if (!mutex_trylock(&cache_chain_mutex))
++ /* Give up. Set up the next iteration. */
++ goto out;
++
++ list_for_each_entry(searchp, &cache_chain, next) {
++ check_irq_on();
++
++ /*
++ * We only take the l3 lock if absolutely necessary and we
++ * have established with reasonable certainty that
++ * we can do some work if the lock was obtained.
++ */
++ l3 = searchp->nodelists[node];
++
++ reap_alien(searchp, l3);
++
++ drain_array(searchp, l3, cpu_cache_get(searchp), 0, node);
++
++ /*
++ * These are racy checks but it does not matter
++ * if we skip one check or scan twice.
++ */
++ if (time_after(l3->next_reap, jiffies))
++ goto next;
++
++ l3->next_reap = jiffies + REAPTIMEOUT_LIST3;
++
++ drain_array(searchp, l3, l3->shared, 0, node);
++
++ if (l3->free_touched)
++ l3->free_touched = 0;
++ else {
++ int freed;
++
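++ /*
++ * Shrink the free list by roughly a fifth of the node's
++ * free_limit, expressed in whole slabs.
++ */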
++ freed = drain_freelist(searchp, l3, (l3->free_limit +
++ 5 * searchp->num - 1) / (5 * searchp->num));
++ STATS_ADD_REAPED(searchp, freed);
++ }
++next:
++ cond_resched();
++ }
++ check_irq_on();
++ mutex_unlock(&cache_chain_mutex);
++ next_reap_node();
++out:
++ /* Set up the next iteration */
++ schedule_delayed_work(work, round_jiffies_relative(REAPTIMEOUT_CPUC));
++}
++
++#ifdef CONFIG_SLABINFO
++
++static void print_slabinfo_header(struct seq_file *m)
++{
++ /*
++ * Output format version, so at least we can change it
++ * without _too_ many complaints.
++ */
++#if STATS
++ seq_puts(m, "slabinfo - version: 2.1 (statistics)\n");
++#else
++ seq_puts(m, "slabinfo - version: 2.1\n");
++#endif
++ seq_puts(m, "# name <active_objs> <num_objs> <objsize> "
++ "<objperslab> <pagesperslab>");
++ seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
++ seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
++#if STATS
++ seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> "
++ "<error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>");
++ seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
++#endif
++ seq_putc(m, '\n');
++}
++
++static void *s_start(struct seq_file *m, loff_t *pos)
++{
++ loff_t n = *pos;
++
++ mutex_lock(&cache_chain_mutex);
++ if (!n)
++ print_slabinfo_header(m);
++
++ return seq_list_start(&cache_chain, *pos);
++}
++
++static void *s_next(struct seq_file *m, void *p, loff_t *pos)
++{
++ return seq_list_next(p, &cache_chain, pos);
++}
++
++static void s_stop(struct seq_file *m, void *p)
++{
++ mutex_unlock(&cache_chain_mutex);
++}
++
++static int s_show(struct seq_file *m, void *p)
++{
++ struct kmem_cache *cachep = list_entry(p, struct kmem_cache, next);
++ struct slab *slabp;
++ unsigned long active_objs;
++ unsigned long num_objs;
++ unsigned long active_slabs = 0;
++ unsigned long num_slabs, free_objects = 0, shared_avail = 0;
++ const char *name;
++ char *error = NULL;
++ int node;
++ struct kmem_list3 *l3;
++
++ active_objs = 0;
++ num_slabs = 0;
++ for_each_online_node(node) {
++ l3 = cachep->nodelists[node];
++ if (!l3)
++ continue;
++
++ check_irq_on();
++ spin_lock_irq(&l3->list_lock);
++
++ list_for_each_entry(slabp, &l3->slabs_full, list) {
++ if (slabp->inuse != cachep->num && !error)
++ error = "slabs_full accounting error";
++ active_objs += cachep->num;
++ active_slabs++;
++ }
++ list_for_each_entry(slabp, &l3->slabs_partial, list) {
++ if (slabp->inuse == cachep->num && !error)
++ error = "slabs_partial inuse accounting error";
++ if (!slabp->inuse && !error)
++ error = "slabs_partial/inuse accounting error";
++ active_objs += slabp->inuse;
++ active_slabs++;
++ }
++ list_for_each_entry(slabp, &l3->slabs_free, list) {
++ if (slabp->inuse && !error)
++ error = "slabs_free/inuse accounting error";
++ num_slabs++;
++ }
++ free_objects += l3->free_objects;
++ if (l3->shared)
++ shared_avail += l3->shared->avail;
++
++ spin_unlock_irq(&l3->list_lock);
++ }
++ num_slabs += active_slabs;
++ num_objs = num_slabs * cachep->num;
++ if (num_objs - active_objs != free_objects && !error)
++ error = "free_objects accounting error";
++
++ name = cachep->name;
++ if (error)
++ printk(KERN_ERR "slab: cache %s error: %s\n", name, error);
++
++ seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d",
++ name, active_objs, num_objs, cachep->buffer_size,
++ cachep->num, (1 << cachep->gfporder));
++ seq_printf(m, " : tunables %4u %4u %4u",
++ cachep->limit, cachep->batchcount, cachep->shared);
++ seq_printf(m, " : slabdata %6lu %6lu %6lu",
++ active_slabs, num_slabs, shared_avail);
++#if STATS
++ { /* list3 stats */
++ unsigned long high = cachep->high_mark;
++ unsigned long allocs = cachep->num_allocations;
++ unsigned long grown = cachep->grown;
++ unsigned long reaped = cachep->reaped;
++ unsigned long errors = cachep->errors;
++ unsigned long max_freeable = cachep->max_freeable;
++ unsigned long node_allocs = cachep->node_allocs;
++ unsigned long node_frees = cachep->node_frees;
++ unsigned long overflows = cachep->node_overflow;
++
++ seq_printf(m, " : globalstat %7lu %6lu %5lu %4lu \
++ %4lu %4lu %4lu %4lu %4lu", allocs, high, grown,
++ reaped, errors, max_freeable, node_allocs,
++ node_frees, overflows);
++ }
++ /* cpu stats */
++ {
++ unsigned long allochit = atomic_read(&cachep->allochit);
++ unsigned long allocmiss = atomic_read(&cachep->allocmiss);
++ unsigned long freehit = atomic_read(&cachep->freehit);
++ unsigned long freemiss = atomic_read(&cachep->freemiss);
++
++ seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
++ allochit, allocmiss, freehit, freemiss);
++ }
++#endif
++ seq_putc(m, '\n');
++ return 0;
++}
++
++/*
++ * slabinfo_op - iterator that generates /proc/slabinfo
++ *
++ * Output layout:
++ * cache-name
++ * num-active-objs
++ * total-objs
++ * object size
++ * num-active-slabs
++ * total-slabs
++ * num-pages-per-slab
++ * + further values on SMP and with statistics enabled
++ */
++
++const struct seq_operations slabinfo_op = {
++ .start = s_start,
++ .next = s_next,
++ .stop = s_stop,
++ .show = s_show,
++};
++
++#define MAX_SLABINFO_WRITE 128
++/**
++ * slabinfo_write - Tuning for the slab allocator
++ * @file: unused
++ * @buffer: user buffer
++ * @count: data length
++ * @ppos: unused
++ */
++ssize_t slabinfo_write(struct file *file, const char __user * buffer,
++ size_t count, loff_t *ppos)
++{
++ char kbuf[MAX_SLABINFO_WRITE + 1], *tmp;
++ int limit, batchcount, shared, res;
++ struct kmem_cache *cachep;
++
++ if (count > MAX_SLABINFO_WRITE)
++ return -EINVAL;
++ if (copy_from_user(&kbuf, buffer, count))
++ return -EFAULT;
++ kbuf[MAX_SLABINFO_WRITE] = '\0';
++
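++ /* Expected input: "<cache name> <limit> <batchcount> <shared>" */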
++ tmp = strchr(kbuf, ' ');
++ if (!tmp)
++ return -EINVAL;
++ *tmp = '\0';
++ tmp++;
++ if (sscanf(tmp, " %d %d %d", &limit, &batchcount, &shared) != 3)
++ return -EINVAL;
++
++ /* Find the cache in the chain of caches. */
++ mutex_lock(&cache_chain_mutex);
++ res = -EINVAL;
++ list_for_each_entry(cachep, &cache_chain, next) {
++ if (!strcmp(cachep->name, kbuf)) {
++ if (limit < 1 || batchcount < 1 ||
++ batchcount > limit || shared < 0) {
++ res = 0;
++ } else {
++ res = do_tune_cpucache(cachep, limit,
++ batchcount, shared);
++ }
++ break;
++ }
++ }
++ mutex_unlock(&cache_chain_mutex);
++ if (res >= 0)
++ res = count;
++ return res;
++}
++
++#ifdef CONFIG_DEBUG_SLAB_LEAK
++
++static void *leaks_start(struct seq_file *m, loff_t *pos)
++{
++ mutex_lock(&cache_chain_mutex);
++ return seq_list_start(&cache_chain, *pos);
++}
++
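++/*
++ * Record one caller address in the leak table. n[0] holds the table
++ * capacity and n[1] the number of entries in use; entries start at n[2]
++ * as (address, count) pairs kept sorted by address, so a binary search
++ * can find or insert the caller. Returns 0 when the table is full so
++ * the caller can grow the buffer and retry.
++ */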
++static inline int add_caller(unsigned long *n, unsigned long v)
++{
++ unsigned long *p;
++ int l;
++ if (!v)
++ return 1;
++ l = n[1];
++ p = n + 2;
++ while (l) {
++ int i = l/2;
++ unsigned long *q = p + 2 * i;
++ if (*q == v) {
++ q[1]++;
++ return 1;
++ }
++ if (*q > v) {
++ l = i;
++ } else {
++ p = q + 2;
++ l -= i + 1;
++ }
++ }
++ if (++n[1] == n[0])
++ return 0;
++ memmove(p + 2, p, n[1] * 2 * sizeof(unsigned long) - ((void *)p - (void *)n));
++ p[0] = v;
++ p[1] = 1;
++ return 1;
++}
++
++static void handle_slab(unsigned long *n, struct kmem_cache *c, struct slab *s)
++{
++ void *p;
++ int i;
++ if (n[0] == n[1])
++ return;
++ for (i = 0, p = s->s_mem; i < c->num; i++, p += c->buffer_size) {
++ if (slab_bufctl(s)[i] != BUFCTL_ACTIVE)
++ continue;
++ if (!add_caller(n, (unsigned long)*dbg_userword(c, p)))
++ return;
++ }
++}
++
++static void show_symbol(struct seq_file *m, unsigned long address)
++{
++#ifdef CONFIG_KALLSYMS
++ unsigned long offset, size;
++ char modname[MODULE_NAME_LEN], name[KSYM_NAME_LEN];
++
++ if (lookup_symbol_attrs(address, &size, &offset, modname, name) == 0) {
++ seq_printf(m, "%s+%#lx/%#lx", name, offset, size);
++ if (modname[0])
++ seq_printf(m, " [%s]", modname);
++ return;
++ }
++#endif
++ seq_printf(m, "%p", (void *)address);
++}
++
++static int leaks_show(struct seq_file *m, void *p)
++{
++ struct kmem_cache *cachep = list_entry(p, struct kmem_cache, next);
++ struct slab *slabp;
++ struct kmem_list3 *l3;
++ const char *name;
++ unsigned long *n = m->private;
++ int node;
++ int i;
++
++ if (!(cachep->flags & SLAB_STORE_USER))
++ return 0;
++ if (!(cachep->flags & SLAB_RED_ZONE))
++ return 0;
++
++ /* OK, we can do it */
++
++ n[1] = 0;
++
++ for_each_online_node(node) {
++ l3 = cachep->nodelists[node];
++ if (!l3)
++ continue;
++
++ check_irq_on();
++ spin_lock_irq(&l3->list_lock);
++
++ list_for_each_entry(slabp, &l3->slabs_full, list)
++ handle_slab(n, cachep, slabp);
++ list_for_each_entry(slabp, &l3->slabs_partial, list)
++ handle_slab(n, cachep, slabp);
++ spin_unlock_irq(&l3->list_lock);
++ }
++ name = cachep->name;
++ if (n[0] == n[1]) {
++ /* Increase the buffer size */
++ mutex_unlock(&cache_chain_mutex);
++ m->private = kzalloc(n[0] * 4 * sizeof(unsigned long), GFP_KERNEL);
++ if (!m->private) {
++ /* Too bad, we are really out */
++ m->private = n;
++ mutex_lock(&cache_chain_mutex);
++ return -ENOMEM;
++ }
++ *(unsigned long *)m->private = n[0] * 2;
++ kfree(n);
++ mutex_lock(&cache_chain_mutex);
++ /* Now make sure this entry will be retried */
++ m->count = m->size;
++ return 0;
++ }
++ for (i = 0; i < n[1]; i++) {
++ seq_printf(m, "%s: %lu ", name, n[2*i+3]);
++ show_symbol(m, n[2*i+2]);
++ seq_putc(m, '\n');
++ }
++
++ return 0;
++}
++
++const struct seq_operations slabstats_op = {
++ .start = leaks_start,
++ .next = s_next,
++ .stop = s_stop,
++ .show = leaks_show,
++};
++#endif
++#endif
++
++/**
++ * ksize - get the actual amount of memory allocated for a given object
++ * @objp: Pointer to the object
++ *
++ * kmalloc may internally round up allocations and return more memory
++ * than requested. ksize() can be used to determine the actual amount of
++ * memory allocated. The caller may use this additional memory, even though
++ * a smaller amount of memory was initially specified with the kmalloc call.
++ * The caller must guarantee that objp points to a valid object previously
++ * allocated with either kmalloc() or kmem_cache_alloc(). The object
++ * must not be freed during the duration of the call.
++ */
++size_t ksize(const void *objp)
++{
++ BUG_ON(!objp);
++ if (unlikely(objp == ZERO_SIZE_PTR))
++ return 0;
++
++ return obj_size(virt_to_cache(objp));
++}