X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=include%2Fasm-ia64%2Fprocessor.h;h=cbc657909c78256844e8c172e9e954a84d67bf7d;hb=97bf2856c6014879bd04983a3e9dfcdac1e7fe85;hp=9e1b61be5c3bf844106035bd568180d5f9c242fd;hpb=c7b5ebbddf7bcd3651947760f423e3783bbe6573;p=linux-2.6.git

diff --git a/include/asm-ia64/processor.h b/include/asm-ia64/processor.h
index 9e1b61be5..cbc657909 100644
--- a/include/asm-ia64/processor.h
+++ b/include/asm-ia64/processor.h
@@ -13,20 +13,14 @@
  * 06/16/00 A. Mallick added csd/ssd/tssd for ia32 support
  */
 
-#include
 #include
 #include
 #include
 #include
+#include
 
 #define IA64_NUM_DBG_REGS      8
-/*
- * Limits for PMC and PMD are set to less than maximum architected values
- * but should be sufficient for a while
- */
-#define IA64_NUM_PMC_REGS      32
-#define IA64_NUM_PMD_REGS      32
 
 #define DEFAULT_MAP_BASE       __IA64_UL_CONST(0x2000000000000000)
 #define DEFAULT_TASK_SIZE      __IA64_UL_CONST(0xa000000000000000)
 
@@ -39,14 +33,6 @@
  */
 #define TASK_SIZE              (current->thread.task_size)
 
-/*
- * MM_VM_SIZE(mm) gives the maximum address (plus 1) which may contain a mapping for
- * address-space MM.  Note that with 32-bit tasks, this is still DEFAULT_TASK_SIZE,
- * because the kernel may have installed helper-mappings above TASK_SIZE.  For example,
- * for x86 emulation, the LDT and GDT are mapped above TASK_SIZE.
- */
-#define MM_VM_SIZE(mm)         DEFAULT_TASK_SIZE
-
 /*
  * This decides where the kernel will search for a free chunk of vm
  * space during mmap's.
@@ -58,7 +44,8 @@
 #define IA64_THREAD_PM_VALID    (__IA64_UL(1) << 2)    /* performance registers valid? */
 #define IA64_THREAD_UAC_NOPRINT (__IA64_UL(1) << 3)    /* don't log unaligned accesses */
 #define IA64_THREAD_UAC_SIGBUS  (__IA64_UL(1) << 4)    /* generate SIGBUS on unaligned acc. */
-                                                       /* bit 5 is currently unused */
+#define IA64_THREAD_MIGRATION   (__IA64_UL(1) << 5)    /* require migration
+                                                          sync at ctx sw */
 #define IA64_THREAD_FPEMU_NOPRINT (__IA64_UL(1) << 6)  /* don't log any fpswa faults */
 #define IA64_THREAD_FPEMU_SIGFPE  (__IA64_UL(1) << 7)  /* send a SIGFPE for fpswa faults */
 
@@ -142,9 +129,6 @@ struct cpuinfo_ia64 {
 	__u64 nsec_per_cyc;	/* (1000000000<<IA64_NSEC_PER_CYC_SHIFT)/itc_freq */
[...]
 	unsigned long pfm_needs_checking;	/* when >0, pending perfmon work on kernel exit */
-# define INIT_THREAD_PM		.pmcs =			{0UL, },  \
-				.pmds =			{0UL, },  \
-				.pfm_context =		NULL,     \
+# define INIT_THREAD_PM		.pfm_context =		NULL,     \
 				.pfm_needs_checking =	0UL,
 #else
 # define INIT_THREAD_PM
@@ -334,26 +321,6 @@ struct task_struct;
 /* Prepare to copy thread state - unlazy all lazy status */
 #define prepare_to_copy(tsk)	do { } while (0)
 
-#ifdef CONFIG_NUMA
-#define SD_NODE_INIT (struct sched_domain) {		\
-	.span			= CPU_MASK_NONE,	\
-	.parent			= NULL,			\
-	.groups			= NULL,			\
-	.min_interval		= 80,			\
-	.max_interval		= 320,			\
-	.busy_factor		= 320,			\
-	.imbalance_pct		= 125,			\
-	.cache_hot_time		= (10*1000000),		\
-	.cache_nice_tries	= 1,			\
-	.per_cpu_gain		= 100,			\
-	.flags			= SD_BALANCE_EXEC	\
-				| SD_WAKE_BALANCE,	\
-	.last_balance		= jiffies,		\
-	.balance_interval	= 10,			\
-	.nr_balance_failed	= 0,			\
-}
-#endif
-
 /*
  * This is the mechanism for creating a new kernel thread.
  *
@@ -376,7 +343,7 @@ extern unsigned long get_wchan (struct task_struct *p);
 /* Return instruction pointer of blocked task TSK. */
 #define KSTK_EIP(tsk)					\
   ({							\
-	struct pt_regs *_regs = ia64_task_regs(tsk);	\
+	struct pt_regs *_regs = task_pt_regs(tsk);	\
 	_regs->cr_iip + ia64_psr(_regs)->ri;		\
   })
 
@@ -424,7 +391,10 @@ extern void ia64_setreg_unknown_kr (void);
  * task_struct at this point.
  */
 
-/* Return TRUE if task T owns the fph partition of the CPU we're running on. */
+/*
+ * Return TRUE if task T owns the fph partition of the CPU we're running on.
+ * Must be called from code that has preemption disabled.
+ */
 #define ia64_is_local_fpu_owner(t)								\
 ({												\
 	struct task_struct *__ia64_islfo_task = (t);						\
@@ -432,7 +402,10 @@ extern void ia64_setreg_unknown_kr (void);
 	   && __ia64_islfo_task == (struct task_struct *) ia64_get_kr(IA64_KR_FPU_OWNER));	\
 })
 
-/* Mark task T as owning the fph partition of the CPU we're running on. */
+/*
+ * Mark task T as owning the fph partition of the CPU we're running on.
+ * Must be called from code that has preemption disabled.
+ */
 #define ia64_set_local_fpu_owner(t) do {						\
 	struct task_struct *__ia64_slfo_task = (t);					\
 	__ia64_slfo_task->thread.last_fph_cpu = smp_processor_id();			\
@@ -577,6 +550,23 @@ ia64_eoi (void)
 
 #define cpu_relax()	ia64_hint(ia64_hint_pause)
 
+static inline int
+ia64_get_irr(unsigned int vector)
+{
+	unsigned int reg = vector / 64;
+	unsigned int bit = vector % 64;
+	u64 irr;
+
+	switch (reg) {
+	case 0: irr = ia64_getreg(_IA64_REG_CR_IRR0); break;
+	case 1: irr = ia64_getreg(_IA64_REG_CR_IRR1); break;
+	case 2: irr = ia64_getreg(_IA64_REG_CR_IRR2); break;
+	case 3: irr = ia64_getreg(_IA64_REG_CR_IRR3); break;
+	}
+
+	return test_bit(bit, &irr);
+}
+
 static inline void
 ia64_set_lrr0 (unsigned long val)
 {
@@ -708,6 +698,8 @@ prefetchw (const void *x)
 
 #define spin_lock_prefetch(x)	prefetchw(x)
 
+extern unsigned long boot_option_idle_override;
+
 #endif /* !__ASSEMBLY__ */
 
 #endif /* _ASM_IA64_PROCESSOR_H */
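
A note on the ia64_get_irr() helper added by this diff: it selects one of the four 64-bit IRR control registers from vector/64 and tests bit vector%64, so it returns nonzero when the given interrupt vector is pending. The fragment below is only an illustrative sketch of how a caller might use it, not part of the patch; the function name example_report_pending and the debug message are hypothetical, and reading the IRR is only meaningful from privileged kernel code.

#include <linux/kernel.h>
#include <asm/processor.h>

/* Illustrative sketch only: report whether a given interrupt vector is pending. */
static void example_report_pending(unsigned int vector)
{
	/* ia64_get_irr() picks CR.IRR0..IRR3 and tests the bit for this vector */
	if (ia64_get_irr(vector))
		printk(KERN_DEBUG "vector %u is pending in the IRR\n", vector);
}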