#ifndef _ASM_IA64_PROCESSOR_H
#define _ASM_IA64_PROCESSOR_H

/*
 * Copyright (C) 1998-2004 Hewlett-Packard Co
 *      David Mosberger-Tang <davidm@hpl.hp.com>
 *      Stephane Eranian <eranian@hpl.hp.com>
 * Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com>
 * Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
 *
 * 11/24/98     S.Eranian       added ia64_set_iva()
 * 12/03/99     D. Mosberger    implement thread_saved_pc() via kernel unwind API
 * 06/16/00     A. Mallick      added csd/ssd/tssd for ia32 support
 */

#include <linux/config.h>

#include <asm/intrinsics.h>
#include <asm/kregs.h>
#include <asm/ptrace.h>
#include <asm/ustack.h>

#define IA64_NUM_DBG_REGS       8
/*
 * Limits for PMC and PMD are set to less than the maximum architected values
 * but should be sufficient for a while.
 */
#define IA64_NUM_PMC_REGS       32
#define IA64_NUM_PMD_REGS       32

#define DEFAULT_MAP_BASE        __IA64_UL_CONST(0x2000000000000000)
#define DEFAULT_TASK_SIZE       __IA64_UL_CONST(0xa000000000000000)

/*
 * TASK_SIZE is really misnamed: it is actually the maximum user
 * space address (plus one).  On IA-64, there are five regions of 2TB
 * each (assuming an 8KB page size), for a total of 8TB of user virtual
 * address space.
 */
#define TASK_SIZE               (current->thread.task_size)

/*
 * MM_VM_SIZE(mm) gives the maximum address (plus 1) which may contain a mapping for
 * address-space MM.  Note that with 32-bit tasks, this is still DEFAULT_TASK_SIZE,
 * because the kernel may have installed helper-mappings above TASK_SIZE.  For example,
 * for x86 emulation, the LDT and GDT are mapped above TASK_SIZE.
 */
#define MM_VM_SIZE(mm)          DEFAULT_TASK_SIZE

/*
 * This decides where the kernel will search for a free chunk of vm
 * space during mmap().
 */
#define TASK_UNMAPPED_BASE      (current->thread.map_base)

#define IA64_THREAD_FPH_VALID   (__IA64_UL(1) << 0)     /* floating-point high state valid? */
#define IA64_THREAD_DBG_VALID   (__IA64_UL(1) << 1)     /* debug registers valid? */
#define IA64_THREAD_PM_VALID    (__IA64_UL(1) << 2)     /* performance registers valid? */
#define IA64_THREAD_UAC_NOPRINT (__IA64_UL(1) << 3)     /* don't log unaligned accesses */
#define IA64_THREAD_UAC_SIGBUS  (__IA64_UL(1) << 4)     /* generate SIGBUS on unaligned acc. */
                                                        /* bit 5 is currently unused */
#define IA64_THREAD_FPEMU_NOPRINT (__IA64_UL(1) << 6)   /* don't log any fpswa faults */
#define IA64_THREAD_FPEMU_SIGFPE  (__IA64_UL(1) << 7)   /* send a SIGFPE for fpswa faults */

#define IA64_THREAD_UAC_SHIFT   3
#define IA64_THREAD_UAC_MASK    (IA64_THREAD_UAC_NOPRINT | IA64_THREAD_UAC_SIGBUS)
#define IA64_THREAD_FPEMU_SHIFT 6
#define IA64_THREAD_FPEMU_MASK  (IA64_THREAD_FPEMU_NOPRINT | IA64_THREAD_FPEMU_SIGFPE)

/*
 * This shift should be large enough to represent 1000000000/itc_freq with good
 * accuracy while being small enough that 10*1000000000<<IA64_NSEC_PER_CYC_SHIFT
 * still fits in 64 bits (this gives enough slack to represent 10 seconds' worth
 * of time as a scaled number).
 */
#define IA64_NSEC_PER_CYC_SHIFT 30

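/*
 * Worked check (illustrative, not part of the original source): with the
 * shift at 30, the largest scaled value needed is
 *
 *      10*1000000000 << 30  =  10^10 * 2^30  ~=  1.07e19,
 *
 * which stays below 2^64 - 1 ~= 1.84e19, so ten seconds' worth of scaled
 * nanoseconds still fits in an unsigned 64-bit quantity.
 */
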
#ifndef __ASSEMBLY__

#include <linux/cache.h>
#include <linux/compiler.h>
#include <linux/threads.h>
#include <linux/types.h>

#include <asm/fpu.h>
#include <asm/page.h>
#include <asm/percpu.h>
#include <asm/rse.h>
#include <asm/unwind.h>
#include <asm/atomic.h>
#ifdef CONFIG_NUMA
#include <asm/nodedata.h>
#endif

/* the processor status register (PSR) expressed as bitfields for more efficient
   access (the corresponding IA64_PSR_* bit masks live in <asm/kregs.h>): */
struct ia64_psr {
        __u64 reserved0 : 1;
        __u64 be : 1;
        __u64 up : 1;
        __u64 ac : 1;
        __u64 mfl : 1;
        __u64 mfh : 1;
        __u64 reserved1 : 7;
        __u64 ic : 1;
        __u64 i : 1;
        __u64 pk : 1;
        __u64 reserved2 : 1;
        __u64 dt : 1;
        __u64 dfl : 1;
        __u64 dfh : 1;
        __u64 sp : 1;
        __u64 pp : 1;
        __u64 di : 1;
        __u64 si : 1;
        __u64 db : 1;
        __u64 lp : 1;
        __u64 tb : 1;
        __u64 rt : 1;
        __u64 reserved3 : 4;
        __u64 cpl : 2;
        __u64 is : 1;
        __u64 mc : 1;
        __u64 it : 1;
        __u64 id : 1;
        __u64 da : 1;
        __u64 dd : 1;
        __u64 ss : 1;
        __u64 ri : 2;
        __u64 ed : 1;
        __u64 bn : 1;
        __u64 reserved4 : 19;
};
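
/*
 * Example (illustrative, not from the original source): the fields above
 * overlay cr.ipsr, so code that holds a struct pt_regs can inspect the
 * interrupted context's PSR via the ia64_psr() accessor from
 * <asm/ptrace.h>, e.g. to check whether it came from user level (cpl 3):
 *
 *      struct ia64_psr *psr = ia64_psr(regs);
 *      int from_user = (psr->cpl == 3);
 */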

/*
 * CPU type, hardware bug flags, and per-CPU state.  Frequently used
 * state comes earlier:
 */
struct cpuinfo_ia64 {
        __u32 softirq_pending;
        __u64 itm_delta;        /* # of clock cycles between clock ticks */
        __u64 itm_next;         /* interval timer match value to use for next clock tick */
        __u64 nsec_per_cyc;     /* (1000000000<<IA64_NSEC_PER_CYC_SHIFT)/itc_freq */
        __u64 unimpl_va_mask;   /* mask of unimplemented virtual address bits (from PAL) */
        __u64 unimpl_pa_mask;   /* mask of unimplemented physical address bits (from PAL) */
        __u64 *pgd_quick;
        __u64 *pmd_quick;
        __u64 pgtable_cache_sz;
        __u64 itc_freq;         /* frequency of ITC counter */
        __u64 proc_freq;        /* frequency of processor */
        __u64 cyc_per_usec;     /* itc_freq/1000000 */
        __u64 ptce_base;
        __u32 ptce_count[2];
        __u32 ptce_stride[2];
        struct task_struct *ksoftirqd;  /* kernel softirq daemon for this CPU */

#ifdef CONFIG_SMP
        __u64 loops_per_jiffy;
        int cpu;
#endif

        /* CPUID-derived information: */
        __u64 ppn;
        __u64 features;
        __u8 number;
        __u8 revision;
        __u8 model;
        __u8 family;
        __u8 archrev;
        char vendor[16];

#ifdef CONFIG_NUMA
        struct ia64_node_data *node_data;
#endif
};

DECLARE_PER_CPU(struct cpuinfo_ia64, cpu_info);

/*
 * The "local" data variable.  It refers to the per-CPU data of the currently executing
 * CPU, much like "current" points to the per-task data of the currently executing task.
 * Do not use the address of local_cpu_data, since it will be different from
 * cpu_data(smp_processor_id())!
 */
#define local_cpu_data          (&__ia64_per_cpu_var(cpu_info))
#define cpu_data(cpu)           (&per_cpu(cpu_info, cpu))

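/*
 * Example (illustrative, not from the original source): reading the ITC
 * frequency of the executing CPU vs. that of an arbitrary CPU.  The
 * local_cpu_data access assumes preemption is disabled so the task cannot
 * migrate between the read and its use:
 *
 *      unsigned long freq  = local_cpu_data->itc_freq;
 *      unsigned long other = cpu_data(some_cpu)->itc_freq;
 *
 * (some_cpu stands for any valid CPU number and is hypothetical.)
 */
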
extern void identify_cpu (struct cpuinfo_ia64 *);
extern void print_cpu_info (struct cpuinfo_ia64 *);

typedef struct {
        unsigned long seg;
} mm_segment_t;

#define SET_UNALIGN_CTL(task,value)                                                             \
({                                                                                              \
        (task)->thread.flags = (((task)->thread.flags & ~IA64_THREAD_UAC_MASK)                  \
                                | (((value) << IA64_THREAD_UAC_SHIFT) & IA64_THREAD_UAC_MASK)); \
        0;                                                                                      \
})
#define GET_UNALIGN_CTL(task,addr)                                                              \
({                                                                                              \
        put_user(((task)->thread.flags & IA64_THREAD_UAC_MASK) >> IA64_THREAD_UAC_SHIFT,        \
                 (int __user *) (addr));                                                        \
})

#define SET_FPEMU_CTL(task,value)                                                               \
({                                                                                              \
        (task)->thread.flags = (((task)->thread.flags & ~IA64_THREAD_FPEMU_MASK)                \
                          | (((value) << IA64_THREAD_FPEMU_SHIFT) & IA64_THREAD_FPEMU_MASK));   \
        0;                                                                                      \
})
#define GET_FPEMU_CTL(task,addr)                                                                \
({                                                                                              \
        put_user(((task)->thread.flags & IA64_THREAD_FPEMU_MASK) >> IA64_THREAD_FPEMU_SHIFT,    \
                 (int __user *) (addr));                                                        \
})

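/*
 * Example (illustrative, not from the original source): these helpers back
 * the generic prctl() interface; PR_SET_UNALIGN, PR_GET_UNALIGN,
 * PR_SET_FPEMU, and PR_GET_FPEMU in kernel/sys.c reduce to, roughly:
 *
 *      SET_UNALIGN_CTL(current, PR_UNALIGN_SIGBUS);    -- raise SIGBUS instead of fixing up
 *      GET_UNALIGN_CTL(current, addr);                 -- copy the current setting to user space
 */
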
#ifdef CONFIG_IA32_SUPPORT
struct desc_struct {
        unsigned int a, b;
};

#define desc_empty(desc)                (!((desc)->a + (desc)->b))
#define desc_equal(desc1, desc2)        (((desc1)->a == (desc2)->a) && ((desc1)->b == (desc2)->b))

#define GDT_ENTRY_TLS_ENTRIES   3
#define GDT_ENTRY_TLS_MIN       6
#define GDT_ENTRY_TLS_MAX       (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)

#define TLS_SIZE (GDT_ENTRY_TLS_ENTRIES * 8)

struct partial_page_list;
#endif

struct thread_struct {
        __u32 flags;                    /* various thread flags (see IA64_THREAD_*) */
        /* writing on_ustack is performance-critical, so it's worth spending 8 bits on it... */
        __u8 on_ustack;                 /* executing on user-stacks? */
        __u8 pad[3];
        __u64 ksp;                      /* kernel stack pointer */
        __u64 map_base;                 /* base address for get_unmapped_area() */
        __u64 task_size;                /* limit for task size */
        __u64 rbs_bot;                  /* the base address for the RBS */
        int last_fph_cpu;               /* CPU that may hold the contents of f32-f127 */

#ifdef CONFIG_IA32_SUPPORT
        __u64 eflag;                    /* IA32 EFLAGS reg */
        __u64 fsr;                      /* IA32 floating pt status reg */
        __u64 fcr;                      /* IA32 floating pt control reg */
        __u64 fir;                      /* IA32 fp except. instr. reg */
        __u64 fdr;                      /* IA32 fp except. data reg */
        __u64 old_k1;                   /* old value of ar.k1 */
        __u64 old_iob;                  /* old IOBase value */
        struct partial_page_list *ppl;  /* partial page list for 4K page size issue */
        /* cached TLS descriptors. */
        struct desc_struct tls_array[GDT_ENTRY_TLS_ENTRIES];

# define INIT_THREAD_IA32       .eflag =        0,                      \
                                .fsr =          0,                      \
                                .fcr =          0x17800000037fULL,      \
                                .fir =          0,                      \
                                .fdr =          0,                      \
                                .old_k1 =       0,                      \
                                .old_iob =      0,                      \
                                .ppl =          NULL,
#else
# define INIT_THREAD_IA32
#endif /* CONFIG_IA32_SUPPORT */
#ifdef CONFIG_PERFMON
        __u64 pmcs[IA64_NUM_PMC_REGS];
        __u64 pmds[IA64_NUM_PMD_REGS];
        void *pfm_context;                   /* pointer to detailed PMU context */
        unsigned long pfm_needs_checking;    /* when >0, pending perfmon work on kernel exit */
# define INIT_THREAD_PM         .pmcs =                 {0UL, },  \
                                .pmds =                 {0UL, },  \
                                .pfm_context =          NULL,     \
                                .pfm_needs_checking =   0UL,
#else
# define INIT_THREAD_PM
#endif
        __u64 dbr[IA64_NUM_DBG_REGS];
        __u64 ibr[IA64_NUM_DBG_REGS];
        struct ia64_fpreg fph[96];      /* saved/loaded on demand */
};

#define INIT_THREAD {                                           \
        .flags =        0,                                      \
        .on_ustack =    0,                                      \
        .ksp =          0,                                      \
        .map_base =     DEFAULT_MAP_BASE,                       \
        .rbs_bot =      STACK_TOP - DEFAULT_USER_STACK_SIZE,    \
        .task_size =    DEFAULT_TASK_SIZE,                      \
        .last_fph_cpu = -1,                                     \
        INIT_THREAD_IA32                                        \
        INIT_THREAD_PM                                          \
        .dbr =          {0, },                                  \
        .ibr =          {0, },                                  \
        .fph =          {{{{0}}}, }                             \
}

#define start_thread(regs,new_ip,new_sp) do {                                                   \
        set_fs(USER_DS);                                                                        \
        regs->cr_ipsr = ((regs->cr_ipsr | (IA64_PSR_BITS_TO_SET | IA64_PSR_CPL))                \
                         & ~(IA64_PSR_BITS_TO_CLEAR | IA64_PSR_RI | IA64_PSR_IS));              \
        regs->cr_iip = new_ip;                                                                  \
        regs->ar_rsc = 0xf;             /* eager mode, privilege level 3 */                     \
        regs->ar_rnat = 0;                                                                      \
        regs->ar_bspstore = current->thread.rbs_bot;                                            \
        regs->ar_fpsr = FPSR_DEFAULT;                                                           \
        regs->loadrs = 0;                                                                       \
        regs->r8 = current->mm->dumpable;       /* set "don't zap registers" flag */            \
        regs->r12 = new_sp - 16;        /* allocate 16 byte scratch area */                     \
        if (unlikely(!current->mm->dumpable)) {                                                 \
                /*                                                                              \
                 * Zap scratch regs to avoid leaking bits between processes with different      \
                 * uid/privileges.                                                              \
                 */                                                                             \
                regs->ar_pfs = 0; regs->b0 = 0; regs->pr = 0;                                   \
                regs->r1 = 0; regs->r9 = 0; regs->r11 = 0; regs->r13 = 0; regs->r15 = 0;        \
        }                                                                                       \
} while (0)

/* Forward declarations, a strange C thing... */
struct mm_struct;
struct task_struct;

/*
 * Free all resources held by a thread. This is called after the
 * parent of DEAD_TASK has collected the exit status of the task via
 * wait().
 */
#define release_thread(dead_task)

/* Prepare to copy thread state - unlazy all lazy state */
#define prepare_to_copy(tsk)    do { } while (0)

#ifdef CONFIG_NUMA
#define SD_NODE_INIT (struct sched_domain) {            \
        .span                   = CPU_MASK_NONE,        \
        .parent                 = NULL,                 \
        .groups                 = NULL,                 \
        .min_interval           = 80,                   \
        .max_interval           = 320,                  \
        .busy_factor            = 320,                  \
        .imbalance_pct          = 125,                  \
        .cache_hot_time         = (10*1000000),         \
        .cache_nice_tries       = 1,                    \
        .per_cpu_gain           = 100,                  \
        .flags                  = SD_BALANCE_EXEC       \
                                | SD_WAKE_BALANCE,      \
        .last_balance           = jiffies,              \
        .balance_interval       = 10,                   \
        .nr_balance_failed      = 0,                    \
}
#endif

/*
 * This is the mechanism for creating a new kernel thread.
 *
 * NOTE 1: Only a kernel-only process (i.e., the swapper or direct
 * descendants which haven't done an "execve()") should use this: it
 * will work within a system call from a "real" process, but the
 * process memory space will not be freed until both the parent and
 * the child have exited.
 *
 * NOTE 2: This MUST NOT be an inlined function.  Otherwise, we get
 * into trouble in init/main.c when the child thread returns to
 * do_basic_setup() and the timing is such that free_initmem() has
 * been called already.
 */
extern pid_t kernel_thread (int (*fn)(void *), void *arg, unsigned long flags);

/* Get wait channel for task P.  */
extern unsigned long get_wchan (struct task_struct *p);

/* Return instruction pointer of blocked task TSK.  */
#define KSTK_EIP(tsk)                                   \
  ({                                                    \
        struct pt_regs *_regs = ia64_task_regs(tsk);    \
        _regs->cr_iip + ia64_psr(_regs)->ri;            \
  })

/* Return stack pointer of blocked task TSK.  */
#define KSTK_ESP(tsk)  ((tsk)->thread.ksp)

extern void ia64_getreg_unknown_kr (void);
extern void ia64_setreg_unknown_kr (void);

#define ia64_get_kr(regnum)                                     \
({                                                              \
        unsigned long r = 0;                                    \
                                                                \
        switch (regnum) {                                       \
            case 0: r = ia64_getreg(_IA64_REG_AR_KR0); break;   \
            case 1: r = ia64_getreg(_IA64_REG_AR_KR1); break;   \
            case 2: r = ia64_getreg(_IA64_REG_AR_KR2); break;   \
            case 3: r = ia64_getreg(_IA64_REG_AR_KR3); break;   \
            case 4: r = ia64_getreg(_IA64_REG_AR_KR4); break;   \
            case 5: r = ia64_getreg(_IA64_REG_AR_KR5); break;   \
            case 6: r = ia64_getreg(_IA64_REG_AR_KR6); break;   \
            case 7: r = ia64_getreg(_IA64_REG_AR_KR7); break;   \
            default: ia64_getreg_unknown_kr(); break;           \
        }                                                       \
        r;                                                      \
})

#define ia64_set_kr(regnum, r)                                  \
({                                                              \
        switch (regnum) {                                       \
            case 0: ia64_setreg(_IA64_REG_AR_KR0, r); break;    \
            case 1: ia64_setreg(_IA64_REG_AR_KR1, r); break;    \
            case 2: ia64_setreg(_IA64_REG_AR_KR2, r); break;    \
            case 3: ia64_setreg(_IA64_REG_AR_KR3, r); break;    \
            case 4: ia64_setreg(_IA64_REG_AR_KR4, r); break;    \
            case 5: ia64_setreg(_IA64_REG_AR_KR5, r); break;    \
            case 6: ia64_setreg(_IA64_REG_AR_KR6, r); break;    \
            case 7: ia64_setreg(_IA64_REG_AR_KR7, r); break;    \
            default: ia64_setreg_unknown_kr(); break;           \
        }                                                       \
})

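/*
 * Example (illustrative, not from the original source): regnum must be a
 * compile-time constant so the switch above collapses to a single
 * getreg/setreg; e.g. stashing and recovering a task pointer via the
 * IA64_KR_CURRENT index from <asm/kregs.h>:
 *
 *      ia64_set_kr(IA64_KR_CURRENT, (unsigned long) task);
 *      task = (struct task_struct *) ia64_get_kr(IA64_KR_CURRENT);
 */
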
/*
 * The following three macros can't be inline functions because struct task_struct
 * is only forward-declared (not yet defined) at this point.
 */

/* Return TRUE if task T owns the fph partition of the CPU we're running on. */
#define ia64_is_local_fpu_owner(t)                                                              \
({                                                                                              \
        struct task_struct *__ia64_islfo_task = (t);                                            \
        (__ia64_islfo_task->thread.last_fph_cpu == smp_processor_id()                           \
         && __ia64_islfo_task == (struct task_struct *) ia64_get_kr(IA64_KR_FPU_OWNER));        \
})

/* Mark task T as owning the fph partition of the CPU we're running on. */
#define ia64_set_local_fpu_owner(t) do {                                                \
        struct task_struct *__ia64_slfo_task = (t);                                     \
        __ia64_slfo_task->thread.last_fph_cpu = smp_processor_id();                     \
        ia64_set_kr(IA64_KR_FPU_OWNER, (unsigned long) __ia64_slfo_task);               \
} while (0)

/* Mark the fph partition of task T as being invalid on all CPUs.  */
#define ia64_drop_fpu(t)        ((t)->thread.last_fph_cpu = -1)

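/*
 * Example (illustrative sketch of the lazy-fph protocol, not the exact
 * fault-handler code): when a task touches f32-f127 on a CPU it does not
 * currently own, its saved high partition is reloaded and ownership is
 * reasserted:
 *
 *      if (!ia64_is_local_fpu_owner(task)) {
 *              if (task->thread.flags & IA64_THREAD_FPH_VALID)
 *                      __ia64_load_fpu(task->thread.fph);
 *              ia64_set_local_fpu_owner(task);
 *      }
 */
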
extern void __ia64_init_fpu (void);
extern void __ia64_save_fpu (struct ia64_fpreg *fph);
extern void __ia64_load_fpu (struct ia64_fpreg *fph);
extern void ia64_save_debug_regs (unsigned long *save_area);
extern void ia64_load_debug_regs (unsigned long *save_area);

#ifdef CONFIG_IA32_SUPPORT
extern void ia32_save_state (struct task_struct *task);
extern void ia32_load_state (struct task_struct *task);
#endif

#define ia64_fph_enable()       do { ia64_rsm(IA64_PSR_DFH); ia64_srlz_d(); } while (0)
#define ia64_fph_disable()      do { ia64_ssm(IA64_PSR_DFH); ia64_srlz_d(); } while (0)

/* load fp 0.0 into fph */
static inline void
ia64_init_fpu (void) {
        ia64_fph_enable();
        __ia64_init_fpu();
        ia64_fph_disable();
}

/* save f32-f127 to FPH */
static inline void
ia64_save_fpu (struct ia64_fpreg *fph) {
        ia64_fph_enable();
        __ia64_save_fpu(fph);
        ia64_fph_disable();
}

/* load f32-f127 from FPH */
static inline void
ia64_load_fpu (struct ia64_fpreg *fph) {
        ia64_fph_enable();
        __ia64_load_fpu(fph);
        ia64_fph_disable();
}

static inline __u64
ia64_clear_ic (void)
{
        __u64 psr;
        psr = ia64_getreg(_IA64_REG_PSR);
        ia64_stop();
        ia64_rsm(IA64_PSR_I | IA64_PSR_IC);
        ia64_srlz_i();
        return psr;
}

/*
 * Restore the psr.
 */
static inline void
ia64_set_psr (__u64 psr)
{
        ia64_stop();
        ia64_setreg(_IA64_REG_PSR_L, psr);
        ia64_srlz_d();
}

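/*
 * Example (illustrative, not from the original source): ia64_clear_ic()
 * and ia64_set_psr() bracket code that must run with interruption
 * collection and interrupts disabled, e.g. around a translation-register
 * insert via ia64_itr() below:
 *
 *      psr = ia64_clear_ic();
 *      ia64_itr(0x1, tr_num, vmaddr, pte, log_page_size);
 *      ia64_set_psr(psr);
 *      ia64_srlz_i();
 */
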
/*
 * Insert a translation into an instruction and/or data translation
 * register.
 */
static inline void
ia64_itr (__u64 target_mask, __u64 tr_num,
          __u64 vmaddr, __u64 pte,
          __u64 log_page_size)
{
        ia64_setreg(_IA64_REG_CR_ITIR, (log_page_size << 2));
        ia64_setreg(_IA64_REG_CR_IFA, vmaddr);
        ia64_stop();
        if (target_mask & 0x1)
                ia64_itri(tr_num, pte);
        if (target_mask & 0x2)
                ia64_itrd(tr_num, pte);
}

/*
 * Insert a translation into the instruction and/or data translation
 * cache.
 */
static inline void
ia64_itc (__u64 target_mask, __u64 vmaddr, __u64 pte,
          __u64 log_page_size)
{
        ia64_setreg(_IA64_REG_CR_ITIR, (log_page_size << 2));
        ia64_setreg(_IA64_REG_CR_IFA, vmaddr);
        ia64_stop();
        /* as per EAS2.6, itc must be the last instruction in an instruction group */
        if (target_mask & 0x1)
                ia64_itci(pte);
        if (target_mask & 0x2)
                ia64_itcd(pte);
}

/*
 * Purge a range of addresses from instruction and/or data translation
 * register(s).
 */
static inline void
ia64_ptr (__u64 target_mask, __u64 vmaddr, __u64 log_size)
{
        if (target_mask & 0x1)
                ia64_ptri(vmaddr, (log_size << 2));
        if (target_mask & 0x2)
                ia64_ptrd(vmaddr, (log_size << 2));
}

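/*
 * Example (illustrative, not from the original source): target_mask uses
 * bit 0 for the instruction side and bit 1 for the data side, so purging
 * a translation that was pinned into both I- and D-TRs looks like:
 *
 *      ia64_ptr(0x3, vmaddr, log_page_size);
 *      ia64_srlz_i();
 */
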
/* Set the interrupt vector address.  The address must be suitably aligned (32KB).  */
static inline void
ia64_set_iva (void *ivt_addr)
{
        ia64_setreg(_IA64_REG_CR_IVA, (__u64) ivt_addr);
        ia64_srlz_i();
}

/* Set the page table address and control bits.  */
static inline void
ia64_set_pta (__u64 pta)
{
        /* Note: srlz.i implies srlz.d */
        ia64_setreg(_IA64_REG_CR_PTA, pta);
        ia64_srlz_i();
}

static inline void
ia64_eoi (void)
{
        ia64_setreg(_IA64_REG_CR_EOI, 0);
        ia64_srlz_d();
}

#define cpu_relax()     ia64_hint(ia64_hint_pause)

static inline void
ia64_set_lrr0 (unsigned long val)
{
        ia64_setreg(_IA64_REG_CR_LRR0, val);
        ia64_srlz_d();
}

static inline void
ia64_set_lrr1 (unsigned long val)
{
        ia64_setreg(_IA64_REG_CR_LRR1, val);
        ia64_srlz_d();
}

/*
 * Given the address to which a spill occurred, return the unat bit
 * number that corresponds to this address.
 */
static inline __u64
ia64_unat_pos (void *spill_addr)
{
        return ((__u64) spill_addr >> 3) & 0x3f;
}

/*
 * Set the NaT bit of an integer register which was spilled at address
 * SPILL_ADDR.  UNAT is the mask to be updated.
 */
static inline void
ia64_set_unat (__u64 *unat, void *spill_addr, unsigned long nat)
{
        __u64 bit = ia64_unat_pos(spill_addr);
        __u64 mask = 1UL << bit;

        *unat = (*unat & ~mask) | (nat << bit);
}

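/*
 * Worked example (illustrative, not from the original source): a spill to
 * address 0xe000000000010150 has bits 8:3 equal to 0x2a, so
 * ia64_unat_pos() returns 42 and ia64_set_unat() updates bit 42:
 *
 *      __u64 unat = 0;
 *      ia64_set_unat(&unat, (void *) 0xe000000000010150, 1);
 *      now: unat == (1UL << 42)
 */
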
/*
 * Return the saved PC of a blocked thread.
 * Note that the only way T can block is through a call to schedule() -> switch_to().
 */
static inline unsigned long
thread_saved_pc (struct task_struct *t)
{
        struct unw_frame_info info;
        unsigned long ip;

        unw_init_from_blocked_task(&info, t);
        if (unw_unwind(&info) < 0)
                return 0;
        unw_get_ip(&info, &ip);
        return ip;
}

/*
 * Get the current instruction/program counter value.
 */
#define current_text_addr() \
        ({ void *_pc; _pc = (void *)ia64_getreg(_IA64_REG_IP); _pc; })

static inline __u64
ia64_get_ivr (void)
{
        __u64 r;
        ia64_srlz_d();
        r = ia64_getreg(_IA64_REG_CR_IVR);
        ia64_srlz_d();
        return r;
}

static inline void
ia64_set_dbr (__u64 regnum, __u64 value)
{
        __ia64_set_dbr(regnum, value);
#ifdef CONFIG_ITANIUM
        ia64_srlz_d();
#endif
}

static inline __u64
ia64_get_dbr (__u64 regnum)
{
        __u64 retval;

        retval = __ia64_get_dbr(regnum);
#ifdef CONFIG_ITANIUM
        ia64_srlz_d();
#endif
        return retval;
}

static inline __u64
ia64_rotr (__u64 w, __u64 n)
{
        return (w >> n) | (w << (64 - n));
}

#define ia64_rotl(w,n)  ia64_rotr((w), (64) - (n))

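/*
 * Worked example (illustrative, not from the original source), valid for
 * 0 < n < 64 (a shift count of 64 is undefined in C):
 *
 *      ia64_rotr(0x00000000000000ffUL, 8) == 0xff00000000000000UL
 *      ia64_rotl(0x00000000000000ffUL, 8) == 0x000000000000ff00UL
 */
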
/*
 * Take a mapped kernel address and return the equivalent address
 * in the region 7 identity-mapped virtual area.
 */
static inline void *
ia64_imva (void *addr)
{
        void *result;
        result = (void *) ia64_tpa(addr);
        return __va(result);
}

#define ARCH_HAS_PREFETCH
#define ARCH_HAS_PREFETCHW
#define ARCH_HAS_SPINLOCK_PREFETCH
#define PREFETCH_STRIDE                 L1_CACHE_BYTES

static inline void
prefetch (const void *x)
{
        ia64_lfetch(ia64_lfhint_none, x);
}

static inline void
prefetchw (const void *x)
{
        ia64_lfetch_excl(ia64_lfhint_none, x);
}

#define spin_lock_prefetch(x)   prefetchw(x)

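/*
 * Example (illustrative, not from the original source): a common pattern
 * prefetches the next node of a linked list while the current node is
 * being processed.  lfetch is only a hint, so prefetching a NULL or
 * faulting address is harmless:
 *
 *      for (p = head; p; p = p->next) {
 *              prefetch(p->next);
 *              process(p);
 *      }
 */
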
#endif /* !__ASSEMBLY__ */

#endif /* _ASM_IA64_PROCESSOR_H */