/*
 *  linux/arch/cris/arch-v10/mm/tlb.c
 *
 *  Low level TLB handling
 *
 *  Copyright (C) 2000-2002  Axis Communications AB
 *
 *  Authors:   Bjorn Wesen (bjornw@axis.com)
 */

#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/arch/svinto.h>

#define D(x)	/* change to D(x) x to enable the debug printouts below */
/* The TLB can host up to 64 different mm contexts at the same time.
 * The running context is R_MMU_CONTEXT, and each TLB entry contains a
 * page_id that has to match to give a hit. In page_id_map, we keep track
 * of which mm's we have assigned which page_id's, so that we know when
 * to invalidate TLB entries.
 *
 * The last page_id is never running - it is used as an invalid page_id
 * so we can make TLB entries that will never match.
 *
 * Notice that we need to make the flushes atomic, otherwise an interrupt
 * handler that uses vmalloced memory might cause a TLB load in the middle
 * of a flush.
 */
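
/* For illustration only - a minimal sketch of the page_id bookkeeping
 * described above. This is an assumption, not the code this file builds
 * against: the real allocator (and page_id_map) live elsewhere, and the
 * names NUM_PAGEID, map_replace_ptr and alloc_context_sketch are
 * hypothetical here.
 */
#if 0
#define NUM_PAGEID 64				/* 64 contexts -> 6-bit page_id */

static struct mm_struct *page_id_map[NUM_PAGEID];
static int map_replace_ptr;			/* next page_id to recycle */

static void alloc_context_sketch(struct mm_struct *mm)
{
	struct mm_struct *old_mm = page_id_map[map_replace_ptr];

	if (old_mm) {
		/* steal the page_id - the old owner's entries must go */
		flush_tlb_mm(old_mm);
		old_mm->context = NO_CONTEXT;
	}
	mm->context = map_replace_ptr;
	page_id_map[map_replace_ptr] = mm;

	/* never hand out the last page_id - it is INVALID_PAGEID */
	if (++map_replace_ptr == NUM_PAGEID - 1)
		map_replace_ptr = 0;
}
#endif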
/* invalidate all TLB entries */

void
flush_tlb_all(void)
{
	int i;
	unsigned long flags;

	/* the vpn of i & 0xf is so we don't write similar TLB entries
	 * in the same 4-way entry group; see the worked example after
	 * this function.
	 */

	local_save_flags(flags);
	local_irq_disable();
	for (i = 0; i < NUM_TLB_ENTRIES; i++) {
		*R_TLB_SELECT = IO_FIELD(R_TLB_SELECT, index, i);
		*R_TLB_HI = IO_FIELD(R_TLB_HI, page_id, INVALID_PAGEID) |
			    IO_FIELD(R_TLB_HI, vpn, i & 0xf);

		*R_TLB_LO = IO_STATE(R_TLB_LO, global, no) |
			    IO_STATE(R_TLB_LO, valid, no) |
			    IO_STATE(R_TLB_LO, kernel, no) |
			    IO_STATE(R_TLB_LO, we, no) |
			    IO_FIELD(R_TLB_LO, pfn, 0);
	}
	local_irq_restore(flags);
	D(printk("tlb: flushed all\n"));
}
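
/* Worked example for the i & 0xf trick above (assuming, as the comment
 * implies, that a 4-way entry group is four consecutive indices): entries
 * 8..11 form one group and receive vpns 8, 9, 10 and 11, so no two ways
 * of a group end up holding the same (page_id, vpn) tag. Writing vpn 0
 * into all 64 entries would instead park four identical tags in every
 * group.
 */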
/* invalidate the selected mm context only */

void
flush_tlb_mm(struct mm_struct *mm)
{
	int i;
	int page_id = mm->context;
	unsigned long flags;

	D(printk("tlb: flush mm context %d (%p)\n", page_id, mm));

	if (page_id == NO_CONTEXT)
		return;

	/* mark the TLB entries that match the page_id as invalid.
	 * here we could also check the _PAGE_GLOBAL bit and NOT flush
	 * global pages - is it worth the extra I/O? (see the sketch
	 * after this function)
	 */

	local_save_flags(flags);
	local_irq_disable();
	for (i = 0; i < NUM_TLB_ENTRIES; i++) {
		*R_TLB_SELECT = IO_FIELD(R_TLB_SELECT, index, i);
		if (IO_EXTRACT(R_TLB_HI, page_id, *R_TLB_HI) == page_id) {
			*R_TLB_HI = IO_FIELD(R_TLB_HI, page_id, INVALID_PAGEID) |
				    IO_FIELD(R_TLB_HI, vpn, i & 0xf);

			*R_TLB_LO = IO_STATE(R_TLB_LO, global, no) |
				    IO_STATE(R_TLB_LO, valid, no) |
				    IO_STATE(R_TLB_LO, kernel, no) |
				    IO_STATE(R_TLB_LO, we, no) |
				    IO_FIELD(R_TLB_LO, pfn, 0);
		}
	}
	local_irq_restore(flags);
}
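
/* If the extra I/O mused about above were deemed worth it, skipping
 * global pages might look like the fragment below, inserted in the scan
 * loop right after the R_TLB_SELECT write. Illustrative only; it assumes
 * R_TLB_LO reads back the entry and that the usual IO_MASK/IO_STATE
 * macros from the register description apply.
 */
#if 0
		if ((*R_TLB_LO & IO_MASK(R_TLB_LO, global)) ==
		    IO_STATE(R_TLB_LO, global, yes))
			continue;	/* leave global mappings intact */
#endif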
/* invalidate a single page */

void
flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
	struct mm_struct *mm = vma->vm_mm;
	int page_id = mm->context;
	int i;
	unsigned long flags;

	D(printk("tlb: flush page %p in context %d (%p)\n",
		 (void *)addr, page_id, mm));

	if (page_id == NO_CONTEXT)
		return;

	addr &= PAGE_MASK; /* perhaps not necessary */

	/* invalidate those TLB entries that match both the mm context
	 * and the virtual address requested
	 */

	local_save_flags(flags);
	local_irq_disable();
	for (i = 0; i < NUM_TLB_ENTRIES; i++) {
		unsigned long tlb_hi;

		*R_TLB_SELECT = IO_FIELD(R_TLB_SELECT, index, i);
		tlb_hi = *R_TLB_HI;
		if (IO_EXTRACT(R_TLB_HI, page_id, tlb_hi) == page_id &&
		    (tlb_hi & PAGE_MASK) == addr) {
			*R_TLB_HI = IO_FIELD(R_TLB_HI, page_id, INVALID_PAGEID) |
				    addr; /* same addr as before works. */

			*R_TLB_LO = IO_STATE(R_TLB_LO, global, no) |
				    IO_STATE(R_TLB_LO, valid, no) |
				    IO_STATE(R_TLB_LO, kernel, no) |
				    IO_STATE(R_TLB_LO, we, no) |
				    IO_FIELD(R_TLB_LO, pfn, 0);
		}
	}
	local_irq_restore(flags);
}
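
/* Note on the addr write-back above: unlike the other flushes, the vpn
 * written is the page's own address rather than i & 0xf. Presumably this
 * is safe because the entry already sits in the set that vpn indexes and
 * is being overwritten in place, which is why "same addr as before
 * works".
 */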
/* invalidate a page range */

void
flush_tlb_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	int page_id = mm->context;
	int i;
	unsigned long flags;

	D(printk("tlb: flush range %p<->%p in context %d (%p)\n",
		 (void *)start, (void *)end, page_id, mm));

	if (page_id == NO_CONTEXT)
		return;

	start &= PAGE_MASK; /* probably not necessary */
	end &= PAGE_MASK;   /* ditto */

	/* invalidate those TLB entries that match both the mm context
	 * and the virtual address range
	 */

	local_save_flags(flags);
	local_irq_disable();
	for (i = 0; i < NUM_TLB_ENTRIES; i++) {
		unsigned long tlb_hi, vpn;

		*R_TLB_SELECT = IO_FIELD(R_TLB_SELECT, index, i);
		tlb_hi = *R_TLB_HI;
		vpn = tlb_hi & PAGE_MASK;
		if (IO_EXTRACT(R_TLB_HI, page_id, tlb_hi) == page_id &&
		    vpn >= start && vpn < end) {
			*R_TLB_HI = IO_FIELD(R_TLB_HI, page_id, INVALID_PAGEID) |
				    IO_FIELD(R_TLB_HI, vpn, i & 0xf);

			*R_TLB_LO = IO_STATE(R_TLB_LO, global, no) |
				    IO_STATE(R_TLB_LO, valid, no) |
				    IO_STATE(R_TLB_LO, kernel, no) |
				    IO_STATE(R_TLB_LO, we, no) |
				    IO_FIELD(R_TLB_LO, pfn, 0);
		}
	}
	local_irq_restore(flags);
}
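
/* The select-then-invalidate pattern repeats in flush_tlb_all,
 * flush_tlb_mm and flush_tlb_range (flush_tlb_page differs only in the
 * vpn it writes back). A helper along these lines - hypothetical, not
 * part of the original file - could factor it out:
 */
#if 0
static inline void invalidate_tlb_entry(int i)
{
	*R_TLB_SELECT = IO_FIELD(R_TLB_SELECT, index, i);
	*R_TLB_HI = IO_FIELD(R_TLB_HI, page_id, INVALID_PAGEID) |
		    IO_FIELD(R_TLB_HI, vpn, i & 0xf);
	*R_TLB_LO = IO_STATE(R_TLB_LO, global, no) |
		    IO_STATE(R_TLB_LO, valid, no) |
		    IO_STATE(R_TLB_LO, kernel, no) |
		    IO_STATE(R_TLB_LO, we, no) |
		    IO_FIELD(R_TLB_LO, pfn, 0);
}
#endif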
/* dump the entire TLB for debug purposes */

void
dump_tlb_all(void)
{
	int i;
	unsigned long flags;

	printk("TLB dump. LO is: pfn | reserved | global | valid | kernel | we |\n");

	local_save_flags(flags);
	local_irq_disable();
	for (i = 0; i < NUM_TLB_ENTRIES; i++) {
		*R_TLB_SELECT = IO_FIELD(R_TLB_SELECT, index, i);
		printk("Entry %d: HI 0x%08lx, LO 0x%08lx\n",
		       i, *R_TLB_HI, *R_TLB_LO);
	}
	local_irq_restore(flags);
}
/* called in schedule() just before actually doing the switch_to */

void
switch_mm(struct mm_struct *prev, struct mm_struct *next,
	  struct task_struct *tsk)
{
	/* make sure we have a context */

	get_mmu_context(next);

	/* remember the pgd for the fault handlers.
	 * this is similar to the pgd register in some other CPUs.
	 * we need our own copy of it because current and active_mm
	 * might be invalid at points where we still need to dereference
	 * the pgd.
	 */

	current_pgd = next->pgd;

	/* switch context in the MMU */

	D(printk("switching mmu_context to %d (%p)\n", next->context, next));

	*R_MMU_CONTEXT = IO_FIELD(R_MMU_CONTEXT, page_id, next->context);
}
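
/* For reference, the path by which the generic scheduler reaches this:
 *
 *   schedule() -> context_switch() -> switch_mm(prev_mm, next_mm, next)
 *
 * switch_to() runs only afterwards, so R_MMU_CONTEXT already names the
 * new page_id by the time the next task executes its first user access.
 */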