#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/mount.h>
#include <linux/seq_file.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>

#include <asm/elf.h>
#include <asm/uaccess.h>
#include <asm/tlbflush.h>
#include "internal.h"
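
/*
 * Memory reporting for /proc/<pid>: the text-buffer helpers used by
 * /proc/<pid>/status and /proc/<pid>/statm, plus the seq_file
 * implementations behind /proc/<pid>/maps and /proc/<pid>/smaps.
 */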
char *task_mem(struct mm_struct *mm, char *buffer)
{
        unsigned long data, text, lib;
        unsigned long hiwater_vm, total_vm, hiwater_rss, total_rss;

        /*
         * Note: to minimize their overhead, mm maintains hiwater_vm and
         * hiwater_rss only when about to *lower* total_vm or rss.  Any
         * collector of these hiwater stats must therefore get total_vm
         * and rss too, which will usually be the higher.  Barriers? not
         * worth the effort, such snapshots can always be inconsistent.
         */
        hiwater_vm = total_vm = mm->total_vm;
        if (hiwater_vm < mm->hiwater_vm)
                hiwater_vm = mm->hiwater_vm;
        hiwater_rss = total_rss = get_mm_rss(mm);
        if (hiwater_rss < mm->hiwater_rss)
                hiwater_rss = mm->hiwater_rss;

        data = mm->total_vm - mm->shared_vm - mm->stack_vm;
        text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK)) >> 10;
        lib = (mm->exec_vm << (PAGE_SHIFT-10)) - text;
        buffer += sprintf(buffer,
                "VmPeak:\t%8lu kB\n"
                "VmSize:\t%8lu kB\n"
                "VmLck:\t%8lu kB\n"
                "VmHWM:\t%8lu kB\n"
                "VmRSS:\t%8lu kB\n"
                "VmData:\t%8lu kB\n"
                "VmStk:\t%8lu kB\n"
                "VmExe:\t%8lu kB\n"
                "VmLib:\t%8lu kB\n"
                "VmPTE:\t%8lu kB\n"
                "StaBrk:\t%08lx kB\n"
                "Brk:\t%08lx kB\n"
                "StaStk:\t%08lx kB\n",
                hiwater_vm << (PAGE_SHIFT-10),
                (total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
                mm->locked_vm << (PAGE_SHIFT-10),
                hiwater_rss << (PAGE_SHIFT-10),
                total_rss << (PAGE_SHIFT-10),
                data << (PAGE_SHIFT-10),
                mm->stack_vm << (PAGE_SHIFT-10), text, lib,
                (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10,
                mm->start_brk, mm->brk, mm->start_stack);
#ifdef __i386__
        /* exec-shield: report the ascii-armor executable limit */
        if (!nx_enabled)
                buffer += sprintf(buffer,
                        "ExecLim:\t%08lx\n", mm->context.exec_limit);
#endif
        return buffer;
}
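
/* Total virtual size in bytes (mm->total_vm is kept in pages). */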
unsigned long task_vsize(struct mm_struct *mm)
{
        return PAGE_SIZE * mm->total_vm;
}
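
/*
 * Page-granular counters for /proc/<pid>/statm: "shared" here is the
 * file-backed rss, and resident is file rss plus anonymous rss.
 */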
int task_statm(struct mm_struct *mm, int *shared, int *text,
               int *data, int *resident)
{
        *shared = get_mm_counter(mm, file_rss);
        *text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
                                                                >> PAGE_SHIFT;
        *data = mm->total_vm - mm->shared_vm;
        *resident = *shared + get_mm_counter(mm, anon_rss);
        return mm->total_vm;
}
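
/*
 * Resolve /proc/<pid>/exe: walk the vma list under mmap_sem and take
 * references on the file behind the first VM_EXECUTABLE mapping.
 */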
int proc_exe_link(struct inode *inode, struct dentry **dentry, struct vfsmount **mnt)
{
        struct vm_area_struct * vma;
        int result = -ENOENT;
        struct task_struct *task = proc_task(inode);
        struct mm_struct * mm = get_task_mm(task);

        if (!mm)
                goto out;
        down_read(&mm->mmap_sem);

        vma = mm->mmap;
        while (vma) {
                if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
                        break;
                vma = vma->vm_next;
        }

        if (vma) {
                *mnt = mntget(vma->vm_file->f_vfsmnt);
                *dentry = dget(vma->vm_file->f_dentry);
                result = 0;
        }

        up_read(&mm->mmap_sem);
        mmput(mm);
out:
        return result;
}
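
/*
 * Pad the line out to a fixed column before the pathname, so maps
 * output stays aligned regardless of pointer width ("%n" in the maps
 * format string stored the length printed so far into len).
 */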
static void pad_len_spaces(struct seq_file *m, int len)
{
        len = 25 + sizeof(void*) * 6 - len;
        if (len < 1)
                len = 1;
        seq_printf(m, "%*c", len, ' ');
}
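
/* Per-vma totals for /proc/<pid>/smaps, accumulated in bytes. */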
struct mem_size_stats
{
        unsigned long resident;
        unsigned long shared_clean;
        unsigned long shared_dirty;
        unsigned long private_clean;
        unsigned long private_dirty;
};
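
/*
 * Emit one /proc/<pid>/maps line for a vma, e.g. (illustrative):
 *
 *   08048000-0804c000 r-xp 00000000 03:01 12345     /bin/cat
 *
 * With a non-NULL mss, the smaps size breakdown is appended as well.
 * A mapping is shown executable if VM_EXEC is set, or if it lies below
 * the exec-shield exec_limit.
 */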
static int show_map_internal(struct seq_file *m, void *v, struct mem_size_stats *mss)
{
        struct task_struct *task = m->private;
        struct vm_area_struct *vma = v;
        struct mm_struct *mm = vma->vm_mm;
        struct file *file = vma->vm_file;
        int flags = vma->vm_flags;
        unsigned long ino = 0;
        dev_t dev = 0;
        int len;

        if (file) {
                struct inode *inode = vma->vm_file->f_dentry->d_inode;
                dev = inode->i_sb->s_dev;
                ino = inode->i_ino;
        }

        seq_printf(m, "%08lx-%08lx %c%c%c%c %08lx %02x:%02x %lu %n",
                        vma->vm_start,
                        vma->vm_end,
                        flags & VM_READ ? 'r' : '-',
                        flags & VM_WRITE ? 'w' : '-',
                        (flags & VM_EXEC ||
                                (vma->vm_start < task->mm->context.exec_limit))
                                        ? 'x' : '-',
                        flags & VM_MAYSHARE ? 's' : 'p',
                        vma->vm_pgoff << PAGE_SHIFT,
                        MAJOR(dev), MINOR(dev), ino, &len);

        /*
         * Print the dentry name for named mappings, and a
         * special [heap] marker for the heap:
         */
        if (file) {
                pad_len_spaces(m, len);
                seq_path(m, file->f_vfsmnt, file->f_dentry, "\n");
        } else {
                if (mm) {
                        if (vma->vm_end == mm->brk) {
                                pad_len_spaces(m, len);
                                seq_puts(m, "[heap]");
                        } else if (vma->vm_start <= mm->start_stack &&
                                   vma->vm_end >= mm->start_stack) {
                                pad_len_spaces(m, len);
                                seq_puts(m, "[stack]");
                        } else if (vma->vm_start ==
                                   (unsigned long)mm->context.vdso) {
                                pad_len_spaces(m, len);
                                seq_puts(m, "[vdso]");
                        }
                } else {
                        /* The gate vma has no mm: label it [vdso] too. */
                        pad_len_spaces(m, len);
                        seq_puts(m, "[vdso]");
                }
        }
        seq_putc(m, '\n');

        if (mss)
                seq_printf(m,
                           "Size:          %8lu kB\n"
                           "Rss:           %8lu kB\n"
                           "Shared_Clean:  %8lu kB\n"
                           "Shared_Dirty:  %8lu kB\n"
                           "Private_Clean: %8lu kB\n"
                           "Private_Dirty: %8lu kB\n",
                           (vma->vm_end - vma->vm_start) >> 10,
                           mss->resident >> 10,
                           mss->shared_clean >> 10,
                           mss->shared_dirty >> 10,
                           mss->private_clean >> 10,
                           mss->private_dirty >> 10);

        if (m->count < m->size)  /* vma is copied successfully */
                m->version = (vma != get_gate_vma(task)) ? vma->vm_start : 0;
        return 0;
}
static int show_map(struct seq_file *m, void *v)
{
        return show_map_internal(m, v, NULL);
}
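
/*
 * Leaf of the four-level walk: scan the ptes under one pmd with the
 * page-table lock held.  A present page counts toward rss; it is
 * "shared" when mapped more than once (page_mapcount >= 2) and
 * "private" otherwise, split clean/dirty by the pte dirty bit.
 */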
static void smaps_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
                                unsigned long addr, unsigned long end,
                                struct mem_size_stats *mss)
{
        pte_t *pte, ptent;
        spinlock_t *ptl;
        struct page *page;

        pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
        do {
                ptent = *pte;
                if (!pte_present(ptent))
                        continue;

                mss->resident += PAGE_SIZE;

                page = vm_normal_page(vma, addr, ptent);
                if (!page)
                        continue;

                if (page_mapcount(page) >= 2) {
                        if (pte_dirty(ptent))
                                mss->shared_dirty += PAGE_SIZE;
                        else
                                mss->shared_clean += PAGE_SIZE;
                } else {
                        if (pte_dirty(ptent))
                                mss->private_dirty += PAGE_SIZE;
                        else
                                mss->private_clean += PAGE_SIZE;
                }
        } while (pte++, addr += PAGE_SIZE, addr != end);
        pte_unmap_unlock(pte - 1, ptl);
}
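
/*
 * The walkers below descend pgd -> pud -> pmd, clipping [addr, end)
 * at each level and skipping empty or bad entries.
 */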
static inline void smaps_pmd_range(struct vm_area_struct *vma, pud_t *pud,
                                unsigned long addr, unsigned long end,
                                struct mem_size_stats *mss)
{
        pmd_t *pmd;
        unsigned long next;

        pmd = pmd_offset(pud, addr);
        do {
                next = pmd_addr_end(addr, end);
                if (pmd_none_or_clear_bad(pmd))
                        continue;
                smaps_pte_range(vma, pmd, addr, next, mss);
        } while (pmd++, addr = next, addr != end);
}
static inline void smaps_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
                                unsigned long addr, unsigned long end,
                                struct mem_size_stats *mss)
{
        pud_t *pud;
        unsigned long next;

        pud = pud_offset(pgd, addr);
        do {
                next = pud_addr_end(addr, end);
                if (pud_none_or_clear_bad(pud))
                        continue;
                smaps_pmd_range(vma, pud, addr, next, mss);
        } while (pud++, addr = next, addr != end);
}
static inline void smaps_pgd_range(struct vm_area_struct *vma,
                                unsigned long addr, unsigned long end,
                                struct mem_size_stats *mss)
{
        pgd_t *pgd;
        unsigned long next;

        pgd = pgd_offset(vma->vm_mm, addr);
        do {
                next = pgd_addr_end(addr, end);
                if (pgd_none_or_clear_bad(pgd))
                        continue;
                smaps_pud_range(vma, pgd, addr, next, mss);
        } while (pgd++, addr = next, addr != end);
}
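
/*
 * /proc/<pid>/smaps: gather the per-vma byte counts, then print the
 * usual maps line followed by the size breakdown.  Hugetlb vmas have
 * no pte-level mappings to walk, so they are skipped.
 */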
static int show_smap(struct seq_file *m, void *v)
{
        struct vm_area_struct *vma = v;
        struct mem_size_stats mss;

        memset(&mss, 0, sizeof mss);
        if (vma->vm_mm && !is_vm_hugetlb_page(vma))
                smaps_pgd_range(vma, vma->vm_start, vma->vm_end, &mss);
        return show_map_internal(m, v, &mss);
}
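
/*
 * seq_file iterator.  m->version carries the last address shown across
 * read() calls, so a re-entered m_start can restart from find_vma()
 * instead of rescanning the list; the gate vma (if any) is emitted as
 * a tail entry after the ordinary vmas.  mm_for_maps() is expected to
 * return the mm with mmap_sem already read-held; it is released here
 * or in m_stop.
 */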
static void *m_start(struct seq_file *m, loff_t *pos)
{
        struct task_struct *task = m->private;
        unsigned long last_addr = m->version;
        struct mm_struct *mm;
        struct vm_area_struct *vma, *tail_vma;
        loff_t l = *pos;

        /*
         * We remember last_addr rather than next_addr to hit with
         * mmap_cache most of the time. We have zero last_addr at
         * the beginning and also after lseek. We will have -1 last_addr
         * after the end of the vmas.
         */
        if (last_addr == -1UL)
                return NULL;

        mm = mm_for_maps(task);
        if (!mm)
                return NULL;

        tail_vma = get_gate_vma(task);

        /* Start with last addr hint */
        if (last_addr && (vma = find_vma(mm, last_addr))) {
                vma = vma->vm_next;
                goto out;
        }

        /*
         * Check the vma index is within the range and do
         * sequential scan until m_index.
         */
        vma = NULL;
        if ((unsigned long)l < mm->map_count) {
                vma = mm->mmap;
                while (l-- && vma)
                        vma = vma->vm_next;
                goto out;
        }

        if (l != mm->map_count)
                tail_vma = NULL;        /* After gate vma */

out:
        if (vma)
                return vma;

        /* End of vmas has been reached */
        m->version = (tail_vma != NULL) ? 0 : -1UL;
        up_read(&mm->mmap_sem);
        mmput(mm);
        return tail_vma;
}
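
/*
 * Drop the lock and reference taken in m_start.  Nothing to do when
 * iteration already ran off the end: v is NULL or the gate vma, whose
 * mm was released when the last real vma was returned.
 */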
static void m_stop(struct seq_file *m, void *v)
{
        struct task_struct *task = m->private;
        struct vm_area_struct *vma = v;

        if (vma && vma != get_gate_vma(task)) {
                struct mm_struct *mm = vma->vm_mm;
                up_read(&mm->mmap_sem);
                mmput(mm);
        }
}
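
/*
 * Advance to the next vma, falling through to the gate vma once the
 * ordinary list is exhausted; the mm is released via m_stop before
 * the tail entry is returned.
 */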
static void *m_next(struct seq_file *m, void *v, loff_t *pos)
{
        struct task_struct *task = m->private;
        struct vm_area_struct *vma = v;
        struct vm_area_struct *tail_vma = get_gate_vma(task);

        (*pos)++;
        if (vma && (vma != tail_vma) && vma->vm_next)
                return vma->vm_next;
        m_stop(m, v);
        return (vma != tail_vma) ? tail_vma : NULL;
}
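
/*
 * The /proc/<pid>/{maps,smaps,numa_maps} files share the iterator;
 * they differ only in the show callback that formats each vma.
 */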
struct seq_operations proc_pid_maps_op = {
        .start  = m_start,
        .next   = m_next,
        .stop   = m_stop,
        .show   = show_map
};

struct seq_operations proc_pid_smaps_op = {
        .start  = m_start,
        .next   = m_next,
        .stop   = m_stop,
        .show   = show_smap
};
#ifdef CONFIG_NUMA
extern int show_numa_map(struct seq_file *m, void *v);

struct seq_operations proc_pid_numa_maps_op = {
        .start  = m_start,
        .next   = m_next,
        .stop   = m_stop,
        .show   = show_numa_map
};
#endif