ftp://ftp.kernel.org/pub/linux/kernel/v2.6/linux-2.6.6.tar.bz2
[linux-2.6.git] fs/proc/task_nommu.c

#include <linux/mm.h>
#include <linux/file.h>
#include <linux/seq_file.h>

/*
 * Logic: we've got two memory sums for each process, "shared" and
 * "non-shared".  Shared memory may get counted more than once, once
 * for each process that owns it.  Non-shared memory is counted
 * accurately.
 */
char *task_mem(struct mm_struct *mm, char *buffer)
{
	unsigned long bytes = 0, sbytes = 0, slack = 0;
	struct mm_tblock_struct *tblock;

	down_read(&mm->mmap_sem);
	for (tblock = &mm->context.tblock; tblock; tblock = tblock->next) {
		if (!tblock->rblock)
			continue;
		/* the tblock itself is always private to this mm */
		bytes += kobjsize(tblock);
		/* the backing region counts as shared if the mm or the
		 * region itself has more than one user */
		if (atomic_read(&mm->mm_count) > 1 ||
		    tblock->rblock->refcount > 1) {
			sbytes += kobjsize(tblock->rblock->kblock);
			sbytes += kobjsize(tblock->rblock);
		} else {
			bytes += kobjsize(tblock->rblock->kblock);
			bytes += kobjsize(tblock->rblock);
			/* slack: space allocated but not requested */
			slack += kobjsize(tblock->rblock->kblock) -
					tblock->rblock->size;
		}
	}

	if (atomic_read(&mm->mm_count) > 1)
		sbytes += kobjsize(mm);
	else
		bytes += kobjsize(mm);

	if (current->fs && atomic_read(&current->fs->count) > 1)
		sbytes += kobjsize(current->fs);
	else
		bytes += kobjsize(current->fs);

	if (current->files && atomic_read(&current->files->count) > 1)
		sbytes += kobjsize(current->files);
	else
		bytes += kobjsize(current->files);

	if (current->sighand && atomic_read(&current->sighand->count) > 1)
		sbytes += kobjsize(current->sighand);
	else
		bytes += kobjsize(current->sighand);

	bytes += kobjsize(current); /* includes kernel stack */

	buffer += sprintf(buffer,
		"Mem:\t%8lu bytes\n"
		"Slack:\t%8lu bytes\n"
		"Shared:\t%8lu bytes\n",
		bytes, slack, sbytes);

	up_read(&mm->mmap_sem);
	return buffer;
}
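
/*
 * For reference, the text appended by the sprintf() above looks like
 * this (values purely illustrative):
 *
 *	Mem:	  114688 bytes
 *	Slack:	     512 bytes
 *	Shared:	   65536 bytes
 */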

unsigned long task_vsize(struct mm_struct *mm)
{
	struct mm_tblock_struct *tbp;
	unsigned long vsize = 0;

	for (tbp = &mm->context.tblock; tbp; tbp = tbp->next) {
		if (tbp->rblock)
			vsize += kobjsize(tbp->rblock->kblock);
	}

	return vsize;
}

int task_statm(struct mm_struct *mm, int *shared, int *text,
	       int *data, int *resident)
{
	struct mm_tblock_struct *tbp;
	int size = kobjsize(mm);

	for (tbp = &mm->context.tblock; tbp; tbp = tbp->next) {
		/* the first tblock is embedded in the mm and already
		 * covered by kobjsize(mm); only chained tblocks are
		 * separate allocations */
		if (tbp->next)
			size += kobjsize(tbp->next);
		if (tbp->rblock) {
			size += kobjsize(tbp->rblock);
			size += kobjsize(tbp->rblock->kblock);
		}
	}

	size += (*text = mm->end_code - mm->start_code);
	size += (*data = mm->start_stack - mm->start_data);

	*resident = size;
	return size;
}
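
/*
 * For context: the counts returned above feed the per-process statm
 * output in the generic /proc code.  The function below is an
 * editorial sketch of such a consumer, not the actual 2.6.6 caller;
 * its name and exact output format are illustrative assumptions.
 */
static int example_pid_statm(struct mm_struct *mm, char *buffer)
{
	int size = 0, resident = 0, shared = 0, text = 0, data = 0;

	if (mm)
		size = task_statm(mm, &shared, &text, &data, &resident);

	/* one space-separated column per count */
	return sprintf(buffer, "%d %d %d %d %d\n",
		       size, resident, shared, text, data);
}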

/*
 * Albert D. Cahalan suggested faking entries for the traditional
 * sections here.  This might be worth investigating.
 */
static int show_map(struct seq_file *m, void *v)
{
	return 0;
}
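
/*
 * A minimal sketch of the faked entries suggested above: emit one
 * /proc/<pid>/maps-style line each for the text and data/stack ranges
 * already tracked in the mm.  Editorial illustration only; it assumes
 * the open routine stashes the mm_struct pointer in m->private.
 */
static int example_show_map_faked(struct seq_file *m, void *v)
{
	struct mm_struct *mm = m->private;

	if (!mm)
		return 0;

	seq_printf(m, "%08lx-%08lx r-xp 00000000 00:00 0\n",
		   mm->start_code, mm->end_code);
	seq_printf(m, "%08lx-%08lx rw-p 00000000 00:00 0\n",
		   mm->start_data, mm->start_stack);
	return 0;
}
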
static void *m_start(struct seq_file *m, loff_t *pos)
{
	return NULL;
}
static void m_stop(struct seq_file *m, void *v)
{
}
static void *m_next(struct seq_file *m, void *v, loff_t *pos)
{
	return NULL;
}
struct seq_operations proc_pid_maps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_map
};
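
/*
 * For context: proc_pid_maps_op is hooked up elsewhere in fs/proc/
 * through the seq_file helpers.  The sketch below shows the usual
 * pattern; the names example_maps_open and example_maps_operations
 * are illustrative, not the actual code in fs/proc/base.c.
 */
static int example_maps_open(struct inode *inode, struct file *file)
{
	/* seq_open() allocates a struct seq_file in file->private_data
	 * and drives it with the start/next/stop/show callbacks above */
	return seq_open(file, &proc_pid_maps_op);
}

static struct file_operations example_maps_operations = {
	.open		= example_maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};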