1 /* Kernel module help for sparc64.
3 * Copyright (C) 2001 Rusty Russell.
4 * Copyright (C) 2002 David S. Miller.
7 #include <linux/moduleloader.h>
8 #include <linux/kernel.h>
10 #include <linux/vmalloc.h>
12 #include <linux/string.h>
13 #include <linux/slab.h>
14 #include <linux/vmalloc.h>
17 #include <asm/processor.h>
18 #include <asm/spitfire.h>
/* Private list of vm areas backing module mappings in the dedicated
 * MODULES_VADDR..MODULES_END window.  Kept separate from the global
 * vmlist; writers serialize via vmlist_lock (see module_free()). */
20 static struct vm_struct * modvmlist = NULL;
/* Tear down a module mapping previously created by module_map():
 * validate @addr, unlink the matching vm_struct from modvmlist, and
 * release its backing pages.  Caller must hold vmlist_lock for writing
 * (module_free() takes it around this call). */
22 static void module_unmap(void * addr)
24 struct vm_struct **p, *tmp;
/* Module mappings are always page-aligned; a misaligned pointer can
 * only come from a caller bug, so complain and bail out. */
29 if ((PAGE_SIZE-1) & (unsigned long) addr) {
30 printk("Trying to unmap module with bad address (%p)\n", addr);
/* Search the private list for the area starting at @addr so it can be
 * unlinked via *p. */
34 for (p = &modvmlist; (tmp = *p) != NULL; p = &tmp->next) {
35 if (tmp->addr == addr) {
40 printk("Trying to unmap nonexistent module vm area (%p)\n", addr);
/* Free every backing page.  A NULL slot presumably means the matching
 * allocation in module_map() failed part-way — skip it. */
46 for (i = 0; i < tmp->nr_pages; i++) {
47 if (unlikely(!tmp->pages[i]))
49 __free_page(tmp->pages[i]);
/* Allocate and map @size bytes of virtual space for a module inside the
 * MODULES_VADDR..MODULES_END window.  Performs a first-fit scan of the
 * (address-sorted) modvmlist to find a hole, allocates the backing pages
 * one at a time, and installs the mapping with map_vm_area().
 * Returns the mapped address, or NULL on failure (elided error paths). */
57 static void *module_map(unsigned long size)
59 struct vm_struct **p, *tmp, *area;
62 unsigned int nr_pages, array_size, i;
/* Round the request up to whole pages and bound it to the module area. */
64 size = PAGE_ALIGN(size);
65 if (!size || size > MODULES_LEN)
/* First-fit search: walk existing areas in address order until a gap
 * large enough for @size opens up before the next area. */
68 addr = (void *) MODULES_VADDR;
69 for (p = &modvmlist; (tmp = *p) != NULL; p = &tmp->next) {
70 if (size + (unsigned long) addr < (unsigned long) tmp->addr)
72 addr = (void *) (tmp->size + (unsigned long) tmp->addr);
74 if ((unsigned long) addr + size >= MODULES_END)
77 area = (struct vm_struct *) kmalloc(sizeof(*area), GFP_KERNEL);
/* The extra page is recorded in ->size only, acting as an unmapped
 * guard gap between consecutive module areas (vmalloc convention). */
80 area->size = size + PAGE_SIZE;
/* Build the page array that will back the mapping. */
88 nr_pages = size >> PAGE_SHIFT;
89 array_size = (nr_pages * sizeof(struct page *));
91 area->nr_pages = nr_pages;
92 area->pages = pages = kmalloc(array_size, GFP_KERNEL);
/* Zero first so a partial failure leaves NULL slots that the cleanup
 * paths (and module_unmap()) can recognize and skip. */
96 memset(area->pages, 0, array_size);
98 for (i = 0; i < area->nr_pages; i++) {
99 area->pages[i] = alloc_page(GFP_KERNEL);
100 if (unlikely(!area->pages[i]))
/* Install the pages into the page tables for this area. */
104 if (map_vm_area(area, PAGE_KERNEL, &pages)) {
/* Error unwind: release whatever pages were already allocated. */
113 for (i = 0; i < area->nr_pages; i++) {
115 __free_page(area->pages[i]);
/* Arch hook used by the generic module loader to obtain memory for a
 * module image.  Thin wrapper around module_map() that zeroes the new
 * region; maps allocation failure to ERR_PTR(-ENOMEM). */
124 void *module_alloc(unsigned long size)
128 /* We handle the zero case fine, unlike vmalloc */
132 ret = module_map(size);
134 ret = ERR_PTR(-ENOMEM);
136 memset(ret, 0, size);
/* Free memory returned from module_core_alloc/module_init_alloc */
/* Arch hook: release a region handed out by module_alloc().  Takes
 * vmlist_lock for writing to serialize module_unmap()'s walk/unlink of
 * the shared modvmlist against concurrent module_map()/module_unmap(). */
142 void module_free(struct module *mod, void *module_region)
144 write_lock(&vmlist_lock);
145 module_unmap(module_region);
146 write_unlock(&vmlist_lock);
147 /* FIXME: If module_region == mod->init_region, trim exception
/* Make generic code ignore STT_REGISTER dummy undefined symbols. */
/* Arch hook run before relocation: SPARC toolchains emit undefined
 * STT_REGISTER symbols describing global-register usage; retag them
 * SHN_ABS so the generic resolver does not treat them as unresolved. */
152 int module_frob_arch_sections(Elf_Ehdr *hdr,
/* Locate the symbol table section; error out if the image has none. */
162 for (symidx = 0; sechdrs[symidx].sh_type != SHT_SYMTAB; symidx++) {
163 if (symidx == hdr->e_shnum-1) {
164 printk("%s: no symtab found.\n", mod->name);
168 sym = (Elf64_Sym *)sechdrs[symidx].sh_addr;
/* The symtab's sh_link names its companion string table. */
169 strtab = (char *)sechdrs[sechdrs[symidx].sh_link].sh_addr;
/* Entry 0 is the reserved null symbol — start scanning at 1. */
171 for (i = 1; i < sechdrs[symidx].sh_size / sizeof(Elf_Sym); i++) {
172 if (sym[i].st_shndx == SHN_UNDEF &&
173 ELF64_ST_TYPE(sym[i].st_info) == STT_REGISTER)
174 sym[i].st_shndx = SHN_ABS;
/* Arch hook for SHT_REL sections.  sparc64 modules only use RELA
 * relocations (see apply_relocate_add()), so plain REL is rejected. */
179 int apply_relocate(Elf64_Shdr *sechdrs,
181 unsigned int symindex,
185 printk(KERN_ERR "module %s: non-ADD RELOCATION unsupported\n",
/* Arch hook: process one SHT_RELA section, patching each target
 * location in the already-loaded section with the resolved symbol
 * value plus addend, per the SPARC V9 relocation type. */
190 int apply_relocate_add(Elf64_Shdr *sechdrs,
192 unsigned int symindex,
197 Elf64_Rela *rel = (void *)sechdrs[relsec].sh_addr;
202 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
205 /* This is where to make the change */
/* sh_info of a RELA section names the section it patches. */
206 location = (u8 *)sechdrs[sechdrs[relsec].sh_info].sh_addr
208 loc32 = (u32 *) location;
/* Module area must live in the low 32 bits of the address space;
 * the PC-relative displacement math below relies on that. */
210 BUG_ON(((u64)location >> (u64)32) != (u64)0);
212 /* This is the symbol it is referring to. Note that all
213 undefined symbols have been resolved. */
214 sym = (Elf64_Sym *)sechdrs[symindex].sh_addr
215 + ELF64_R_SYM(rel[i].r_info);
216 v = sym->st_value + rel[i].r_addend;
/* Low byte of r_info's type field selects the relocation kind. */
218 switch (ELF64_R_TYPE(rel[i].r_info) & 0xff) {
/* 64-bit absolute value, stored big-endian one byte at a time so
 * an unaligned target location is safe. */
220 location[0] = v >> 56;
221 location[1] = v >> 48;
222 location[2] = v >> 40;
223 location[3] = v >> 32;
224 location[4] = v >> 24;
225 location[5] = v >> 16;
226 location[6] = v >> 8;
227 location[7] = v >> 0;
/* 32-bit absolute value, likewise byte-wise big-endian. */
231 location[0] = v >> 24;
232 location[1] = v >> 16;
233 location[2] = v >> 8;
234 location[3] = v >> 0;
/* PC-relative, word-aligned branch displacements: subtract the
 * patch address, then insert (v >> 2) into the instruction's
 * displacement field of the matching width. */
237 case R_SPARC_WDISP30:
238 v -= (Elf64_Addr) location;
239 *loc32 = (*loc32 & ~0x3fffffff) |
240 ((v >> 2) & 0x3fffffff);
243 case R_SPARC_WDISP22:
244 v -= (Elf64_Addr) location;
245 *loc32 = (*loc32 & ~0x3fffff) |
246 ((v >> 2) & 0x3fffff);
249 case R_SPARC_WDISP19:
250 v -= (Elf64_Addr) location;
251 *loc32 = (*loc32 & ~0x7ffff) |
252 ((v >> 2) & 0x7ffff);
/* Low 10 bits of the value (lo10 immediate field). */
256 *loc32 = (*loc32 & ~0x3ff) | (v & 0x3ff);
/* Bits 31..10 of the value (sethi-style hi22 field). */
260 *loc32 = (*loc32 & ~0x3fffff) |
261 ((v >> 10) & 0x3fffff);
/* 13-bit immediate field. */
265 *loc32 = (*loc32 & ~0x1fff) |
267 (ELF64_R_TYPE(rel[i].r_info) >> 8))
272 printk(KERN_ERR "module %s: Unknown relocation: %x\n",
274 (int) (ELF64_R_TYPE(rel[i].r_info) & 0xff));
/* Arch hook run after all relocations are applied.  Spitfire-class
 * CPUs have a virtually-indexed I-cache that is not coherent with
 * stores, so it must be flushed before the freshly written module
 * text can be executed. */
281 int module_finalize(const Elf_Ehdr *hdr,
282 const Elf_Shdr *sechdrs,
285 /* Cheetah's I-cache is fully coherent. */
286 if (tlb_type == spitfire) {
/* Invalidate every I-cache tag by stepping a line (32 bytes) at
 * a time over the whole cache. */
290 for (va = 0; va < (PAGE_SIZE << 1); va += 32)
291 spitfire_put_icache_tag(va, 0x0);
/* flush synchronizes the pipeline with the tag writes; %g6 is
 * presumably just a convenient register whose value is ignored
 * here — TODO confirm against the sparc64 flush semantics. */
292 __asm__ __volatile__("flush %g6");
298 void module_arch_cleanup(struct module *mod)