/*
 * File...........: arch/s390/mm/dcss.c
 * Author(s)......: Steven Shultz <shultzss@us.ibm.com>
 *                  Carsten Otte <cotte@de.ibm.com>
 * Bugreports.to..: <Linux390@de.ibm.com>
 * thanks to Rob M van der Heij
 * - he wrote the diag64 function
 * (C) IBM Corporation 2002
 */
11 #include <linux/kernel.h>
12 #include <linux/string.h>
13 #include <linux/spinlock.h>
14 #include <linux/list.h>
15 #include <linux/slab.h>
16 #include <linux/module.h>
17 #include <linux/bootmem.h>
19 #include <asm/ebcdic.h>
20 #include <asm/errno.h>
21 #include <asm/extmem.h>
22 #include <asm/cpcmd.h>
23 #include <linux/ctype.h>
/*
 * NOTE(review): the #ifdef DCSS_DEBUG / #else / #endif lines were lost in
 * extraction, leaving two conflicting PRINT_DEBUG definitions; the
 * conditional is restored here (and the stray per-line numbers dropped).
 * Comment out DCSS_DEBUG to turn PRINT_DEBUG into a no-op.
 */
#define DCSS_DEBUG	/* Debug messages on/off */

#define DCSS_NAME "extmem"
#ifdef DCSS_DEBUG
#define PRINT_DEBUG(x...)	printk(KERN_DEBUG DCSS_NAME " debug:" x)
#else
#define PRINT_DEBUG(x...)	do {} while (0)
#endif
#define PRINT_INFO(x...)	printk(KERN_INFO DCSS_NAME " info:" x)
#define PRINT_WARN(x...)	printk(KERN_WARNING DCSS_NAME " warning:" x)
#define PRINT_ERR(x...)		printk(KERN_ERR DCSS_NAME " error:" x)
/*
 * Subcodes of DIAGNOSE X'64', passed as "func" to dcss_diag() and used by
 * dcss_diag_query().  (Stray extraction line numbers removed; values
 * unchanged.)  Within this file: DCSS_LOADNSR loads a segment for the
 * exclusive-rw case, DCSS_LOADNOLY for all other load modes, DCSS_PURGESEG
 * removes a loaded segment, DCSS_QACTV is stored in qin64.qopcode for the
 * extended query, and DCSS_SEGEXT selects that extended query function.
 */
#define DCSS_LOADSHR	0x00	/* not referenced in this chunk */
#define DCSS_LOADNSR	0x04	/* load non-shared (exclusive) */
#define DCSS_PURGESEG	0x08	/* purge segment from address space */
#define DCSS_FINDSEG	0x0c	/* note: same value as DCSS_QACTV */
#define DCSS_LOADNOLY	0x10	/* load in the segment's own mode */
#define DCSS_SEGEXT	0x18	/* extended segment query */
#define DCSS_QACTV	0x0c	/* "query active" opcode for qin64 */
47 struct list_head list;
49 unsigned long start_addr;
56 static spinlock_t dcss_lock = SPIN_LOCK_UNLOCKED;
57 static struct list_head dcss_list = LIST_HEAD_INIT(dcss_list);
59 unsigned long addr, size, type;
60 } memory_chunk[MEMORY_CHUNKS];
63 * Create the 8 bytes, ebcdic VM segment name from
/*
 * NOTE(review): extraction artifact — the rest of this header comment, the
 * function braces, the '\0' early-break, the blank-padding of the remainder
 * and the ASCII->EBCDIC conversion are in original lines not visible here.
 */
66 static void inline dcss_mkname(char *name, char *dcss_name)
/* Copy the ASCII name, upper-cased, into dcss_name.
 * NOTE(review): "i <= 8" touches index 8, i.e. a 9th byte — safe only if the
 * destination buffer is at least 9 bytes.  The callers' declarations of
 * dcss_name are not visible in this chunk; verify against the full file. */
70 for (i = 0; i <= 8; i++) {
73 dcss_name[i] = toupper(name[i]);
81 * Perform a function on a dcss segment.
/*
 * NOTE(review): extraction artifact — several original lines are missing in
 * this fragment (return type line, local declarations, the diagnose
 * instruction itself, the stores through ret1/ret2 and the final return).
 * Wrapper around DIAGNOSE X'64': func selects the subcode (DCSS_LOADNSR,
 * DCSS_PURGESEG, ...) and parameter is the rx operand, e.g. the EBCDIC
 * segment name; ret1/ret2 presumably receive the two register results —
 * confirm against the full file.
 */
84 dcss_diag (__u8 func, void *parameter,
85 unsigned long *ret1, unsigned long *ret2)
/* rx/ry are the register operands handed to the diagnose instruction */
90 rx = (unsigned long) parameter;
91 ry = (unsigned long) func;
/* On 64-bit kernels the CPU is switched to 31-bit addressing mode around
 * the diagnose (sam31/sam64), per the visible asm comments below. */
93 #ifdef CONFIG_ARCH_S390X
94 " sam31\n" // switch to 31 bit
96 " sam64\n" // switch back to 64 bit
102 : "+d" (rx), "+d" (ry), "=d" (rc) : : "cc" );
109 /* use to issue "extended" dcss query */
/*
 * NOTE(review): extraction artifact — lines are missing throughout this
 * fragment (the qin64/qout64/segentry struct layouts, error checks after the
 * allocations, the asm body, the *rwattr/*shattr stores inside the loop and
 * the function's return).  Comments below describe only what is visible.
 */
111 dcss_diag_query(char *name, int *rwattr, int *shattr, unsigned long *segstart, unsigned long *segend)
114 unsigned long rx, ry;
116 typedef struct segentry {
134 unsigned int qoutptr;
/* Query input/output areas; allocated GFP_DMA so they are 31-bit
 * addressable for the diagnose. */
139 struct qin64 *qinarea;
140 struct qout64 *qoutarea;
142 qinarea = (struct qin64*) get_zeroed_page (GFP_DMA);
147 qoutarea = (struct qout64*) get_zeroed_page (GFP_DMA);
/* presumably the second allocation failed here, so release the first */
150 free_page ((unsigned long) qinarea);
/* NOTE(review): redundant — get_zeroed_page() already returned zeroed pages */
153 memset (qinarea,0,PAGE_SIZE);
154 memset (qoutarea,0,PAGE_SIZE);
156 qinarea->qopcode = DCSS_QACTV; /* do a query for active
158 qinarea->qoutptr = (unsigned long) qoutarea;
159 qinarea->qoutlen = sizeof(struct qout64);
161 /* Move segment name into double word aligned
162 field and pad with blanks to 8 long.
/* j stops advancing at the NUL, so the tail of qname is blank-padded */
165 for (i = j = 0 ; i < 8; i++) {
166 qinarea->qname[i] = (name[j] == '\0') ? ' ' : name[j++];
169 /* name already in EBCDIC */
170 /* ASCEBC ((void *)&qinarea.qname, 8); */
172 /* set the assembler variables */
173 rx = (unsigned long) qinarea;
174 ry = DCSS_SEGEXT; /* this is extended function */
176 /* issue diagnose x'64' */
177 __asm__ __volatile__(
178 #ifdef CONFIG_ARCH_S390X
179 " sam31\n" // switch to 31 bit
181 " sam64\n" // switch back to 64 bit
187 : "+d" (rx), "+d" (ry), "=d" (rc) : : "cc" );
189 /* parse the query output area */
190 *segstart=qoutarea->segstart;
191 *segend=qoutarea->segend;
/* NOTE(review): segments described by more than 6 ranges apparently cannot
 * be represented; the branch body is in lines not shown — confirm. */
201 if (qoutarea->segcnt > 6)
/* Scan the returned ranges; the type byte (thisseg[3]) is tested with
 * 2/3/6 grouped together and 1/3/5 grouped together.  The corresponding
 * *rwattr/*shattr assignments are in lines not shown — presumably
 * read-write vs. shared; verify against the full file. */
212 for (i=0; i < qoutarea->segrcnt; i++) {
213 if (qoutarea->segout[i].thisseg[3] == 2 ||
214 qoutarea->segout[i].thisseg[3] == 3 ||
215 qoutarea->segout[i].thisseg[3] == 6 )
217 if (qoutarea->segout[i].thisseg[3] == 1 ||
218 qoutarea->segout[i].thisseg[3] == 3 ||
219 qoutarea->segout[i].thisseg[3] == 5 )
221 } /* end of for statement */
/* release both query areas before returning */
224 free_page ((unsigned long) qoutarea);
225 free_page ((unsigned long) qinarea);
231 * Load a DCSS segment via the diag 0x64.
/*
 * NOTE(review): extraction artifact — many original lines are missing from
 * this fragment (remaining parameters, local declarations, the switch
 * header, early-exit bodies, error codes, labels and closing braces), and
 * each surviving line carries its original line number.  Comments below
 * describe only the visible logic; confirm details against the full file.
 *
 * Visible flow: normalize the name to its 8-byte EBCDIC form, look for an
 * already-loaded copy, query the segment's attributes, reject overlaps with
 * real memory and with other loaded segments, reconcile the requested
 * segtype with the actual (shattr, rwattr) pair, then load via diag 0x64
 * and publish the segment on dcss_list.  All of this runs under dcss_lock.
 */
233 int segment_load(char *name, int segtype, unsigned long *addr,
238 struct dcss_segment *seg, *tmp;
240 unsigned long segstart, segend;
246 dcss_mkname(name, dcss_name);
247 /* search for the dcss in list of currently loaded segments */
248 spin_lock(&dcss_lock);
250 list_for_each(l, &dcss_list) {
251 tmp = list_entry(l, struct dcss_segment, list);
/* match: the already-loaded handling appears further down (the jump to it
 * is in lines not shown) */
252 if (memcmp(tmp->dcss_name, dcss_name, 8) == 0) {
259 /* find out the attributes of this
261 dcss_diag_query(dcss_name, &rwattr, &shattr, &segstart, &segend);
262 /* does segment collide with main memory? */
263 for (i=0; i < MEMORY_CHUNKS; i++) {
/* skip chunks that are not ordinary memory (type != 0) and chunks that lie
 * entirely above or below [segstart, segend] */
264 if (memory_chunk[i].type != 0)
266 if (memory_chunk[i].addr > segend)
268 if (memory_chunk[i].addr + memory_chunk[i].size <= segstart)
/* overlap with real memory: bail out (error value in lines not shown) */
270 spin_unlock(&dcss_lock);
273 /* or does it collide with other (loaded) segments? */
274 list_for_each(l, &dcss_list) {
275 tmp = list_entry(l, struct dcss_segment, list);
/* either endpoint falls inside an existing segment, or the new range fully
 * covers one */
276 if ((segstart <= tmp->end && segstart >= tmp->start_addr) ||
277 (segend <= tmp->end && segend >= tmp->start_addr) ||
278 (segstart <= tmp->start_addr && segend >= tmp->end)) {
279 PRINT_ERR("Segment Overlap!\n");
280 spin_unlock(&dcss_lock);
285 /* do case statement on segtype */
286 /* if asking for shared ro,
288 /* if asking for exclusive ro,
289 exclusive rw works */
/* rc ends up holding the mode actually used, which may be stronger than
 * requested (per the original comments above) */
292 case SEGMENT_SHARED_RO:
293 if (shattr > 1 || rwattr > 1) {
294 spin_unlock(&dcss_lock);
297 if (shattr == 0 && rwattr == 0)
298 rc = SEGMENT_EXCLUSIVE_RO;
299 if (shattr == 0 && rwattr == 1)
300 rc = SEGMENT_EXCLUSIVE_RW;
301 if (shattr == 1 && rwattr == 0)
302 rc = SEGMENT_SHARED_RO;
303 if (shattr == 1 && rwattr == 1)
304 rc = SEGMENT_SHARED_RW;
307 case SEGMENT_SHARED_RW:
308 if (shattr > 1 || rwattr != 1) {
309 spin_unlock(&dcss_lock);
313 rc = SEGMENT_EXCLUSIVE_RW;
315 rc = SEGMENT_SHARED_RW;
319 case SEGMENT_EXCLUSIVE_RO:
320 if (shattr > 0 || rwattr > 1) {
321 spin_unlock(&dcss_lock);
325 rc = SEGMENT_EXCLUSIVE_RO;
327 rc = SEGMENT_EXCLUSIVE_RW;
/* exclusive-rw request: the attribute check is commented out below */
331 case SEGMENT_EXCLUSIVE_RW:
332 /* if (shattr != 0 || rwattr != 1) {
333 spin_unlock(&dcss_lock);
337 rc = SEGMENT_EXCLUSIVE_RW;
342 spin_unlock(&dcss_lock);
/* NOTE(review): a kmalloc NULL check is not visible in this fragment —
 * verify it exists in the lines not shown */
346 seg = kmalloc(sizeof(struct dcss_segment), GFP_DMA);
348 memcpy(seg->dcss_name, dcss_name, 8);
/* exclusive mode loads non-shared (LOADNSR); all other modes use the
 * LOADNOLY path further down */
349 if (rc == SEGMENT_EXCLUSIVE_RW) {
350 if (dcss_diag(DCSS_LOADNSR, seg->dcss_name,
351 &seg->start_addr, &seg->end) == 0) {
/* the loaded segment must fit below the kernel's low-memory limit */
352 if (seg->end < max_low_pfn*PAGE_SIZE ) {
353 atomic_set(&seg->ref_count, 1);
354 list_add(&seg->list, &dcss_list);
355 *addr = seg->start_addr;
/* record how CP has the segment defined, for segment_replace() */
358 if (shattr == 1 && rwattr == 1)
359 seg->shared_attr = SEGMENT_SHARED_RW;
360 else if (shattr == 1 && rwattr == 0)
361 seg->shared_attr = SEGMENT_SHARED_RO;
363 seg->shared_attr = SEGMENT_EXCLUSIVE_RW;
/* segment landed too high: purge it again */
365 dcss_diag(DCSS_PURGESEG, seg->dcss_name, &dummy, &dummy);
375 if (dcss_diag(DCSS_LOADNOLY, seg->dcss_name,
376 &seg->start_addr, &seg->end) == 0) {
377 if (seg->end < max_low_pfn*PAGE_SIZE ) {
378 atomic_set(&seg->ref_count, 1);
379 list_add(&seg->list, &dcss_list);
380 *addr = seg->start_addr;
383 seg->shared_attr = rc;
385 dcss_diag(DCSS_PURGESEG, seg->dcss_name, &dummy, &dummy);
/* already-loaded case: only a mode-compatible caller may attach */
396 if ((segtype == SEGMENT_EXCLUSIVE_RW) && (seg->dcss_attr != SEGMENT_EXCLUSIVE_RW)) {
397 PRINT_ERR("Segment already loaded in other mode than EXCLUSIVE_RW!\n");
400 /* reload segment in exclusive mode */
401 /* dcss_diag(DCSS_LOADNSR, seg->dcss_name,
402 &seg->start_addr, &seg->end);
403 seg->dcss_attr = SEGMENT_EXCLUSIVE_RW;*/
405 if ((segtype != SEGMENT_EXCLUSIVE_RW) && (seg->dcss_attr == SEGMENT_EXCLUSIVE_RW)) {
406 PRINT_ERR("Segment already loaded in EXCLUSIVE_RW mode!\n");
/* compatible: just take another reference on the existing segment */
410 atomic_inc(&seg->ref_count);
411 *addr = seg->start_addr;
416 spin_unlock(&dcss_lock);
421 * Decrease the use count of a DCSS segment and remove
422 * it from the address space if nobody is using it
/*
 * NOTE(review): extraction artifact — lines are missing here (the opening
 * brace, the declarations of dcss_name and the dummy diag outputs, the
 * remaining dcss_diag arguments, the kfree of the entry and the closing
 * braces).  Only the visible logic is documented.
 */
425 void segment_unload(char *name)
429 struct list_head *l,*l_tmp;
430 struct dcss_segment *seg;
/* normalize to the 8-byte EBCDIC form used as the list key */
434 dcss_mkname(name, dcss_name);
435 spin_lock(&dcss_lock);
/* _safe iteration: the matching entry may be deleted inside the loop */
436 list_for_each_safe(l, l_tmp, &dcss_list) {
437 seg = list_entry(l, struct dcss_segment, list);
438 if (memcmp(seg->dcss_name, dcss_name, 8) == 0) {
/* drop one reference; only the last user purges the segment */
439 if (atomic_dec_return(&seg->ref_count) == 0) {
440 /* Last user of the segment is
442 list_del(&seg->list);
443 dcss_diag(DCSS_PURGESEG, seg->dcss_name,
450 spin_unlock(&dcss_lock);
454 * Replace an existing DCSS segment, so that machines
455 * that load it anew will see the new version.
/*
 * NOTE(review): extraction artifact — lines are missing here (the locals
 * dcss_name/mybuff1/mybuff2 and the initializers of mybeg/myend, the
 * sprintf argument continuations, and the loop/function closing braces).
 * Visible flow: find the loaded segment by EBCDIC name, rebuild its CP
 * definition with DEFSEG over the same page range, then SAVESEG it.
 */
457 void segment_replace(char *name)
461 struct dcss_segment *seg;
469 dcss_mkname(name, dcss_name);
471 memset (mybuff1, 0, sizeof(mybuff1));
472 memset (mybuff2, 0, sizeof(mybuff2));
474 spin_lock(&dcss_lock);
475 list_for_each(l, &dcss_list) {
476 seg = list_entry(l, struct dcss_segment, list);
477 if (memcmp(seg->dcss_name, dcss_name, 8) == 0) {
/* page-frame bounds of the loaded copy, used in the DEFSEG command */
478 mybeg = seg->start_addr >> 12;
479 myend = (seg->end) >> 12;
/* choose the DEFSEG range type matching how the segment was loaded:
 * EW exclusive-write, RO read-only, SW shared-write, SR shared-read */
480 if (seg->shared_attr == SEGMENT_EXCLUSIVE_RW)
481 sprintf(mybuff1, "DEFSEG %s %X-%X EW",
483 if (seg->shared_attr == SEGMENT_EXCLUSIVE_RO)
484 sprintf(mybuff1, "DEFSEG %s %X-%X RO",
486 if (seg->shared_attr == SEGMENT_SHARED_RW)
487 sprintf(mybuff1, "DEFSEG %s %X-%X SW",
489 if (seg->shared_attr == SEGMENT_SHARED_RO)
490 sprintf(mybuff1, "DEFSEG %s %X-%X SR",
/* NOTE(review): the lock is released here, inside the loop, once a match is
 * found — before the CP commands are issued; the unlock at the bottom only
 * runs when no match was found (myend still 0).  Fragile pattern, and it
 * relies on myend being initialized to 0 in lines not shown — verify. */
492 spin_unlock(&dcss_lock);
493 sprintf(mybuff2, "SAVESEG %s", name);
494 cpcmd(mybuff1, NULL, 80);
495 cpcmd(mybuff2, NULL, 80);
500 if (myend == 0) spin_unlock(&dcss_lock);
/* Entry points exported to other kernel modules.
 * (Stray extraction line numbers removed; symbols unchanged.) */
EXPORT_SYMBOL(segment_load);
EXPORT_SYMBOL(segment_unload);
EXPORT_SYMBOL(segment_replace);