/*
 * File...........: arch/s390/mm/dcss.c
 * Author(s)......: Steven Shultz <shultzss@us.ibm.com>
 *                  Carsten Otte <cotte@de.ibm.com>
 * Bugreports.to..: <Linux390@de.ibm.com>
 * thanks to Rob M van der Heij
 * - he wrote the diag64 function
 * (C) IBM Corporation 2002
 */
11 #include <linux/kernel.h>
12 #include <linux/string.h>
13 #include <linux/spinlock.h>
14 #include <linux/list.h>
15 #include <linux/slab.h>
16 #include <linux/module.h>
17 #include <linux/bootmem.h>
19 #include <asm/ebcdic.h>
20 #include <asm/errno.h>
21 #include <asm/extmem.h>
22 #include <asm/cpcmd.h>
23 #include <linux/ctype.h>
#define DCSS_DEBUG	/* Debug messages on/off */

#define DCSS_NAME "extmem"
/*
 * Message helpers.  PRINT_DEBUG expands to a real printk only while
 * DCSS_DEBUG is defined; otherwise it is a no-op statement.
 * Fix: both PRINT_DEBUG definitions were unconditional (a macro
 * redefinition) -- the DCSS_DEBUG conditional around them is restored.
 */
#ifdef DCSS_DEBUG
#define PRINT_DEBUG(x...) printk(KERN_DEBUG DCSS_NAME " debug:" x)
#else
#define PRINT_DEBUG(x...) do {} while (0)
#endif
#define PRINT_INFO(x...) printk(KERN_INFO DCSS_NAME " info:" x)
#define PRINT_WARN(x...) printk(KERN_WARNING DCSS_NAME " warning:" x)
#define PRINT_ERR(x...) printk(KERN_ERR DCSS_NAME " error:" x)
/*
 * Subcodes for diagnose x'64' (z/VM DCSS handling).
 * NOTE(review): mnemonic meanings inferred from their use below
 * (LOADNSR is issued for exclusive loads, SEGEXT for the extended
 * query) -- confirm against the CP Programming Services manual.
 * DCSS_QACTV shares the value 0x0c with DCSS_FINDSEG.
 */
#define DCSS_LOADSHR 0x00
#define DCSS_LOADNSR 0x04
#define DCSS_PURGESEG 0x08
#define DCSS_FINDSEG 0x0c
#define DCSS_LOADNOLY 0x10
#define DCSS_SEGEXT 0x18
#define DCSS_QACTV 0x0c
/*
 * Per-segment bookkeeping.
 * NOTE(review): the enclosing "struct dcss_segment { ... }" header and
 * the remaining members (dcss_name, end, ref_count, dcss_attr,
 * shared_attr -- all referenced by the functions below) are missing
 * from this fragment.
 */
	struct list_head list;		/* link into dcss_list below */
	unsigned long start_addr;	/* segment start returned by diag x'64' */

/* Protects dcss_list and every dcss_segment hanging off it. */
static spinlock_t dcss_lock = SPIN_LOCK_UNLOCKED;
/* All currently loaded DCSS segments. */
static struct list_head dcss_list = LIST_HEAD_INIT(dcss_list);
/* Main-storage layout (defined in arch setup code); used below to
   detect collisions between a segment and real memory. */
extern struct {unsigned long addr, size, type;} memory_chunk[16];
/*
 * Create the 8 bytes, ebcdic VM segment name from
 * an ascii name: uppercase it, blank-pad to exactly 8 bytes and
 * convert to EBCDIC in place.  No NUL terminator is written --
 * callers compare the result with memcmp(.., 8).
 *
 * Fix: the copy loop ran with "i <= 8", i.e. 9 iterations into the
 * 8-byte destination buffer (off-by-one overflow); bounds are now
 * "i < 8", and short names are blank-padded before conversion.
 */
static void inline
dcss_mkname(char *name, char *dcss_name)
{
	int i;

	for (i = 0; i < 8; i++) {
		if (name[i] == '\0')
			break;
		dcss_name[i] = toupper(name[i]);
	}
	for (; i < 8; i++)
		dcss_name[i] = ' ';	/* blank-pad short names */
	ASCEBC(dcss_name, 8);		/* convert to EBCDIC for CP */
}
/*
 * Perform a function on a dcss segment.
 *
 * func      one of the DCSS_* diagnose x'64' subcodes above
 * parameter request block or 8-byte EBCDIC segment name for the diag
 * ret1/ret2 out parameters; presumably filled from rx/ry after the
 *           asm (the stores are missing from this fragment -- confirm)
 *
 * NOTE(review): fragment -- the return type line, local declarations,
 * the "diag %0,%1,0x64" instruction, the #else/#endif of the
 * CONFIG_ARCH_S390X block and the condition-code extraction
 * (ipm/srl feeding rc) are not visible here.
 */
dcss_diag (__u8 func, void *parameter,
	   unsigned long *ret1, unsigned long *ret2)

	rx = (unsigned long) parameter;
	ry = (unsigned long) func;

#ifdef CONFIG_ARCH_S390X
	/* diag x'64' is a 31-bit interface: switch addressing mode
	   around it on 64-bit kernels */
	" sam31\n" // switch to 31 bit
	" sam64\n" // switch back to 64 bit
	/* rx/ry are in/out ("+d"); rc receives the condition code */
	: "+d" (rx), "+d" (ry), "=d" (rc) : : "cc" );
/* use to issue "extended" dcss query */
/*
 * Query z/VM for the attributes of segment 'name' via diagnose x'64'
 * subcode DCSS_SEGEXT.  On success *segstart/*segend receive the
 * segment extent and *rwattr/*shattr its read-write / shared
 * attributes (values consumed by segment_load below).
 * NOTE(review): fragment -- return type, several struct members,
 * allocation-failure branches, the diag instruction and a number of
 * closing braces are not visible; comments mark the gaps.
 */
dcss_diag_query(char *name, int *rwattr, int *shattr, unsigned long *segstart, unsigned long *segend)

	unsigned long rx, ry;

	/* one entry of the diag reply; byte 3 of thisseg carries the
	   segment-type code tested below (struct body partly missing) */
	typedef struct segentry {

	/* request block field: 31-bit pointer to the reply area */
	unsigned int qoutptr;

	struct qin64 *qinarea;
	struct qout64 *qoutarea;

	/* request/reply pages must be 31-bit addressable for the diag,
	   hence GFP_DMA */
	qinarea = (struct qin64*) get_zeroed_page (GFP_DMA);

	qoutarea = (struct qout64*) get_zeroed_page (GFP_DMA);

	/* error path: second allocation failed, release the first page */
	free_page ((unsigned long) qinarea);

	/* NOTE(review): redundant -- get_zeroed_page() already returns
	   zeroed memory; both memsets could be dropped */
	memset (qinarea,0,PAGE_SIZE);
	memset (qoutarea,0,PAGE_SIZE);

	qinarea->qopcode = DCSS_QACTV; /* do a query for active
					  segments */
	qinarea->qoutptr = (unsigned long) qoutarea;
	qinarea->qoutlen = sizeof(struct qout64);

	/* Move segment name into double word aligned
	   field and pad with blanks to 8 long.
	 */
	for (i = j = 0 ; i < 8; i++) {
		qinarea->qname[i] = (name[j] == '\0') ? ' ' : name[j++];
	}

	/* name already in EBCDIC */
	/* ASCEBC ((void *)&qinarea.qname, 8); */

	/* set the assembler variables */
	rx = (unsigned long) qinarea;
	ry = DCSS_SEGEXT; /* this is extended function */

	/* issue diagnose x'64' */
	__asm__ __volatile__(
#ifdef CONFIG_ARCH_S390X
	" sam31\n" // switch to 31 bit
	" sam64\n" // switch back to 64 bit
	/* NOTE(review): the "diag %0,%1,0x64" line, the #else/#endif and
	   the ipm/srl condition-code extraction are missing here */
	: "+d" (rx), "+d" (ry), "=d" (rc) : : "cc" );

	/* parse the query output area */
	*segstart=qoutarea->segstart;
	*segend=qoutarea->segend;

	/* more returned entries than fit the reply's segout[] array:
	   special-cased (branch body missing from this fragment) */
	if (qoutarea->segcnt > 6)

	/* Byte 3 of each entry is a CP segment-type code; the exact
	   meaning of codes 1/2/3/5/6 is per the CP documentation --
	   confirm.  NOTE(review): the assignments inside these branches
	   (presumably clearing *rwattr / *shattr) are missing here. */
	for (i=0; i < qoutarea->segrcnt; i++) {
		if (qoutarea->segout[i].thisseg[3] == 2 ||
		    qoutarea->segout[i].thisseg[3] == 3 ||
		    qoutarea->segout[i].thisseg[3] == 6 )

		if (qoutarea->segout[i].thisseg[3] == 1 ||
		    qoutarea->segout[i].thisseg[3] == 3 ||
		    qoutarea->segout[i].thisseg[3] == 5 )

	} /* end of for statement */

	/* release both diag pages before returning */
	free_page ((unsigned long) qoutarea);
	free_page ((unsigned long) qinarea);
/*
 * Load a DCSS segment via the diag 0x64.
 *
 * name:    ASCII segment name (blank-padded EBCDIC is built below)
 * segtype: requested mode, one of SEGMENT_{SHARED,EXCLUSIVE}_{RO,RW}
 * addr:    out, start address of the mapped segment
 * (further parameter(s) and most local declarations are missing from
 * this fragment)
 *
 * Returns the SEGMENT_* type actually granted, or a negative errno.
 * Runs under dcss_lock; every early-exit path visible below drops it.
 * NOTE(review): fragment -- many branch bodies, continue/break/return
 * statements and closing braces are not visible; comments mark gaps.
 */
int segment_load(char *name, int segtype, unsigned long *addr,
		struct dcss_segment *seg, *tmp;
		unsigned long segstart, segend;
	dcss_mkname(name, dcss_name);
	/* search for the dcss in list of currently loaded segments */
	spin_lock(&dcss_lock);
	list_for_each(l, &dcss_list) {
		tmp = list_entry(l, struct dcss_segment, list);
		if (memcmp(tmp->dcss_name, dcss_name, 8) == 0) {
	/* ---- segment not loaded yet ---- */
	/* find out the attributes of this
	   segment (extended query) */
	dcss_diag_query(dcss_name, &rwattr, &shattr, &segstart, &segend);
	/* does segment collide with main memory? */
	for (i=0; i<16; i++) {
		if (memory_chunk[i].type != 0)
			/* not a RAM chunk: skip (statement missing) */
		if (memory_chunk[i].addr > segend)
			/* chunk entirely above the segment: skip */
		if (memory_chunk[i].addr + memory_chunk[i].size <= segstart)
			/* chunk entirely below the segment: skip */
		/* otherwise: overlap with real memory -- give up */
		spin_unlock(&dcss_lock);
	/* or does it collide with other (loaded) segments? */
	list_for_each(l, &dcss_list) {
		tmp = list_entry(l, struct dcss_segment, list);
		if ((segstart <= tmp->end && segstart >= tmp->start_addr) ||
		    (segend <= tmp->end && segend >= tmp->start_addr) ||
		    (segstart <= tmp->start_addr && segend >= tmp->end)) {
			PRINT_ERR("Segment Overlap!\n");
			spin_unlock(&dcss_lock);

	/* do case statement on segtype */
	/* if asking for shared ro,
	   shared rw works too */
	/* if asking for exclusive ro,
	   exclusive rw works */
	/* NOTE(review): the switch(segtype) header and the per-case
	   break statements are missing from this fragment */
	case SEGMENT_SHARED_RO:
		if (shattr > 1 || rwattr > 1) {
			/* attributes unusable for a shared mapping */
			spin_unlock(&dcss_lock);
		/* map (shattr,rwattr) onto the type actually granted */
		if (shattr == 0 && rwattr == 0)
			rc = SEGMENT_EXCLUSIVE_RO;
		if (shattr == 0 && rwattr == 1)
			rc = SEGMENT_EXCLUSIVE_RW;
		if (shattr == 1 && rwattr == 0)
			rc = SEGMENT_SHARED_RO;
		if (shattr == 1 && rwattr == 1)
			rc = SEGMENT_SHARED_RW;
	case SEGMENT_SHARED_RW:
		if (shattr > 1 || rwattr != 1) {
			spin_unlock(&dcss_lock);
		rc = SEGMENT_EXCLUSIVE_RW;
		rc = SEGMENT_SHARED_RW;
	case SEGMENT_EXCLUSIVE_RO:
		if (shattr > 0 || rwattr > 1) {
			spin_unlock(&dcss_lock);
		rc = SEGMENT_EXCLUSIVE_RO;
		rc = SEGMENT_EXCLUSIVE_RW;
	case SEGMENT_EXCLUSIVE_RW:
		/* attribute validation deliberately disabled here: */
		/* if (shattr != 0 || rwattr != 1) {
			spin_unlock(&dcss_lock);
		rc = SEGMENT_EXCLUSIVE_RW;
	/* (default case presumably rejects unknown segtype -- missing) */
	spin_unlock(&dcss_lock);

	/* bookkeeping record; GFP_DMA so dcss_name stays 31-bit
	   addressable for the diag interface */
	seg = kmalloc(sizeof(struct dcss_segment), GFP_DMA);
	/* NOTE(review): no NULL check for kmalloc is visible in this
	   fragment -- confirm one guards the memcpy below */
	memcpy(seg->dcss_name, dcss_name, 8);
	if (rc == SEGMENT_EXCLUSIVE_RW) {
		/* exclusive: load non-shared */
		if (dcss_diag(DCSS_LOADNSR, seg->dcss_name,
			      &seg->start_addr, &seg->end) == 0) {
			if (seg->end < max_low_pfn*PAGE_SIZE ) {
				/* fits below the kernel memory limit:
				   publish the segment */
				atomic_set(&seg->ref_count, 1);
				list_add(&seg->list, &dcss_list);
				*addr = seg->start_addr;
				/* remember how z/VM defines the segment */
				if (shattr == 1 && rwattr == 1)
					seg->shared_attr = SEGMENT_SHARED_RW;
				else if (shattr == 1 && rwattr == 0)
					seg->shared_attr = SEGMENT_SHARED_RO;
				/* (final else missing here) */
					seg->shared_attr = SEGMENT_EXCLUSIVE_RW;
			/* did not fit: purge the mapping again */
			dcss_diag(DCSS_PURGESEG, seg->dcss_name, &dummy, &dummy);
	/* non-exclusive request path (LOADNOLY semantics per CP
	   documentation -- confirm) */
	if (dcss_diag(DCSS_LOADNOLY, seg->dcss_name,
		      &seg->start_addr, &seg->end) == 0) {
		if (seg->end < max_low_pfn*PAGE_SIZE ) {
			atomic_set(&seg->ref_count, 1);
			list_add(&seg->list, &dcss_list);
			*addr = seg->start_addr;
			seg->shared_attr = rc;
		/* did not fit: purge again */
		dcss_diag(DCSS_PURGESEG, seg->dcss_name, &dummy, &dummy);

	/* ---- segment already loaded: check mode compatibility ---- */
	if ((segtype == SEGMENT_EXCLUSIVE_RW) && (seg->dcss_attr != SEGMENT_EXCLUSIVE_RW)) {
		PRINT_ERR("Segment already loaded in other mode than EXCLUSIVE_RW!\n");
		/* reload segment in exclusive mode */
		/* dcss_diag(DCSS_LOADNSR, seg->dcss_name,
			&seg->start_addr, &seg->end);
		seg->dcss_attr = SEGMENT_EXCLUSIVE_RW;*/
	if ((segtype != SEGMENT_EXCLUSIVE_RW) && (seg->dcss_attr == SEGMENT_EXCLUSIVE_RW)) {
		PRINT_ERR("Segment already loaded in EXCLUSIVE_RW mode!\n");
	/* compatible: just take another reference */
	atomic_inc(&seg->ref_count);
	*addr = seg->start_addr;
	spin_unlock(&dcss_lock);
/*
 * Decrease the use count of a DCSS segment and remove
 * it from the address space if nobody is using it
 * (ref_count dropped to zero).
 * NOTE(review): fragment -- the dcss_name declaration, the purge
 * call's remaining arguments, the kfree of seg and several closing
 * braces are not visible here.
 */
void segment_unload(char *name)
	struct list_head *l,*l_tmp;
	struct dcss_segment *seg;

	dcss_mkname(name, dcss_name);
	spin_lock(&dcss_lock);
	/* _safe variant: the matching entry may be removed inside the loop */
	list_for_each_safe(l, l_tmp, &dcss_list) {
		seg = list_entry(l, struct dcss_segment, list);
		if (memcmp(seg->dcss_name, dcss_name, 8) == 0) {
			if (atomic_dec_return(&seg->ref_count) == 0) {
				/* Last user of the segment is
				   gone: unlink and purge it */
				list_del(&seg->list);
				dcss_diag(DCSS_PURGESEG, seg->dcss_name,
	spin_unlock(&dcss_lock);
/*
 * Replace an existing DCSS segment, so that machines
 * that load it anew will see the new version.
 *
 * Issues the CP commands "DEFSEG <name> <beg>-<end> <type>" and
 * "SAVESEG <name>" with the segment's current extent and shared
 * attribute.  NOTE(review): fragment -- local declarations (mybuff1/2,
 * mybeg, myend and their initializers) and several closing braces are
 * not visible here.
 */
void segment_replace(char *name)
	struct dcss_segment *seg;

	dcss_mkname(name, dcss_name);

	memset (mybuff1, 0, sizeof(mybuff1));
	memset (mybuff2, 0, sizeof(mybuff2));

	spin_lock(&dcss_lock);
	list_for_each(l, &dcss_list) {
		seg = list_entry(l, struct dcss_segment, list);
		if (memcmp(seg->dcss_name, dcss_name, 8) == 0) {
			/* DEFSEG takes 4K page-frame numbers, hence >> 12 */
			mybeg = seg->start_addr >> 12;
			myend = (seg->end) >> 12;
			/* pick the CP segment type matching how we loaded it
			   (EW/RO/SW/SR operands of DEFSEG) */
			if (seg->shared_attr == SEGMENT_EXCLUSIVE_RW)
				sprintf(mybuff1, "DEFSEG %s %X-%X EW",
			if (seg->shared_attr == SEGMENT_EXCLUSIVE_RO)
				sprintf(mybuff1, "DEFSEG %s %X-%X RO",
			if (seg->shared_attr == SEGMENT_SHARED_RW)
				sprintf(mybuff1, "DEFSEG %s %X-%X SW",
			if (seg->shared_attr == SEGMENT_SHARED_RO)
				sprintf(mybuff1, "DEFSEG %s %X-%X SR",
			/* NOTE(review): %X expects unsigned int -- mybeg/myend
			   look like unsigned long; confirm types or use %lX */
			/* drop the lock before issuing CP commands --
			   presumably because cpcmd may block; confirm */
			spin_unlock(&dcss_lock);
			sprintf(mybuff2, "SAVESEG %s", name);
			cpcmd(mybuff1, NULL, 80);
			cpcmd(mybuff2, NULL, 80);
	/* only unlock here when no segment matched; relies on myend
	   being initialized to 0 (initializer not visible -- confirm) */
	if (myend == 0) spin_unlock(&dcss_lock);
/* Public DCSS interface, exported for use by other kernel modules. */
EXPORT_SYMBOL(segment_load);
EXPORT_SYMBOL(segment_unload);
EXPORT_SYMBOL(segment_replace);