2 * dcssblk.c -- the S/390 block driver for dcss memory
4 * Authors: Carsten Otte, Stefan Weinhuber, Gerald Schaefer
7 #include <linux/module.h>
8 #include <linux/ctype.h>
9 #include <linux/errno.h>
10 #include <linux/init.h>
11 #include <linux/slab.h>
12 #include <linux/blkdev.h>
13 #include <asm/extmem.h>
15 #include <linux/completion.h>
16 #include <linux/interrupt.h>
17 #include <asm/ccwdev.h> // for s390_root_dev_(un)register()
19 //#define DCSSBLK_DEBUG /* Debug messages on/off */
/* Driver name, used as printk prefix and for block-device registration. */
20 #define DCSSBLK_NAME "dcssblk"
/* One minor per gendisk: a DCSS segment device is not partitioned here. */
21 #define DCSSBLK_MINORS_PER_DISK 1
/*
 * NOTE(review): two competing PRINT_DEBUG definitions follow; the
 * #ifdef DCSSBLK_DEBUG / #else / #endif lines selecting between them are
 * not visible in this view -- presumably the printk variant is the
 * DCSSBLK_DEBUG one. Confirm against the full file.
 */
24 #define PRINT_DEBUG(x...) printk(KERN_DEBUG DCSSBLK_NAME " debug: " x)
26 #define PRINT_DEBUG(x...) do {} while (0)
/* Leveled message helpers, all tagged with the driver name. */
28 #define PRINT_INFO(x...) printk(KERN_INFO DCSSBLK_NAME " info: " x)
29 #define PRINT_WARN(x...) printk(KERN_WARNING DCSSBLK_NAME " warning: " x)
30 #define PRINT_ERR(x...) printk(KERN_ERR DCSSBLK_NAME " error: " x)
/* Forward declarations for the block_device_operations table below. */
33 static int dcssblk_open(struct inode *inode, struct file *filp);
34 static int dcssblk_release(struct inode *inode, struct file *filp);
35 static int dcssblk_make_request(struct request_queue *q, struct bio *bio);
/* Dynamic major number, assigned by register_blkdev() in dcssblk_init(). */
37 static int dcssblk_major;
/*
 * Block device operations: only open/release are provided; all I/O goes
 * through dcssblk_make_request() on the bio level (no request queue).
 * NOTE(review): the .open initializer line is not visible in this view.
 */
38 static struct block_device_operations dcssblk_devops = {
41 .release = dcssblk_release,
/* sysfs store/show handlers (add/remove on the root device, save/shared
 * per segment device). */
44 static ssize_t dcssblk_add_store(struct device * dev, const char * buf,
46 static ssize_t dcssblk_remove_store(struct device * dev, const char * buf,
48 static ssize_t dcssblk_save_store(struct device * dev, const char * buf,
50 static ssize_t dcssblk_save_show(struct device *dev, char *buf);
51 static ssize_t dcssblk_shared_store(struct device * dev, const char * buf,
53 static ssize_t dcssblk_shared_show(struct device *dev, char *buf);
/* "add" and "remove" are write-only; "save" and "shared" are read-write. */
55 static DEVICE_ATTR(add, S_IWUSR, NULL, dcssblk_add_store);
56 static DEVICE_ATTR(remove, S_IWUSR, NULL, dcssblk_remove_store);
57 static DEVICE_ATTR(save, S_IWUSR | S_IRUGO, dcssblk_save_show,
59 static DEVICE_ATTR(shared, S_IWUSR | S_IRUGO, dcssblk_shared_show,
60 dcssblk_shared_store);
/* Parent device for all segment devices, registered in dcssblk_init(). */
62 static struct device *dcssblk_root_dev;
/*
 * Per-segment device state. Embeds the struct device (freed via
 * dcssblk_release_segment) and links into dcssblk_devices via lh.
 * NOTE(review): further members (lh, dev, gd, start, end, segment_type,
 * use_count) are declared on lines not visible in this view.
 */
64 struct dcssblk_dev_info {
67 char segment_name[BUS_ID_SIZE];
73 unsigned char save_pending;
74 unsigned char is_shared;
75 struct request_queue *dcssblk_queue;
/* Global list of loaded segments, protected by dcssblk_devices_lock. */
78 static struct list_head dcssblk_devices = LIST_HEAD_INIT(dcssblk_devices);
/* NOTE(review): RW_LOCK_UNLOCKED is the legacy static initializer; newer
 * kernels use DEFINE_RWLOCK() -- fine for the kernel vintage this targets. */
79 static rwlock_t dcssblk_devices_lock = RW_LOCK_UNLOCKED;
83 * release function for segment device.
/*
 * Called by the driver core when the embedded struct device's refcount
 * drops to zero: frees the containing dcssblk_dev_info and drops the
 * module reference taken in dcssblk_add_store().
 */
86 dcssblk_release_segment(struct device *dev)
88 PRINT_DEBUG("segment release fn called for %s\n", dev->bus_id);
/* dev is embedded in dcssblk_dev_info; free the whole container. */
89 kfree(container_of(dev, struct dcssblk_dev_info, dev));
/* Balances try_module_get() done when the device was added. */
90 module_put(THIS_MODULE);
94 * get a minor number. needs to be called with
95 * write_lock(&dcssblk_devices_lock) and the
96 * device needs to be enqueued before the lock is
/*
 * Linear scan over all possible minors; for each candidate, walk the
 * device list to see whether some gendisk already uses it. First unused
 * minor wins. O(minors * devices), acceptable for the small device counts
 * expected here.
 */
100 dcssblk_assign_free_minor(struct dcssblk_dev_info *dev_info)
103 struct dcssblk_dev_info *entry;
105 if (dev_info == NULL)
107 for (minor = 0; minor < (1<<MINORBITS); minor++) {
109 // test if minor available
110 list_for_each_entry(entry, &dcssblk_devices, lh)
111 if (minor == entry->gd->first_minor)
/* NOTE(review): 'found' is set inside the inner loop on lines not
 * visible here -- presumably found = 1 on a minor collision. */
113 if (!found) break; // got unused minor
117 dev_info->gd->first_minor = minor;
122 * get the struct dcssblk_dev_info from dcssblk_devices
123 * for the given name.
124 * read_lock(&dcssblk_devices_lock) must be held.
/* Returns the matching entry, or (per the visible contract) NULL when no
 * segment of that name is loaded -- the NULL return path is on lines not
 * visible in this view. */
126 static struct dcssblk_dev_info *
127 dcssblk_get_device_by_name(char *name)
129 struct dcssblk_dev_info *entry;
131 list_for_each_entry(entry, &dcssblk_devices, lh) {
/* Names were upper-cased at add time, so a plain strcmp suffices. */
132 if (!strcmp(name, entry->segment_name)) {
140 * register the device that represents a segment in sysfs,
141 * also add the attributes for the device
/*
 * Registers the segment device and creates its "shared" and "save"
 * attribute files; on attribute-creation failure the device is
 * unregistered again (error-path lines partially not visible here).
 */
144 dcssblk_register_segment_device(struct device *dev)
148 rc = device_register(dev);
151 rc = device_create_file(dev, &dev_attr_shared);
154 rc = device_create_file(dev, &dev_attr_save);
/* Unwind the device_register() above on failure. */
160 device_unregister(dev);
165 * device attribute for switching shared/nonshared (exclusive)
166 * operation (show + store)
/* sysfs show: prints "1\n" if the segment is loaded shared, else "0\n". */
169 dcssblk_shared_show(struct device *dev, char *buf)
171 struct dcssblk_dev_info *dev_info;
173 dev_info = container_of(dev, struct dcssblk_dev_info, dev);
174 return sprintf(buf, dev_info->is_shared ? "1\n" : "0\n");
/*
 * sysfs store for the "shared" attribute: accepts "0" or "1" (optionally
 * newline-terminated) and reloads the segment in exclusive-RW or
 * shared-RO mode respectively.  Refuses while the device is open
 * (use_count != 0).  If the reload fails the device is torn down
 * entirely (the "removing it now" path at the bottom).
 *
 * NOTE(review): many return statements and closing braces of the error
 * paths below are on lines not visible in this view; each
 * write_unlock() below presumably precedes an early return.
 */
178 dcssblk_shared_store(struct device *dev, const char *inbuf, size_t count)
180 struct dcssblk_dev_info *dev_info;
/* Only a single '0'/'1' character (plus optional '\n') is valid input. */
183 if ((count > 1) && (inbuf[1] != '\n') && (inbuf[1] != '\0')) {
184 PRINT_WARN("Invalid value, must be 0 or 1\n");
/* Write lock: we may reload the segment and mutate dev_info state. */
187 write_lock(&dcssblk_devices_lock);
188 dev_info = container_of(dev, struct dcssblk_dev_info, dev);
/* Cannot switch mode while somebody has the block device open. */
189 if (atomic_read(&dev_info->use_count)) {
190 PRINT_ERR("share: segment %s is busy!\n",
191 dev_info->segment_name);
192 write_unlock(&dcssblk_devices_lock);
/* No-op requests: already in the requested mode. */
195 if ((inbuf[0] == '1') && (dev_info->is_shared == 1)) {
196 PRINT_WARN("Segment %s already loaded in shared mode!\n",
197 dev_info->segment_name);
198 write_unlock(&dcssblk_devices_lock);
201 if ((inbuf[0] == '0') && (dev_info->is_shared == 0)) {
202 PRINT_WARN("Segment %s already loaded in exclusive mode!\n",
203 dev_info->segment_name);
204 write_unlock(&dcssblk_devices_lock);
207 if (inbuf[0] == '1') {
208 // reload segment in shared mode
/* Unload first: a DCSS cannot be loaded twice in different modes. */
209 segment_unload(dev_info->segment_name);
210 rc = segment_load(dev_info->segment_name, SEGMENT_SHARED_RO,
211 &dev_info->start, &dev_info->end);
/* NOTE(review): the rc < 0 check guarding this error message is on a
 * line not visible here; on failure control presumably jumps to the
 * removal path at the bottom. */
213 PRINT_ERR("Segment %s not reloaded, rc=%d\n",
214 dev_info->segment_name, rc);
217 dev_info->is_shared = 1;
218 PRINT_INFO("Segment %s reloaded, shared mode.\n",
219 dev_info->segment_name);
220 } else if (inbuf[0] == '0') {
221 // reload segment in exclusive mode
222 segment_unload(dev_info->segment_name);
223 rc = segment_load(dev_info->segment_name, SEGMENT_EXCLUSIVE_RW,
224 &dev_info->start, &dev_info->end);
226 PRINT_ERR("Segment %s not reloaded, rc=%d\n",
227 dev_info->segment_name, rc);
230 dev_info->is_shared = 0;
231 PRINT_INFO("Segment %s reloaded, exclusive (read-write) mode.\n",
232 dev_info->segment_name);
/* Neither '0' nor '1': reject (else-branch brace not visible here). */
234 write_unlock(&dcssblk_devices_lock);
235 PRINT_WARN("Invalid value, must be 0 or 1\n");
/* On success segment_load() returns the segment type as a non-negative
 * rc; remember it for the read-only decision below. */
238 dev_info->segment_type = rc;
/* RO segment types get a read-only gendisk, RW types read-write. */
241 switch (dev_info->segment_type) {
242 case SEGMENT_SHARED_RO:
243 case SEGMENT_EXCLUSIVE_RO:
244 set_disk_ro(dev_info->gd, 1);
246 case SEGMENT_SHARED_RW:
247 case SEGMENT_EXCLUSIVE_RW:
248 set_disk_ro(dev_info->gd, 0);
/* Warn when z/VM gave us a different sharing mode than requested
 * (e.g. segment defined exclusive-only). */
251 if ((inbuf[0] == '1') &&
252 ((dev_info->segment_type == SEGMENT_EXCLUSIVE_RO) ||
253 (dev_info->segment_type == SEGMENT_EXCLUSIVE_RW))) {
254 PRINT_WARN("Could not get shared copy of segment %s\n",
255 dev_info->segment_name);
258 if ((inbuf[0] == '0') &&
259 ((dev_info->segment_type == SEGMENT_SHARED_RO) ||
260 (dev_info->segment_type == SEGMENT_SHARED_RW))) {
261 PRINT_WARN("Could not get exclusive copy of segment %s\n",
262 dev_info->segment_name);
/* Success path: drop the lock (return presumably follows, not visible). */
265 write_unlock(&dcssblk_devices_lock);
/*
 * Reload-failure path: the segment is gone, so the block device is
 * unusable -- remove it completely (mirrors dcssblk_remove_store).
 */
269 PRINT_ERR("Could not reload segment %s, removing it now!\n",
270 dev_info->segment_name);
271 list_del(&dev_info->lh);
272 write_unlock(&dcssblk_devices_lock);
274 del_gendisk(dev_info->gd);
/* Detach and drop the queue before the final put_disk(). */
275 blk_put_queue(dev_info->dcssblk_queue);
276 dev_info->gd->queue = NULL;
277 put_disk(dev_info->gd);
/* Triggers dcssblk_release_segment() once the last reference drops. */
278 device_unregister(dev);
285 * device attribute for save operation on current copy
286 * of the segment. If the segment is busy, saving will
287 * become pending until it gets released, which can be
288 * undone by storing a non-true value to this entry.
/* sysfs show: prints "1\n" if a save is pending, else "0\n". */
292 dcssblk_save_show(struct device *dev, char *buf)
294 struct dcssblk_dev_info *dev_info;
296 dev_info = container_of(dev, struct dcssblk_dev_info, dev);
297 return sprintf(buf, dev_info->save_pending ? "1\n" : "0\n");
/*
 * sysfs store for the "save" attribute: "1" saves the segment to its
 * permanent copy via segment_replace() -- immediately if idle, otherwise
 * deferred to dcssblk_release() via save_pending; "0" cancels a pending
 * save.  NOTE(review): early returns / closing braces of several branches
 * are on lines not visible in this view.
 */
301 dcssblk_save_store(struct device *dev, const char *inbuf, size_t count)
303 struct dcssblk_dev_info *dev_info;
/* Only a single '0'/'1' character (plus optional '\n') is valid input. */
305 if ((count > 1) && (inbuf[1] != '\n') && (inbuf[1] != '\0')) {
306 PRINT_WARN("Invalid value, must be 0 or 1\n");
309 dev_info = container_of(dev, struct dcssblk_dev_info, dev);
/* Lock guards both use_count inspection and save_pending updates. */
311 write_lock(&dcssblk_devices_lock);
312 if (inbuf[0] == '1') {
313 if (atomic_read(&dev_info->use_count) == 0) {
314 // device is idle => we save immediately
315 PRINT_INFO("Saving segment %s\n",
316 dev_info->segment_name);
317 segment_replace(dev_info->segment_name);
319 // device is busy => we save it when it becomes
320 // idle in dcssblk_release
321 PRINT_INFO("Segment %s is currently busy, it will "
322 "be saved when it becomes idle...\n",
323 dev_info->segment_name);
324 dev_info->save_pending = 1;
326 } else if (inbuf[0] == '0') {
327 if (dev_info->save_pending) {
328 // device is busy & the user wants to undo his save
330 dev_info->save_pending = 0;
331 PRINT_INFO("Pending save for segment %s deactivated\n",
332 dev_info->segment_name);
/* Neither '0' nor '1': reject (else-branch brace not visible here). */
335 write_unlock(&dcssblk_devices_lock);
336 PRINT_WARN("Invalid value, must be 0 or 1\n");
339 write_unlock(&dcssblk_devices_lock);
344 * device attribute for adding devices
/*
 * sysfs store on the root device: loads the DCSS named in buf (upper-
 * cased, max 8 chars) in shared read-only mode and creates a gendisk plus
 * sysfs device for it.  Teardown on the various failure paths runs
 * through the labels at the bottom (goto targets mostly not visible in
 * this view).
 */
347 dcssblk_add_store(struct device *dev, const char *buf, size_t count)
350 struct dcssblk_dev_info *dev_info;
352 unsigned long seg_byte_size;
/* Only the root device carries the "add" attribute. */
355 if (dev != dcssblk_root_dev) {
/* +1 for the terminating '\0' appended after the copy loop. */
359 local_buf = kmalloc(count + 1, GFP_KERNEL);
360 if (local_buf == NULL) {
/* Copy and upper-case the segment name, stopping at NUL/newline. */
367 for (i = 0; ((buf[i] != '\0') && (buf[i] != '\n') && i < count); i++) {
368 local_buf[i] = toupper(buf[i]);
/* DCSS names are 1..8 characters. */
371 if ((i == 0) || (i > 8)) {
/* Reject duplicates: segment must not already be loaded. */
378 read_lock(&dcssblk_devices_lock);
379 dev_info = dcssblk_get_device_by_name(local_buf);
380 read_unlock(&dcssblk_devices_lock);
381 if (dev_info != NULL) {
382 PRINT_WARN("Segment %s already loaded!\n", local_buf);
387 * get a struct dcssblk_dev_info
389 dev_info = kmalloc(sizeof(struct dcssblk_dev_info), GFP_KERNEL);
390 if (dev_info == NULL) {
394 memset(dev_info, 0, sizeof(struct dcssblk_dev_info));
/* segment_name fits: length was limited to 8 above, buffer is
 * BUS_ID_SIZE. */
396 strcpy(dev_info->segment_name, local_buf);
397 strlcpy(dev_info->dev.bus_id, local_buf, BUS_ID_SIZE);
398 dev_info->dev.release = dcssblk_release_segment;
399 INIT_LIST_HEAD(&dev_info->lh);
401 dev_info->gd = alloc_disk(DCSSBLK_MINORS_PER_DISK);
402 if (dev_info->gd == NULL) {
406 dev_info->gd->major = dcssblk_major;
407 dev_info->gd->fops = &dcssblk_devops;
/* NOTE(review): blk_alloc_queue() can return NULL; no check is visible
 * here before the queue is used -- confirm against the full file. */
408 dev_info->dcssblk_queue = blk_alloc_queue(GFP_KERNEL);
409 dev_info->gd->queue = dev_info->dcssblk_queue;
410 dev_info->gd->private_data = dev_info;
411 dev_info->gd->driverfs_dev = &dev_info->dev;
/* Load shared read-only by default; "shared" attribute can switch later. */
415 rc = segment_load(local_buf, SEGMENT_SHARED_RO,
416 &dev_info->start, &dev_info->end);
418 PRINT_ERR("Segment %s not loaded, rc=%d\n", local_buf, rc);
419 goto dealloc_gendisk;
/* end is the last usable address, hence the +1 for the byte size. */
421 seg_byte_size = (dev_info->end - dev_info->start + 1);
422 set_capacity(dev_info->gd, seg_byte_size >> 9); // size in sectors
423 PRINT_INFO("Loaded segment %s from %p to %p, size = %lu Byte, "
424 "capacity = %lu sectors (512 Byte)\n", local_buf,
425 (void *) dev_info->start, (void *) dev_info->end,
426 seg_byte_size, seg_byte_size >> 9);
/* segment_load() returns the actual segment type on success. */
428 dev_info->segment_type = rc;
429 dev_info->save_pending = 0;
430 dev_info->is_shared = 1;
431 dev_info->dev.parent = dcssblk_root_dev;
434 * get minor, add to list
436 write_lock(&dcssblk_devices_lock);
437 rc = dcssblk_assign_free_minor(dev_info);
439 write_unlock(&dcssblk_devices_lock);
440 PRINT_ERR("No free minor number available! "
441 "Unloading segment...\n");
444 sprintf(dev_info->gd->disk_name, "dcssblk%d",
445 dev_info->gd->first_minor);
446 list_add_tail(&dev_info->lh, &dcssblk_devices);
448 * register the device
450 rc = dcssblk_register_segment_device(&dev_info->dev);
452 PRINT_ERR("Segment %s could not be registered RC=%d\n",
/* Module must stay loaded while the segment device exists; dropped in
 * dcssblk_release_segment(). */
457 if (!try_module_get(THIS_MODULE)) {
/* Extra device reference, dropped by put_device() in remove_store. */
462 get_device(&dev_info->dev);
463 add_disk(dev_info->gd);
/* bio-based driver: install make_request fn, 4K hard sectors (DCSS is
 * page-granular). */
465 blk_queue_make_request(dev_info->dcssblk_queue, dcssblk_make_request);
466 blk_queue_hardsect_size(dev_info->dcssblk_queue, 4096);
/* Read-only gendisk for RO segment types. */
468 switch (dev_info->segment_type) {
469 case SEGMENT_SHARED_RO:
470 case SEGMENT_EXCLUSIVE_RO:
471 set_disk_ro(dev_info->gd,1);
473 case SEGMENT_SHARED_RW:
474 case SEGMENT_EXCLUSIVE_RW:
475 set_disk_ro(dev_info->gd,0);
478 PRINT_DEBUG("Segment %s loaded successfully\n", local_buf);
479 write_unlock(&dcssblk_devices_lock);
/* --- error unwind labels (names mostly not visible in this view) --- */
484 list_del(&dev_info->lh);
485 write_unlock(&dcssblk_devices_lock);
487 segment_unload(local_buf);
489 blk_put_queue(dev_info->dcssblk_queue);
490 dev_info->gd->queue = NULL;
491 put_disk(dev_info->gd);
501 * device attribute for removing devices
/*
 * sysfs store on the root device: unloads the named segment and tears
 * down its gendisk, queue and sysfs device.  Refuses while the device is
 * open.  NOTE(review): early returns / kfree(local_buf) of several
 * branches are on lines not visible in this view.
 */
504 dcssblk_remove_store(struct device *dev, const char *buf, size_t count)
506 struct dcssblk_dev_info *dev_info;
/* Only the root device carries the "remove" attribute. */
510 if (dev != dcssblk_root_dev) {
513 local_buf = kmalloc(count + 1, GFP_KERNEL);
514 if (local_buf == NULL) {
/* Copy and upper-case the name, same normalization as add_store. */
520 for (i = 0; ((*(buf+i)!='\0') && (*(buf+i)!='\n') && i < count); i++) {
521 local_buf[i] = toupper(buf[i]);
/* DCSS names are 1..8 characters. */
524 if ((i == 0) || (i > 8)) {
/* Write lock: we may unlink the entry from the device list. */
529 write_lock(&dcssblk_devices_lock);
530 dev_info = dcssblk_get_device_by_name(local_buf);
531 if (dev_info == NULL) {
532 write_unlock(&dcssblk_devices_lock);
533 PRINT_WARN("Segment %s is not loaded!\n", local_buf);
/* Refuse removal while the block device is open. */
537 if (atomic_read(&dev_info->use_count) != 0) {
538 write_unlock(&dcssblk_devices_lock);
539 PRINT_WARN("Segment %s is in use!\n", local_buf);
543 list_del(&dev_info->lh);
544 write_unlock(&dcssblk_devices_lock);
/* Teardown mirrors the failure path of dcssblk_shared_store(). */
546 del_gendisk(dev_info->gd);
547 blk_put_queue(dev_info->dcssblk_queue);
548 dev_info->gd->queue = NULL;
549 put_disk(dev_info->gd);
550 device_unregister(&dev_info->dev);
551 segment_unload(dev_info->segment_name);
552 PRINT_DEBUG("Segment %s unloaded successfully\n",
553 dev_info->segment_name);
/* Drops the get_device() reference taken in add_store; the embedded
 * dev_info is freed by dcssblk_release_segment() once refs hit zero. */
554 put_device(&dev_info->dev);
/*
 * Block device open: bumps the per-segment use counter (consulted by the
 * shared/save/remove handlers) and forces a 4K block size to match the
 * page-granular DCSS backing.
 */
562 dcssblk_open(struct inode *inode, struct file *filp)
564 struct dcssblk_dev_info *dev_info;
567 dev_info = inode->i_bdev->bd_disk->private_data;
568 if (NULL == dev_info) {
572 atomic_inc(&dev_info->use_count);
573 inode->i_bdev->bd_block_size = 4096;
/*
 * Block device release: drops the use counter and, when this was the
 * last opener and a save was requested while busy, performs the deferred
 * segment_replace() now (see dcssblk_save_store()).
 */
580 dcssblk_release(struct inode *inode, struct file *filp)
582 struct dcssblk_dev_info *dev_info;
585 dev_info = inode->i_bdev->bd_disk->private_data;
586 if (NULL == dev_info) {
/* Lock serializes save_pending against dcssblk_save_store(). */
590 write_lock(&dcssblk_devices_lock);
591 if (atomic_dec_and_test(&dev_info->use_count)
592 && (dev_info->save_pending)) {
593 PRINT_INFO("Segment %s became idle and is being saved now\n",
594 dev_info->segment_name);
595 segment_replace(dev_info->segment_name);
596 dev_info->save_pending = 0;
598 write_unlock(&dcssblk_devices_lock);
/*
 * bio-level I/O handler: a DCSS segment is directly addressable memory,
 * so each bio segment is serviced with a plain memcpy between the page
 * and the segment address range.  Requests must be page-aligned (4K) and
 * within the disk capacity.
 */
605 dcssblk_make_request(request_queue_t *q, struct bio *bio)
607 struct dcssblk_dev_info *dev_info;
608 struct bio_vec *bvec;
610 unsigned long page_addr;
611 unsigned long source_addr;
/* NOTE(review): the 'bytes_done = 0' initialization is not visible in
 * this view -- confirm it exists before the += below. */
612 unsigned long bytes_done;
616 dev_info = bio->bi_bdev->bd_disk->private_data;
617 if (dev_info == NULL)
/* Sector must be a multiple of 8 (4K) and size a multiple of 4096. */
619 if ((bio->bi_sector & 3) != 0 || (bio->bi_size & 4095) != 0)
620 /* Request is not page-aligned. */
622 if (((bio->bi_size >> 9) + bio->bi_sector)
623 > get_capacity(bio->bi_bdev->bd_disk)) {
624 /* Request beyond end of DCSS segment. */
/* Convert 512-byte sector to 4K page index within the segment. */
627 index = (bio->bi_sector >> 3);
628 bio_for_each_segment(bvec, bio, i) {
629 page_addr = (unsigned long)
630 page_address(bvec->bv_page) + bvec->bv_offset;
631 source_addr = dev_info->start + (index<<12) + bytes_done;
/* NOTE(review): parenthesization looks off -- 'unlikely(page_addr &
 * 4095) != 0' behaves like '(page_addr & 4095) != 0' since unlikely()
 * yields 0/1, but the branch hint is misplaced; intended form is
 * unlikely((page_addr & 4095) != 0 || ...). */
632 if (unlikely(page_addr & 4095) != 0 || (bvec->bv_len & 4095) != 0)
/* READ copies segment -> page, write copies page -> segment. */
635 if (bio_data_dir(bio) == READ) {
636 memcpy((void*)page_addr, (void*)source_addr,
639 memcpy((void*)source_addr, (void*)page_addr,
642 bytes_done += bvec->bv_len;
644 bio_endio(bio, bytes_done, 0);
/* Error path: fail the bio, reporting however much was completed. */
647 bio_io_error(bio, bytes_done);
652 * The init/exit functions.
/*
 * Module exit: unregister the root sysfs device and release the dynamic
 * block major.  Loaded segment devices hold module references, so exit
 * can only run once all segments have been removed.
 */
659 PRINT_DEBUG("DCSSBLOCK EXIT...\n");
660 s390_root_dev_unregister(dcssblk_root_dev);
661 rc = unregister_blkdev(dcssblk_major, DCSSBLK_NAME);
663 PRINT_ERR("unregister_blkdev() failed!\n");
665 PRINT_DEBUG("...finished!\n");
/*
 * Module init: register the root device, create its "add"/"remove"
 * attributes, and grab a dynamic block major.  Each failure unwinds the
 * root-device registration (returns are on lines not visible here).
 */
673 PRINT_DEBUG("DCSSBLOCK INIT...\n");
674 dcssblk_root_dev = s390_root_dev_register("dcssblk");
675 if (IS_ERR(dcssblk_root_dev)) {
676 PRINT_ERR("device_register() failed!\n");
677 return PTR_ERR(dcssblk_root_dev);
679 rc = device_create_file(dcssblk_root_dev, &dev_attr_add);
681 PRINT_ERR("device_create_file(add) failed!\n");
682 s390_root_dev_unregister(dcssblk_root_dev);
685 rc = device_create_file(dcssblk_root_dev, &dev_attr_remove);
687 PRINT_ERR("device_create_file(remove) failed!\n");
688 s390_root_dev_unregister(dcssblk_root_dev);
/* major 0 requests a dynamically assigned major number. */
691 rc = register_blkdev(0, DCSSBLK_NAME);
693 PRINT_ERR("Can't get dynamic major!\n");
694 s390_root_dev_unregister(dcssblk_root_dev);
698 PRINT_DEBUG("...finished!\n");
/* Standard module entry/exit hookup and license declaration. */
702 module_init(dcssblk_init);
703 module_exit(dcssblk_exit);
705 MODULE_LICENSE("GPL");