*
*/
-static int sg_version_num = 30534; /* 2 digits for each component */
-#define SG_VERSION_STR "3.5.34"
+static int sg_version_num = 30533; /* 2 digits for each component */
+#define SG_VERSION_STR "3.5.33"
/*
* D. P. Gilbert (dgilbert@interlog.com, dougg@triode.net.au), notes:
* (otherwise the macros compile to empty statements).
*
*/
+#include <linux/config.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/smp_lock.h>
#include <linux/moduleparam.h>
+#include <linux/devfs_fs_kernel.h>
#include <linux/cdev.h>
#include <linux/seq_file.h>
#include <linux/blkdev.h>
#ifdef CONFIG_SCSI_PROC_FS
#include <linux/proc_fs.h>
-static char *sg_version_date = "20060818";
+static char *sg_version_date = "20050908";
static int sg_proc_init(void);
static void sg_proc_cleanup(void);
/*
* most likely out of mem, but could also be a bad map
*/
- sg_finish_rem_req(srp);
return -ENOMEM;
} else
return 0;
if (!sg_allow_access(opcode, sdp->device->type))
return -EPERM;
}
- return sg_scsi_ioctl(filp, sdp->device->request_queue, NULL, p);
+ return scsi_ioctl_send_command(sdp->device, p);
case SG_SET_DEBUG:
result = get_user(val, ip);
if (result)
return (retval < 0) ? retval : 0;
}
+/* When startFinish==1 increments page counts for pages other than the
+   first of scatter gather elements obtained from alloc_pages().
+   When startFinish==0 decrements ... */
+/* Purpose: pre-__GFP_COMP kernels need every sub-page of a multi-page
+   alloc_pages() allocation to hold its own reference while the reserve
+   buffer is mmap()ed, so that munmap's per-page put does not free pages
+   still owned by the driver.  Called with startFinish=1 once per fd at
+   first mmap, and with startFinish=0 from __sg_remove_sfp to undo it. */
+static void
+sg_rb_correct4mmap(Sg_scatter_hold * rsv_schp, int startFinish)
+{
+	struct scatterlist *sg = rsv_schp->buffer;
+	struct page *page;
+	int k, m;
+
+	SCSI_LOG_TIMEOUT(3, printk("sg_rb_correct4mmap: startFinish=%d, scatg=%d\n",
+				   startFinish, rsv_schp->k_use_sg));
+	/* N.B. correction _not_ applied to base page of each allocation */
+	for (k = 0; k < rsv_schp->k_use_sg; ++k, ++sg) {
+		/* m starts at PAGE_SIZE, so the element's first (base) page
+		   is deliberately skipped; only tail pages are touched */
+		for (m = PAGE_SIZE; m < sg->length; m += PAGE_SIZE) {
+			page = sg->page;
+			if (startFinish)
+				get_page(page);
+			else {
+				/* guard avoids dropping a count below zero;
+				   NOTE(review): uses __put_page (raw decrement,
+				   no release path) rather than put_page —
+				   presumably intentional since the driver still
+				   owns the compound allocation; confirm */
+				if (page_count(page) > 0)
+					__put_page(page);
+			}
+		}
+	}
+}
+
static struct page *
sg_vma_nopage(struct vm_area_struct *vma, unsigned long addr, int *type)
{
len = vma->vm_end - sa;
len = (len < sg->length) ? len : sg->length;
if (offset < len) {
- page = virt_to_page(page_address(sg->page) + offset);
+ page = sg->page;
get_page(page); /* increment page count */
break;
}
sa += len;
}
- sfp->mmap_called = 1;
+ if (0 == sfp->mmap_called) {
+ sg_rb_correct4mmap(rsv_schp, 1); /* do only once per fd lifetime */
+ sfp->mmap_called = 1;
+ }
vma->vm_flags |= VM_RESERVED;
vma->vm_private_data = sfp;
vma->vm_ops = &sg_mmap_vm_ops;
void *old_sg_dev_arr = NULL;
int k, error;
- sdp = kzalloc(sizeof(Sg_device), GFP_KERNEL);
+ sdp = kmalloc(sizeof(Sg_device), GFP_KERNEL);
if (!sdp) {
printk(KERN_WARNING "kmalloc Sg_device failure\n");
return -ENOMEM;
int tmp_dev_max = sg_nr_dev + SG_DEV_ARR_LUMP;
write_unlock_irqrestore(&sg_dev_arr_lock, iflags);
- tmp_da = kzalloc(tmp_dev_max * sizeof(Sg_device *), GFP_KERNEL);
+ tmp_da = kmalloc(tmp_dev_max * sizeof(Sg_device *), GFP_KERNEL);
if (unlikely(!tmp_da))
goto expand_failed;
write_lock_irqsave(&sg_dev_arr_lock, iflags);
+ memset(tmp_da, 0, tmp_dev_max * sizeof(Sg_device *));
memcpy(tmp_da, sg_dev_arr, sg_dev_max * sizeof(Sg_device *));
old_sg_dev_arr = sg_dev_arr;
sg_dev_arr = tmp_da;
if (unlikely(k >= SG_MAX_DEVS))
goto overflow;
+ memset(sdp, 0, sizeof(*sdp));
SCSI_LOG_TIMEOUT(3, printk("sg_alloc: dev=%d \n", k));
sprintf(disk->disk_name, "sg%d", k);
disk->first_minor = k;
Sg_device *sdp = NULL;
struct cdev * cdev = NULL;
int error, k;
- unsigned long iflags;
disk = alloc_disk(1);
if (!disk) {
k = error;
sdp = sg_dev_arr[k];
+ devfs_mk_cdev(MKDEV(SCSI_GENERIC_MAJOR, k),
+ S_IFCHR | S_IRUSR | S_IWUSR | S_IRGRP,
+ "%s/generic", scsidp->devfs_name);
error = cdev_add(cdev, MKDEV(SCSI_GENERIC_MAJOR, k), 1);
- if (error)
- goto cdev_add_err;
-
+ if (error) {
+ devfs_remove("%s/generic", scsidp->devfs_name);
+ goto out;
+ }
sdp->cdev = cdev;
if (sg_sysfs_valid) {
struct class_device * sg_class_member;
return 0;
-cdev_add_err:
- write_lock_irqsave(&sg_dev_arr_lock, iflags);
- kfree(sg_dev_arr[k]);
- sg_dev_arr[k] = NULL;
- sg_nr_dev--;
- write_unlock_irqrestore(&sg_dev_arr_lock, iflags);
-
out:
put_disk(disk);
if (cdev)
class_device_destroy(sg_sysfs_class, MKDEV(SCSI_GENERIC_MAJOR, k));
cdev_del(sdp->cdev);
sdp->cdev = NULL;
+ devfs_remove("%s/generic", scsidp->devfs_name);
put_disk(sdp->disk);
sdp->disk = NULL;
if (NULL == sdp->headfp)
MODULE_DESCRIPTION("SCSI generic (sg) driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(SG_VERSION_STR);
-MODULE_ALIAS_CHARDEV_MAJOR(SCSI_GENERIC_MAJOR);
MODULE_PARM_DESC(def_reserved_size, "size of buffer reserved for each fd");
MODULE_PARM_DESC(allow_dio, "allow direct I/O (default: 0 (disallow))");
res = st_map_user_pages(schp->buffer, mx_sc_elems,
(unsigned long)hp->dxferp, dxfer_len,
(SG_DXFER_TO_DEV == hp->dxfer_direction) ? 1 : 0);
- if (res <= 0) {
- sg_remove_scat(schp);
+ if (res <= 0)
return 1;
- }
schp->k_use_sg = res;
schp->dio_in_use = 1;
hp->info |= SG_INFO_DIRECT_IO;
SCSI_LOG_TIMEOUT(6,
printk("__sg_remove_sfp: bufflen=%d, k_use_sg=%d\n",
(int) sfp->reserve.bufflen, (int) sfp->reserve.k_use_sg));
+ if (sfp->mmap_called)
+ sg_rb_correct4mmap(&sfp->reserve, 0); /* undo correction */
sg_remove_scat(&sfp->reserve);
}
sfp->parentdp = NULL;
return resp;
if (lowDma)
- page_mask = GFP_ATOMIC | GFP_DMA | __GFP_COMP | __GFP_NOWARN;
+ page_mask = GFP_ATOMIC | GFP_DMA | __GFP_NOWARN;
else
- page_mask = GFP_ATOMIC | __GFP_COMP | __GFP_NOWARN;
+ page_mask = GFP_ATOMIC | __GFP_NOWARN;
for (order = 0, a_size = PAGE_SIZE; a_size < rqSz;
order++, a_size <<= 1) ;
sg_proc_init(void)
{
int k, mask;
- int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);
+ int num_leaves =
+ sizeof (sg_proc_leaf_arr) / sizeof (sg_proc_leaf_arr[0]);
struct proc_dir_entry *pdep;
struct sg_proc_leaf * leaf;
sg_proc_cleanup(void)
{
int k;
- int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);
+ int num_leaves =
+ sizeof (sg_proc_leaf_arr) / sizeof (sg_proc_leaf_arr[0]);
if (!sg_proc_sgp)
return;
module_init(init_sg);
module_exit(exit_sg);
+MODULE_ALIAS_CHARDEV_MAJOR(SCSI_GENERIC_MAJOR);