4 * Copyright (C) International Business Machines Corp., 2002,2004
5 * Author(s): Steve French (sfrench@us.ibm.com)
7 * Common Internet FileSystem (CIFS) client
9 * This library is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU Lesser General Public License as published
11 * by the Free Software Foundation; either version 2.1 of the License, or
12 * (at your option) any later version.
14 * This library is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
17 * the GNU Lesser General Public License for more details.
19 * You should have received a copy of the GNU Lesser General Public License
20 * along with this library; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
24 /* Note that BB means BUGBUG (ie something to fix eventually) */
26 #include <linux/module.h>
28 #include <linux/mount.h>
29 #include <linux/slab.h>
30 #include <linux/init.h>
31 #include <linux/list.h>
32 #include <linux/seq_file.h>
33 #include <linux/vfs.h>
34 #include <linux/mempool.h>
37 #define DECLARE_GLOBALS_HERE
39 #include "cifsproto.h"
40 #include "cifs_debug.h"
41 #include "cifs_fs_sb.h"
43 #define CIFS_MAGIC_NUMBER 0xFF534D42 /* the first four bytes of SMB PDUs */
45 #ifdef CONFIG_CIFS_QUOTA
46 static struct quotactl_ops cifs_quotactl_ops;
/*
 * Global CIFS client tunables.  Several are exported as module parameters.
 * NOTE(review): this extract elides some original lines (see numbering gaps).
 */
52 unsigned int oplockEnabled = 1;
53 unsigned int experimEnabled = 0;
54 unsigned int linuxExtEnabled = 1;
55 unsigned int lookupCacheEnabled = 1;
56 unsigned int multiuser_mount = 0;
57 unsigned int extended_security = 0;
58 unsigned int ntlmv2_support = 0;
59 unsigned int sign_CIFS_PDUs = 1;
60 struct task_struct * oplockThread = NULL;
61 unsigned int CIFSMaxBufSize = CIFS_MAX_MSGSIZE;
/* BUG FIX: the third argument of module_param() is a sysfs permission mode
   (e.g. 0 or 0444), not a default or limit.  Passing values such as
   CIFS_MAX_MSGSIZE here was wrong; use 0 (not visible in sysfs). */
62 module_param(CIFSMaxBufSize, int, 0);
63 MODULE_PARM_DESC(CIFSMaxBufSize,"Network buffer size (not including header). Default: 16384 Range: 8192 to 130048");
64 unsigned int cifs_min_rcv = CIFS_MIN_RCV_POOL;
65 module_param(cifs_min_rcv, int, 0);
66 MODULE_PARM_DESC(cifs_min_rcv,"Network buffers in pool. Default: 4 Range: 1 to 64");
67 unsigned int cifs_min_small = 30;
68 module_param(cifs_min_small, int, 0);
/* BUG FIX: MODULE_PARM_DESC must name the actual parameter (cifs_min_small);
   it previously referenced a nonexistent "cifs_small_rcv". */
69 MODULE_PARM_DESC(cifs_min_small,"Small network buffers in pool. Default: 30 Range: 2 to 256");
70 unsigned int cifs_max_pending = CIFS_MAX_REQ;
71 module_param(cifs_max_pending, int, 0);
72 MODULE_PARM_DESC(cifs_max_pending,"Simultaneous requests to server. Default: 50 Range: 2 to 256");
/* Forward declarations for mount/umount helpers defined elsewhere in the
   module, plus the completion used to wait for oplock-thread exit. */
75 extern int cifs_mount(struct super_block *, struct cifs_sb_info *, char *,
77 extern int cifs_umount(struct super_block *, struct cifs_sb_info *);
78 void cifs_proc_init(void);
79 void cifs_proc_clean(void);
/* signalled by cifs_oplock_thread on exit; waited on in exit_cifs */
81 static DECLARE_COMPLETION(cifs_oplock_exited);
/*
 * Fill in a superblock at mount time: allocate the per-sb cifs_sb_info,
 * establish the server/tree connection via cifs_mount(), then set up the
 * root inode and dentry.  NOTE(review): this extract elides several lines
 * (return type, error paths, braces) — see numbering gaps.
 */
85 cifs_read_super(struct super_block *sb, void *data,
86 const char *devname, int silent)
89 struct cifs_sb_info *cifs_sb;
92 sb->s_flags |= MS_NODIRATIME; /* and probably even noatime */
/* NOTE(review): kmalloc result is NULL-checked on an elided line, presumably
   — confirm against full source */
93 sb->s_fs_info = kmalloc(sizeof(struct cifs_sb_info),GFP_KERNEL);
94 cifs_sb = CIFS_SB(sb);
98 memset(cifs_sb,0,sizeof(struct cifs_sb_info));
/* data is the raw mount option string; devname is the UNC path */
101 rc = cifs_mount(sb, cifs_sb, data, devname);
106 ("cifs_mount failed w/return code = %d", rc));
107 goto out_mount_failed;
110 sb->s_magic = CIFS_MAGIC_NUMBER;
111 sb->s_op = &cifs_super_ops;
112 /* if(cifs_sb->tcon->ses->server->maxBuf > MAX_CIFS_HDR_SIZE + 512)
113 sb->s_blocksize = cifs_sb->tcon->ses->server->maxBuf - MAX_CIFS_HDR_SIZE; */
114 #ifdef CONFIG_CIFS_QUOTA
115 sb->s_qcop = &cifs_quotactl_ops;
117 sb->s_blocksize = CIFS_MAX_MSGSIZE;
118 sb->s_blocksize_bits = 14; /* default 2**14 = CIFS_MAX_MSGSIZE */
119 inode = iget(sb, ROOT_I);
126 sb->s_root = d_alloc_root(inode);
136 cERROR(1, ("cifs_read_super: get root inode failed"));
/* error-path cleanup: drop the NLS table acquired during mount */
142 if(cifs_sb->local_nls)
143 unload_nls(cifs_sb->local_nls);
/*
 * Superblock teardown: disconnect from the server via cifs_umount() and
 * release the NLS (charset) table.  NOTE(review): extract elides braces and
 * the early-return in the NULL cifs_sb case.
 */
150 cifs_put_super(struct super_block *sb)
153 struct cifs_sb_info *cifs_sb;
155 cFYI(1, ("In cifs_put_super"));
156 cifs_sb = CIFS_SB(sb);
157 if(cifs_sb == NULL) {
158 cFYI(1,("Empty cifs superblock info passed to unmount"));
161 rc = cifs_umount(sb, cifs_sb);
163 cERROR(1, ("cifs_umount failed with return code %d", rc));
165 unload_nls(cifs_sb->local_nls);
/*
 * statfs(2) handler: queries the server for filesystem size/usage info via
 * CIFSSMBQFSInfo and fills *buf.  Always reports success to the VFS even if
 * the SMB query failed (see comment on the final return).
 */
171 cifs_statfs(struct super_block *sb, struct kstatfs *buf)
174 struct cifs_sb_info *cifs_sb;
175 struct cifsTconInfo *pTcon;
179 cifs_sb = CIFS_SB(sb);
180 pTcon = cifs_sb->tcon;
182 buf->f_type = CIFS_MAGIC_NUMBER;
184 /* instead could get the real value via SMB_QUERY_FS_ATTRIBUTE_INFO */
185 buf->f_namelen = PATH_MAX; /* PATH_MAX may be too long - it would presumably
186 be length of total path, note that some servers may be
187 able to support more than this, but best to be safe
188 since Win2k and others can not handle very long filenames */
/* inode counts are not meaningful over SMB */
189 buf->f_files = 0; /* undefined */
190 buf->f_ffree = 0; /* unlimited */
192 rc = CIFSSMBQFSInfo(xid, pTcon, buf, cifs_sb->local_nls);
198 /* BB get from info put in tcon struct at mount time with call to QFSAttrInfo */
200 return 0; /* always return success? what if volume is no longer available? */
/*
 * Permission check hook.  With the "noperm" mount flag, client-side mode-bit
 * checks are skipped (server ACLs still apply); otherwise defer to the
 * generic VFS mode-bit check.  NOTE(review): the body of the NO_PERM branch
 * (presumably "return 0;") is elided in this extract.
 */
203 static int cifs_permission(struct inode * inode, int mask, struct nameidata *nd)
205 struct cifs_sb_info *cifs_sb;
207 cifs_sb = CIFS_SB(inode->i_sb);
209 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM) {
211 } else /* file mode might have been restricted at mount time
212 on the client (above and beyond ACL on servers) for
213 servers which do not support setting and viewing mode bits,
214 so allowing client to check permissions is useful */
215 return generic_permission(inode, mask, NULL);
/* Slab caches and mempools used by the module.  The non-static ones are
   referenced from other CIFS translation units. */
218 static kmem_cache_t *cifs_inode_cachep;
219 static kmem_cache_t *cifs_req_cachep;
220 static kmem_cache_t *cifs_mid_cachep;
221 kmem_cache_t *cifs_oplock_cachep;
222 static kmem_cache_t *cifs_sm_req_cachep;
223 mempool_t *cifs_sm_req_poolp;
224 mempool_t *cifs_req_poolp;
225 mempool_t *cifs_mid_poolp;
/*
 * Allocate a cifsInodeInfo from the inode slab cache and initialize the
 * CIFS-specific fields; returns the embedded VFS inode.  NOTE(review): the
 * allocation-failure check is elided in this extract.
 */
227 static struct inode *
228 cifs_alloc_inode(struct super_block *sb)
230 struct cifsInodeInfo *cifs_inode;
232 (struct cifsInodeInfo *) kmem_cache_alloc(cifs_inode_cachep,
236 cifs_inode->cifsAttrs = 0x20; /* default */
237 atomic_set(&cifs_inode->inUse, 0);
238 cifs_inode->time = 0;
239 /* Until the file is open and we have gotten oplock
240 info back from the server, can not assume caching of
241 file data or metadata */
242 cifs_inode->clientCanCacheRead = FALSE;
243 cifs_inode->clientCanCacheAll = FALSE;
244 cifs_inode->vfs_inode.i_blksize = CIFS_MAX_MSGSIZE;
245 cifs_inode->vfs_inode.i_blkbits = 14; /* 2**14 = CIFS_MAX_MSGSIZE */
247 INIT_LIST_HEAD(&cifs_inode->openFileList);
248 return &cifs_inode->vfs_inode;
/* Counterpart to cifs_alloc_inode: return the containing cifsInodeInfo
   to the slab cache. */
252 cifs_destroy_inode(struct inode *inode)
254 kmem_cache_free(cifs_inode_cachep, CIFS_I(inode));
258 * cifs_show_options() is for displaying mount options in /proc/mounts.
259 * Not all settable options are displayed but most of the important
/* Emits UNC name, user/domain (when present) and rsize/wsize for this
   mount.  NOTE(review): NULL checks on cifs_sb/tcon appear elided here. */
263 cifs_show_options(struct seq_file *s, struct vfsmount *m)
265 struct cifs_sb_info *cifs_sb;
267 cifs_sb = CIFS_SB(m->mnt_sb);
271 seq_printf(s, ",unc=%s", cifs_sb->tcon->treeName);
272 if (cifs_sb->tcon->ses) {
273 if (cifs_sb->tcon->ses->userName)
274 seq_printf(s, ",username=%s",
275 cifs_sb->tcon->ses->userName);
276 if(cifs_sb->tcon->ses->domainName)
277 seq_printf(s, ",domain=%s",
278 cifs_sb->tcon->ses->domainName);
281 seq_printf(s, ",rsize=%d",cifs_sb->rsize);
282 seq_printf(s, ",wsize=%d",cifs_sb->wsize);
287 #ifdef CONFIG_CIFS_QUOTA
/*
 * XFS-style quota operation handlers (set/get per-id quota, set/get quota
 * state).  These are placeholders that only log their arguments; bodies are
 * heavily elided in this extract.  NOTE(review): the cFYI message in the
 * "get" handler says "set type" — looks like a copy-paste of the setter's
 * message; confirm against full source.
 */
288 int cifs_xquota_set(struct super_block * sb, int quota_type, qid_t qid,
289 struct fs_disk_quota * pdquota)
293 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
294 struct cifsTconInfo *pTcon;
297 pTcon = cifs_sb->tcon;
304 cFYI(1,("set type: 0x%x id: %d",quota_type,qid));
313 int cifs_xquota_get(struct super_block * sb, int quota_type, qid_t qid,
314 struct fs_disk_quota * pdquota)
318 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
319 struct cifsTconInfo *pTcon;
322 pTcon = cifs_sb->tcon;
328 cFYI(1,("set type: 0x%x id: %d",quota_type,qid));
337 int cifs_xstate_set(struct super_block * sb, unsigned int flags, int operation)
341 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
342 struct cifsTconInfo *pTcon;
345 pTcon = cifs_sb->tcon;
351 cFYI(1,("flags: 0x%x operation: 0x%x",flags,operation));
360 int cifs_xstate_get(struct super_block * sb, struct fs_quota_stat *qstats)
364 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
365 struct cifsTconInfo *pTcon;
368 pTcon = cifs_sb->tcon;
374 cFYI(1,("pqstats %p",qstats));
/* Quota operation table wired into sb->s_qcop in cifs_read_super. */
383 static struct quotactl_ops cifs_quotactl_ops = {
384 .set_xquota = cifs_xquota_set,
/* BUG FIX: .get_xquota previously pointed at cifs_xquota_set (copy-paste
   error) — wire it to the getter, cifs_xquota_get, defined above. */
385 .get_xquota = cifs_xquota_get,
386 .set_xstate = cifs_xstate_set,
387 .get_xstate = cifs_xstate_get,
/* remount_fs hook: force MS_NODIRATIME on remount, matching the flag set at
   initial mount time in cifs_read_super.  NOTE(review): the "return 0;" and
   closing brace are elided in this extract. */
391 static int cifs_remount(struct super_block *sb, int *flags, char *data)
393 *flags |= MS_NODIRATIME;
/* Superblock operations table installed by cifs_read_super. */
397 struct super_operations cifs_super_ops = {
398 .read_inode = cifs_read_inode,
399 .put_super = cifs_put_super,
400 .statfs = cifs_statfs,
401 .alloc_inode = cifs_alloc_inode,
402 .destroy_inode = cifs_destroy_inode,
403 /* .drop_inode = generic_delete_inode,
404 .delete_inode = cifs_delete_inode, *//* Do not need the above two functions
405 unless later we add lazy close of inodes or unless the kernel forgets to call
406 us with the same number of releases (closes) as opens */
407 .show_options = cifs_show_options,
408 /* .umount_begin = cifs_umount_begin, *//* consider adding in the future */
409 .remount_fs = cifs_remount,
/*
 * get_sb hook: obtain an anonymous superblock via sget(), populate it with
 * cifs_read_super(), and mark it active.  On read_super failure the sb is
 * deactivated and released.  NOTE(review): error-handling lines are elided.
 */
412 static struct super_block *
413 cifs_get_sb(struct file_system_type *fs_type,
414 int flags, const char *dev_name, void *data)
417 struct super_block *sb = sget(fs_type, NULL, set_anon_super, NULL);
419 cFYI(1, ("Devname: %s flags: %d ", dev_name, flags));
426 rc = cifs_read_super(sb, data, dev_name, flags & MS_VERBOSE ? 1 : 0);
428 up_write(&sb->s_umount);
429 deactivate_super(sb);
432 sb->s_flags |= MS_ACTIVE;
/*
 * read() entry point: validates the file/dentry/inode chain, routes
 * direct-I/O mounts (CIFS_MOUNT_DIRECT_IO) to cifs_user_read(), and
 * otherwise falls through to generic_file_read() (page-cache path).
 * NOTE(review): several guard-clause return lines are elided here.
 */
437 cifs_read_wrapper(struct file * file, char __user *read_data, size_t read_size,
442 else if(file->f_dentry == NULL)
444 else if(file->f_dentry->d_inode == NULL)
447 cFYI(1,("In read_wrapper size %zd at %lld",read_size,*poffset));
449 #ifdef CONFIG_CIFS_EXPERIMENTAL
450 /* check whether we can cache writes locally */
451 if(file->f_dentry->d_sb) {
452 struct cifs_sb_info *cifs_sb;
453 cifs_sb = CIFS_SB(file->f_dentry->d_sb);
454 if(cifs_sb != NULL) {
455 if(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO)
456 return cifs_user_read(file,read_data,
460 #endif /* CIFS_EXPERIMENTAL */
/* with a read oplock the page cache is trustworthy */
462 if(CIFS_I(file->f_dentry->d_inode)->clientCanCacheRead) {
463 return generic_file_read(file,read_data,read_size,poffset);
465 /* BB do we need to lock inode from here until after invalidate? */
466 /* if(file->f_dentry->d_inode->i_mapping) {
467 filemap_fdatawrite(file->f_dentry->d_inode->i_mapping);
468 filemap_fdatawait(file->f_dentry->d_inode->i_mapping);
470 /* cifs_revalidate(file->f_dentry);*/ /* BB fixme */
472 /* BB we should make timer configurable - perhaps
473 by simply calling cifs_revalidate here */
474 /* invalidate_remote_inode(file->f_dentry->d_inode);*/
475 return generic_file_read(file,read_data,read_size,poffset);
/*
 * write() entry point: mirror of cifs_read_wrapper.  Direct-I/O mounts go to
 * cifs_user_write(); otherwise write through generic_file_write() and, when
 * we lack a write oplock (clientCanCacheAll == 0), push dirty pages to the
 * server immediately with filemap_fdatawrite().  NOTE(review): guard-clause
 * returns and the final return are elided in this extract.
 */
480 cifs_write_wrapper(struct file * file, const char __user *write_data,
481 size_t write_size, loff_t * poffset)
487 else if(file->f_dentry == NULL)
489 else if(file->f_dentry->d_inode == NULL)
492 cFYI(1,("In write_wrapper size %zd at %lld",write_size,*poffset));
494 #ifdef CONFIG_CIFS_EXPERIMENTAL /* BB fixme - fix user char * to kernel char * mapping here BB */
495 /* check whether we can cache writes locally */
496 if(file->f_dentry->d_sb) {
497 struct cifs_sb_info *cifs_sb;
498 cifs_sb = CIFS_SB(file->f_dentry->d_sb);
499 if(cifs_sb != NULL) {
500 if(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
501 return cifs_user_write(file,write_data,
506 #endif /* CIFS_EXPERIMENTAL */
507 written = generic_file_write(file,write_data,write_size,poffset);
508 if(!CIFS_I(file->f_dentry->d_inode)->clientCanCacheAll) {
509 if(file->f_dentry->d_inode->i_mapping) {
510 filemap_fdatawrite(file->f_dentry->d_inode->i_mapping);
/* Filesystem type registered in init_cifs.  NOTE(review): the .name field
   line is elided in this extract. */
517 static struct file_system_type cifs_fs_type = {
518 .owner = THIS_MODULE,
520 .get_sb = cifs_get_sb,
521 .kill_sb = kill_anon_super,
/* Inode operations for directories. */
524 struct inode_operations cifs_dir_inode_ops = {
525 .create = cifs_create,
526 .lookup = cifs_lookup,
527 .getattr = cifs_getattr,
528 .unlink = cifs_unlink,
529 .link = cifs_hardlink,
532 .rename = cifs_rename,
533 .permission = cifs_permission,
534 /* revalidate:cifs_revalidate, */
535 .setattr = cifs_setattr,
536 .symlink = cifs_symlink,
538 #ifdef CONFIG_CIFS_XATTR
539 .setxattr = cifs_setxattr,
540 .getxattr = cifs_getxattr,
541 .listxattr = cifs_listxattr,
542 .removexattr = cifs_removexattr,
/* Inode operations for regular files. */
546 struct inode_operations cifs_file_inode_ops = {
547 /* revalidate:cifs_revalidate, */
548 .setattr = cifs_setattr,
549 .getattr = cifs_getattr, /* do we need this anymore? */
550 .rename = cifs_rename,
551 .permission = cifs_permission,
552 #ifdef CONFIG_CIFS_XATTR
553 .setxattr = cifs_setxattr,
554 .getxattr = cifs_getxattr,
555 .listxattr = cifs_listxattr,
556 .removexattr = cifs_removexattr,
/* Inode operations for symbolic links. */
560 struct inode_operations cifs_symlink_inode_ops = {
561 .readlink = generic_readlink,
562 .follow_link = cifs_follow_link,
563 .put_link = cifs_put_link,
564 .permission = cifs_permission,
565 /* BB add the following two eventually */
566 /* revalidate: cifs_revalidate,
567 setattr: cifs_notify_change, *//* BB do we need notify change */
568 #ifdef CONFIG_CIFS_XATTR
569 .setxattr = cifs_setxattr,
570 .getxattr = cifs_getxattr,
571 .listxattr = cifs_listxattr,
572 .removexattr = cifs_removexattr,
/* File operations for regular files; read/write go through the caching
   wrappers defined above. */
576 struct file_operations cifs_file_ops = {
577 .read = cifs_read_wrapper,
578 .write = cifs_write_wrapper,
580 .release = cifs_close,
584 .mmap = cifs_file_mmap,
585 .sendfile = generic_file_sendfile,
586 #ifdef CONFIG_CIFS_EXPERIMENTAL
587 .dir_notify = cifs_dir_notify,
588 #endif /* CONFIG_CIFS_EXPERIMENTAL */
/* File operations for directories. */
591 struct file_operations cifs_dir_ops = {
592 .readdir = cifs_readdir,
593 .release = cifs_closedir,
594 .read = generic_read_dir,
595 #ifdef CONFIG_CIFS_EXPERIMENTAL
596 .dir_notify = cifs_dir_notify,
597 #endif /* CONFIG_CIFS_EXPERIMENTAL */
/* Slab constructor for cifs_inode_cachep: initialize the embedded VFS inode
   and the per-inode byte-range lock list exactly once per slab object. */
601 cifs_init_once(void *inode, kmem_cache_t * cachep, unsigned long flags)
603 struct cifsInodeInfo *cifsi = (struct cifsInodeInfo *) inode;
/* only run on first construction, not on slab verify passes */
605 if ((flags & (SLAB_CTOR_VERIFY | SLAB_CTOR_CONSTRUCTOR)) ==
606 SLAB_CTOR_CONSTRUCTOR) {
607 inode_init_once(&cifsi->vfs_inode);
608 INIT_LIST_HEAD(&cifsi->lockList);
/* Create the inode slab cache at module init; destroyed at module exit. */
613 cifs_init_inodecache(void)
615 cifs_inode_cachep = kmem_cache_create("cifs_inode_cache",
616 sizeof (struct cifsInodeInfo),
617 0, SLAB_RECLAIM_ACCOUNT,
618 cifs_init_once, NULL);
619 if (cifs_inode_cachep == NULL)
/* counterpart: tear down the inode cache; destroy failure implies leaked
   objects, hence the warning */
626 cifs_destroy_inodecache(void)
628 if (kmem_cache_destroy(cifs_inode_cachep))
629 printk(KERN_WARNING "cifs_inode_cache: error freeing\n");
/*
 * Create the large and small SMB request buffer slab caches and their
 * mempools, after clamping the tunables (CIFSMaxBufSize, cifs_min_rcv,
 * cifs_min_small) into valid ranges.  On partial failure, previously
 * created caches/pools are destroyed in reverse order.  NOTE(review):
 * several lines (returns, mempool args) are elided in this extract.
 */
633 cifs_init_request_bufs(void)
635 if(CIFSMaxBufSize < 8192) {
636 /* Buffer size can not be smaller than 2 * PATH_MAX since maximum
637 Unicode path name has to fit in any SMB/CIFS path based frames */
638 CIFSMaxBufSize = 8192;
639 } else if (CIFSMaxBufSize > 1024*127) {
640 CIFSMaxBufSize = 1024 * 127;
642 CIFSMaxBufSize &= 0x1FE00; /* Round size to even 512 byte mult*/
644 /* cERROR(1,("CIFSMaxBufSize %d 0x%x",CIFSMaxBufSize,CIFSMaxBufSize)); */
645 cifs_req_cachep = kmem_cache_create("cifs_request",
647 MAX_CIFS_HDR_SIZE, 0,
648 SLAB_HWCACHE_ALIGN, NULL, NULL);
649 if (cifs_req_cachep == NULL)
/* clamp receive-pool size tunable to its documented max */
654 else if (cifs_min_rcv > 64) {
656 cFYI(1,("cifs_min_rcv set to maximum (64)"));
659 cifs_req_poolp = mempool_create(cifs_min_rcv,
/* cleanup on mempool failure */
664 if(cifs_req_poolp == NULL) {
665 kmem_cache_destroy(cifs_req_cachep);
668 /* 256 (MAX_CIFS_HDR_SIZE bytes is enough for most SMB responses and
669 almost all handle based requests (but not write response, nor is it
670 sufficient for path based requests). A smaller size would have
671 been more efficient (compacting multiple slab items on one 4k page)
672 for the case in which debug was on, but this larger size allows
673 more SMBs to use small buffer alloc and is still much more
674 efficient to alloc 1 per page off the slab compared to 17K (5page)
675 alloc of large cifs buffers even when page debugging is on */
676 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
677 MAX_CIFS_HDR_SIZE, 0, SLAB_HWCACHE_ALIGN, NULL, NULL);
678 if (cifs_sm_req_cachep == NULL) {
679 mempool_destroy(cifs_req_poolp);
680 kmem_cache_destroy(cifs_req_cachep);
/* clamp small-buffer pool size tunable to [2, 256] */
684 if(cifs_min_small < 2)
686 else if (cifs_min_small > 256) {
687 cifs_min_small = 256;
688 cFYI(1,("cifs_min_small set to maximum (256)"));
691 cifs_sm_req_poolp = mempool_create(cifs_min_small,
696 if(cifs_sm_req_poolp == NULL) {
697 mempool_destroy(cifs_req_poolp);
698 kmem_cache_destroy(cifs_req_cachep);
699 kmem_cache_destroy(cifs_sm_req_cachep);
/* Tear down the request buffer mempools and slab caches created by
   cifs_init_request_bufs (pool before its backing cache). */
707 cifs_destroy_request_bufs(void)
709 mempool_destroy(cifs_req_poolp);
710 if (kmem_cache_destroy(cifs_req_cachep))
712 "cifs_destroy_request_cache: error not all structures were freed\n");
713 mempool_destroy(cifs_sm_req_poolp);
714 if (kmem_cache_destroy(cifs_sm_req_cachep))
716 "cifs_destroy_request_cache: cifs_small_rq free error\n");
/*
 * Body of cifs_init_mids (entry line elided in this extract): creates the
 * multiplex-id (mid_q_entry) slab cache + mempool and the oplock queue
 * entry cache, unwinding on each failure.
 */
722 cifs_mid_cachep = kmem_cache_create("cifs_mpx_ids",
723 sizeof (struct mid_q_entry), 0,
724 SLAB_HWCACHE_ALIGN, NULL, NULL);
725 if (cifs_mid_cachep == NULL)
728 cifs_mid_poolp = mempool_create(3 /* a reasonable min simultan opers */,
732 if(cifs_mid_poolp == NULL) {
733 kmem_cache_destroy(cifs_mid_cachep);
737 cifs_oplock_cachep = kmem_cache_create("cifs_oplock_structs",
738 sizeof (struct oplock_q_entry), 0,
739 SLAB_HWCACHE_ALIGN, NULL, NULL);
740 if (cifs_oplock_cachep == NULL) {
741 kmem_cache_destroy(cifs_mid_cachep);
742 mempool_destroy(cifs_mid_poolp);
/* Tear down the mid mempool/cache and the oplock entry cache created by
   cifs_init_mids. */
750 cifs_destroy_mids(void)
752 mempool_destroy(cifs_mid_poolp);
753 if (kmem_cache_destroy(cifs_mid_cachep))
755 "cifs_destroy_mids: error not all structures were freed\n");
757 if (kmem_cache_destroy(cifs_oplock_cachep))
759 "error not all oplock structures were freed\n");
/*
 * Kernel thread servicing the global oplock-break queue: for each queued
 * entry it flushes (and, if we lose the read oplock, invalidates) the
 * inode's page cache, then sends an oplock-release lock request to the
 * server.  Exits on SIGTERM (sent from exit_cifs) and signals
 * cifs_oplock_exited.  NOTE(review): loop braces/returns are elided in
 * this extract.
 */
762 static int cifs_oplock_thread(void * dummyarg)
764 struct oplock_q_entry * oplock_item;
765 struct cifsTconInfo *pTcon;
766 struct inode * inode;
770 daemonize("cifsoplockd");
771 allow_signal(SIGTERM);
773 oplockThread = current;
775 set_current_state(TASK_INTERRUPTIBLE);
777 schedule_timeout(1*HZ);
778 spin_lock(&GlobalMid_Lock);
779 if(list_empty(&GlobalOplock_Q)) {
780 spin_unlock(&GlobalMid_Lock);
/* nothing queued: sleep longer before polling again */
781 set_current_state(TASK_INTERRUPTIBLE);
782 schedule_timeout(39*HZ);
784 oplock_item = list_entry(GlobalOplock_Q.next,
785 struct oplock_q_entry, qhead);
787 cFYI(1,("found oplock item to write out"));
788 pTcon = oplock_item->tcon;
789 inode = oplock_item->pinode;
790 netfid = oplock_item->netfid;
/* copy fields out, then drop the lock before doing I/O */
791 spin_unlock(&GlobalMid_Lock);
792 DeleteOplockQEntry(oplock_item);
793 /* can not grab inode sem here since it would
794 deadlock when oplock received on delete
795 since vfs_unlink holds the i_sem across
797 /* down(&inode->i_sem);*/
798 if (S_ISREG(inode->i_mode)) {
799 rc = filemap_fdatawrite(inode->i_mapping);
800 if(CIFS_I(inode)->clientCanCacheRead == 0) {
801 filemap_fdatawait(inode->i_mapping);
802 invalidate_remote_inode(inode);
806 /* up(&inode->i_sem);*/
/* remember flush failure so a later close can report it */
808 CIFS_I(inode)->write_behind_rc = rc;
809 cFYI(1,("Oplock flush inode %p rc %d",inode,rc));
811 /* releasing a stale oplock after recent reconnection
812 of smb session using a now incorrect file
813 handle is not a data integrity issue but do
814 not bother sending an oplock release if session
815 to server still is disconnected since oplock
816 already released by the server in that case */
817 if(pTcon->tidStatus != CifsNeedReconnect) {
818 rc = CIFSSMBLock(0, pTcon, netfid,
819 0 /* len */ , 0 /* offset */, 0,
820 0, LOCKING_ANDX_OPLOCK_RELEASE,
822 cFYI(1,("Oplock release rc = %d ",rc));
825 spin_unlock(&GlobalMid_Lock);
827 } while(!signal_pending(current));
828 complete_and_exit (&cifs_oplock_exited, 0);
/*
 * Body of the module init function (entry line elided in this extract):
 * initialize global lists/counters/locks, clamp cifs_max_pending, create
 * the slab caches, register the filesystem, and spawn the oplock thread.
 * Failure paths unwind in reverse order.
 */
835 #ifdef CONFIG_PROC_FS
838 INIT_LIST_HEAD(&GlobalServerList); /* BB not implemented yet */
839 INIT_LIST_HEAD(&GlobalSMBSessionList);
840 INIT_LIST_HEAD(&GlobalTreeConnectionList);
841 INIT_LIST_HEAD(&GlobalOplock_Q);
843 * Initialize Global counters
845 atomic_set(&sesInfoAllocCount, 0);
846 atomic_set(&tconInfoAllocCount, 0);
847 atomic_set(&tcpSesAllocCount,0);
848 atomic_set(&tcpSesReconnectCount, 0);
849 atomic_set(&tconInfoReconnectCount, 0);
851 atomic_set(&bufAllocCount, 0);
852 atomic_set(&midCount, 0);
853 GlobalCurrentXid = 0;
854 GlobalTotalActiveXid = 0;
855 GlobalMaxActiveXid = 0;
856 rwlock_init(&GlobalSMBSeslock);
857 spin_lock_init(&GlobalMid_Lock);
/* clamp simultaneous-request tunable to [2, 256] */
859 if(cifs_max_pending < 2) {
860 cifs_max_pending = 2;
861 cFYI(1,("cifs_max_pending set to min of 2"));
862 } else if(cifs_max_pending > 256) {
863 cifs_max_pending = 256;
864 cFYI(1,("cifs_max_pending set to max of 256"));
867 rc = cifs_init_inodecache();
869 rc = cifs_init_mids();
871 rc = cifs_init_request_bufs();
873 rc = register_filesystem(&cifs_fs_type);
875 rc = (int)kernel_thread(cifs_oplock_thread, NULL,
876 CLONE_FS | CLONE_FILES | CLONE_VM);
/* kernel_thread returns negative on failure */
880 cERROR(1,("error %d create oplock thread",rc));
882 cifs_destroy_request_bufs();
886 cifs_destroy_inodecache();
888 #ifdef CONFIG_PROC_FS
/*
 * Body of the module exit function (entry line elided in this extract):
 * unregister the filesystem, free caches/pools, then stop the oplock thread
 * with SIGTERM and wait for it to signal cifs_oplock_exited.
 */
897 cFYI(0, ("In unregister ie exit_cifs"));
898 #ifdef CONFIG_PROC_FS
901 unregister_filesystem(&cifs_fs_type);
902 cifs_destroy_inodecache();
904 cifs_destroy_request_bufs();
906 send_sig(SIGTERM, oplockThread, 1);
907 wait_for_completion(&cifs_oplock_exited);
/* Module metadata and init/exit registration.  module_init/module_exit take
   the function name with no trailing semicolon per this file's style. */
911 MODULE_AUTHOR("Steve French <sfrench@us.ibm.com>");
912 MODULE_LICENSE("GPL"); /* combination of LGPL + GPL source behaves as GPL */
914 ("VFS to access servers complying with the SNIA CIFS Specification e.g. Samba and Windows");
915 MODULE_VERSION(CIFS_VERSION);
916 module_init(init_cifs)
917 module_exit(exit_cifs)