+struct smb_hdr *
+cifs_small_buf_get(void)
+{
+	struct smb_hdr *ret_buf;
+
+	/* We could use the negotiated size instead of max_msgsize,
+	   but it may be more efficient to always allocate the same
+	   size; it is only slightly larger than necessary, and
+	   maxbuffersize defaults to this value and cannot be larger */
+	ret_buf = (struct smb_hdr *) mempool_alloc(cifs_sm_req_poolp,
+						   GFP_NOFS);
+	if (ret_buf) {
+		/* No need to clear memory here, it is cleared in
+		   header assemble */
+		/* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27); */
+		atomic_inc(&smBufAllocCount);
+#ifdef CONFIG_CIFS_STATS2
+		atomic_inc(&totSmBufAllocCount);
+#endif /* CONFIG_CIFS_STATS2 */
+	}
+	return ret_buf;
+}
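+
+/* For illustration, a minimal sketch of how the small-request mempool
+   used above could be created at module init time. The cache name,
+   buffer size (MAX_CIFS_SMALL_BUFFER_SIZE), and minimum pool depth
+   (cifs_min_small) are assumptions, not taken from this patch. */
+#if 0	/* illustrative sketch only, not part of this change */
+	cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
+			MAX_CIFS_SMALL_BUFFER_SIZE, 0,
+			SLAB_HWCACHE_ALIGN, NULL, NULL);
+	if (cifs_sm_req_cachep == NULL)
+		return -ENOMEM;
+
+	/* keep at least cifs_min_small buffers reserved so small
+	   requests can make progress even under memory pressure */
+	cifs_sm_req_poolp = mempool_create(cifs_min_small,
+			mempool_alloc_slab, mempool_free_slab,
+			cifs_sm_req_cachep);
+	if (cifs_sm_req_poolp == NULL) {
+		kmem_cache_destroy(cifs_sm_req_cachep);
+		return -ENOMEM;
+	}
+#endif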
+
+void
+cifs_small_buf_release(void *buf_to_free)
+{
+	if (buf_to_free == NULL) {
+		cFYI(1, ("Null buffer passed to cifs_small_buf_release"));
+		return;
+	}
+	mempool_free(buf_to_free, cifs_sm_req_poolp);
+
+	atomic_dec(&smBufAllocCount);
+	return;
+}
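+
+/* Typical caller pattern, shown only as a sketch: each buffer taken
+   from cifs_small_buf_get() must be released back to the pool exactly
+   once via cifs_small_buf_release() after the request completes. */
+#if 0	/* illustrative sketch only, not part of this change */
+	struct smb_hdr *out_buf = cifs_small_buf_get();
+	if (out_buf == NULL)
+		return -ENOMEM;
+	/* ... assemble header, send request, handle response ... */
+	cifs_small_buf_release(out_buf);
+#endif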
+
+/*
+ Find a free multiplex id (SMB mid). Otherwise there could be
+ mid collisions, which might cause the wrong response to be
+ demultiplexed to this request. Multiplex ids could collide if
+ one of a series of requests takes much longer than the others,
+ or if a very large number of long-lived requests (byte-range
+ locks or FindNotify requests) are pending. No more than
+ 64K-1 requests can be outstanding at one time. If no
+ mids are available, return zero. A future optimization
+ could make the combination of mid and uid the key we use
+ to demultiplex on (rather than mid alone).
+ In addition to the above check, the cifs demultiplex
+ code already uses the command code as a secondary
+ check on the frame, and if signing is negotiated the
+ response would be discarded if the mid were the same
+ but the signature were wrong. Since the mid is not put on the
+ pending queue until later (when it is about to be dispatched),
+ we do have to limit the number of outstanding requests
+ to somewhat less than 64K-1, although it is hard to imagine
+ so many threads being in the vfs at one time.
+*/
+__u16 GetNextMid(struct TCP_Server_Info *server)
+{
+	__u16 mid = 0;
+	__u16 last_mid;
+	int collision;
+
+	if (server == NULL)
+		return mid;
+
+	spin_lock(&GlobalMid_Lock);
+	last_mid = server->CurrentMid; /* we do not want to loop forever */
+	server->CurrentMid++;
+	/* This nested loop looks more expensive than it is.
+	   In practice the list of pending requests is short,
+	   fewer than 50, and the mids are likely to be unique
+	   on the first pass through the loop, unless some request
+	   is still pending after 64 thousand later requests have
+	   been issued (and it would also have to be a request
+	   that did not time out). */
+	while (server->CurrentMid != last_mid) {
+		struct list_head *tmp;
+		struct mid_q_entry *mid_entry;
+
+		collision = 0;
+		if (server->CurrentMid == 0)
+			server->CurrentMid++;
+
+		list_for_each(tmp, &server->pending_mid_q) {
+			mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
+
+			if ((mid_entry->mid == server->CurrentMid) &&
+			    (mid_entry->midState == MID_REQUEST_SUBMITTED)) {
+				/* This mid is in use, try a different one */
+				collision = 1;
+				break;
+			}
+		}
+		if (collision == 0) {
+			mid = server->CurrentMid;
+			break;
+		}
+		server->CurrentMid++;
+	}
+	spin_unlock(&GlobalMid_Lock);
+	return mid;
+}
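+
+/* For illustration, how a caller such as header assembly might consume
+   GetNextMid(). The surrounding names (treeCon, buffer) and the error
+   handling shown are assumptions for the sketch; a return of zero
+   means no free mid was available. */
+#if 0	/* illustrative sketch only, not part of this change */
+	__u16 mid = GetNextMid(treeCon->ses->server);
+	if (mid == 0)
+		return -EAGAIN;	/* all usable mids are in flight */
+	buffer->Mid = mid;
+#endif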
+
+/* NB: MID cannot be set if treeCon is not passed in; in that
+   case it is the responsibility of the caller to set the mid */