patch-2_6_7-vs1_9_1_12
[linux-2.6.git] / drivers / scsi / aacraid / commsup.c
1 /*
2  *      Adaptec AAC series RAID controller driver
3  *      (c) Copyright 2001 Red Hat Inc. <alan@redhat.com>
4  *
5  * based on the old aacraid driver that is..
6  * Adaptec aacraid device driver for Linux.
7  *
8  * Copyright (c) 2000 Adaptec, Inc. (aacraid@adaptec.com)
9  *
10  * This program is free software; you can redistribute it and/or modify
11  * it under the terms of the GNU General Public License as published by
12  * the Free Software Foundation; either version 2, or (at your option)
13  * any later version.
14  *
15  * This program is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
18  * GNU General Public License for more details.
19  *
20  * You should have received a copy of the GNU General Public License
21  * along with this program; see the file COPYING.  If not, write to
22  * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
23  *
24  * Module Name:
25  *  commsup.c
26  *
 27  * Abstract: Contains all routines that are required for FSA host/adapter
 28  *    communication.
29  *
30  */
31
32 #include <linux/kernel.h>
33 #include <linux/init.h>
34 #include <linux/types.h>
35 #include <linux/sched.h>
36 #include <linux/pci.h>
37 #include <linux/spinlock.h>
38 #include <linux/slab.h>
39 #include <linux/completion.h>
40 #include <linux/blkdev.h>
41 #include <asm/semaphore.h>
42
43 #include "aacraid.h"
44
45 /**
46  *      fib_map_alloc           -       allocate the fib objects
47  *      @dev: Adapter to allocate for
48  *
49  *      Allocate and map the shared PCI space for the FIB blocks used to
50  *      talk to the Adaptec firmware.
51  */
52  
53 static int fib_map_alloc(struct aac_dev *dev)
54 {
55         if((dev->hw_fib_va = pci_alloc_consistent(dev->pdev, sizeof(struct hw_fib) * AAC_NUM_FIB, &dev->hw_fib_pa))==NULL)
56                 return -ENOMEM;
57         return 0;
58 }
59
60 /**
61  *      fib_map_free            -       free the fib objects
62  *      @dev: Adapter to free
63  *
64  *      Free the PCI mappings and the memory allocated for FIB blocks
65  *      on this adapter.
66  */
67
68 void fib_map_free(struct aac_dev *dev)
69 {
70         pci_free_consistent(dev->pdev, sizeof(struct hw_fib) * AAC_NUM_FIB, dev->hw_fib_va, dev->hw_fib_pa);
71 }
72
73 /**
74  *      fib_setup       -       setup the fibs
75  *      @dev: Adapter to set up
76  *
77  *      Allocate the PCI space for the fibs, map it and then intialise the
78  *      fib area, the unmapped fib data and also the free list
79  */
80
81 int fib_setup(struct aac_dev * dev)
82 {
83         struct fib *fibptr;
84         struct hw_fib *hw_fib_va;
85         dma_addr_t hw_fib_pa;
86         int i;
87         
88         if(fib_map_alloc(dev)<0)
89                 return -ENOMEM;
90                 
91         hw_fib_va = dev->hw_fib_va;
92         hw_fib_pa = dev->hw_fib_pa;
93         memset(hw_fib_va, 0, sizeof(struct hw_fib) * AAC_NUM_FIB);
94         /*
95          *      Initialise the fibs
96          */
97         for (i = 0, fibptr = &dev->fibs[i]; i < AAC_NUM_FIB; i++, fibptr++) 
98         {
99                 fibptr->dev = dev;
100                 fibptr->hw_fib = hw_fib_va;
101                 fibptr->data = (void *) fibptr->hw_fib->data;
102                 fibptr->next = fibptr+1;        /* Forward chain the fibs */
103                 init_MUTEX_LOCKED(&fibptr->event_wait);
104                 spin_lock_init(&fibptr->event_lock);
105                 hw_fib_va->header.XferState = cpu_to_le32(0xffffffff);
106                 hw_fib_va->header.SenderSize = cpu_to_le16(sizeof(struct hw_fib));
107                 fibptr->hw_fib_pa = hw_fib_pa;
108                 hw_fib_va = (struct hw_fib *)((unsigned char *)hw_fib_va + sizeof(struct hw_fib));
109                 hw_fib_pa = hw_fib_pa + sizeof(struct hw_fib); 
110         }
111         /*
112          *      Add the fib chain to the free list
113          */
114         dev->fibs[AAC_NUM_FIB-1].next = NULL;
115         /*
116          *      Enable this to debug out of queue space
117          */
118         dev->free_fib = &dev->fibs[0];
119         return 0;
120 }
121
122 /**
123  *      fib_alloc       -       allocate a fib
124  *      @dev: Adapter to allocate the fib for
125  *
126  *      Allocate a fib from the adapter fib pool. If the pool is empty we
127  *      wait for fibs to become free.
128  */
129  
130 struct fib * fib_alloc(struct aac_dev *dev)
131 {
132         struct fib * fibptr;
133         unsigned long flags;
134         spin_lock_irqsave(&dev->fib_lock, flags);
135         fibptr = dev->free_fib; 
136         /* Cannot sleep here or you get hangs. Instead we did the
137            maths at compile time. */
138         if(!fibptr)
139                 BUG();
140         dev->free_fib = fibptr->next;
141         spin_unlock_irqrestore(&dev->fib_lock, flags);
142         /*
143          *      Set the proper node type code and node byte size
144          */
145         fibptr->type = FSAFS_NTC_FIB_CONTEXT;
146         fibptr->size = sizeof(struct fib);
147         /*
148          *      Null out fields that depend on being zero at the start of
149          *      each I/O
150          */
151         fibptr->hw_fib->header.XferState = cpu_to_le32(0);
152         fibptr->callback = NULL;
153         fibptr->callback_data = NULL;
154
155         return fibptr;
156 }
157
158 /**
159  *      fib_free        -       free a fib
160  *      @fibptr: fib to free up
161  *
162  *      Frees up a fib and places it on the appropriate queue
163  *      (either free or timed out)
164  */
165  
166 void fib_free(struct fib * fibptr)
167 {
168         unsigned long flags;
169
170         spin_lock_irqsave(&fibptr->dev->fib_lock, flags);
171         if (fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT) {
172                 aac_config.fib_timeouts++;
173                 fibptr->next = fibptr->dev->timeout_fib;
174                 fibptr->dev->timeout_fib = fibptr;
175         } else {
176                 if (fibptr->hw_fib->header.XferState != 0) {
177                         printk(KERN_WARNING "fib_free, XferState != 0, fibptr = 0x%p, XferState = 0x%x\n", 
178                                  (void*)fibptr, fibptr->hw_fib->header.XferState);
179                 }
180                 fibptr->next = fibptr->dev->free_fib;
181                 fibptr->dev->free_fib = fibptr;
182         }       
183         spin_unlock_irqrestore(&fibptr->dev->fib_lock, flags);
184 }
185
186 /**
187  *      fib_init        -       initialise a fib
188  *      @fibptr: The fib to initialize
189  *      
190  *      Set up the generic fib fields ready for use
191  */
192  
193 void fib_init(struct fib *fibptr)
194 {
195         struct hw_fib *hw_fib = fibptr->hw_fib;
196
197         hw_fib->header.StructType = FIB_MAGIC;
198         hw_fib->header.Size = cpu_to_le16(sizeof(struct hw_fib));
199         hw_fib->header.XferState = cpu_to_le32(HostOwned | FibInitialized | FibEmpty | FastResponseCapable);
200         hw_fib->header.SenderFibAddress = cpu_to_le32(fibptr->hw_fib_pa);
201         hw_fib->header.ReceiverFibAddress = cpu_to_le32(fibptr->hw_fib_pa);
202         hw_fib->header.SenderSize = cpu_to_le16(sizeof(struct hw_fib));
203 }
204
205 /**
206  *      fib_deallocate          -       deallocate a fib
207  *      @fibptr: fib to deallocate
208  *
209  *      Will deallocate and return to the free pool the FIB pointed to by the
210  *      caller.
211  */
212  
213 void fib_dealloc(struct fib * fibptr)
214 {
215         struct hw_fib *hw_fib = fibptr->hw_fib;
216         if(hw_fib->header.StructType != FIB_MAGIC) 
217                 BUG();
218         hw_fib->header.XferState = cpu_to_le32(0);        
219 }
220
221 /*
 222  *      Communication primitives define and support the queuing method we use to
 223  *      support host to adapter communication. All queue accesses happen through
 224  *      these routines and are the only routines which have a knowledge of
 225  *      how these queues are implemented.
226  */
227  
228 /**
229  *      aac_get_entry           -       get a queue entry
230  *      @dev: Adapter
231  *      @qid: Queue Number
232  *      @entry: Entry return
233  *      @index: Index return
234  *      @nonotify: notification control
235  *
236  *      With a priority the routine returns a queue entry if the queue has free entries. If the queue
237  *      is full(no free entries) than no entry is returned and the function returns 0 otherwise 1 is
238  *      returned.
239  */
240  
241 static int aac_get_entry (struct aac_dev * dev, u32 qid, struct aac_entry **entry, u32 * index, unsigned long *nonotify)
242 {
243         struct aac_queue * q;
244
245         /*
246          *      All of the queues wrap when they reach the end, so we check
247          *      to see if they have reached the end and if they have we just
248          *      set the index back to zero. This is a wrap. You could or off
249          *      the high bits in all updates but this is a bit faster I think.
250          */
251
252         q = &dev->queues->queue[qid];
253         
254         *index = le32_to_cpu(*(q->headers.producer));
255         if ((*index - 2) == le32_to_cpu(*(q->headers.consumer)))
256                         *nonotify = 1; 
257
258         if (qid == AdapHighCmdQueue) {
259                 if (*index >= ADAP_HIGH_CMD_ENTRIES)
260                         *index = 0;
261         } else if (qid == AdapNormCmdQueue) {
262                 if (*index >= ADAP_NORM_CMD_ENTRIES) 
263                         *index = 0; /* Wrap to front of the Producer Queue. */
264         }
265         else if (qid == AdapHighRespQueue) 
266         {
267                 if (*index >= ADAP_HIGH_RESP_ENTRIES)
268                         *index = 0;
269         }
270         else if (qid == AdapNormRespQueue) 
271         {
272                 if (*index >= ADAP_NORM_RESP_ENTRIES) 
273                         *index = 0; /* Wrap to front of the Producer Queue. */
274         }
275         else {
276                 printk("aacraid: invalid qid\n");
277                 BUG();
278         }
279
280         if ((*index + 1) == le32_to_cpu(*(q->headers.consumer))) { /* Queue is full */
281                 printk(KERN_WARNING "Queue %d full, %d outstanding.\n",
282                                 qid, q->numpending);
283                 return 0;
284         } else {
285                 *entry = q->base + *index;
286                 return 1;
287         }
288 }   
289
/**
 *	aac_queue_get		-	get the next free QE
 *	@dev: Adapter
 *	@index: Returned index
 *	@qid: Queue ident (one of the Adap*Queue ids)
 *	@hw_fib: Fib to associate with the queue entry
 *	@wait: Wait if queue full (currently unused: the loops below spin)
 *	@fibptr: Driver fib object to go with fib (NULL for response queues)
 *	@nonotify: Don't notify the adapter
 *
 *	Gets the next free QE off the requested priority adapter command
 *	queue and associates the Fib with the QE. The QE represented by
 *	index is ready to insert on the queue when this routine returns
 *	success.
 *
 *	LOCKING: q->lock is acquired here and deliberately NOT released on
 *	return; the caller must follow up with aac_insert_entry(), which
 *	publishes the entry and drops the lock (via q->SavedIrql).
 */

static int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_fib * hw_fib, int wait, struct fib * fibptr, unsigned long *nonotify)
{
	struct aac_entry * entry = NULL;
	int map = 0;		/* set when entry->addr must be the fib's bus address */
	struct aac_queue * q = &dev->queues->queue[qid];

	spin_lock_irqsave(q->lock, q->SavedIrql);

	if (qid == AdapHighCmdQueue || qid == AdapNormCmdQueue)
	{
		/*  if no entries wait for some if caller wants to */
		/* NOTE(review): this busy-waits with the spinlock held and
		 * interrupts off until an entry frees up - it relies on the
		 * adapter consuming entries independently of this CPU. */
		while (!aac_get_entry(dev, qid, &entry, index, nonotify))
		{
			printk(KERN_ERR "GetEntries failed\n");
		}
		/*
		 *	Setup queue entry with a command, status and fib mapped
		 */
		entry->size = cpu_to_le32(le16_to_cpu(hw_fib->header.Size));
		map = 1;
	}
	else if (qid == AdapHighRespQueue || qid == AdapNormRespQueue)
	{
		/* Same spin-until-free behaviour as above, silently. */
		while(!aac_get_entry(dev, qid, &entry, index, nonotify))
		{
			/* if no entries wait for some if caller wants to */
		}
		/*
		 *	Setup queue entry with command, status and fib mapped
		 */
		entry->size = cpu_to_le32(le16_to_cpu(hw_fib->header.Size));
		/* Echo the adapter's own fib address back so it can find its data. */
		entry->addr = hw_fib->header.SenderFibAddress;
			/* Restore adapters pointer to the FIB */
		hw_fib->header.ReceiverFibAddress = hw_fib->header.SenderFibAddress;	/* Let the adapter know where to find its data */
		map = 0;
	}
	/*
	 *	If map is set then we need to map the Fib and put its bus
	 *	address in the queue entry.
	 */
	if (map)
		entry->addr = fibptr->hw_fib_pa;
	return 0;
}
350
351
352 /**
353  *      aac_insert_entry        -       insert a queue entry
354  *      @dev: Adapter
355  *      @index: Index of entry to insert
356  *      @qid: Queue number
357  *      @nonotify: Suppress adapter notification
358  *
359  *      Gets the next free QE off the requested priorty adapter command
360  *      queue and associates the Fib with the QE. The QE represented by
361  *      index is ready to insert on the queue when this routine returns
362  *      success.
363  */
364  
365 static int aac_insert_entry(struct aac_dev * dev, u32 index, u32 qid, unsigned long nonotify) 
366 {
367         struct aac_queue * q = &dev->queues->queue[qid];
368
369         if(q == NULL)
370                 BUG();
371         *(q->headers.producer) = cpu_to_le32(index + 1);
372         spin_unlock_irqrestore(q->lock, q->SavedIrql);
373
374         if (qid == AdapHighCmdQueue ||
375             qid == AdapNormCmdQueue ||
376             qid == AdapHighRespQueue ||
377             qid == AdapNormRespQueue)
378         {
379                 if (!nonotify)
380                         aac_adapter_notify(dev, qid);
381         }
382         else
383                 printk("Suprise insert!\n");
384         return 0;
385 }
386
387 /*
388  *      Define the highest level of host to adapter communication routines. 
 389  *      These routines will support host to adapter FS communication. These 
 390  *      routines have no knowledge of the communication method used. This level
391  *      sends and receives FIBs. This level has no knowledge of how these FIBs
392  *      get passed back and forth.
393  */
394
395 /**
396  *      fib_send        -       send a fib to the adapter
397  *      @command: Command to send
398  *      @fibptr: The fib
399  *      @size: Size of fib data area
400  *      @priority: Priority of Fib
401  *      @wait: Async/sync select
402  *      @reply: True if a reply is wanted
403  *      @callback: Called with reply
404  *      @callback_data: Passed to callback
405  *
406  *      Sends the requested FIB to the adapter and optionally will wait for a
407  *      response FIB. If the caller does not wish to wait for a response than
408  *      an event to wait on must be supplied. This event will be set when a
409  *      response FIB is received from the adapter.
410  */
411  
412 int fib_send(u16 command, struct fib * fibptr, unsigned long size,  int priority, int wait, int reply, fib_callback callback, void * callback_data)
413 {
414         u32 index;
415         u32 qid;
416         struct aac_dev * dev = fibptr->dev;
417         unsigned long nointr = 0;
418         struct hw_fib * hw_fib = fibptr->hw_fib;
419         struct aac_queue * q;
420         unsigned long flags = 0;
421         if (!(le32_to_cpu(hw_fib->header.XferState) & HostOwned))
422                 return -EBUSY;
423         /*
424          *      There are 5 cases with the wait and reponse requested flags. 
425          *      The only invalid cases are if the caller requests to wait and
426          *      does not request a response and if the caller does not want a
427          *      response and the Fibis not allocated from pool. If a response
428          *      is not requesed the Fib will just be deallocaed by the DPC
429          *      routine when the response comes back from the adapter. No
430          *      further processing will be done besides deleting the Fib. We 
431          *      will have a debug mode where the adapter can notify the host
432          *      it had a problem and the host can log that fact.
433          */
434         if (wait && !reply) {
435                 return -EINVAL;
436         } else if (!wait && reply) {
437                 hw_fib->header.XferState |= cpu_to_le32(Async | ResponseExpected);
438                 FIB_COUNTER_INCREMENT(aac_config.AsyncSent);
439         } else if (!wait && !reply) {
440                 hw_fib->header.XferState |= cpu_to_le32(NoResponseExpected);
441                 FIB_COUNTER_INCREMENT(aac_config.NoResponseSent);
442         } else if (wait && reply) {
443                 hw_fib->header.XferState |= cpu_to_le32(ResponseExpected);
444                 FIB_COUNTER_INCREMENT(aac_config.NormalSent);
445         } 
446         /*
447          *      Map the fib into 32bits by using the fib number
448          */
449
450         hw_fib->header.SenderFibAddress = cpu_to_le32(((u32)(fibptr-dev->fibs)) << 1);
451         hw_fib->header.SenderData = (u32)(fibptr - dev->fibs);
452         /*
453          *      Set FIB state to indicate where it came from and if we want a
454          *      response from the adapter. Also load the command from the
455          *      caller.
456          *
457          *      Map the hw fib pointer as a 32bit value
458          */
459         hw_fib->header.Command = cpu_to_le16(command);
460         hw_fib->header.XferState |= cpu_to_le32(SentFromHost);
461         fibptr->hw_fib->header.Flags = 0;       /* 0 the flags field - internal only*/
462         /*
463          *      Set the size of the Fib we want to send to the adapter
464          */
465         hw_fib->header.Size = cpu_to_le16(sizeof(struct aac_fibhdr) + size);
466         if (le16_to_cpu(hw_fib->header.Size) > le16_to_cpu(hw_fib->header.SenderSize)) {
467                 return -EMSGSIZE;
468         }                
469         /*
470          *      Get a queue entry connect the FIB to it and send an notify
471          *      the adapter a command is ready.
472          */
473         if (priority == FsaHigh) {
474                 hw_fib->header.XferState |= cpu_to_le32(HighPriority);
475                 qid = AdapHighCmdQueue;
476         } else {
477                 hw_fib->header.XferState |= cpu_to_le32(NormalPriority);
478                 qid = AdapNormCmdQueue;
479         }
480         q = &dev->queues->queue[qid];
481
482         if(wait)
483                 spin_lock_irqsave(&fibptr->event_lock, flags);
484         if(aac_queue_get( dev, &index, qid, hw_fib, 1, fibptr, &nointr)<0)
485                 return -EWOULDBLOCK;
486         dprintk((KERN_DEBUG "fib_send: inserting a queue entry at index %d.\n",index));
487         dprintk((KERN_DEBUG "Fib contents:.\n"));
488         dprintk((KERN_DEBUG "  Command =               %d.\n", hw_fib->header.Command));
489         dprintk((KERN_DEBUG "  XferState  =            %x.\n", hw_fib->header.XferState));
490         dprintk((KERN_DEBUG "  hw_fib va being sent=%p\n",fibptr->hw_fib));
491         dprintk((KERN_DEBUG "  hw_fib pa being sent=%lx\n",(ulong)fibptr->hw_fib_pa));
492         dprintk((KERN_DEBUG "  fib being sent=%p\n",fibptr));
493         /*
494          *      Fill in the Callback and CallbackContext if we are not
495          *      going to wait.
496          */
497         if (!wait) {
498                 fibptr->callback = callback;
499                 fibptr->callback_data = callback_data;
500         }
501         FIB_COUNTER_INCREMENT(aac_config.FibsSent);
502         list_add_tail(&fibptr->queue, &q->pendingq);
503         q->numpending++;
504
505         fibptr->done = 0;
506         fibptr->flags = 0;
507
508         if(aac_insert_entry(dev, index, qid, (nointr & aac_config.irq_mod)) < 0)
509                 return -EWOULDBLOCK;
510         /*
511          *      If the caller wanted us to wait for response wait now. 
512          */
513     
514         if (wait) {
515                 spin_unlock_irqrestore(&fibptr->event_lock, flags);
516                 down(&fibptr->event_wait);
517                 if(fibptr->done == 0)
518                         BUG();
519                         
520                 if((fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT)){
521                         return -ETIMEDOUT;
522                 } else {
523                         return 0;
524                 }
525         }
526         /*
527          *      If the user does not want a response than return success otherwise
528          *      return pending
529          */
530         if (reply)
531                 return -EINPROGRESS;
532         else
533                 return 0;
534 }
535
536 /** 
537  *      aac_consumer_get        -       get the top of the queue
538  *      @dev: Adapter
539  *      @q: Queue
540  *      @entry: Return entry
541  *
542  *      Will return a pointer to the entry on the top of the queue requested that
543  *      we are a consumer of, and return the address of the queue entry. It does
544  *      not change the state of the queue. 
545  */
546
547 int aac_consumer_get(struct aac_dev * dev, struct aac_queue * q, struct aac_entry **entry)
548 {
549         u32 index;
550         int status;
551         if (le32_to_cpu(*q->headers.producer) == le32_to_cpu(*q->headers.consumer)) {
552                 status = 0;
553         } else {
554                 /*
555                  *      The consumer index must be wrapped if we have reached
556                  *      the end of the queue, else we just use the entry
557                  *      pointed to by the header index
558                  */
559                 if (le32_to_cpu(*q->headers.consumer) >= q->entries) 
560                         index = 0;              
561                 else
562                         index = le32_to_cpu(*q->headers.consumer);
563                 *entry = q->base + index;
564                 status = 1;
565         }
566         return(status);
567 }
568
569 int aac_consumer_avail(struct aac_dev *dev, struct aac_queue * q)
570 {
571         return (le32_to_cpu(*q->headers.producer) != le32_to_cpu(*q->headers.consumer));
572 }
573
574
575 /**
576  *      aac_consumer_free       -       free consumer entry
577  *      @dev: Adapter
578  *      @q: Queue
579  *      @qid: Queue ident
580  *
581  *      Frees up the current top of the queue we are a consumer of. If the
582  *      queue was full notify the producer that the queue is no longer full.
583  */
584
585 void aac_consumer_free(struct aac_dev * dev, struct aac_queue *q, u32 qid)
586 {
587         int wasfull = 0;
588         u32 notify;
589
590         if ((le32_to_cpu(*q->headers.producer)+1) == le32_to_cpu(*q->headers.consumer))
591                 wasfull = 1;
592         
593         if (le32_to_cpu(*q->headers.consumer) >= q->entries)
594                 *q->headers.consumer = cpu_to_le32(1);
595         else
596                 *q->headers.consumer = cpu_to_le32(le32_to_cpu(*q->headers.consumer)+1);
597         
598         if (wasfull) {
599                 switch (qid) {
600
601                 case HostNormCmdQueue:
602                         notify = HostNormCmdNotFull;
603                         break;
604                 case HostHighCmdQueue:
605                         notify = HostHighCmdNotFull;
606                         break;
607                 case HostNormRespQueue:
608                         notify = HostNormRespNotFull;
609                         break;
610                 case HostHighRespQueue:
611                         notify = HostHighRespNotFull;
612                         break;
613                 default:
614                         BUG();
615                         return;
616                 }
617                 aac_adapter_notify(dev, notify);
618         }
619 }        
620
621 /**
622  *      fib_adapter_complete    -       complete adapter issued fib
623  *      @fibptr: fib to complete
624  *      @size: size of fib
625  *
626  *      Will do all necessary work to complete a FIB that was sent from
627  *      the adapter.
628  */
629
630 int fib_adapter_complete(struct fib * fibptr, unsigned short size)
631 {
632         struct hw_fib * hw_fib = fibptr->hw_fib;
633         struct aac_dev * dev = fibptr->dev;
634         unsigned long nointr = 0;
635         if (le32_to_cpu(hw_fib->header.XferState) == 0)
636                 return 0;
637         /*
638          *      If we plan to do anything check the structure type first.
639          */ 
640         if ( hw_fib->header.StructType != FIB_MAGIC ) {
641                 return -EINVAL;
642         }
643         /*
644          *      This block handles the case where the adapter had sent us a
645          *      command and we have finished processing the command. We
646          *      call completeFib when we are done processing the command 
647          *      and want to send a response back to the adapter. This will 
648          *      send the completed cdb to the adapter.
649          */
650         if (hw_fib->header.XferState & cpu_to_le32(SentFromAdapter)) {
651                 hw_fib->header.XferState |= cpu_to_le32(HostProcessed);
652                 if (hw_fib->header.XferState & cpu_to_le32(HighPriority)) {
653                         u32 index;
654                         if (size) 
655                         {
656                                 size += sizeof(struct aac_fibhdr);
657                                 if (size > le16_to_cpu(hw_fib->header.SenderSize))
658                                         return -EMSGSIZE;
659                                 hw_fib->header.Size = cpu_to_le16(size);
660                         }
661                         if(aac_queue_get(dev, &index, AdapHighRespQueue, hw_fib, 1, NULL, &nointr) < 0) {
662                                 return -EWOULDBLOCK;
663                         }
664                         if (aac_insert_entry(dev, index, AdapHighRespQueue,  (nointr & (int)aac_config.irq_mod)) != 0) {
665                         }
666                 }
667                 else if (hw_fib->header.XferState & NormalPriority) 
668                 {
669                         u32 index;
670
671                         if (size) {
672                                 size += sizeof(struct aac_fibhdr);
673                                 if (size > le16_to_cpu(hw_fib->header.SenderSize)) 
674                                         return -EMSGSIZE;
675                                 hw_fib->header.Size = cpu_to_le16(size);
676                         }
677                         if (aac_queue_get(dev, &index, AdapNormRespQueue, hw_fib, 1, NULL, &nointr) < 0) 
678                                 return -EWOULDBLOCK;
679                         if (aac_insert_entry(dev, index, AdapNormRespQueue, (nointr & (int)aac_config.irq_mod)) != 0) 
680                         {
681                         }
682                 }
683         }
684         else 
685         {
686                 printk(KERN_WARNING "fib_adapter_complete: Unknown xferstate detected.\n");
687                 BUG();
688         }   
689         return 0;
690 }
691
692 /**
693  *      fib_complete    -       fib completion handler
694  *      @fib: FIB to complete
695  *
696  *      Will do all necessary work to complete a FIB.
697  */
698  
699 int fib_complete(struct fib * fibptr)
700 {
701         struct hw_fib * hw_fib = fibptr->hw_fib;
702
703         /*
704          *      Check for a fib which has already been completed
705          */
706
707         if (hw_fib->header.XferState == cpu_to_le32(0))
708                 return 0;
709         /*
710          *      If we plan to do anything check the structure type first.
711          */ 
712
713         if (hw_fib->header.StructType != FIB_MAGIC)
714                 return -EINVAL;
715         /*
716          *      This block completes a cdb which orginated on the host and we 
717          *      just need to deallocate the cdb or reinit it. At this point the
718          *      command is complete that we had sent to the adapter and this
719          *      cdb could be reused.
720          */
721         if((hw_fib->header.XferState & cpu_to_le32(SentFromHost)) &&
722                 (hw_fib->header.XferState & cpu_to_le32(AdapterProcessed)))
723         {
724                 fib_dealloc(fibptr);
725         }
726         else if(hw_fib->header.XferState & cpu_to_le32(SentFromHost))
727         {
728                 /*
729                  *      This handles the case when the host has aborted the I/O
730                  *      to the adapter because the adapter is not responding
731                  */
732                 fib_dealloc(fibptr);
733         } else if(hw_fib->header.XferState & cpu_to_le32(HostOwned)) {
734                 fib_dealloc(fibptr);
735         } else {
736                 BUG();
737         }   
738         return 0;
739 }
740
741 /**
742  *      aac_printf      -       handle printf from firmware
743  *      @dev: Adapter
744  *      @val: Message info
745  *
746  *      Print a message passed to us by the controller firmware on the
747  *      Adaptec board
748  */
749
750 void aac_printf(struct aac_dev *dev, u32 val)
751 {
752         int length = val & 0xffff;
753         int level = (val >> 16) & 0xffff;
754         char *cp = dev->printfbuf;
755         
756         /*
757          *      The size of the printfbuf is set in port.c
758          *      There is no variable or define for it
759          */
760         if (length > 255)
761                 length = 255;
762         if (cp[length] != 0)
763                 cp[length] = 0;
764         if (level == LOG_HIGH_ERROR)
765                 printk(KERN_WARNING "aacraid:%s", cp);
766         else
767                 printk(KERN_INFO "aacraid:%s", cp);
768         memset(cp, 0,  256);
769 }
770
771
/**
 *	aac_handle_aif		-	Handle a message from the firmware
 *	@dev: Which adapter this fib is from
 *	@fibptr: Pointer to fibptr from adapter
 *
 *	This routine handles a driver notify fib from the adapter and
 *	dispatches it to the appropriate routine for handling.
 */

static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
{
	struct hw_fib * hw_fib = fibptr->hw_fib;
	/*
	 * No real AIF processing is done here yet: we simply acknowledge
	 * the adapter by writing ST_OK into the fib's status word and
	 * completing it.  (An earlier revision rejected AIFs with
	 * ST_INVAL - the old comment claiming "Invalid parameter" was a
	 * stale leftover from that.)
	 */
	*(u32 *)hw_fib->data = cpu_to_le32(ST_OK);
	fib_adapter_complete(fibptr, sizeof(u32));
}
792
793 /**
794  *      aac_command_thread      -       command processing thread
795  *      @dev: Adapter to monitor
796  *
797  *      Waits on the commandready event in it's queue. When the event gets set
798  *      it will pull FIBs off it's queue. It will continue to pull FIBs off
799  *      until the queue is empty. When the queue is empty it will wait for
800  *      more FIBs.
801  */
802  
int aac_command_thread(struct aac_dev * dev)
{
	struct hw_fib *hw_fib, *hw_newfib;
	struct fib *fib, *newfib;
	struct aac_queue_block *queues = dev->queues;
	struct aac_fib_context *fibctx;
	unsigned long flags;
	DECLARE_WAITQUEUE(wait, current);

	/*
	 *	We can only have one thread per adapter for AIF's.
	 */
	if (dev->aif_thread)
		return -EINVAL;
	/*
	 *	Set up the name that will appear in 'ps'
	 *	stored in  task_struct.comm[16].
	 */
	daemonize("aacraid");
	allow_signal(SIGKILL);
	/*
	 *	Let the DPC know it has a place to send the AIF's to.
	 */
	dev->aif_thread = 1;
	add_wait_queue(&queues->queue[HostNormCmdQueue].cmdready, &wait);
	set_current_state(TASK_INTERRUPTIBLE);
	while(1)
	{
		/*
		 *	Drain every queued AIF.  The queue lock is dropped
		 *	while each fib is processed and re-taken before the
		 *	list is examined again.
		 */
		spin_lock_irqsave(queues->queue[HostNormCmdQueue].lock, flags);
		while(!list_empty(&(queues->queue[HostNormCmdQueue].cmdq))) {
			struct list_head *entry;
			struct aac_aifcmd * aifcmd;

			set_current_state(TASK_RUNNING);

			entry = queues->queue[HostNormCmdQueue].cmdq.next;
			list_del(entry);

			spin_unlock_irqrestore(queues->queue[HostNormCmdQueue].lock, flags);
			fib = list_entry(entry, struct fib, fiblink);
			/*
			 *	We will process the FIB here or pass it to a
			 *	worker thread that is TBD. We Really can't
			 *	do anything at this point since we don't have
			 *	anything defined for this thread to do.
			 */
			/*
			 *	Reinitialize the driver-side fib in place,
			 *	preserving only its hw_fib pointer.
			 */
			hw_fib = fib->hw_fib;
			memset(fib, 0, sizeof(struct fib));
			fib->type = FSAFS_NTC_FIB_CONTEXT;
			fib->size = sizeof( struct fib );
			fib->hw_fib = hw_fib;
			fib->data = hw_fib->data;
			fib->dev = dev;
			/*
			 *	We only handle AifRequest fibs from the adapter.
			 */
			aifcmd = (struct aac_aifcmd *) hw_fib->data;
			if (aifcmd->command == cpu_to_le32(AifCmdDriverNotify)) {
				/* Handle Driver Notify Events */
				aac_handle_aif(dev, fib);
				/*
				 * NOTE(review): aac_handle_aif() already stamps
				 * ST_OK and calls fib_adapter_complete() on this
				 * fib; the two lines below appear to complete the
				 * same fib a second time -- confirm this is
				 * intentional / harmless.
				 */
				*(u32 *)hw_fib->data = cpu_to_le32(ST_OK);
				fib_adapter_complete(fib, sizeof(u32));
			} else {
				struct list_head *entry;
				/* The u32 here is important and intended. We are using
				   32bit wrapping time to fit the adapter field */

				u32 time_now, time_last;
				unsigned long flagv;

				/* Sniff events */
				if (aifcmd->command == cpu_to_le32(AifCmdEventNotify))
					aac_handle_aif(dev, fib);

				time_now = jiffies/HZ;

				spin_lock_irqsave(&dev->fib_lock, flagv);
				entry = dev->fib_list.next;
				/*
				 * For each Context that is on the
				 * fibctxList, make a copy of the
				 * fib, and then set the event to wake up the
				 * thread that is waiting for it.
				 */
				while (entry != &dev->fib_list) {
					/*
					 * Extract the fibctx
					 */
					fibctx = list_entry(entry, struct aac_fib_context, next);
					/*
					 * Check if the queue is getting
					 * backlogged
					 */
					if (fibctx->count > 20)
					{
						/*
						 * It's *not* jiffies folks,
						 * but jiffies / HZ so do not
						 * panic ...
						 */
						time_last = fibctx->jiffies;
						/*
						 * Has it been > 2 minutes
						 * since the last read off
						 * the queue?  If so the
						 * reader is gone; drop the
						 * whole context.
						 */
						if ((time_now - time_last) > 120) {
							entry = entry->next;
							aac_close_fib_context(dev, fibctx);
							continue;
						}
					}
					/*
					 * Warning: no sleep allowed while
					 * holding spinlock (hence GFP_ATOMIC)
					 */
					hw_newfib = kmalloc(sizeof(struct hw_fib), GFP_ATOMIC);
					newfib = kmalloc(sizeof(struct fib), GFP_ATOMIC);
					if (newfib && hw_newfib) {
						/*
						 * Make the copy of the FIB
						 */
						memcpy(hw_newfib, hw_fib, sizeof(struct hw_fib));
						memcpy(newfib, fib, sizeof(struct fib));
						newfib->hw_fib = hw_newfib;
						/*
						 * Put the FIB onto the
						 * fibctx's fibs
						 */
						list_add_tail(&newfib->fiblink, &fibctx->fib_list);
						fibctx->count++;
						/*
						 * Set the event to wake up the
						 * thread that is waiting.
						 */
						up(&fibctx->wait_sem);
					} else {
						/* Allocation failed: this context simply
						 * misses the event; free whichever half
						 * succeeded. */
						printk(KERN_WARNING "aifd: didn't allocate NewFib.\n");
						if(newfib)
							kfree(newfib);
						if(hw_newfib)
							kfree(hw_newfib);
					}
					entry = entry->next;
				}
				/*
				 *	Set the status of this FIB
				 */
				*(u32 *)hw_fib->data = cpu_to_le32(ST_OK);
				fib_adapter_complete(fib, sizeof(u32));
				spin_unlock_irqrestore(&dev->fib_lock, flagv);
			}
			/*
			 *	Re-take the queue lock before freeing the fib
			 *	and re-testing the cmdq in the loop condition.
			 */
			spin_lock_irqsave(queues->queue[HostNormCmdQueue].lock, flags);
			kfree(fib);
		}
		/*
		 *	There are no more AIF's
		 */
		spin_unlock_irqrestore(queues->queue[HostNormCmdQueue].lock, flags);
		/*
		 *	Sleep (state was set to TASK_INTERRUPTIBLE before the
		 *	queue scan) until the DPC wakes us via cmdready, then
		 *	exit cleanly if a signal (SIGKILL) arrived.
		 */
		schedule();

		if(signal_pending(current))
			break;
		set_current_state(TASK_INTERRUPTIBLE);
	}
	remove_wait_queue(&queues->queue[HostNormCmdQueue].cmdready, &wait);
	dev->aif_thread = 0;
	complete_and_exit(&dev->aif_completion, 0);
}