drivers/scsi/qla2xxx/qla_iocb.c
/******************************************************************************
 *                  QLOGIC LINUX SOFTWARE
 *
 * QLogic ISP2x00 device driver for Linux 2.6.x
 * Copyright (C) 2003-2004 QLogic Corporation
 * (www.qlogic.com)
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2, or (at your option) any
 * later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 ******************************************************************************/

#include "qla_os.h"
#include "qla_def.h"

static inline uint16_t qla2x00_get_cmd_direction(struct scsi_cmnd *cmd);
static inline cont_entry_t *qla2x00_prep_cont_type0_iocb(scsi_qla_host_t *);
static inline cont_a64_entry_t *qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *);

/**
 * qla2x00_get_cmd_direction() - Determine control_flag data direction.
 * @cmd: SCSI command
 *
 * Returns the proper CF_* direction based on CDB.
 */
static inline uint16_t
qla2x00_get_cmd_direction(struct scsi_cmnd *cmd)
{
	uint16_t cflags;

	cflags = 0;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE)
		cflags = CF_WRITE;
	else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
		cflags = CF_READ;
	else {
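		/*
		 * Direction is not explicitly DMA_TO_DEVICE or
		 * DMA_FROM_DEVICE (e.g. DMA_BIDIRECTIONAL), so infer it
		 * from the CDB opcode below, defaulting to a read.
		 */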
		switch (cmd->data_cmnd[0]) {
		case WRITE_6:
		case WRITE_10:
		case WRITE_12:
		case WRITE_BUFFER:
		case WRITE_LONG:
		case WRITE_SAME:
		case WRITE_VERIFY:
		case WRITE_VERIFY_12:
		case FORMAT_UNIT:
		case SEND_VOLUME_TAG:
		case MODE_SELECT:
		case SEND_DIAGNOSTIC:
		case MODE_SELECT_10:
			cflags = CF_WRITE;
			break;
		default:
			cflags = CF_READ;
			break;
		}
	}
	return (cflags);
}

/**
 * qla2x00_calc_iocbs_32() - Determine number of Command Type 2 and
 * Continuation Type 0 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_32(uint16_t dsds)
{
	uint16_t iocbs;

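	/*
	 * Illustrative example: the Command Type 2 IOCB itself holds up to
	 * three DSDs and each Continuation Type 0 IOCB holds seven more, so
	 * dsds = 10 yields 1 + (10 - 3) / 7 = 2 request entries.
	 */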
	iocbs = 1;
	if (dsds > 3) {
		iocbs += (dsds - 3) / 7;
		if ((dsds - 3) % 7)
			iocbs++;
	}
	return (iocbs);
}

/**
 * qla2x00_calc_iocbs_64() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_64(uint16_t dsds)
{
	uint16_t iocbs;

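	/*
	 * Illustrative example: the Command Type 3 IOCB itself holds up to
	 * two DSDs and each Continuation Type 1 IOCB holds five more, so
	 * dsds = 12 yields 1 + (12 - 2) / 5 = 3 request entries.
	 */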
	iocbs = 1;
	if (dsds > 2) {
		iocbs += (dsds - 2) / 5;
		if ((dsds - 2) % 5)
			iocbs++;
	}
	return (iocbs);
}

/**
 * qla2x00_prep_cont_type0_iocb() - Initialize a Continuation Type 0 IOCB.
 * @ha: HA context
 *
 * Returns a pointer to the Continuation Type 0 IOCB packet.
 */
static inline cont_entry_t *
qla2x00_prep_cont_type0_iocb(scsi_qla_host_t *ha)
{
	cont_entry_t *cont_pkt;

	/* Adjust ring index. */
	ha->req_ring_index++;
	if (ha->req_ring_index == ha->request_q_length) {
		ha->req_ring_index = 0;
		ha->request_ring_ptr = ha->request_ring;
	} else {
		ha->request_ring_ptr++;
	}

	cont_pkt = (cont_entry_t *)ha->request_ring_ptr;

	/* Load packet defaults. */
	*((uint32_t *)(&cont_pkt->entry_type)) =
	    __constant_cpu_to_le32(CONTINUE_TYPE);

	return (cont_pkt);
}

/**
 * qla2x00_prep_cont_type1_iocb() - Initialize a Continuation Type 1 IOCB.
 * @ha: HA context
 *
 * Returns a pointer to the continuation type 1 IOCB packet.
 */
static inline cont_a64_entry_t *
qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *ha)
{
	cont_a64_entry_t *cont_pkt;

	/* Adjust ring index. */
	ha->req_ring_index++;
	if (ha->req_ring_index == ha->request_q_length) {
		ha->req_ring_index = 0;
		ha->request_ring_ptr = ha->request_ring;
	} else {
		ha->request_ring_ptr++;
	}

	cont_pkt = (cont_a64_entry_t *)ha->request_ring_ptr;

	/* Load packet defaults. */
	*((uint32_t *)(&cont_pkt->entry_type)) =
	    __constant_cpu_to_le32(CONTINUE_A64_TYPE);

	return (cont_pkt);
}

/**
 * qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 2 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t	avail_dsds;
	uint32_t	*cur_dsd;
	scsi_qla_host_t	*ha;
	struct scsi_cmnd *cmd;

	cmd = sp->cmd;

	/* Update entry type to indicate Command Type 2 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_TYPE);

	/* No data transfer */
	if (cmd->request_bufflen == 0 || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return;
	}

	ha = sp->ha;

	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(cmd));

	/* Three DSDs are available in the Command Type 2 IOCB */
	avail_dsds = 3;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	if (cmd->use_sg != 0) {
		struct scatterlist *cur_seg;
		struct scatterlist *end_seg;

		cur_seg = (struct scatterlist *)cmd->request_buffer;
		end_seg = cur_seg + tot_dsds;
		while (cur_seg < end_seg) {
			cont_entry_t	*cont_pkt;

			/* Allocate additional continuation packets? */
			if (avail_dsds == 0) {
				/*
				 * Seven DSDs are available in the Continuation
				 * Type 0 IOCB.
				 */
				cont_pkt = qla2x00_prep_cont_type0_iocb(ha);
				cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address;
				avail_dsds = 7;
			}

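			/* Each 32-bit DSD is an (address, length) pair. */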
			*cur_dsd++ = cpu_to_le32(sg_dma_address(cur_seg));
			*cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg));
			avail_dsds--;

			cur_seg++;
		}
	} else {
		dma_addr_t	req_dma;
		struct page	*page;
		unsigned long	offset;

		page = virt_to_page(cmd->request_buffer);
		offset = ((unsigned long)cmd->request_buffer & ~PAGE_MASK);
		req_dma = pci_map_page(ha->pdev, page, offset,
		    cmd->request_bufflen, cmd->sc_data_direction);

		sp->dma_handle = req_dma;

		*cur_dsd++ = cpu_to_le32(req_dma);
		*cur_dsd++ = cpu_to_le32(cmd->request_bufflen);
	}
}

/**
 * qla2x00_build_scsi_iocbs_64() - Build IOCB command utilizing 64bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 3 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t	avail_dsds;
	uint32_t	*cur_dsd;
	scsi_qla_host_t	*ha;
	struct scsi_cmnd *cmd;

	cmd = sp->cmd;

	/* Update entry type to indicate Command Type 3 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_A64_TYPE);

	/* No data transfer */
	if (cmd->request_bufflen == 0 || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return;
	}

	ha = sp->ha;

	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(cmd));

	/* Two DSDs are available in the Command Type 3 IOCB */
	avail_dsds = 2;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	if (cmd->use_sg != 0) {
		struct scatterlist *cur_seg;
		struct scatterlist *end_seg;

		cur_seg = (struct scatterlist *)cmd->request_buffer;
		end_seg = cur_seg + tot_dsds;
		while (cur_seg < end_seg) {
			dma_addr_t	sle_dma;
			cont_a64_entry_t *cont_pkt;

			/* Allocate additional continuation packets? */
			if (avail_dsds == 0) {
				/*
				 * Five DSDs are available in the Continuation
				 * Type 1 IOCB.
				 */
				cont_pkt = qla2x00_prep_cont_type1_iocb(ha);
				cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
				avail_dsds = 5;
			}

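			/*
			 * Each 64-bit DSD is three words: address low,
			 * address high, then the segment length.
			 */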
			sle_dma = sg_dma_address(cur_seg);
			*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
			*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
			*cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg));
			avail_dsds--;

			cur_seg++;
		}
	} else {
		dma_addr_t	req_dma;
		struct page	*page;
		unsigned long	offset;

		page = virt_to_page(cmd->request_buffer);
		offset = ((unsigned long)cmd->request_buffer & ~PAGE_MASK);
		req_dma = pci_map_page(ha->pdev, page, offset,
		    cmd->request_bufflen, cmd->sc_data_direction);

		sp->dma_handle = req_dma;

		*cur_dsd++ = cpu_to_le32(LSD(req_dma));
		*cur_dsd++ = cpu_to_le32(MSD(req_dma));
		*cur_dsd++ = cpu_to_le32(cmd->request_bufflen);
	}
}

/**
 * qla2x00_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla2x00_start_scsi(srb_t *sp)
{
	int		ret;
	unsigned long	flags;
	scsi_qla_host_t	*ha;
	fc_lun_t	*fclun;
	struct scsi_cmnd *cmd;
	uint32_t	*clr_ptr;
	uint32_t	index;
	uint32_t	handle;
	uint16_t	cnt;
	cmd_entry_t	*cmd_pkt;
	uint32_t	timeout;
	struct scatterlist *sg;

	device_reg_t	*reg;

	/* Setup device pointers. */
	ret = 0;
	fclun = sp->lun_queue->fclun;
	ha = fclun->fcport->ha;
	cmd = sp->cmd;
	reg = ha->iobase;

	/* Send marker if required */
	if (ha->marker_needed != 0) {
		if (qla2x00_marker(ha, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
			return (QLA_FUNCTION_FAILED);
		}
		ha->marker_needed = 0;
	}

	/* Calculate number of segments and entries required. */
	if (sp->req_cnt == 0) {
		sp->tot_dsds = 0;
		if (cmd->use_sg) {
			sg = (struct scatterlist *) cmd->request_buffer;
			sp->tot_dsds = pci_map_sg(ha->pdev, sg, cmd->use_sg,
			    cmd->sc_data_direction);
		} else if (cmd->request_bufflen) {
			sp->tot_dsds++;
		}
		sp->req_cnt = (ha->calc_request_entries)(sp->tot_dsds);
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	if (ha->req_q_cnt < (sp->req_cnt + 2)) {
		/* Calculate number of free request entries */
		cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg));
		if (ha->req_ring_index < cnt)
			ha->req_q_cnt = cnt - ha->req_ring_index;
		else
			ha->req_q_cnt = ha->request_q_length -
			    (ha->req_ring_index - cnt);
	}
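	/*
	 * ha->req_q_cnt now holds a (possibly stale, hence conservative)
	 * count of free slots between the driver's in-pointer
	 * (req_ring_index) and the out-pointer last read from the ISP.
	 */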

	/* If no room for request in request ring */
	if (ha->req_q_cnt < (sp->req_cnt + 2)) {
		DEBUG5(printk("scsi(%ld): in-ptr=%x req_q_cnt=%x "
		    "tot_dsds=%x.\n",
		    ha->host_no, ha->req_ring_index, ha->req_q_cnt,
		    sp->tot_dsds));

		goto queuing_error;
	}

	/* Check for room in outstanding command list. */
	handle = ha->current_outstanding_cmd;
	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
		handle++;
		if (handle == MAX_OUTSTANDING_COMMANDS)
			handle = 1;
		if (ha->outstanding_cmds[handle] == 0) {
			ha->current_outstanding_cmd = handle;
			break;
		}
	}
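	/*
	 * Note: handles wrap within 1..MAX_OUTSTANDING_COMMANDS - 1, so
	 * slot 0 is never assigned here and a NULL entry marks a free slot.
	 */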
	if (index == MAX_OUTSTANDING_COMMANDS) {
		DEBUG5(printk("scsi(%ld): Unable to queue command -- NO ROOM "
		    "IN OUTSTANDING ARRAY (req_q_cnt=%x).\n",
		    ha->host_no, ha->req_q_cnt));
		goto queuing_error;
	}

	/* Build command packet */
	ha->outstanding_cmds[handle] = sp;
	sp->ha = ha;
	sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	ha->req_q_cnt -= sp->req_cnt;

	cmd_pkt = (cmd_entry_t *)ha->request_ring_ptr;
	cmd_pkt->handle = handle;
	/* Zero out remaining portion of packet. */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
	cmd_pkt->dseg_count = cpu_to_le16(sp->tot_dsds);

	/* Set target ID */
	SET_TARGET_ID(ha, cmd_pkt->target, fclun->fcport->loop_id);

	/* Set LUN number */
	cmd_pkt->lun = cpu_to_le16(fclun->lun);

	/* Update tagged queuing modifier */
	cmd_pkt->control_flags = __constant_cpu_to_le16(CF_SIMPLE_TAG);
	if (cmd->device->tagged_supported) {
		switch (cmd->tag) {
		case HEAD_OF_QUEUE_TAG:
			cmd_pkt->control_flags =
			    __constant_cpu_to_le16(CF_HEAD_TAG);
			break;
		case ORDERED_QUEUE_TAG:
			cmd_pkt->control_flags =
			    __constant_cpu_to_le16(CF_ORDERED_TAG);
			break;
		}
	}

	/*
	 * Allocate at least 5 (+ QLA_CMD_TIMER_DELTA) seconds for RISC timeout.
	 */
	timeout = (uint32_t)(cmd->timeout_per_command / HZ);
	if (timeout > 65535)
		cmd_pkt->timeout = __constant_cpu_to_le16(0);
	else if (timeout > 25)
		cmd_pkt->timeout = cpu_to_le16((uint16_t)timeout -
		    (5 + QLA_CMD_TIMER_DELTA));
	else
		cmd_pkt->timeout = cpu_to_le16((uint16_t)timeout);

	/* Load SCSI command packet. */
	memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
	cmd_pkt->byte_count = cpu_to_le32((uint32_t)cmd->request_bufflen);

	/* Build IOCB segments */
	(ha->build_scsi_iocbs)(sp, cmd_pkt, sp->tot_dsds);

	/* Set total data segment count. */
	cmd_pkt->entry_count = (uint8_t)sp->req_cnt;

	/* Adjust ring index. */
	ha->req_ring_index++;
	if (ha->req_ring_index == ha->request_q_length) {
		ha->req_ring_index = 0;
		ha->request_ring_ptr = ha->request_ring;
	} else
		ha->request_ring_ptr++;

	ha->actthreads++;
	ha->total_ios++;
	sp->lun_queue->out_cnt++;
	sp->flags |= SRB_DMA_VALID;
	sp->state = SRB_ACTIVE_STATE;
	sp->u_start = jiffies;

	/* Set chip new ring index. */
	WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), ha->req_ring_index);
	RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg));	/* PCI Posting. */

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return (QLA_SUCCESS);

queuing_error:
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return (QLA_FUNCTION_FAILED);
}

/**
 * qla2x00_marker() - Send a marker IOCB to the firmware.
 * @ha: HA context
 * @loop_id: loop ID
 * @lun: LUN
 * @type: marker modifier
 *
 * Can be called from both normal and interrupt context.
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
__qla2x00_marker(scsi_qla_host_t *ha, uint16_t loop_id, uint16_t lun,
    uint8_t type)
{
	mrk_entry_t	*pkt;

	pkt = (mrk_entry_t *)qla2x00_req_pkt(ha);
	if (pkt == NULL) {
		DEBUG2_3(printk("%s(): **** FAILED ****\n", __func__));

		return (QLA_FUNCTION_FAILED);
	}

	pkt->entry_type = MARKER_TYPE;
	pkt->modifier = type;

	if (type != MK_SYNC_ALL) {
		pkt->lun = cpu_to_le16(lun);
		SET_TARGET_ID(ha, pkt->target, loop_id);
	}

	/* Issue command to ISP */
	qla2x00_isp_cmd(ha);

	return (QLA_SUCCESS);
}

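/*
 * qla2x00_marker() below simply wraps __qla2x00_marker() with
 * ha->hardware_lock; callers that already hold the lock should use the
 * __ variant directly.
 */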
int
qla2x00_marker(scsi_qla_host_t *ha, uint16_t loop_id, uint16_t lun,
    uint8_t type)
{
	int ret;
	unsigned long flags = 0;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	ret = __qla2x00_marker(ha, loop_id, lun, type);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return (ret);
}

/**
 * qla2x00_req_pkt() - Retrieve a request packet from the request ring.
 * @ha: HA context
 *
 * Note: The caller must hold the hardware lock before calling this routine.
 *
 * Returns NULL if function failed, else, a pointer to the request packet.
 */
request_t *
qla2x00_req_pkt(scsi_qla_host_t *ha)
{
	device_reg_t	*reg = ha->iobase;
	request_t	*pkt = NULL;
	uint16_t	cnt;
	uint32_t	*dword_ptr;
	uint32_t	timer;
	uint16_t	req_cnt = 1;

	/* Wait 1 second for slot. */
	for (timer = HZ; timer; timer--) {
		if ((req_cnt + 2) >= ha->req_q_cnt) {
			/* Calculate number of free request entries. */
			cnt = qla2x00_debounce_register(ISP_REQ_Q_OUT(ha, reg));
			if (ha->req_ring_index < cnt)
				ha->req_q_cnt = cnt - ha->req_ring_index;
			else
				ha->req_q_cnt = ha->request_q_length -
				    (ha->req_ring_index - cnt);
		}
		/* If room for request in request ring. */
		if ((req_cnt + 2) < ha->req_q_cnt) {
			ha->req_q_cnt--;
			pkt = ha->request_ring_ptr;

			/* Zero out packet. */
			dword_ptr = (uint32_t *)pkt;
			for (cnt = 0; cnt < REQUEST_ENTRY_SIZE / 4; cnt++)
				*dword_ptr++ = 0;

			/* Set system defined field. */
			pkt->sys_define = (uint8_t)ha->req_ring_index;

			/* Set entry count. */
			pkt->entry_count = 1;

			break;
		}

		/* Release ring specific lock */
		spin_unlock(&ha->hardware_lock);

		udelay(2);   /* 2 us */

		/* Check for pending interrupts. */
		/* During init we issue marker directly */
		if (!ha->marker_needed)
			qla2x00_poll(ha);

		spin_lock_irq(&ha->hardware_lock);
	}
	if (!pkt) {
		DEBUG2_3(printk("%s(): **** FAILED ****\n", __func__));
	}

	return (pkt);
}

/**
 * qla2x00_ms_req_pkt() - Retrieve a Management Server request packet from
 *			  the request ring.
 * @ha: HA context
 * @sp: pointer to handle post function call
 *
 * Note: The caller must hold the hardware lock before calling this routine.
 *
 * Returns NULL if function failed, else, a pointer to the request packet.
 */
request_t *
qla2x00_ms_req_pkt(scsi_qla_host_t *ha, srb_t *sp)
{
	device_reg_t	*reg = ha->iobase;
	request_t	*pkt = NULL;
	uint16_t	cnt, i, index;
	uint32_t	*dword_ptr;
	uint32_t	timer;
	uint8_t		found = 0;
	uint16_t	req_cnt = 1;

	/* Wait 1 second for slot. */
	for (timer = HZ; timer; timer--) {
		if ((req_cnt + 2) >= ha->req_q_cnt) {
			/* Calculate number of free request entries. */
			cnt = qla2x00_debounce_register(ISP_REQ_Q_OUT(ha, reg));
			if (ha->req_ring_index < cnt) {
				ha->req_q_cnt = cnt - ha->req_ring_index;
			} else {
				ha->req_q_cnt = ha->request_q_length -
				    (ha->req_ring_index - cnt);
			}
		}

		/* Check for room in outstanding command list. */
		cnt = ha->current_outstanding_cmd;
		for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
			cnt++;
			if (cnt == MAX_OUTSTANDING_COMMANDS)
				cnt = 1;

			if (ha->outstanding_cmds[cnt] == 0) {
				found = 1;
				ha->current_outstanding_cmd = cnt;
				break;
			}
		}

		/* If room for request in request ring. */
		if (found && (req_cnt + 2) < ha->req_q_cnt) {
			pkt = ha->request_ring_ptr;

			/* Zero out packet. */
			dword_ptr = (uint32_t *)pkt;
			for (i = 0; i < REQUEST_ENTRY_SIZE / 4; i++)
				*dword_ptr++ = 0;

			DEBUG5(printk("%s(): putting sp=%p in "
			    "outstanding_cmds[%x]\n",
			    __func__,
			    sp, cnt));

			ha->outstanding_cmds[cnt] = sp;

			/* save the handle */
			sp->cmd->host_scribble = (unsigned char *) (u_long) cnt;
			CMD_SP(sp->cmd) = (void *)sp;

			ha->req_q_cnt--;
			pkt->handle = (uint32_t)cnt;

			/* Set system defined field. */
			pkt->sys_define = (uint8_t)ha->req_ring_index;
			pkt->entry_status = 0;

			break;
		}

		/* Release ring specific lock */
		spin_unlock(&ha->hardware_lock);
		udelay(20);

		/* Check for pending interrupts. */
		qla2x00_poll(ha);

		spin_lock_irq(&ha->hardware_lock);
	}
	if (!pkt) {
		DEBUG2_3(printk("%s(): **** FAILED ****\n", __func__));
	}

	return (pkt);
}

/**
 * qla2x00_isp_cmd() - Modify the request ring pointer.
 * @ha: HA context
 *
 * Note: The caller must hold the hardware lock before calling this routine.
 */
void
qla2x00_isp_cmd(scsi_qla_host_t *ha)
{
	device_reg_t *reg = ha->iobase;

	DEBUG5(printk("%s(): IOCB data:\n", __func__));
	DEBUG5(qla2x00_dump_buffer(
	    (uint8_t *)ha->request_ring_ptr, REQUEST_ENTRY_SIZE));

	/* Adjust ring index. */
	ha->req_ring_index++;
	if (ha->req_ring_index == ha->request_q_length) {
		ha->req_ring_index = 0;
		ha->request_ring_ptr = ha->request_ring;
	} else
		ha->request_ring_ptr++;

	/* Set chip new ring index. */
	WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), ha->req_ring_index);
	RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg));	/* PCI Posting. */
}