VServer 1.9.2 (patch-2.6.8.1-vs1.9.2.diff)
[linux-2.6.git] / drivers / scsi / qla2xxx / qla_iocb.c
1 /******************************************************************************
2  *                  QLOGIC LINUX SOFTWARE
3  *
4  * QLogic ISP2x00 device driver for Linux 2.6.x
5  * Copyright (C) 2003-2004 QLogic Corporation
6  * (www.qlogic.com)
7  *
8  * This program is free software; you can redistribute it and/or modify it
9  * under the terms of the GNU General Public License as published by the
10  * Free Software Foundation; either version 2, or (at your option) any
11  * later version.
12  *
13  * This program is distributed in the hope that it will be useful, but
14  * WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
16  * General Public License for more details.
17  *
18  ******************************************************************************/
19
20 #include "qla_def.h"
21
22 #include <linux/blkdev.h>
23 #include <linux/delay.h>
24
25 static inline uint16_t qla2x00_get_cmd_direction(struct scsi_cmnd *cmd);
26 static inline cont_entry_t *qla2x00_prep_cont_type0_iocb(scsi_qla_host_t *);
27 static inline cont_a64_entry_t *qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *);
28
29 /**
30  * qla2x00_get_cmd_direction() - Determine control_flag data direction.
31  * @cmd: SCSI command
32  *
33  * Returns the proper CF_* direction based on CDB.
34  */
35 static inline uint16_t
36 qla2x00_get_cmd_direction(struct scsi_cmnd *cmd)
37 {
38         uint16_t cflags;
39
40         cflags = 0;
41
42         /* Set transfer direction */
43         if (cmd->sc_data_direction == DMA_TO_DEVICE)
44                 cflags = CF_WRITE;
45         else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
46                 cflags = CF_READ;
47         return (cflags);
48 }
49
/**
 * qla2x00_calc_iocbs_32() - Determine number of Command Type 2 and
 * Continuation Type 0 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * A Command Type 2 IOCB carries the first 3 DSDs; each Continuation
 * Type 0 IOCB carries 7 more.
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_32(uint16_t dsds)
{
	/* One command IOCB always; round the overflow up to whole
	 * continuation entries. */
	if (dsds <= 3)
		return (1);
	return ((uint16_t)(1 + ((dsds - 3) + 6) / 7));
}
71
/**
 * qla2x00_calc_iocbs_64() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * A Command Type 3 IOCB carries the first 2 (64bit) DSDs; each
 * Continuation Type 1 IOCB carries 5 more.
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_64(uint16_t dsds)
{
	/* One command IOCB always; round the overflow up to whole
	 * continuation entries. */
	if (dsds <= 2)
		return (1);
	return ((uint16_t)(1 + ((dsds - 2) + 4) / 5));
}
93
94 /**
95  * qla2x00_prep_cont_type0_iocb() - Initialize a Continuation Type 0 IOCB.
96  * @ha: HA context
97  *
98  * Returns a pointer to the Continuation Type 0 IOCB packet.
99  */
100 static inline cont_entry_t *
101 qla2x00_prep_cont_type0_iocb(scsi_qla_host_t *ha)
102 {
103         cont_entry_t *cont_pkt;
104
105         /* Adjust ring index. */
106         ha->req_ring_index++;
107         if (ha->req_ring_index == ha->request_q_length) {
108                 ha->req_ring_index = 0;
109                 ha->request_ring_ptr = ha->request_ring;
110         } else {
111                 ha->request_ring_ptr++;
112         }
113
114         cont_pkt = (cont_entry_t *)ha->request_ring_ptr;
115
116         /* Load packet defaults. */
117         *((uint32_t *)(&cont_pkt->entry_type)) =
118             __constant_cpu_to_le32(CONTINUE_TYPE);
119
120         return (cont_pkt);
121 }
122
123 /**
124  * qla2x00_prep_cont_type1_iocb() - Initialize a Continuation Type 1 IOCB.
125  * @ha: HA context
126  *
127  * Returns a pointer to the continuation type 1 IOCB packet.
128  */
129 static inline cont_a64_entry_t *
130 qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *ha)
131 {
132         cont_a64_entry_t *cont_pkt;
133
134         /* Adjust ring index. */
135         ha->req_ring_index++;
136         if (ha->req_ring_index == ha->request_q_length) {
137                 ha->req_ring_index = 0;
138                 ha->request_ring_ptr = ha->request_ring;
139         } else {
140                 ha->request_ring_ptr++;
141         }
142
143         cont_pkt = (cont_a64_entry_t *)ha->request_ring_ptr;
144
145         /* Load packet defaults. */
146         *((uint32_t *)(&cont_pkt->entry_type)) =
147             __constant_cpu_to_le32(CONTINUE_A64_TYPE);
148
149         return (cont_pkt);
150 }
151
/**
 * qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 2 IOCB
 * @tot_dsds: Total number of segments to transfer
 *
 * Fills in the 32bit data segment descriptors (address + length pairs)
 * of @cmd_pkt from the command's scatter/gather list or single buffer,
 * allocating Continuation Type 0 IOCBs from the request ring as needed.
 */
void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t	avail_dsds;
	uint32_t	*cur_dsd;
	scsi_qla_host_t	*ha;
	struct scsi_cmnd *cmd;

	cmd = sp->cmd;

	/* Update entry type to indicate Command Type 2 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_TYPE);

	/* No data transfer */
	if (cmd->request_bufflen == 0 || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return;
	}

	ha = sp->ha;

	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(cmd));

	/* Three DSDs are available in the Command Type 2 IOCB */
	avail_dsds = 3;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	if (cmd->use_sg != 0) {
		struct	scatterlist *cur_seg;
		struct	scatterlist *end_seg;

		/*
		 * Scatter/gather case: the list was DMA-mapped by the caller
		 * (see qla2x00_start_scsi()'s pci_map_sg()); walk @tot_dsds
		 * mapped entries.
		 */
		cur_seg = (struct scatterlist *)cmd->request_buffer;
		end_seg = cur_seg + tot_dsds;
		while (cur_seg < end_seg) {
			cont_entry_t	*cont_pkt;

			/* Allocate additional continuation packets? */
			if (avail_dsds == 0) {
				/*
				 * Seven DSDs are available in the Continuation
				 * Type 0 IOCB.
				 */
				cont_pkt = qla2x00_prep_cont_type0_iocb(ha);
				cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address;
				avail_dsds = 7;
			}

			/* 32bit DSD: bus address dword, then byte length. */
			*cur_dsd++ = cpu_to_le32(sg_dma_address(cur_seg));
			*cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg));
			avail_dsds--;

			cur_seg++;
		}
	} else {
		dma_addr_t	req_dma;
		struct page	*page;
		unsigned long	offset;

		/*
		 * Single-buffer case: map it here and stash the handle in
		 * sp->dma_handle (presumably unmapped on command completion
		 * — verify against the completion path).
		 */
		page = virt_to_page(cmd->request_buffer);
		offset = ((unsigned long)cmd->request_buffer & ~PAGE_MASK);
		req_dma = pci_map_page(ha->pdev, page, offset,
		    cmd->request_bufflen, cmd->sc_data_direction);

		sp->dma_handle = req_dma;

		*cur_dsd++ = cpu_to_le32(req_dma);
		*cur_dsd++ = cpu_to_le32(cmd->request_bufflen);
	}
}
231
/**
 * qla2x00_build_scsi_iocbs_64() - Build IOCB command utilizing 64bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 3 IOCB
 * @tot_dsds: Total number of segments to transfer
 *
 * Fills in 64bit data segment descriptors (address low dword, address
 * high dword, length) from the command's scatter/gather list or single
 * buffer, allocating Continuation Type 1 IOCBs from the request ring as
 * needed.
 */
void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t	avail_dsds;
	uint32_t	*cur_dsd;
	scsi_qla_host_t	*ha;
	struct scsi_cmnd *cmd;

	cmd = sp->cmd;

	/* Update entry type to indicate Command Type 3 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_A64_TYPE);

	/* No data transfer */
	if (cmd->request_bufflen == 0 || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return;
	}

	ha = sp->ha;

	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(cmd));

	/* Two DSDs are available in the Command Type 3 IOCB */
	avail_dsds = 2;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	if (cmd->use_sg != 0) {
		struct	scatterlist *cur_seg;
		struct	scatterlist *end_seg;

		/*
		 * Scatter/gather case: the list was DMA-mapped by the caller
		 * (see qla2x00_start_scsi()'s pci_map_sg()); walk @tot_dsds
		 * mapped entries.
		 */
		cur_seg = (struct scatterlist *)cmd->request_buffer;
		end_seg = cur_seg + tot_dsds;
		while (cur_seg < end_seg) {
			dma_addr_t	sle_dma;
			cont_a64_entry_t *cont_pkt;

			/* Allocate additional continuation packets? */
			if (avail_dsds == 0) {
				/*
				 * Five DSDs are available in the Continuation
				 * Type 1 IOCB.
				 */
				cont_pkt = qla2x00_prep_cont_type1_iocb(ha);
				/*
				 * NOTE(review): no '&' here, unlike the 32bit
				 * variant — presumably dseg_0_address is an
				 * array member of cont_a64_entry_t; confirm
				 * against qla_def.h.
				 */
				cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
				avail_dsds = 5;
			}

			/* 64bit DSD: low dword, high dword, byte length. */
			sle_dma = sg_dma_address(cur_seg);
			*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
			*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
			*cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg));
			avail_dsds--;

			cur_seg++;
		}
	} else {
		dma_addr_t	req_dma;
		struct page	*page;
		unsigned long	offset;

		/*
		 * Single-buffer case: map it here and stash the handle in
		 * sp->dma_handle (presumably unmapped on command completion
		 * — verify against the completion path).
		 */
		page = virt_to_page(cmd->request_buffer);
		offset = ((unsigned long)cmd->request_buffer & ~PAGE_MASK);
		req_dma = pci_map_page(ha->pdev, page, offset,
		    cmd->request_bufflen, cmd->sc_data_direction);

		sp->dma_handle = req_dma;

		*cur_dsd++ = cpu_to_le32(LSD(req_dma));
		*cur_dsd++ = cpu_to_le32(MSD(req_dma));
		*cur_dsd++ = cpu_to_le32(cmd->request_bufflen);
	}
}
315
316 /**
317  * qla2x00_start_scsi() - Send a SCSI command to the ISP
318  * @sp: command to send to the ISP
319  *
320  * Returns non-zero if a failure occured, else zero.
321  */
322 int
323 qla2x00_start_scsi(srb_t *sp)
324 {
325         int             ret;
326         unsigned long   flags;
327         scsi_qla_host_t *ha;
328         fc_lun_t        *fclun;
329         struct scsi_cmnd *cmd;
330         uint32_t        *clr_ptr;
331         uint32_t        index;
332         uint32_t        handle;
333         cmd_entry_t     *cmd_pkt;
334         uint32_t        timeout;
335         struct scatterlist *sg;
336         uint16_t        cnt;
337         uint16_t        req_cnt;
338         uint16_t        tot_dsds;
339         device_reg_t    *reg;
340
341         /* Setup device pointers. */
342         ret = 0;
343         fclun = sp->lun_queue->fclun;
344         ha = fclun->fcport->ha;
345         cmd = sp->cmd;
346         reg = ha->iobase;
347
348         /* Send marker if required */
349         if (ha->marker_needed != 0) {
350                 if (qla2x00_marker(ha, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
351                         return (QLA_FUNCTION_FAILED);
352                 }
353                 ha->marker_needed = 0;
354         }
355
356         /* Acquire ring specific lock */
357         spin_lock_irqsave(&ha->hardware_lock, flags);
358
359         /* Check for room in outstanding command list. */
360         handle = ha->current_outstanding_cmd;
361         for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
362                 handle++;
363                 if (handle == MAX_OUTSTANDING_COMMANDS)
364                         handle = 1;
365                 if (ha->outstanding_cmds[handle] == 0)
366                         break;
367         }
368         if (index == MAX_OUTSTANDING_COMMANDS)
369                 goto queuing_error;
370
371         /* Calculate the number of request entries needed. */
372         req_cnt = (ha->calc_request_entries)(cmd->request->nr_hw_segments);
373         if (ha->req_q_cnt < (req_cnt + 2)) {
374                 cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg));
375                 if (ha->req_ring_index < cnt)
376                         ha->req_q_cnt = cnt - ha->req_ring_index;
377                 else
378                         ha->req_q_cnt = ha->request_q_length -
379                             (ha->req_ring_index - cnt);
380         }
381         if (ha->req_q_cnt < (req_cnt + 2))
382                 goto queuing_error;
383
384         /* Finally, we have enough space, now perform mappings. */
385         tot_dsds = 0;
386         if (cmd->use_sg) {
387                 sg = (struct scatterlist *) cmd->request_buffer;
388                 tot_dsds = pci_map_sg(ha->pdev, sg, cmd->use_sg,
389                     cmd->sc_data_direction);
390                 if (tot_dsds == 0)
391                         goto queuing_error;
392         } else if (cmd->request_bufflen) {
393             tot_dsds++;
394         }
395         req_cnt = (ha->calc_request_entries)(tot_dsds);
396
397         /* Build command packet */
398         ha->current_outstanding_cmd = handle;
399         ha->outstanding_cmds[handle] = sp;
400         sp->ha = ha;
401         sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
402         ha->req_q_cnt -= req_cnt;
403
404         cmd_pkt = (cmd_entry_t *)ha->request_ring_ptr;
405         cmd_pkt->handle = handle;
406         /* Zero out remaining portion of packet. */
407         clr_ptr = (uint32_t *)cmd_pkt + 2;
408         memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
409         cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
410
411         /* Set target ID */
412         SET_TARGET_ID(ha, cmd_pkt->target, fclun->fcport->loop_id);
413
414         /* Set LUN number*/
415         cmd_pkt->lun = cpu_to_le16(fclun->lun);
416
417         /* Update tagged queuing modifier */
418         cmd_pkt->control_flags = __constant_cpu_to_le16(CF_SIMPLE_TAG);
419         if (cmd->device->tagged_supported) {
420                 switch (cmd->tag) {
421                 case HEAD_OF_QUEUE_TAG:
422                         cmd_pkt->control_flags =
423                             __constant_cpu_to_le16(CF_HEAD_TAG);
424                         break;
425                 case ORDERED_QUEUE_TAG:
426                         cmd_pkt->control_flags =
427                             __constant_cpu_to_le16(CF_ORDERED_TAG);
428                         break;
429                 }
430         }
431
432         /*
433          * Allocate at least 5 (+ QLA_CMD_TIMER_DELTA) seconds for RISC timeout.
434          */
435         timeout = (uint32_t)(cmd->timeout_per_command / HZ);
436         if (timeout > 65535)
437                 cmd_pkt->timeout = __constant_cpu_to_le16(0);
438         else if (timeout > 25)
439                 cmd_pkt->timeout = cpu_to_le16((uint16_t)timeout -
440                     (5 + QLA_CMD_TIMER_DELTA));
441         else
442                 cmd_pkt->timeout = cpu_to_le16((uint16_t)timeout);
443
444         /* Load SCSI command packet. */
445         memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
446         cmd_pkt->byte_count = cpu_to_le32((uint32_t)cmd->request_bufflen);
447
448         /* Build IOCB segments */
449         (ha->build_scsi_iocbs)(sp, cmd_pkt, tot_dsds);
450
451         /* Set total data segment count. */
452         cmd_pkt->entry_count = (uint8_t)req_cnt;
453         wmb();
454
455         /* Adjust ring index. */
456         ha->req_ring_index++;
457         if (ha->req_ring_index == ha->request_q_length) {
458                 ha->req_ring_index = 0;
459                 ha->request_ring_ptr = ha->request_ring;
460         } else
461                 ha->request_ring_ptr++;
462
463         ha->actthreads++;
464         ha->total_ios++;
465         sp->lun_queue->out_cnt++;
466         sp->flags |= SRB_DMA_VALID;
467         sp->state = SRB_ACTIVE_STATE;
468         sp->u_start = jiffies;
469
470         /* Set chip new ring index. */
471         WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), ha->req_ring_index);
472         RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg));     /* PCI Posting. */
473
474         spin_unlock_irqrestore(&ha->hardware_lock, flags);
475         return (QLA_SUCCESS);
476
477 queuing_error:
478         spin_unlock_irqrestore(&ha->hardware_lock, flags);
479
480         return (QLA_FUNCTION_FAILED);
481 }
482
483 /**
484  * qla2x00_marker() - Send a marker IOCB to the firmware.
485  * @ha: HA context
486  * @loop_id: loop ID
487  * @lun: LUN
488  * @type: marker modifier
489  *
490  * Can be called from both normal and interrupt context.
491  *
492  * Returns non-zero if a failure occured, else zero.
493  */
494 int
495 __qla2x00_marker(scsi_qla_host_t *ha, uint16_t loop_id, uint16_t lun,
496     uint8_t type)
497 {
498         mrk_entry_t     *pkt;
499
500         pkt = (mrk_entry_t *)qla2x00_req_pkt(ha);
501         if (pkt == NULL) {
502                 DEBUG2_3(printk("%s(): **** FAILED ****\n", __func__));
503
504                 return (QLA_FUNCTION_FAILED);
505         }
506
507         pkt->entry_type = MARKER_TYPE;
508         pkt->modifier = type;
509
510         if (type != MK_SYNC_ALL) {
511                 pkt->lun = cpu_to_le16(lun);
512                 SET_TARGET_ID(ha, pkt->target, loop_id);
513         }
514         wmb();
515
516         /* Issue command to ISP */
517         qla2x00_isp_cmd(ha);
518
519         return (QLA_SUCCESS);
520 }
521
522 int 
523 qla2x00_marker(scsi_qla_host_t *ha, uint16_t loop_id, uint16_t lun,
524     uint8_t type)
525 {
526         int ret;
527         unsigned long flags = 0;
528
529         spin_lock_irqsave(&ha->hardware_lock, flags);
530         ret = __qla2x00_marker(ha, loop_id, lun, type);
531         spin_unlock_irqrestore(&ha->hardware_lock, flags);
532
533         return (ret);
534 }
535
/**
 * qla2x00_req_pkt() - Retrieve a request packet from the request ring.
 * @ha: HA context
 *
 * Note: The caller must hold the hardware lock before calling this routine.
 *
 * Polls up to HZ iterations (~1 second of 2us delays) for ring space,
 * temporarily dropping the hardware lock while waiting.  The returned
 * packet is zeroed with sys_define and entry_count pre-set.
 *
 * Returns NULL if function failed, else, a pointer to the request packet.
 */
request_t *
qla2x00_req_pkt(scsi_qla_host_t *ha)
{
	device_reg_t	*reg = ha->iobase;
	request_t	*pkt = NULL;
	uint16_t	cnt;
	uint32_t	*dword_ptr;
	uint32_t	timer;
	uint16_t	req_cnt = 1;

	/* Wait 1 second for slot. */
	for (timer = HZ; timer; timer--) {
		if ((req_cnt + 2) >= ha->req_q_cnt) {
			/* Calculate number of free request entries. */
			cnt = qla2x00_debounce_register(ISP_REQ_Q_OUT(ha, reg));
			if  (ha->req_ring_index < cnt)
				ha->req_q_cnt = cnt - ha->req_ring_index;
			else
				ha->req_q_cnt = ha->request_q_length -
				    (ha->req_ring_index - cnt);
		}
		/* If room for request in request ring. */
		if ((req_cnt + 2) < ha->req_q_cnt) {
			ha->req_q_cnt--;
			pkt = ha->request_ring_ptr;

			/* Zero out packet. */
			dword_ptr = (uint32_t *)pkt;
			for (cnt = 0; cnt < REQUEST_ENTRY_SIZE / 4; cnt++)
				*dword_ptr++ = 0;

			/* Set system defined field. */
			pkt->sys_define = (uint8_t)ha->req_ring_index;

			/* Set entry count. */
			pkt->entry_count = 1;

			break;
		}

		/*
		 * Release ring specific lock.
		 * NOTE(review): released with spin_unlock() but reacquired
		 * with spin_lock_irq() — asymmetric with the
		 * spin_lock_irqsave() callers use; confirm the interrupt
		 * state handling is intentional.
		 */
		spin_unlock(&ha->hardware_lock);

		udelay(2);   /* 2 us */

		/* Check for pending interrupts. */
		/* During init we issue marker directly */
		if (!ha->marker_needed)
			qla2x00_poll(ha);

		spin_lock_irq(&ha->hardware_lock);
	}
	if (!pkt) {
		DEBUG2_3(printk("%s(): **** FAILED ****\n", __func__));
	}

	return (pkt);
}
602
/**
 * qla2x00_ms_req_pkt() - Retrieve a Management Server request packet from
 *                              the request ring.
 * @ha: HA context
 * @sp: pointer to handle post function call
 *
 * Note: The caller must hold the hardware lock before calling this routine.
 *
 * In addition to ring space, this variant also claims a free
 * outstanding-command slot, records @sp in it, and saves the handle in
 * the command's host_scribble before returning the zeroed packet.
 *
 * Returns NULL if function failed, else, a pointer to the request packet.
 */
request_t *
qla2x00_ms_req_pkt(scsi_qla_host_t *ha, srb_t  *sp)
{
	device_reg_t	*reg = ha->iobase;
	request_t	*pkt = NULL;
	uint16_t	cnt, i, index;
	uint32_t	*dword_ptr;
	uint32_t	timer;
	uint8_t		found = 0;
	uint16_t	req_cnt = 1;

	/* Wait 1 second for slot. */
	for (timer = HZ; timer; timer--) {
		if ((req_cnt + 2) >= ha->req_q_cnt) {
			/* Calculate number of free request entries. */
			cnt = qla2x00_debounce_register(ISP_REQ_Q_OUT(ha, reg));
			if (ha->req_ring_index < cnt) {
				ha->req_q_cnt = cnt - ha->req_ring_index;
			} else {
				ha->req_q_cnt = ha->request_q_length -
				    (ha->req_ring_index - cnt);
			}
		}

		/*
		 * Check for room in outstanding command list.  Handle 0 is
		 * never used; the scan wraps past the last handle issued.
		 */
		cnt = ha->current_outstanding_cmd;
		for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
			cnt++;
			if (cnt == MAX_OUTSTANDING_COMMANDS)
				cnt = 1;

			if (ha->outstanding_cmds[cnt] == 0) {
				found = 1;
				ha->current_outstanding_cmd = cnt;
				break;
			}
		}

		/* If room for request in request ring. */
		if (found && (req_cnt + 2) < ha->req_q_cnt) {
			pkt = ha->request_ring_ptr;

			/* Zero out packet. */
			dword_ptr = (uint32_t *)pkt;
			for (i = 0; i < REQUEST_ENTRY_SIZE / 4; i++ )
				*dword_ptr++ = 0;

			DEBUG5(printk("%s(): putting sp=%p in "
			    "outstanding_cmds[%x]\n",
			    __func__,
			    sp, cnt));

			ha->outstanding_cmds[cnt] = sp;

			/* save the handle */
			sp->cmd->host_scribble = (unsigned char *) (u_long) cnt;
			CMD_SP(sp->cmd) = (void *)sp;

			ha->req_q_cnt--;
			pkt->handle = (uint32_t)cnt;

			/* Set system defined field. */
			pkt->sys_define = (uint8_t)ha->req_ring_index;
			pkt->entry_status = 0;

			break;
		}

		/*
		 * Release ring specific lock.
		 * NOTE(review): released with spin_unlock() but reacquired
		 * with spin_lock_irq() — confirm the interrupt-state
		 * asymmetry is intentional (same pattern as
		 * qla2x00_req_pkt()).
		 */
		spin_unlock(&ha->hardware_lock);
		udelay(20);

		/* Check for pending interrupts. */
		qla2x00_poll(ha);

		spin_lock_irq(&ha->hardware_lock);
	}
	if (!pkt) {
		DEBUG2_3(printk("%s(): **** FAILED ****\n", __func__));
	}

	return (pkt);
}
696
697 /**
698  * qla2x00_isp_cmd() - Modify the request ring pointer.
699  * @ha: HA context
700  *
701  * Note: The caller must hold the hardware lock before calling this routine.
702  */
703 void
704 qla2x00_isp_cmd(scsi_qla_host_t *ha)
705 {
706         device_reg_t *reg = ha->iobase;
707
708         DEBUG5(printk("%s(): IOCB data:\n", __func__));
709         DEBUG5(qla2x00_dump_buffer(
710             (uint8_t *)ha->request_ring_ptr, REQUEST_ENTRY_SIZE));
711
712         /* Adjust ring index. */
713         ha->req_ring_index++;
714         if (ha->req_ring_index == ha->request_q_length) {
715                 ha->req_ring_index = 0;
716                 ha->request_ring_ptr = ha->request_ring;
717         } else
718                 ha->request_ring_ptr++;
719
720         /* Set chip new ring index. */
721         WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), ha->req_ring_index);
722         RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg));     /* PCI Posting. */
723 }