2 * ipr.c -- driver for IBM Power Linux RAID adapters
4 * Written By: Brian King, IBM Corporation
6 * Copyright (C) 2003, 2004 IBM Corporation
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
27 * This driver is used to control the following SCSI adapters:
29 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
31 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
32 * PCI-X Dual Channel Ultra 320 SCSI Adapter
33 * PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
34 * Embedded SCSI adapter on p615 and p655 systems
36 * Supported Hardware Features:
37 * - Ultra 320 SCSI controller
38 * - PCI-X host interface
39 * - Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
40 * - Non-Volatile Write Cache
41 * - Supports attachment of non-RAID disks, tape, and optical devices
42 * - RAID Levels 0, 5, 10
44 * - Background Parity Checking
45 * - Background Data Scrubbing
46 * - Ability to increase the capacity of an existing RAID 5 disk array
50 * - Tagged command queuing
51 * - Adapter microcode download
53 * - SCSI device hot plug
57 #include <linux/config.h>
59 #include <linux/init.h>
60 #include <linux/types.h>
61 #include <linux/errno.h>
62 #include <linux/kernel.h>
63 #include <linux/ioport.h>
64 #include <linux/delay.h>
65 #include <linux/pci.h>
66 #include <linux/wait.h>
67 #include <linux/spinlock.h>
68 #include <linux/sched.h>
69 #include <linux/interrupt.h>
70 #include <linux/blkdev.h>
71 #include <linux/firmware.h>
72 #include <linux/module.h>
73 #include <linux/moduleparam.h>
76 #include <asm/processor.h>
77 #include <scsi/scsi.h>
78 #include <scsi/scsi_host.h>
79 #include <scsi/scsi_tcq.h>
80 #include <scsi/scsi_eh.h>
81 #include <scsi/scsi_cmnd.h>
82 #include <scsi/scsi_request.h>
/*
 * Driver-global state.
 * NOTE(review): the leading numbers on these lines appear to be leaked
 * line numbers from a mangled paste; verify against upstream ipr.c.
 */
/* List of all adapters managed by this driver; guarded by ipr_driver_lock (presumably -- confirm). */
88 static struct list_head ipr_ioa_head = LIST_HEAD_INIT(ipr_ioa_head);
/* Error-log verbosity; settable via the "log_level" module parameter below. */
89 static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
/* Max SCSI bus speed index into ipr_max_bus_speeds; "max_speed" module parameter. */
90 static unsigned int ipr_max_speed = 1;
/* Non-zero allows unsupported configurations; "testmode" module parameter. */
91 static int ipr_testmode = 0;
/* NOTE(review): SPIN_LOCK_UNLOCKED is the pre-DEFINE_SPINLOCK() initializer idiom of this era. */
92 static spinlock_t ipr_driver_lock = SPIN_LOCK_UNLOCKED;
94 /* This table describes the differences between DMA controller chips */
/*
 * NOTE(review): the entries below appear truncated by a bad paste -- the
 * per-entry braces, mailbox offsets, and any leading fields are missing.
 * Reconstruct from upstream ipr.c before building; the register offsets
 * shown (two chip variants) should be verified against the hardware spec.
 */
95 static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
98 .cache_line_size = 0x20,
100 .set_interrupt_mask_reg = 0x0022C,
101 .clr_interrupt_mask_reg = 0x00230,
102 .sense_interrupt_mask_reg = 0x0022C,
103 .clr_interrupt_reg = 0x00228,
104 .sense_interrupt_reg = 0x00224,
105 .ioarrin_reg = 0x00404,
106 .sense_uproc_interrupt_reg = 0x00214,
107 .set_uproc_interrupt_reg = 0x00214,
108 .clr_uproc_interrupt_reg = 0x00218
113 .cache_line_size = 0x20,
115 .set_interrupt_mask_reg = 0x00288,
116 .clr_interrupt_mask_reg = 0x0028C,
117 .sense_interrupt_mask_reg = 0x00288,
118 .clr_interrupt_reg = 0x00284,
119 .sense_interrupt_reg = 0x00280,
120 .ioarrin_reg = 0x00504,
121 .sense_uproc_interrupt_reg = 0x00290,
122 .set_uproc_interrupt_reg = 0x00290,
123 .clr_uproc_interrupt_reg = 0x00294
/* Bus speed lookup indexed by the max_speed module parameter (0-2). */
128 static int ipr_max_bus_speeds [] = {
129 IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
/* Module metadata and parameters (max_speed, log_level, testmode). */
132 MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
133 MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
134 module_param_named(max_speed, ipr_max_speed, uint, 0);
135 MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
136 module_param_named(log_level, ipr_log_level, uint, 0);
137 MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
138 module_param_named(testmode, ipr_testmode, int, 0);
139 MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
140 MODULE_LICENSE("GPL");
141 MODULE_VERSION(IPR_DRIVER_VERSION);
/*
 * Human-readable strings for GPDD (generic peripheral device) command
 * end states and bus phases.
 * NOTE(review): both arrays appear truncated by a bad paste -- entries and
 * closing braces are missing; restore from upstream ipr.c.
 */
143 static const char *ipr_gpdd_dev_end_states[] = {
145 "Terminated by host",
146 "Terminated by device reset",
147 "Terminated by bus reset",
149 "Command not started"
152 static const char *ipr_gpdd_dev_bus_phases[] = {
166 /* A constant array of IOASCs/URCs/Error Messages */
/*
 * NOTE(review): every entry below has lost its leading fields (the IOASC
 * code and log_hcam/log_vpath flags) to a bad paste -- only the message
 * strings remain. Entry 0 is the catch-all used by ipr_get_error() for
 * unknown IOASCs. Restore the numeric fields from upstream ipr.c.
 */
168 struct ipr_error_table_t ipr_error_table[] = {
170 "8155: An unknown error was received"},
172 "Soft underlength error"},
174 "Command to be cancelled not found"},
176 "Qualified success"},
178 "FFFE: Soft device bus error recovered by the IOA"},
180 "FFF9: Device sector reassign successful"},
182 "FFF7: Media error recovered by device rewrite procedures"},
184 "7001: IOA sector reassignment successful"},
186 "FFF9: Soft media error. Sector reassignment recommended"},
188 "FFF7: Media error recovered by IOA rewrite procedures"},
190 "FF3D: Soft PCI bus error recovered by the IOA"},
192 "FFF6: Device hardware error recovered by the IOA"},
194 "FFF6: Device hardware error recovered by the device"},
196 "FF3D: Soft IOA error recovered by the IOA"},
198 "FFFA: Undefined device response recovered by the IOA"},
200 "FFF6: Device bus error, message or command phase"},
202 "FFF6: Failure prediction threshold exceeded"},
204 "8009: Impending cache battery pack failure"},
206 "34FF: Disk device format in progress"},
208 "Synchronization required"},
210 "No ready, IOA shutdown"},
212 "3020: Storage subsystem configuration error"},
214 "FFF5: Medium error, data unreadable, recommend reassign"},
216 "7000: Medium error, data unreadable, do not reassign"},
218 "FFF3: Disk media format bad"},
220 "3002: Addressed device failed to respond to selection"},
222 "3100: Device bus error"},
224 "3109: IOA timed out a device command"},
226 "3120: SCSI bus is not operational"},
228 "9000: IOA reserved area data check"},
230 "9001: IOA reserved area invalid data pattern"},
232 "9002: IOA reserved area LRC error"},
234 "102E: Out of alternate sectors for disk storage"},
236 "FFF4: Data transfer underlength error"},
238 "FFF4: Data transfer overlength error"},
240 "3400: Logical unit failure"},
242 "FFF4: Device microcode is corrupt"},
244 "8150: PCI bus error"},
246 "Unsupported device bus message received"},
248 "FFF4: Disk device problem"},
250 "8150: Permanent IOA failure"},
252 "3010: Disk device returned wrong response to IOA"},
254 "8151: IOA microcode error"},
256 "Device bus status error"},
258 "8157: IOA error requiring IOA reset to recover"},
260 "Message reject received from the device"},
262 "8008: A permanent cache battery pack failure occurred"},
264 "9090: Disk unit has been modified after the last known status"},
266 "9081: IOA detected device error"},
268 "9082: IOA detected device error"},
270 "3110: Device bus error, message or command phase"},
272 "9091: Incorrect hardware configuration change has been detected"},
274 "FFF4: Command to logical unit failed"},
276 "Illegal request, invalid request type or request packet"},
278 "Illegal request, invalid resource handle"},
280 "Illegal request, invalid field in parameter list"},
282 "Illegal request, parameter not supported"},
284 "Illegal request, parameter value invalid"},
286 "Illegal request, command sequence error"},
288 "9031: Array protection temporarily suspended, protection resuming"},
290 "9040: Array protection temporarily suspended, protection resuming"},
292 "FFFB: SCSI bus was reset"},
294 "FFFE: SCSI bus transition to single ended"},
296 "FFFE: SCSI bus transition to LVD"},
298 "FFFB: SCSI bus was reset by another initiator"},
300 "3029: A device replacement has occurred"},
302 "9051: IOA cache data exists for a missing or failed device"},
304 "9025: Disk unit is not supported at its physical location"},
306 "3020: IOA detected a SCSI bus configuration error"},
308 "3150: SCSI bus configuration error"},
310 "9041: Array protection temporarily suspended"},
312 "9030: Array no longer protected due to missing or failed disk unit"},
314 "Failure due to other device"},
316 "9008: IOA does not support functions expected by devices"},
318 "9010: Cache data associated with attached devices cannot be found"},
320 "9011: Cache data belongs to devices other than those attached"},
322 "9020: Array missing 2 or more devices with only 1 device present"},
324 "9021: Array missing 2 or more devices with 2 or more devices present"},
326 "9022: Exposed array is missing a required device"},
328 "9023: Array member(s) not at required physical locations"},
330 "9024: Array not functional due to present hardware configuration"},
332 "9026: Array not functional due to present hardware configuration"},
334 "9027: Array is missing a device and parity is out of sync"},
336 "9028: Maximum number of arrays already exist"},
338 "9050: Required cache data cannot be located for a disk unit"},
340 "9052: Cache data exists for a device that has been modified"},
342 "9054: IOA resources not available due to previous problems"},
344 "9092: Disk unit requires initialization before use"},
346 "9029: Incorrect hardware configuration change has been detected"},
348 "9060: One or more disk pairs are missing from an array"},
350 "9061: One or more disks are missing from an array"},
352 "9062: One or more disks are missing from an array"},
354 "9063: Maximum number of functional arrays has been exceeded"},
356 "Aborted command, invalid descriptor"},
358 "Command terminated by host"}
/*
 * Known SES (SCSI enclosure services) devices and their max bus speeds
 * (in MB/s). The second field is a per-byte product-ID match mask used by
 * ipr_find_ses_entry(): 'X' means "must equal the corresponding byte of
 * the entry's product id"; any other character (e.g. '*') is a wildcard
 * that matches anything (see the comparison loop in ipr_find_ses_entry).
 */
361 static const struct ipr_ses_table_entry ipr_ses_table[] = {
362 { "2104-DL1 ", "XXXXXXXXXXXXXXXX", 80 },
363 { "2104-TL1 ", "XXXXXXXXXXXXXXXX", 80 },
364 { "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
365 { "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
366 { "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
367 { "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
368 { "2104-DU3 ", "XXXXXXXXXXXXXXXX", 160 },
369 { "2104-TU3 ", "XXXXXXXXXXXXXXXX", 160 },
370 { "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
371 { "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
372 { "St V1S2 ", "XXXXXXXXXXXXXXXX", 160 },
373 { "HSBPD4M PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
374 { "VSBPD1H U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
378 * Function Prototypes
/* Forward declarations for routines referenced before their definitions. */
380 static int ipr_reset_alert(struct ipr_cmnd *);
381 static void ipr_process_ccn(struct ipr_cmnd *);
382 static void ipr_process_error(struct ipr_cmnd *);
383 static void ipr_reset_ioa_job(struct ipr_cmnd *);
384 static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
385 enum ipr_shutdown_type);
387 #ifdef CONFIG_SCSI_IPR_TRACE
389 * ipr_trc_hook - Add a trace entry to the driver trace
390 * @ipr_cmd: ipr command struct
392 * @add_data: additional data
397 static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
398 u8 type, u32 add_data)
400 struct ipr_trace_entry *trace_entry;
401 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
403 trace_entry = &ioa_cfg->trace[ioa_cfg->trace_index++];
404 trace_entry->time = jiffies;
405 trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
406 trace_entry->type = type;
407 trace_entry->cmd_index = ipr_cmd->cmd_index;
408 trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
409 trace_entry->u.add_data = add_data;
412 #define ipr_trc_hook(ipr_cmd, type, add_data) do { } while(0)
416 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
417 * @ipr_cmd: ipr command struct
422 static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
424 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
425 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
427 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
428 ioarcb->write_data_transfer_length = 0;
429 ioarcb->read_data_transfer_length = 0;
430 ioarcb->write_ioadl_len = 0;
431 ioarcb->read_ioadl_len = 0;
433 ioasa->residual_data_len = 0;
435 ipr_cmd->scsi_cmd = NULL;
436 ipr_cmd->sense_buffer[0] = 0;
437 ipr_cmd->dma_use_sg = 0;
441 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
442 * @ipr_cmd: ipr command struct
447 static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
449 ipr_reinit_ipr_cmnd(ipr_cmd);
450 ipr_cmd->u.scratch = 0;
451 init_timer(&ipr_cmd->timer);
455 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
456 * @ioa_cfg: ioa config struct
459 * pointer to ipr command struct
462 struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
464 struct ipr_cmnd *ipr_cmd;
466 ipr_cmd = list_entry(ioa_cfg->free_q.next, struct ipr_cmnd, queue);
467 list_del(&ipr_cmd->queue);
468 ipr_init_ipr_cmnd(ipr_cmd);
474 * ipr_unmap_sglist - Unmap scatterlist if mapped
475 * @ioa_cfg: ioa config struct
476 * @ipr_cmd: ipr command struct
481 static void ipr_unmap_sglist(struct ipr_ioa_cfg *ioa_cfg,
482 struct ipr_cmnd *ipr_cmd)
484 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
486 if (ipr_cmd->dma_use_sg) {
487 if (scsi_cmd->use_sg > 0) {
488 pci_unmap_sg(ioa_cfg->pdev, scsi_cmd->request_buffer,
490 scsi_cmd->sc_data_direction);
492 pci_unmap_single(ioa_cfg->pdev, ipr_cmd->dma_handle,
493 scsi_cmd->request_bufflen,
494 scsi_cmd->sc_data_direction);
500 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
501 * @ioa_cfg: ioa config struct
502 * @clr_ints: interrupts to clear
504 * This function masks all interrupts on the adapter, then clears the
505 * interrupts specified in the mask
510 static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
513 volatile u32 int_reg;
515 /* Stop new interrupts */
516 ioa_cfg->allow_interrupts = 0;
518 /* Set interrupt mask to stop all new interrupts */
519 writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);
521 /* Clear any pending interrupts */
522 writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg);
523 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
527 * ipr_save_pcix_cmd_reg - Save PCI-X command register
528 * @ioa_cfg: ioa config struct
531 * 0 on success / -EIO on failure
533 static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
535 int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
537 if (pcix_cmd_reg == 0) {
538 dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
542 if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg,
543 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
544 dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
548 ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
553 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
554 * @ioa_cfg: ioa config struct
557 * 0 on success / -EIO on failure
559 static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
561 int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
564 if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg,
565 ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
566 dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
570 dev_err(&ioa_cfg->pdev->dev,
571 "Failed to setup PCI-X command register\n");
579 * ipr_scsi_eh_done - mid-layer done function for aborted ops
580 * @ipr_cmd: ipr command struct
582 * This function is invoked by the interrupt handler for
583 * ops generated by the SCSI mid-layer which are being aborted.
588 static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
590 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
591 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
593 scsi_cmd->result |= (DID_ERROR << 16);
595 ipr_unmap_sglist(ioa_cfg, ipr_cmd);
596 scsi_cmd->scsi_done(scsi_cmd);
597 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
601 * ipr_fail_all_ops - Fails all outstanding ops.
602 * @ioa_cfg: ioa config struct
604 * This function fails all outstanding ops.
609 static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
611 struct ipr_cmnd *ipr_cmd, *temp;
614 list_for_each_entry_safe(ipr_cmd, temp, &ioa_cfg->pending_q, queue) {
615 list_del(&ipr_cmd->queue);
617 ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
618 ipr_cmd->ioasa.ilid = cpu_to_be32(IPR_DRIVER_ILID);
620 if (ipr_cmd->scsi_cmd)
621 ipr_cmd->done = ipr_scsi_eh_done;
623 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, IPR_IOASC_IOA_WAS_RESET);
624 del_timer(&ipr_cmd->timer);
625 ipr_cmd->done(ipr_cmd);
632 * ipr_do_req - Send driver initiated requests.
633 * @ipr_cmd: ipr command struct
634 * @done: done function
635 * @timeout_func: timeout function
636 * @timeout: timeout value
638 * This function sends the specified command to the adapter with the
639 * timeout given. The done function is invoked on command completion.
644 static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
645 void (*done) (struct ipr_cmnd *),
646 void (*timeout_func) (struct ipr_cmnd *), u32 timeout)
648 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
650 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
652 ipr_cmd->done = done;
654 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
655 ipr_cmd->timer.expires = jiffies + timeout;
656 ipr_cmd->timer.function = (void (*)(unsigned long))timeout_func;
658 add_timer(&ipr_cmd->timer);
660 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);
663 writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
664 ioa_cfg->regs.ioarrin_reg);
668 * ipr_internal_cmd_done - Op done function for an internally generated op.
669 * @ipr_cmd: ipr command struct
671 * This function is the op done function for an internally generated,
672 * blocking op. It simply wakes the sleeping thread.
677 static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
679 if (ipr_cmd->u.sibling)
680 ipr_cmd->u.sibling = NULL;
682 complete(&ipr_cmd->completion);
686 * ipr_send_blocking_cmd - Send command and sleep on its completion.
687 * @ipr_cmd: ipr command struct
688 * @timeout_func: function to invoke if command times out
694 static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
695 void (*timeout_func) (struct ipr_cmnd *ipr_cmd),
698 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
700 init_completion(&ipr_cmd->completion);
701 ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);
703 spin_unlock_irq(ioa_cfg->host->host_lock);
704 wait_for_completion(&ipr_cmd->completion);
705 spin_lock_irq(ioa_cfg->host->host_lock);
709 * ipr_send_hcam - Send an HCAM to the adapter.
710 * @ioa_cfg: ioa config struct
712 * @hostrcb: hostrcb struct
714 * This function will send a Host Controlled Async command to the adapter.
715 * If HCAMs are currently not allowed to be issued to the adapter, it will
716 * place the hostrcb on the free queue.
721 static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
722 struct ipr_hostrcb *hostrcb)
724 struct ipr_cmnd *ipr_cmd;
725 struct ipr_ioarcb *ioarcb;
727 if (ioa_cfg->allow_cmds) {
728 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
729 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
730 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);
732 ipr_cmd->u.hostrcb = hostrcb;
733 ioarcb = &ipr_cmd->ioarcb;
735 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
736 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
737 ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
738 ioarcb->cmd_pkt.cdb[1] = type;
739 ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
740 ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;
742 ioarcb->read_data_transfer_length = cpu_to_be32(sizeof(hostrcb->hcam));
743 ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
744 ipr_cmd->ioadl[0].flags_and_data_len =
745 cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | sizeof(hostrcb->hcam));
746 ipr_cmd->ioadl[0].address = cpu_to_be32(hostrcb->hostrcb_dma);
748 if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
749 ipr_cmd->done = ipr_process_ccn;
751 ipr_cmd->done = ipr_process_error;
753 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);
756 writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
757 ioa_cfg->regs.ioarrin_reg);
759 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
764 * ipr_init_res_entry - Initialize a resource entry struct.
765 * @res: resource entry struct
770 static void ipr_init_res_entry(struct ipr_resource_entry *res)
772 res->needs_sync_complete = 1;
775 res->del_from_ml = 0;
776 res->resetting_device = 0;
778 res->qdepth = IPR_MAX_CMD_PER_LUN;
783 * ipr_handle_config_change - Handle a config change from the adapter
784 * @ioa_cfg: ioa config struct
790 static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
791 struct ipr_hostrcb *hostrcb)
793 struct ipr_resource_entry *res = NULL;
794 struct ipr_config_table_entry *cfgte;
797 cfgte = &hostrcb->hcam.u.ccn.cfgte;
799 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
800 if (!memcmp(&res->cfgte.res_addr, &cfgte->res_addr,
801 sizeof(cfgte->res_addr))) {
808 if (list_empty(&ioa_cfg->free_res_q)) {
809 ipr_send_hcam(ioa_cfg,
810 IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
815 res = list_entry(ioa_cfg->free_res_q.next,
816 struct ipr_resource_entry, queue);
818 list_del(&res->queue);
819 ipr_init_res_entry(res);
820 list_add_tail(&res->queue, &ioa_cfg->used_res_q);
823 memcpy(&res->cfgte, cfgte, sizeof(struct ipr_config_table_entry));
825 if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
827 res->sdev->hostdata = NULL;
828 res->del_from_ml = 1;
829 if (ioa_cfg->allow_ml_add_del)
830 schedule_work(&ioa_cfg->work_q);
832 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
833 } else if (!res->sdev) {
835 if (ioa_cfg->allow_ml_add_del)
836 schedule_work(&ioa_cfg->work_q);
839 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
843 * ipr_process_ccn - Op done function for a CCN.
844 * @ipr_cmd: ipr command struct
846 * This function is the op done function for a configuration
847 * change notification host controlled async from the adapter.
852 static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
854 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
855 struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
856 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
858 list_del(&hostrcb->queue);
859 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
862 if (ioasc != IPR_IOASC_IOA_WAS_RESET)
863 dev_err(&ioa_cfg->pdev->dev,
864 "Host RCB failed with IOASC: 0x%08X\n", ioasc);
866 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
868 ipr_handle_config_change(ioa_cfg, hostrcb);
873 * ipr_log_vpd - Log the passed VPD to the error log.
874 * @vpids: vendor/product id struct
875 * @serial_num: serial number string
880 static void ipr_log_vpd(struct ipr_std_inq_vpids *vpids, u8 *serial_num)
882 char buffer[max_t(int, sizeof(struct ipr_std_inq_vpids),
883 IPR_SERIAL_NUM_LEN) + 1];
885 memcpy(buffer, vpids, sizeof(struct ipr_std_inq_vpids));
886 buffer[sizeof(struct ipr_std_inq_vpids)] = '\0';
887 ipr_err("Vendor/Product ID: %s\n", buffer);
889 memcpy(buffer, serial_num, IPR_SERIAL_NUM_LEN);
890 buffer[IPR_SERIAL_NUM_LEN] = '\0';
891 ipr_err(" Serial Number: %s\n", buffer);
895 * ipr_log_cache_error - Log a cache error.
896 * @ioa_cfg: ioa config struct
897 * @hostrcb: hostrcb struct
902 static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
903 struct ipr_hostrcb *hostrcb)
905 struct ipr_hostrcb_type_02_error *error =
906 &hostrcb->hcam.u.error.u.type_02_error;
908 ipr_err("-----Current Configuration-----\n");
909 ipr_err("Cache Directory Card Information:\n");
910 ipr_log_vpd(&error->ioa_vpids, error->ioa_sn);
911 ipr_err("Adapter Card Information:\n");
912 ipr_log_vpd(&error->cfc_vpids, error->cfc_sn);
914 ipr_err("-----Expected Configuration-----\n");
915 ipr_err("Cache Directory Card Information:\n");
916 ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpids,
917 error->ioa_last_attached_to_cfc_sn);
918 ipr_err("Adapter Card Information:\n");
919 ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpids,
920 error->cfc_last_attached_to_ioa_sn);
922 ipr_err("Additional IOA Data: %08X %08X %08X\n",
923 be32_to_cpu(error->ioa_data[0]),
924 be32_to_cpu(error->ioa_data[1]),
925 be32_to_cpu(error->ioa_data[2]));
929 * ipr_log_config_error - Log a configuration error.
930 * @ioa_cfg: ioa config struct
931 * @hostrcb: hostrcb struct
936 static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
937 struct ipr_hostrcb *hostrcb)
939 int errors_logged, i;
940 struct ipr_hostrcb_device_data_entry *dev_entry;
941 struct ipr_hostrcb_type_03_error *error;
943 error = &hostrcb->hcam.u.error.u.type_03_error;
944 errors_logged = be32_to_cpu(error->errors_logged);
946 ipr_err("Device Errors Detected/Logged: %d/%d\n",
947 be32_to_cpu(error->errors_detected), errors_logged);
949 dev_entry = error->dev_entry;
951 for (i = 0; i < errors_logged; i++, dev_entry++) {
954 if (dev_entry->dev_res_addr.bus >= IPR_MAX_NUM_BUSES) {
955 ipr_err("Device %d: missing\n", i + 1);
957 ipr_err("Device %d: %d:%d:%d:%d\n", i + 1,
958 ioa_cfg->host->host_no, dev_entry->dev_res_addr.bus,
959 dev_entry->dev_res_addr.target, dev_entry->dev_res_addr.lun);
961 ipr_log_vpd(&dev_entry->dev_vpids, dev_entry->dev_sn);
963 ipr_err("-----New Device Information-----\n");
964 ipr_log_vpd(&dev_entry->new_dev_vpids, dev_entry->new_dev_sn);
966 ipr_err("Cache Directory Card Information:\n");
967 ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpids,
968 dev_entry->ioa_last_with_dev_sn);
970 ipr_err("Adapter Card Information:\n");
971 ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpids,
972 dev_entry->cfc_last_with_dev_sn);
974 ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
975 be32_to_cpu(dev_entry->ioa_data[0]),
976 be32_to_cpu(dev_entry->ioa_data[1]),
977 be32_to_cpu(dev_entry->ioa_data[2]),
978 be32_to_cpu(dev_entry->ioa_data[3]),
979 be32_to_cpu(dev_entry->ioa_data[4]));
984 * ipr_log_array_error - Log an array configuration error.
985 * @ioa_cfg: ioa config struct
986 * @hostrcb: hostrcb struct
991 static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
992 struct ipr_hostrcb *hostrcb)
995 struct ipr_hostrcb_type_04_error *error;
996 struct ipr_hostrcb_array_data_entry *array_entry;
997 u8 zero_sn[IPR_SERIAL_NUM_LEN];
999 memset(zero_sn, '0', IPR_SERIAL_NUM_LEN);
1001 error = &hostrcb->hcam.u.error.u.type_04_error;
1005 ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1006 error->protection_level,
1007 ioa_cfg->host->host_no,
1008 error->last_func_vset_res_addr.bus,
1009 error->last_func_vset_res_addr.target,
1010 error->last_func_vset_res_addr.lun);
1014 array_entry = error->array_member;
1016 for (i = 0; i < 18; i++) {
1017 if (!memcmp(array_entry->serial_num, zero_sn, IPR_SERIAL_NUM_LEN))
1020 if (error->exposed_mode_adn == i) {
1021 ipr_err("Exposed Array Member %d:\n", i);
1023 ipr_err("Array Member %d:\n", i);
1026 ipr_log_vpd(&array_entry->vpids, array_entry->serial_num);
1028 if (array_entry->dev_res_addr.bus >= IPR_MAX_NUM_BUSES) {
1029 ipr_err("Current Location: unknown\n");
1031 ipr_err("Current Location: %d:%d:%d:%d\n",
1032 ioa_cfg->host->host_no,
1033 array_entry->dev_res_addr.bus,
1034 array_entry->dev_res_addr.target,
1035 array_entry->dev_res_addr.lun);
1038 if (array_entry->dev_res_addr.bus >= IPR_MAX_NUM_BUSES) {
1039 ipr_err("Expected Location: unknown\n");
1041 ipr_err("Expected Location: %d:%d:%d:%d\n",
1042 ioa_cfg->host->host_no,
1043 array_entry->expected_dev_res_addr.bus,
1044 array_entry->expected_dev_res_addr.target,
1045 array_entry->expected_dev_res_addr.lun);
1051 array_entry = error->array_member2;
1058 * ipr_log_generic_error - Log an adapter error.
1059 * @ioa_cfg: ioa config struct
1060 * @hostrcb: hostrcb struct
1065 static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
1066 struct ipr_hostrcb *hostrcb)
1069 int ioa_data_len = be32_to_cpu(hostrcb->hcam.length);
1071 if (ioa_data_len == 0)
1074 ipr_err("IOA Error Data:\n");
1075 ipr_err("Offset 0 1 2 3 4 5 6 7 8 9 A B C D E F\n");
1077 for (i = 0; i < ioa_data_len / 4; i += 4) {
1078 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
1079 be32_to_cpu(hostrcb->hcam.u.raw.data[i]),
1080 be32_to_cpu(hostrcb->hcam.u.raw.data[i+1]),
1081 be32_to_cpu(hostrcb->hcam.u.raw.data[i+2]),
1082 be32_to_cpu(hostrcb->hcam.u.raw.data[i+3]));
1087 * ipr_get_error - Find the specfied IOASC in the ipr_error_table.
1090 * This function will return the index of into the ipr_error_table
1091 * for the specified IOASC. If the IOASC is not in the table,
1092 * 0 will be returned, which points to the entry used for unknown errors.
1095 * index into the ipr_error_table
1097 static u32 ipr_get_error(u32 ioasc)
1101 for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
1102 if (ipr_error_table[i].ioasc == ioasc)
1109 * ipr_handle_log_data - Log an adapter error.
1110 * @ioa_cfg: ioa config struct
1111 * @hostrcb: hostrcb struct
1113 * This function logs an adapter error to the system.
1118 static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
1119 struct ipr_hostrcb *hostrcb)
1124 if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
1127 if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
1128 dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");
1130 ioasc = be32_to_cpu(hostrcb->hcam.u.error.failing_dev_ioasc);
1132 if (ioasc == IPR_IOASC_BUS_WAS_RESET ||
1133 ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER) {
1134 /* Tell the midlayer we had a bus reset so it will handle the UA properly */
1135 scsi_report_bus_reset(ioa_cfg->host,
1136 hostrcb->hcam.u.error.failing_dev_res_addr.bus);
1139 error_index = ipr_get_error(ioasc);
1141 if (!ipr_error_table[error_index].log_hcam)
1144 if (ipr_is_device(&hostrcb->hcam.u.error.failing_dev_res_addr)) {
1145 ipr_res_err(ioa_cfg, hostrcb->hcam.u.error.failing_dev_res_addr,
1146 "%s\n", ipr_error_table[error_index].error);
1148 dev_err(&ioa_cfg->pdev->dev, "%s\n",
1149 ipr_error_table[error_index].error);
1152 /* Set indication we have logged an error */
1153 ioa_cfg->errors_logged++;
1155 if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
1158 switch (hostrcb->hcam.overlay_id) {
1159 case IPR_HOST_RCB_OVERLAY_ID_1:
1160 ipr_log_generic_error(ioa_cfg, hostrcb);
1162 case IPR_HOST_RCB_OVERLAY_ID_2:
1163 ipr_log_cache_error(ioa_cfg, hostrcb);
1165 case IPR_HOST_RCB_OVERLAY_ID_3:
1166 ipr_log_config_error(ioa_cfg, hostrcb);
1168 case IPR_HOST_RCB_OVERLAY_ID_4:
1169 case IPR_HOST_RCB_OVERLAY_ID_6:
1170 ipr_log_array_error(ioa_cfg, hostrcb);
1172 case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
1173 ipr_log_generic_error(ioa_cfg, hostrcb);
1176 dev_err(&ioa_cfg->pdev->dev,
1177 "Unknown error received. Overlay ID: %d\n",
1178 hostrcb->hcam.overlay_id);
1184 * ipr_process_error - Op done function for an adapter error log.
1185 * @ipr_cmd: ipr command struct
1187 * This function is the op done function for an error log host
1188 * controlled async from the adapter. It will log the error and
1189 * send the HCAM back to the adapter.
1194 static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
1196 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1197 struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
1198 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
1200 list_del(&hostrcb->queue);
1201 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
1204 ipr_handle_log_data(ioa_cfg, hostrcb);
1205 } else if (ioasc != IPR_IOASC_IOA_WAS_RESET) {
1206 dev_err(&ioa_cfg->pdev->dev,
1207 "Host RCB failed with IOASC: 0x%08X\n", ioasc);
1210 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
1214 * ipr_timeout - An internally generated op has timed out.
1215 * @ipr_cmd: ipr command struct
1217 * This function blocks host requests and initiates an
1223 static void ipr_timeout(struct ipr_cmnd *ipr_cmd)
1225 unsigned long lock_flags = 0;
1226 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
/* host_lock serializes against the interrupt handler and reset job */
1229 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1231 ioa_cfg->errors_logged++;
1232 dev_err(&ioa_cfg->pdev->dev,
1233 "Adapter being reset due to command timeout.\n");
/* If a dump request was pending, escalate so the reset path collects it */
1235 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
1236 ioa_cfg->sdt_state = GET_DUMP;
/* Start a new reset only if none is in flight, or if the command that
   timed out is itself the in-flight reset command */
1238 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
1239 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
1241 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1246 * ipr_reset_reload - Reset/Reload the IOA
1247 * @ioa_cfg: ioa config struct
1248 * @shutdown_type: shutdown type
1250 * This function resets the adapter and re-initializes it.
1251 * This function assumes that all new host commands have been stopped.
1255 static int ipr_reset_reload(struct ipr_ioa_cfg *ioa_cfg,
1256 enum ipr_shutdown_type shutdown_type)
/* Caller holds host_lock; only kick off a reset if one is not in flight */
1258 if (!ioa_cfg->in_reset_reload)
1259 ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
/* Drop the lock while sleeping until the reset/reload completes */
1261 spin_unlock_irq(ioa_cfg->host->host_lock);
1262 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
1263 spin_lock_irq(ioa_cfg->host->host_lock);
1265 /* If a host reset hit while we were already resetting the adapter
1266 for some reason and that reset failed, the IOA is now marked dead. */
1267 if (ioa_cfg->ioa_is_dead) {
1276 * ipr_find_ses_entry - Find matching SES in SES table
1277 * @res: resource entry struct of SES
1280 * pointer to SES table entry / NULL on failure
1282 static const struct ipr_ses_table_entry *
1283 ipr_find_ses_entry(struct ipr_resource_entry *res)
1286 const struct ipr_ses_table_entry *ste = ipr_ses_table;
/* Walk the static SES table, comparing each entry's product id against
   the inquiry data of this resource */
1288 for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
/* compare_product_id_byte[j] == 'X' marks a position that must match;
   presumably other positions are don't-care (handling not visible here) */
1289 for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
1290 if (ste->compare_product_id_byte[j] == 'X') {
1291 if (res->cfgte.std_inq_data.vpids.product_id[j] == ste->product_id[j])
/* Every position matched: this table entry describes the SES */
1299 if (matches == IPR_PROD_ID_LEN)
1307 * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
1308 * @ioa_cfg: ioa config struct
1310 * @bus_width: bus width
1313 * SCSI bus speed in units of 100KHz, 1600 is 160 MHz
1314 * For a 2-byte wide SCSI bus, the maximum transfer speed is
1315 * twice the maximum transfer rate (e.g. for a wide enabled bus,
1316 * max 160MHz = max 320MB/sec).
1318 static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
1320 struct ipr_resource_entry *res;
1321 const struct ipr_ses_table_entry *ste;
/* Start from the theoretical maximum for this bus width */
1322 u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);
1324 /* Loop through each config table entry in the config table buffer */
1325 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
/* Only SES (enclosure) devices can impose a speed limit */
1326 if (!(IPR_IS_SES_DEVICE(res->cfgte.std_inq_data)))
/* Ignore enclosures on other buses */
1329 if (bus != res->cfgte.res_addr.bus)
/* No table entry means this SES imposes no known limit */
1332 if (!(ste = ipr_find_ses_entry(res)))
/* Scale the table limit to 100KHz units for this bus width;
   assumes max_bus_speed_limit is per-byte-lane -- see table definition */
1335 max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
1338 return max_xfer_rate;
1342 * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
1343 * @ioa_cfg: ioa config struct
1344 * @max_delay: max delay in micro-seconds to wait
1346 * Waits for an IODEBUG ACK from the IOA, doing busy looping.
1349 * 0 on success / other on failure
1351 static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
/* volatile: value comes from a hardware register read, do not cache */
1353 volatile u32 pcii_reg;
1356 /* Read interrupt reg until IOA signals IO Debug Acknowledge */
1357 while (delay < max_delay) {
1358 pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
1360 if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
1363 /* udelay cannot be used if delay is more than a few milliseconds */
1364 if ((delay / 1000) > MAX_UDELAY_MS)
1365 mdelay(delay / 1000);
1375 * ipr_get_ldump_data_section - Dump IOA memory
1376 * @ioa_cfg: ioa config struct
1377 * @start_addr: adapter address to dump
1378 * @dest: destination kernel buffer
1379 * @length_in_words: length to dump in 4 byte words
1382 * 0 on success / -EIO on failure
1384 static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
1386 u32 *dest, u32 length_in_words)
/* volatile: read back from a hardware register */
1388 volatile u32 temp_pcii_reg;
1391 /* Write IOA interrupt reg starting LDUMP state */
1392 writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
1393 ioa_cfg->regs.set_uproc_interrupt_reg);
1395 /* Wait for IO debug acknowledge */
1396 if (ipr_wait_iodbg_ack(ioa_cfg,
1397 IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
1398 dev_err(&ioa_cfg->pdev->dev,
1399 "IOA dump long data transfer timeout\n");
1403 /* Signal LDUMP interlocked - clear IO debug ack */
1404 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
1405 ioa_cfg->regs.clr_interrupt_reg);
1407 /* Write Mailbox with starting address */
1408 writel(start_addr, ioa_cfg->ioa_mailbox);
1410 /* Signal address valid - clear IOA Reset alert */
1411 writel(IPR_UPROCI_RESET_ALERT,
1412 ioa_cfg->regs.clr_uproc_interrupt_reg);
/* Handshake one 32-bit word at a time through the mailbox register */
1414 for (i = 0; i < length_in_words; i++) {
1415 /* Wait for IO debug acknowledge */
1416 if (ipr_wait_iodbg_ack(ioa_cfg,
1417 IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
1418 dev_err(&ioa_cfg->pdev->dev,
1419 "IOA dump short data transfer timeout\n");
1423 /* Read data from mailbox and increment destination pointer */
1424 *dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
1427 /* For all but the last word of data, signal data received */
1428 if (i < (length_in_words - 1)) {
1429 /* Signal dump data received - Clear IO debug Ack */
1430 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
1431 ioa_cfg->regs.clr_interrupt_reg);
1435 /* Signal end of block transfer. Set reset alert then clear IO debug ack */
1436 writel(IPR_UPROCI_RESET_ALERT,
1437 ioa_cfg->regs.set_uproc_interrupt_reg);
1439 writel(IPR_UPROCI_IO_DEBUG_ALERT,
1440 ioa_cfg->regs.clr_uproc_interrupt_reg);
1442 /* Signal dump data received - Clear IO debug Ack */
1443 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
1444 ioa_cfg->regs.clr_interrupt_reg);
1446 /* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
1447 while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
1449 readl(ioa_cfg->regs.sense_uproc_interrupt_reg);
1451 if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
1461 #ifdef CONFIG_SCSI_IPR_DUMP
1463 * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
1464 * @ioa_cfg: ioa config struct
1465 * @pci_address: adapter address
1466 * @length: length of data to copy
1468 * Copy data from PCI adapter to kernel buffer.
1469 * Note: length MUST be a 4 byte multiple
1471 * 0 on success / other on failure
1473 static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
1474 unsigned long pci_address, u32 length)
1476 int bytes_copied = 0;
1477 int cur_len, rc, rem_len, rem_page_len;
1479 unsigned long lock_flags = 0;
1480 struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;
/* Copy until the requested length or the global dump size cap is reached */
1482 while (bytes_copied < length &&
1483 (ioa_dump->hdr.len + bytes_copied) < IPR_MAX_IOA_DUMP_SIZE) {
/* Need a fresh page when the current one is full (or none yet exists) */
1484 if (ioa_dump->page_offset >= PAGE_SIZE ||
1485 ioa_dump->page_offset == 0) {
/* GFP_ATOMIC: allocation may happen while a reset is in progress */
1486 page = (u32 *)__get_free_page(GFP_ATOMIC);
/* Allocation failed: return what was copied so far */
1490 return bytes_copied;
1493 ioa_dump->page_offset = 0;
1494 ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
1495 ioa_dump->next_page_index++;
/* Continue filling the most recently allocated page */
1497 page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];
/* Copy no more than what remains of the request and of the page */
1499 rem_len = length - bytes_copied;
1500 rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
1501 cur_len = min(rem_len, rem_page_len);
/* host_lock protects sdt_state and serializes mailbox access */
1503 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1504 if (ioa_cfg->sdt_state == ABORT_DUMP) {
1507 rc = ipr_get_ldump_data_section(ioa_cfg,
1508 pci_address + bytes_copied,
1509 &page[ioa_dump->page_offset / 4],
1510 (cur_len / sizeof(u32)));
1512 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1515 ioa_dump->page_offset += cur_len;
1516 bytes_copied += cur_len;
1524 return bytes_copied;
1528 * ipr_init_dump_entry_hdr - Initialize a dump entry header.
1529 * @hdr: dump entry header struct
1534 static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
/* Stamp the eye catcher, data offset and default (success) status */
1536 hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
1538 hdr->offset = sizeof(*hdr);
1539 hdr->status = IPR_DUMP_STATUS_SUCCESS;
1543 * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
1544 * @ioa_cfg: ioa config struct
1545 * @driver_dump: driver dump struct
1550 static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
1551 struct ipr_driver_dump *driver_dump)
/* Microcode VPD (inquiry page 3) holds the firmware revision fields */
1553 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
1555 ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
/* len covers the payload only, excluding the entry header itself */
1556 driver_dump->ioa_type_entry.hdr.len =
1557 sizeof(struct ipr_dump_ioa_type_entry) -
1558 sizeof(struct ipr_dump_entry_header);
1559 driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
1560 driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
1561 driver_dump->ioa_type_entry.type = ioa_cfg->type;
/* Pack fw version as: major | card type | minor[0] | minor[1] */
1562 driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
1563 (ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
1564 ucode_vpd->minor_release[1];
1565 driver_dump->hdr.num_entries++;
1569 * ipr_dump_version_data - Fill in the driver version in the dump.
1570 * @ioa_cfg: ioa config struct
1571 * @driver_dump: driver dump struct
1576 static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
1577 struct ipr_driver_dump *driver_dump)
1579 ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
/* len covers the payload only, excluding the entry header itself */
1580 driver_dump->version_entry.hdr.len =
1581 sizeof(struct ipr_dump_version_entry) -
1582 sizeof(struct ipr_dump_entry_header);
1583 driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
1584 driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
/* IPR_DRIVER_VERSION is a fixed string, so strcpy cannot overflow here */
1585 strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
1586 driver_dump->hdr.num_entries++;
1590 * ipr_dump_trace_data - Fill in the IOA trace in the dump.
1591 * @ioa_cfg: ioa config struct
1592 * @driver_dump: driver dump struct
1597 static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
1598 struct ipr_driver_dump *driver_dump)
1600 ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
/* len covers the payload only, excluding the entry header itself */
1601 driver_dump->trace_entry.hdr.len =
1602 sizeof(struct ipr_dump_trace_entry) -
1603 sizeof(struct ipr_dump_entry_header);
1604 driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
1605 driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
/* Snapshot the driver's in-memory trace buffer into the dump */
1606 memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
1607 driver_dump->hdr.num_entries++;
1611 * ipr_dump_location_data - Fill in the IOA location in the dump.
1612 * @ioa_cfg: ioa config struct
1613 * @driver_dump: driver dump struct
1618 static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
1619 struct ipr_driver_dump *driver_dump)
1621 ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
/* len covers the payload only, excluding the entry header itself */
1622 driver_dump->location_entry.hdr.len =
1623 sizeof(struct ipr_dump_location_entry) -
1624 sizeof(struct ipr_dump_entry_header);
1625 driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
1626 driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
/* Record the PCI device's bus id as the adapter's physical location */
1627 strcpy(driver_dump->location_entry.location, ioa_cfg->pdev->dev.bus_id);
1628 driver_dump->hdr.num_entries++;
1632 * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
1633 * @ioa_cfg: ioa config struct
1634 * @dump: dump struct
1639 static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
1641 unsigned long start_addr, sdt_word;
1642 unsigned long lock_flags = 0;
1643 struct ipr_driver_dump *driver_dump = &dump->driver_dump;
1644 struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
1645 u32 num_entries, start_off, end_off;
1646 u32 bytes_to_copy, bytes_copied, rc;
1647 struct ipr_sdt *sdt;
1652 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
/* Someone else may have already collected or cancelled the dump */
1654 if (ioa_cfg->sdt_state != GET_DUMP) {
1655 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* The mailbox holds the Smart Dump Table address while dump is pending */
1659 start_addr = readl(ioa_cfg->ioa_mailbox);
1661 if (!ipr_sdt_is_fmt2(start_addr)) {
1662 dev_err(&ioa_cfg->pdev->dev,
1663 "Invalid dump table format: %lx\n", start_addr);
1664 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1668 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");
1670 driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;
1672 /* Initialize the overall dump header */
1673 driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
1674 driver_dump->hdr.num_entries = 1;
1675 driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
1676 driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
1677 driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
1678 driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;
/* Fill in the driver-side dump entries (each bumps num_entries) */
1680 ipr_dump_version_data(ioa_cfg, driver_dump);
1681 ipr_dump_location_data(ioa_cfg, driver_dump);
1682 ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
1683 ipr_dump_trace_data(ioa_cfg, driver_dump);
1685 /* Update dump_header */
1686 driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);
1688 /* IOA Dump entry */
1689 ipr_init_dump_entry_hdr(&ioa_dump->hdr);
1690 ioa_dump->format = IPR_SDT_FMT2;
1691 ioa_dump->hdr.len = 0;
1692 ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
1693 ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;
1695 /* First entries in sdt are actually a list of dump addresses and
1696 lengths to gather the real dump data. sdt represents the pointer
1697 to the ioa generated dump table. Dump data will be extracted based
1698 on entries in this table */
1699 sdt = &ioa_dump->sdt;
1701 rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (u32 *)sdt,
1702 sizeof(struct ipr_sdt) / sizeof(u32));
1704 /* Smart Dump table is ready to use and the first entry is valid */
1705 if (rc || (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE)) {
1706 dev_err(&ioa_cfg->pdev->dev,
1707 "Dump of IOA failed. Dump table not valid: %d, %X.\n",
1708 rc, be32_to_cpu(sdt->hdr.state));
1709 driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
1710 ioa_cfg->sdt_state = DUMP_OBTAINED;
1711 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Clamp the entry count to the driver's table capacity */
1715 num_entries = be32_to_cpu(sdt->hdr.num_entries_used);
1717 if (num_entries > IPR_NUM_SDT_ENTRIES)
1718 num_entries = IPR_NUM_SDT_ENTRIES;
1720 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Pull the data for each valid SDT entry from the adapter */
1722 for (i = 0; i < num_entries; i++) {
1723 if (ioa_dump->hdr.len > IPR_MAX_IOA_DUMP_SIZE) {
1724 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
1728 if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
1729 sdt_word = be32_to_cpu(sdt->entry[i].bar_str_offset);
1730 start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
1731 end_off = be32_to_cpu(sdt->entry[i].end_offset);
1733 if (ipr_sdt_is_fmt2(sdt_word) && sdt_word) {
1734 bytes_to_copy = end_off - start_off;
/* Skip entries larger than the overall dump size limit */
1735 if (bytes_to_copy > IPR_MAX_IOA_DUMP_SIZE) {
1736 sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
1740 /* Copy data from adapter to driver buffers */
1741 bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
1744 ioa_dump->hdr.len += bytes_copied;
/* A short copy means the transfer was cut off; note it and stop */
1746 if (bytes_copied != bytes_to_copy) {
1747 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
1754 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");
1756 /* Update dump_header */
1757 driver_dump->hdr.len += ioa_dump->hdr.len;
1759 ioa_cfg->sdt_state = DUMP_OBTAINED;
/* Stub used when CONFIG_SCSI_IPR_DUMP is not configured */
1764 #define ipr_get_ioa_dump(ioa_cfg, dump) do { } while(0)
1768 * ipr_worker_thread - Worker thread
1769 * @data: ioa config struct
1771 * Called at task level from a work thread. This function takes care
1772 * of adding and removing device from the mid-layer as configuration
1773 * changes are detected by the adapter.
1778 static void ipr_worker_thread(void *data)
1780 unsigned long lock_flags;
1781 struct ipr_resource_entry *res;
1782 struct scsi_device *sdev;
1783 struct ipr_dump *dump;
1784 struct ipr_ioa_cfg *ioa_cfg = data;
1785 u8 bus, target, lun;
1789 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
/* Dump collection takes priority over device add/remove work */
1791 if (ioa_cfg->sdt_state == GET_DUMP) {
1792 dump = ioa_cfg->dump;
/* Take a kobject reference so the dump cannot be freed under us */
1793 if (!dump || !kobject_get(&dump->kobj)) {
1794 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Drop the lock: ipr_get_ioa_dump sleeps and takes the lock itself */
1797 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1798 ipr_get_ioa_dump(ioa_cfg, dump);
1799 kobject_put(&dump->kobj);
1801 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
/* Reset the adapter now that the dump has been captured */
1802 if (ioa_cfg->sdt_state == DUMP_OBTAINED)
1803 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
1804 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Mid-layer add/remove is only safe when the adapter allows it */
1811 if (!ioa_cfg->allow_cmds || !ioa_cfg->allow_ml_add_del) {
1812 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* First pass: remove devices the adapter has flagged for deletion */
1816 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
1817 if (res->del_from_ml && res->sdev) {
1820 if (!scsi_device_get(sdev)) {
1822 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
/* scsi_remove_device sleeps, so drop the spinlock around it */
1823 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1824 scsi_remove_device(sdev);
1825 scsi_device_put(sdev);
1826 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
/* Second pass: register newly discovered devices with the mid-layer */
1833 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
1834 if (res->add_to_ml) {
/* Copy the address before dropping the lock; res may change under us */
1835 bus = res->cfgte.res_addr.bus;
1836 target = res->cfgte.res_addr.target;
1837 lun = res->cfgte.res_addr.lun;
1838 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1839 scsi_add_device(ioa_cfg->host, bus, target, lun);
1840 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1845 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1849 #ifdef CONFIG_SCSI_IPR_TRACE
1851 * ipr_read_trace - Dump the adapter trace
1852 * @kobj: kobject struct
1855 * @count: buffer size
1858 * number of bytes printed to buffer
1860 static ssize_t ipr_read_trace(struct kobject *kobj, char *buf,
1861 loff_t off, size_t count)
/* The kobject is embedded in the Scsi_Host's class_device */
1863 struct class_device *cdev = container_of(kobj,struct class_device,kobj);
1864 struct Scsi_Host *shost = class_to_shost(cdev);
1865 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
1866 unsigned long lock_flags = 0;
1867 int size = IPR_TRACE_SIZE;
1868 char *src = (char *)ioa_cfg->trace;
/* Clamp the read so it never runs past the end of the trace buffer */
1872 if (off + count > size) {
/* Hold host_lock so the trace is not updated mid-copy */
1877 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1878 memcpy(buf, &src[off], count);
1879 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* sysfs binary attribute exposing the adapter trace buffer read-only */
1883 static struct bin_attribute ipr_trace_attr = {
1889 .read = ipr_read_trace,
1894 * ipr_show_fw_version - Show the firmware version
1895 * @class_dev: class device struct
1899 * number of bytes printed to buffer
1901 static ssize_t ipr_show_fw_version(struct class_device *class_dev, char *buf)
1903 struct Scsi_Host *shost = class_to_shost(class_dev);
1904 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
/* Firmware revision fields come from cached inquiry page 3 VPD */
1905 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
1906 unsigned long lock_flags = 0;
1909 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
/* Format as 4 hex bytes: major, card type, minor[0], minor[1] */
1910 len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
1911 ucode_vpd->major_release, ucode_vpd->card_type,
1912 ucode_vpd->minor_release[0],
1913 ucode_vpd->minor_release[1]);
1914 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Read-only sysfs attribute: fw_version */
1918 static struct class_device_attribute ipr_fw_version_attr = {
1920 .name = "fw_version",
1923 .show = ipr_show_fw_version,
1927 * ipr_show_log_level - Show the adapter's error logging level
1928 * @class_dev: class device struct
1932 * number of bytes printed to buffer
1934 static ssize_t ipr_show_log_level(struct class_device *class_dev, char *buf)
1936 struct Scsi_Host *shost = class_to_shost(class_dev);
1937 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
1938 unsigned long lock_flags = 0;
/* host_lock keeps the read consistent with concurrent stores */
1941 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1942 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
1943 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1948 * ipr_store_log_level - Change the adapter's error logging level
1949 * @class_dev: class device struct
1953 * number of bytes printed to buffer
1955 static ssize_t ipr_store_log_level(struct class_device *class_dev,
1956 const char *buf, size_t count)
1958 struct Scsi_Host *shost = class_to_shost(class_dev);
1959 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
1960 unsigned long lock_flags = 0;
/* Parse the decimal level; invalid input silently yields 0 */
1962 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1963 ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
1964 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Read/write sysfs attribute: log_level (writable by root only) */
1968 static struct class_device_attribute ipr_log_level_attr = {
1970 .name = "log_level",
1971 .mode = S_IRUGO | S_IWUSR,
1973 .show = ipr_show_log_level,
1974 .store = ipr_store_log_level
1978 * ipr_store_diagnostics - IOA Diagnostics interface
1979 * @class_dev: class_device struct
1981 * @count: buffer size
1983 * This function will reset the adapter and wait a reasonable
1984 * amount of time for any errors that the adapter might log.
1987 * count on success / other on failure
1989 static ssize_t ipr_store_diagnostics(struct class_device *class_dev,
1990 const char *buf, size_t count)
1992 struct Scsi_Host *shost = class_to_shost(class_dev);
1993 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
1994 unsigned long lock_flags = 0;
/* Resetting the adapter is a privileged operation */
1997 if (!capable(CAP_SYS_ADMIN))
/* Wait for any in-flight reset to finish before starting our own */
2000 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2001 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
/* Clear the error counter so only errors from this reset are counted */
2002 ioa_cfg->errors_logged = 0;
2003 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2005 if (ioa_cfg->in_reset_reload) {
2006 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2007 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2009 /* Wait for a second for any errors to be logged */
2010 schedule_timeout(HZ);
2012 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Report failure if another reset started or errors were logged */
2016 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2017 if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
2019 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Write-only sysfs attribute: run_diagnostics */
2024 static struct class_device_attribute ipr_diagnostics_attr = {
2026 .name = "run_diagnostics",
2029 .store = ipr_store_diagnostics
2033 * ipr_store_reset_adapter - Reset the adapter
2034 * @class_dev: class_device struct
2036 * @count: buffer size
2038 * This function will reset the adapter.
2041 * count on success / other on failure
2043 static ssize_t ipr_store_reset_adapter(struct class_device *class_dev,
2044 const char *buf, size_t count)
2046 struct Scsi_Host *shost = class_to_shost(class_dev);
2047 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2048 unsigned long lock_flags;
/* Resetting the adapter is a privileged operation */
2051 if (!capable(CAP_SYS_ADMIN))
/* Only start a reset if one is not already in progress */
2054 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2055 if (!ioa_cfg->in_reset_reload)
2056 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2057 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Block the writer until the reset/reload completes */
2058 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
/* Write-only sysfs attribute: reset_host */
2063 static struct class_device_attribute ipr_ioa_reset_attr = {
2065 .name = "reset_host",
2068 .store = ipr_store_reset_adapter
2072 * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
2073 * @buf_len: buffer length
2075 * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
2076 * list to use for microcode download
2079 * pointer to sglist / NULL on failure
2081 static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
2083 int sg_size, order, bsize_elem, num_elem, i, j;
2084 struct ipr_sglist *sglist;
2085 struct scatterlist *scatterlist;
2088 /* Get the minimum size per scatter/gather element */
2089 sg_size = buf_len / (IPR_MAX_SGLIST - 1);
2091 /* Get the actual size per element */
2092 order = get_order(sg_size);
2094 /* Determine the actual number of bytes per element */
2095 bsize_elem = PAGE_SIZE * (1 << order);
2097 /* Determine the actual number of sg entries needed */
2098 if (buf_len % bsize_elem)
2099 num_elem = (buf_len / bsize_elem) + 1;
2101 num_elem = buf_len / bsize_elem;
2103 /* Allocate a scatter/gather list for the DMA */
/* struct ipr_sglist already embeds one scatterlist, hence num_elem - 1 */
2104 sglist = kmalloc(sizeof(struct ipr_sglist) +
2105 (sizeof(struct scatterlist) * (num_elem - 1)),
2108 if (sglist == NULL) {
2113 memset(sglist, 0, sizeof(struct ipr_sglist) +
2114 (sizeof(struct scatterlist) * (num_elem - 1)));
2116 scatterlist = sglist->scatterlist;
2118 sglist->order = order;
2119 sglist->num_sg = num_elem;
2121 /* Allocate a bunch of sg elements */
2122 for (i = 0; i < num_elem; i++) {
2123 page = alloc_pages(GFP_KERNEL, order);
2127 /* Free up what we already allocated */
2128 for (j = i - 1; j >= 0; j--)
2129 __free_pages(scatterlist[j].page, order);
2134 scatterlist[i].page = page;
2141 * ipr_free_ucode_buffer - Frees a microcode download buffer
2142 * @p_dnld: scatter/gather list pointer
2144 * Free a DMA'able ucode download buffer previously allocated with
2145 * ipr_alloc_ucode_buffer
2150 static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
/* Release every page group at the order it was allocated with */
2154 for (i = 0; i < sglist->num_sg; i++)
2155 __free_pages(sglist->scatterlist[i].page, sglist->order);
2161 * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
2162 * @sglist: scatter/gather list pointer
2163 * @buffer: buffer pointer
2164 * @len: buffer length
2166 * Copy a microcode image from a user buffer into a buffer allocated by
2167 * ipr_alloc_ucode_buffer
2170 * 0 on success / other on failure
2172 static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
2173 u8 *buffer, u32 len)
2175 int bsize_elem, i, result = 0;
2176 struct scatterlist *scatterlist;
2179 /* Determine the actual number of bytes per element */
2180 bsize_elem = PAGE_SIZE * (1 << sglist->order);
2182 scatterlist = sglist->scatterlist;
/* Copy full elements; kmap/kunmap since pages may be highmem */
2184 for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
2185 kaddr = kmap(scatterlist[i].page);
2186 memcpy(kaddr, buffer, bsize_elem);
2187 kunmap(scatterlist[i].page);
2189 scatterlist[i].length = bsize_elem;
/* Copy the final partial element, if any */
2197 if (len % bsize_elem) {
2198 kaddr = kmap(scatterlist[i].page);
2199 memcpy(kaddr, buffer, len % bsize_elem);
2200 kunmap(scatterlist[i].page);
2202 scatterlist[i].length = len % bsize_elem;
2205 sglist->buffer_len = len;
2210 * ipr_map_ucode_buffer - Map a microcode download buffer
2211 * @ipr_cmd: ipr command struct
2212 * @sglist: scatter/gather list
2213 * @len: total length of download buffer
2215 * Maps a microcode download scatter/gather list for DMA and
2219 * 0 on success / -EIO on failure
2221 static int ipr_map_ucode_buffer(struct ipr_cmnd *ipr_cmd,
2222 struct ipr_sglist *sglist, int len)
2224 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2225 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
2226 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
2227 struct scatterlist *scatterlist = sglist->scatterlist;
/* Map the sg list for device-bound (write) DMA */
2230 ipr_cmd->dma_use_sg = pci_map_sg(ioa_cfg->pdev, scatterlist,
2231 sglist->num_sg, DMA_TO_DEVICE);
2233 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
2234 ioarcb->write_data_transfer_length = cpu_to_be32(len);
2235 ioarcb->write_ioadl_len =
2236 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
/* Build one IOADL descriptor per mapped sg entry */
2238 for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
2239 ioadl[i].flags_and_data_len =
2240 cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
2242 cpu_to_be32(sg_dma_address(&scatterlist[i]));
/* Flag the final descriptor so the adapter knows the list ends here */
2245 if (likely(ipr_cmd->dma_use_sg)) {
2246 ioadl[i-1].flags_and_data_len |=
2247 cpu_to_be32(IPR_IOADL_FLAGS_LAST);
/* dma_use_sg == 0 means the mapping failed entirely */
2250 dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
2258 * ipr_store_update_fw - Update the firmware on the adapter
2259 * @class_dev: class_device struct
2261 * @count: buffer size
2263 * This function will update the firmware on the adapter.
2266 * count on success / other on failure
2268 static ssize_t ipr_store_update_fw(struct class_device *class_dev,
2269 const char *buf, size_t count)
2271 struct Scsi_Host *shost = class_to_shost(class_dev);
2272 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2273 struct ipr_ucode_image_header *image_hdr;
2274 const struct firmware *fw_entry;
2275 struct ipr_sglist *sglist;
2276 unsigned long lock_flags;
2279 int len, result, dnld_size;
/* Firmware update is a privileged operation */
2281 if (!capable(CAP_SYS_ADMIN))
/* Copy the filename from the sysfs buffer, stripping the trailing newline */
2284 len = snprintf(fname, 99, "%s", buf);
2285 fname[len-1] = '\0';
2287 if(request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
2288 dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
2292 image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;
/* Sanity-check the image: header must fit and card type must match
   this adapter (a zero card_type in VPD skips the check) */
2294 if (be32_to_cpu(image_hdr->header_length) > fw_entry->size ||
2295 (ioa_cfg->vpd_cbs->page3_data.card_type &&
2296 ioa_cfg->vpd_cbs->page3_data.card_type != image_hdr->card_type)) {
2297 dev_err(&ioa_cfg->pdev->dev, "Invalid microcode buffer\n");
2298 release_firmware(fw_entry);
/* Download payload follows the image header */
2302 src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
2303 dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
2304 sglist = ipr_alloc_ucode_buffer(dnld_size);
2307 dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
2308 release_firmware(fw_entry);
2312 result = ipr_copy_ucode_buffer(sglist, src, dnld_size);
2315 dev_err(&ioa_cfg->pdev->dev,
2316 "Microcode buffer copy to DMA buffer failed\n");
2317 ipr_free_ucode_buffer(sglist);
2318 release_firmware(fw_entry);
2322 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
/* Only one download may be staged at a time */
2324 if (ioa_cfg->ucode_sglist) {
2325 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2326 dev_err(&ioa_cfg->pdev->dev,
2327 "Microcode download already in progress\n");
2328 ipr_free_ucode_buffer(sglist);
2329 release_firmware(fw_entry);
/* Stage the buffer; the reset job performs the actual download */
2333 ioa_cfg->ucode_sglist = sglist;
2334 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2335 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2336 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2338 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2339 ioa_cfg->ucode_sglist = NULL;
2340 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2342 ipr_free_ucode_buffer(sglist);
2343 release_firmware(fw_entry);
/* Write-only sysfs attribute: update_fw */
2348 static struct class_device_attribute ipr_update_fw_attr = {
2350 .name = "update_fw",
2353 .store = ipr_store_update_fw
/* NULL-terminated table of all host class_device attributes */
2356 static struct class_device_attribute *ipr_ioa_attrs[] = {
2357 &ipr_fw_version_attr,
2358 &ipr_log_level_attr,
2359 &ipr_diagnostics_attr,
2360 &ipr_ioa_reset_attr,
2361 &ipr_update_fw_attr,
2365 #ifdef CONFIG_SCSI_IPR_DUMP
2367 * ipr_read_dump - Dump the adapter
2368 * @kobj: kobject struct
2371 * @count: buffer size
2374 * number of bytes printed to buffer
2376 static ssize_t ipr_read_dump(struct kobject *kobj, char *buf,
2377 loff_t off, size_t count)
2379 struct class_device *cdev = container_of(kobj,struct class_device,kobj);
2380 struct Scsi_Host *shost = class_to_shost(cdev);
2381 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2382 struct ipr_dump *dump;
2383 unsigned long lock_flags = 0;
/* Reading the dump exposes adapter internals; restrict to root */
2388 if (!capable(CAP_SYS_ADMIN))
2391 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2392 dump = ioa_cfg->dump;
/* Pin the dump with a kobject ref so it cannot be freed while we read */
2394 if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump || !kobject_get(&dump->kobj)) {
2395 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2399 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Reject reads past the end; clamp reads that straddle the end */
2401 if (off > dump->driver_dump.hdr.len) {
2402 kobject_put(&dump->kobj);
2406 if (off + count > dump->driver_dump.hdr.len) {
2407 count = dump->driver_dump.hdr.len - off;
/* Region 1: the driver-generated dump header/entries */
2411 if (count && off < sizeof(dump->driver_dump)) {
2412 if (off + count > sizeof(dump->driver_dump))
2413 len = sizeof(dump->driver_dump) - off;
2416 src = (u8 *)&dump->driver_dump + off;
2417 memcpy(buf, src, len);
2423 off -= sizeof(dump->driver_dump);
/* Region 2: the fixed part of the IOA dump (before the page array data) */
2425 if (count && off < offsetof(struct ipr_ioa_dump, ioa_data)) {
2426 if (off + count > offsetof(struct ipr_ioa_dump, ioa_data))
2427 len = offsetof(struct ipr_ioa_dump, ioa_data) - off;
2430 src = (u8 *)&dump->ioa_dump + off;
2431 memcpy(buf, src, len);
2437 off -= offsetof(struct ipr_ioa_dump, ioa_data);
/* Region 3: IOA data held in discontiguous pages; copy page by page */
2440 if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
2441 len = PAGE_ALIGN(off) - off;
2444 src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
2445 src += off & ~PAGE_MASK;
2446 memcpy(buf, src, len);
2452 kobject_put(&dump->kobj);
2457 * ipr_release_dump - Free adapter dump memory
2458 * @kobj: kobject struct
2463 static void ipr_release_dump(struct kobject *kobj)
/* kobject release callback: runs when the last reference is dropped */
2465 struct ipr_dump *dump = container_of(kobj,struct ipr_dump,kobj);
2466 struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
2467 unsigned long lock_flags = 0;
/* Detach the dump from the adapter before freeing its memory */
2471 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2472 ioa_cfg->dump = NULL;
2473 ioa_cfg->sdt_state = INACTIVE;
2474 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Free every page the IOA dump data was copied into */
2476 for (i = 0; i < dump->ioa_dump.next_page_index; i++)
2477 free_page((unsigned long) dump->ioa_dump.ioa_data[i]);
/* kobject type wiring the release callback above */
2483 static struct kobj_type ipr_dump_kobj_type = {
2484 .release = ipr_release_dump,
2488 * ipr_alloc_dump - Prepare for adapter dump
2489 * @ioa_cfg: ioa config struct
2492 * 0 on success / other on failure
2494 static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
2496 struct ipr_dump *dump;
2497 unsigned long lock_flags = 0;
2500 dump = kmalloc(sizeof(struct ipr_dump), GFP_KERNEL);
2503 ipr_err("Dump memory allocation failed\n");
/* Zero the struct and set up refcounting via the dump kobject */
2507 memset(dump, 0, sizeof(struct ipr_dump));
2508 kobject_init(&dump->kobj);
2509 dump->kobj.ktype = &ipr_dump_kobj_type;
2510 dump->ioa_cfg = ioa_cfg;
2512 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
/* A dump is already set up or in progress; don't install a second one */
2514 if (INACTIVE != ioa_cfg->sdt_state) {
2515 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2520 ioa_cfg->dump = dump;
2521 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
/* If the adapter is already dead, kick the worker to collect the dump
   immediately (but only once per failure) */
2522 if (ioa_cfg->ioa_is_dead && !ioa_cfg->dump_taken) {
2523 ioa_cfg->dump_taken = 1;
2524 schedule_work(&ioa_cfg->work_q);
2526 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/*
 * ipr_free_dump(): unpublish the current dump (if any) under the host
 * lock, then drop this function's reference; the actual free happens in
 * ipr_release_dump when the last reader's reference is also gone.
 */
2533 * ipr_free_dump - Free adapter dump memory
2534 * @ioa_cfg: ioa config struct
2537 * 0 on success / other on failure
2539 static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
2541 struct ipr_dump *dump;
2542 unsigned long lock_flags = 0;
2546 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2547 dump = ioa_cfg->dump;
/* No dump to free (early unlock path; return elided in this extract). */
2549 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2553 ioa_cfg->dump = NULL;
2554 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Drop the ioa_cfg's reference; may trigger ipr_release_dump. */
2556 kobject_put(&dump->kobj);
/*
 * ipr_write_dump(): sysfs bin-attribute write handler. Writing '1'
 * arms/allocates an adapter dump, '0' releases it. Root only.
 */
2563 * ipr_write_dump - Setup dump state of adapter
2564 * @kobj: kobject struct
2567 * @count: buffer size
2570 * number of bytes printed to buffer
2572 static ssize_t ipr_write_dump(struct kobject *kobj, char *buf,
2573 loff_t off, size_t count)
/* The bin attribute hangs off the Scsi_Host class device. */
2575 struct class_device *cdev = container_of(kobj,struct class_device,kobj);
2576 struct Scsi_Host *shost = class_to_shost(cdev);
2577 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2580 if (!capable(CAP_SYS_ADMIN))
/* '1' branch condition is elided in this extract. */
2584 rc = ipr_alloc_dump(ioa_cfg);
2585 else if (buf[0] == '0')
2586 rc = ipr_free_dump(ioa_cfg);
/* sysfs binary attribute exposing the adapter dump: read streams the
 * captured dump, write ('1'/'0') arms or releases it. Owner-only access. */
2596 static struct bin_attribute ipr_dump_attr = {
2599 .mode = S_IRUSR | S_IWUSR,
2602 .read = ipr_read_dump,
2603 .write = ipr_write_dump
/* No-op stub used when dump support is compiled out (the surrounding
 * #if/#else is not visible in this extract — TODO confirm). */
2606 static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; };
/*
 * ipr_store_queue_depth(): sysfs store handler that changes a device's
 * queue depth, keeping ordered tagging enabled for generic SCSI devices
 * that already have TCQ active.
 */
2610 * ipr_store_queue_depth - Change the device's queue depth
2611 * @dev: device struct
2615 * number of bytes printed to buffer
2617 static ssize_t ipr_store_queue_depth(struct device *dev,
2618 const char *buf, size_t count)
2620 struct scsi_device *sdev = to_scsi_device(dev);
2621 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
2622 struct ipr_resource_entry *res;
2623 int qdepth = simple_strtoul(buf, NULL, 10);
2625 unsigned long lock_flags = 0;
2626 ssize_t len = -ENXIO;
/* res is only valid while holding the host lock. */
2628 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2629 res = (struct ipr_resource_entry *)sdev->hostdata;
2631 res->qdepth = qdepth;
2633 if (ipr_is_gscsi(res) && res->tcq_active)
2634 tagged = MSG_ORDERED_TAG;
/* Adjust the midlayer queue depth outside the lock. */
2639 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2640 scsi_adjust_queue_depth(sdev, tagged, qdepth);
/* Per-device sysfs attribute "queue_depth" (owner read/write). */
2644 static struct device_attribute ipr_queue_depth_attr = {
2646 .name = "queue_depth",
2647 .mode = S_IRUSR | S_IWUSR,
2649 .store = ipr_store_queue_depth
/*
 * ipr_show_tcq_enable(): sysfs show handler — prints 1/0 depending on
 * whether tagged command queuing is active for this device. Returns
 * -ENXIO if the device has no resource entry.
 */
2653 * ipr_show_tcq_enable - Show if the device is enabled for tcqing
2654 * @dev: device struct
2658 * number of bytes printed to buffer
2660 static ssize_t ipr_show_tcq_enable(struct device *dev, char *buf)
2662 struct scsi_device *sdev = to_scsi_device(dev);
2663 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
2664 struct ipr_resource_entry *res;
2665 unsigned long lock_flags = 0;
2666 ssize_t len = -ENXIO;
2668 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2669 res = (struct ipr_resource_entry *)sdev->hostdata;
2671 len = snprintf(buf, PAGE_SIZE, "%d\n", res->tcq_active);
2672 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/*
 * ipr_store_tcq_enable(): sysfs store handler toggling TCQ on a device.
 * '0' disables tagging and restores the saved qdepth; nonzero enables
 * ordered tagging, but only for generic SCSI devices that report
 * tagged_supported.
 */
2677 * ipr_store_tcq_enable - Change the device's TCQing state
2678 * @dev: device struct
2682 * number of bytes printed to buffer
2684 static ssize_t ipr_store_tcq_enable(struct device *dev,
2685 const char *buf, size_t count)
2687 struct scsi_device *sdev = to_scsi_device(dev);
2688 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
2689 struct ipr_resource_entry *res;
2690 unsigned long lock_flags = 0;
2691 int tcq_active = simple_strtoul(buf, NULL, 10);
2692 int qdepth = IPR_MAX_CMD_PER_LUN;
2694 ssize_t len = -ENXIO;
2696 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2698 res = (struct ipr_resource_entry *)sdev->hostdata;
/* Disable path: untag and fall back to the per-resource depth. */
2701 res->tcq_active = 0;
2702 qdepth = res->qdepth;
/* Enable path: only GSCSI devices that support tagging qualify. */
2704 if (ipr_is_gscsi(res) && sdev->tagged_supported) {
2706 tagged = MSG_ORDERED_TAG;
2707 res->tcq_active = 1;
/* Requested enable on an ineligible device (error path elided). */
2711 } else if (tcq_active) {
2716 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2717 scsi_adjust_queue_depth(sdev, tagged, qdepth);
/* Per-device sysfs attribute "tcq_enable" (owner read/write). */
2721 static struct device_attribute ipr_tcqing_attr = {
2723 .name = "tcq_enable",
2724 .mode = S_IRUSR | S_IWUSR,
2726 .store = ipr_store_tcq_enable,
2727 .show = ipr_show_tcq_enable
/*
 * ipr_show_adapter_handle(): sysfs show handler printing the adapter's
 * resource handle for this device as 8 hex digits.
 * NOTE(review): res_handle looks like a big-endian on-wire field
 * elsewhere in this driver (be32_to_cpu is used on it in the EH paths);
 * printing it raw here may be intentional (opaque cookie) — confirm.
 */
2731 * ipr_show_adapter_handle - Show the adapter's resource handle for this device
2732 * @dev: device struct
2736 * number of bytes printed to buffer
2738 static ssize_t ipr_show_adapter_handle(struct device *dev, char *buf)
2740 struct scsi_device *sdev = to_scsi_device(dev);
2741 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
2742 struct ipr_resource_entry *res;
2743 unsigned long lock_flags = 0;
2744 ssize_t len = -ENXIO;
2746 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2747 res = (struct ipr_resource_entry *)sdev->hostdata;
2749 len = snprintf(buf, PAGE_SIZE, "%08X\n", res->cfgte.res_handle);
2750 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Per-device sysfs attribute "adapter_handle" (read-only show). */
2754 static struct device_attribute ipr_adapter_handle_attr = {
2756 .name = "adapter_handle",
2759 .show = ipr_show_adapter_handle
/* Device attribute table handed to the SCSI midlayer (sdev_attrs). */
2762 static struct device_attribute *ipr_dev_attrs[] = {
2763 &ipr_queue_depth_attr,
2765 &ipr_adapter_handle_attr,
/*
 * ipr_biosparam(): report a fixed 128-head / 32-sector geometry so that
 * fdisk-created partitions land on 4KB boundaries (128*32*512 = 2MB per
 * cylinder), which is best for the IOA. Cylinders = capacity / (128*32).
 */
2770 * ipr_biosparam - Return the HSC mapping
2771 * @sdev: scsi device struct
2772 * @block_device: block device pointer
2773 * @capacity: capacity of the device
2774 * @parm: Array containing returned HSC values.
2776 * This function generates the HSC parms that fdisk uses.
2777 * We want to make sure we return something that places partitions
2778 * on 4k boundaries for best performance with the IOA.
2783 static int ipr_biosparam(struct scsi_device *sdev,
2784 struct block_device *block_device,
2785 sector_t capacity, int *parm)
2787 int heads, sectors, cylinders;
/* sector_div handles 64-bit sector_t division in place. */
2792 cylinders = capacity;
2793 sector_div(cylinders, (128 * 32));
2798 parm[2] = cylinders;
/*
 * ipr_slave_destroy(): midlayer slave_destroy hook — severs the link
 * between the scsi_device and its resource entry under the host lock.
 */
2804 * ipr_slave_destroy - Unconfigure a SCSI device
2805 * @sdev: scsi device struct
2810 static void ipr_slave_destroy(struct scsi_device *sdev)
2812 struct ipr_resource_entry *res;
2813 struct ipr_ioa_cfg *ioa_cfg;
2814 unsigned long lock_flags = 0;
2816 ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
2818 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2819 res = (struct ipr_resource_entry *) sdev->hostdata;
/* Clear the back-pointer so queuecommand sees the device as gone. */
2821 sdev->hostdata = NULL;
2824 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/*
 * ipr_slave_configure(): midlayer slave_configure hook — applies
 * per-resource-type settings (RAID type, SCSI level, VSET timeout)
 * and the initial untagged queue depth.
 */
2828 * ipr_slave_configure - Configure a SCSI device
2829 * @sdev: scsi device struct
2831 * This function configures the specified scsi device.
2836 static int ipr_slave_configure(struct scsi_device *sdev)
2838 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
2839 struct ipr_resource_entry *res;
2840 unsigned long lock_flags = 0;
2842 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2843 res = sdev->hostdata;
/* Advanced-function DASD presents as a RAID member to the midlayer. */
2845 if (ipr_is_af_dasd_device(res))
2846 sdev->type = TYPE_RAID;
2847 if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res))
2848 sdev->scsi_level = 4;
/* Volume sets need a longer R/W timeout than plain disks. */
2849 if (ipr_is_vset_device(res))
2850 sdev->timeout = IPR_VSET_RW_TIMEOUT;
2852 sdev->allow_restart = 1;
2853 scsi_adjust_queue_depth(sdev, 0, res->qdepth);
2855 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/*
 * ipr_slave_alloc(): midlayer slave_alloc hook — looks up the resource
 * entry matching this device's bus/target/lun and caches it in
 * sdev->hostdata for fast access in ipr_queuecommand.
 */
2860 * ipr_slave_alloc - Prepare for commands to a device.
2861 * @sdev: scsi device struct
2863 * This function saves a pointer to the resource entry
2864 * in the scsi device struct if the device exists. We
2865 * can then use this pointer in ipr_queuecommand when
2866 * handling new commands.
2871 static int ipr_slave_alloc(struct scsi_device *sdev)
2873 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
2874 struct ipr_resource_entry *res;
2875 unsigned long lock_flags;
2877 sdev->hostdata = NULL;
2879 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
/* Linear search of in-use resources for an exact address match. */
2881 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
2882 if ((res->cfgte.res_addr.bus == sdev->channel) &&
2883 (res->cfgte.res_addr.target == sdev->id) &&
2884 (res->cfgte.res_addr.lun == sdev->lun)) {
2887 sdev->hostdata = res;
/* Fresh device: request a sync-complete before normal I/O. */
2888 res->needs_sync_complete = 1;
2893 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/*
 * ipr_eh_host_reset(): SCSI EH host-reset handler — logs, promotes a
 * pending dump request (WAIT_FOR_DUMP -> GET_DUMP) so the reset path
 * captures adapter state, then performs an abbreviated reset/reload.
 */
2899 * ipr_eh_host_reset - Reset the host adapter
2900 * @scsi_cmd: scsi command struct
2905 static int ipr_eh_host_reset(struct scsi_cmnd * scsi_cmd)
2907 struct ipr_ioa_cfg *ioa_cfg;
2911 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
2913 dev_err(&ioa_cfg->pdev->dev,
2914 "Adapter being reset as a result of error recovery.\n");
2916 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2917 ioa_cfg->sdt_state = GET_DUMP;
2919 rc = ipr_reset_reload(ioa_cfg, IPR_SHUTDOWN_ABBREV);
/*
 * ipr_eh_dev_reset(): SCSI EH device-reset handler. Redirects the done
 * functions of all pending ops for this device to the EH completion,
 * then sends a blocking IPR_RESET_DEVICE IOA command. SUCCESS iff the
 * returned IOASC carries no sense key.
 */
2926 * ipr_eh_dev_reset - Reset the device
2927 * @scsi_cmd: scsi command struct
2929 * This function issues a device reset to the affected device.
2930 * A LUN reset will be sent to the device first. If that does
2931 * not work, a target reset will be sent.
2936 static int ipr_eh_dev_reset(struct scsi_cmnd * scsi_cmd)
2938 struct ipr_cmnd *ipr_cmd;
2939 struct ipr_ioa_cfg *ioa_cfg;
2940 struct ipr_resource_entry *res;
2941 struct ipr_cmd_pkt *cmd_pkt;
2945 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
2946 res = scsi_cmd->device->hostdata;
/* Only generic SCSI devices and volume sets can be device-reset. */
2948 if (!res || (!ipr_is_gscsi(res) && !ipr_is_vset_device(res)))
2952 * If we are currently going through reset/reload, return failed. This will force the
2953 * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
2956 if (ioa_cfg->in_reset_reload)
2958 if (ioa_cfg->ioa_is_dead)
/* Re-route every pending op on this resource to the EH done path so
 * the reset's implicit aborts complete them through EH. */
2961 list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
2962 if (ipr_cmd->ioarcb.res_handle == res->cfgte.res_handle) {
2963 if (ipr_cmd->scsi_cmd)
2964 ipr_cmd->done = ipr_scsi_eh_done;
2968 res->resetting_device = 1;
2970 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
2972 ipr_cmd->ioarcb.res_handle = res->cfgte.res_handle;
2973 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
2974 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
2975 cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
2977 ipr_sdev_err(scsi_cmd->device, "Resetting device\n");
/* Sleeps until the IOA completes the reset or it times out. */
2978 ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
2980 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
2982 res->resetting_device = 0;
2984 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
2987 return (IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS);
/*
 * ipr_bus_reset_done(): completion handler for the bus reset issued by
 * ipr_abort_timeout. Reports the bus reset to the midlayer for the
 * matching resource, then either breaks the sibling link (abort still
 * outstanding) or completes the sleeping abort's done function.
 */
2991 * ipr_bus_reset_done - Op done function for bus reset.
2992 * @ipr_cmd: ipr command struct
2994 * This function is the op done function for a bus reset
2999 static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
3001 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
3002 struct ipr_resource_entry *res;
/* res_handle is compared as raw bytes — no endian conversion needed. */
3005 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3006 if (!memcmp(&res->cfgte.res_handle, &ipr_cmd->ioarcb.res_handle,
3007 sizeof(res->cfgte.res_handle))) {
3008 scsi_report_bus_reset(ioa_cfg->host, res->cfgte.res_addr.bus);
3014 * If abort has not completed, indicate the reset has, else call the
3015 * abort's done function to wake the sleeping eh thread
3017 if (ipr_cmd->u.sibling->u.sibling)
3018 ipr_cmd->u.sibling->u.sibling = NULL;
3020 ipr_cmd->u.sibling->done(ipr_cmd->u.sibling);
3022 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
/*
 * ipr_abort_timeout(): timer handler for a timed-out abort task.
 * Escalates to a bus reset (IPR_RESET_DEVICE with bus-reset type select)
 * so the resources tied up by the stuck abort get freed. The abort and
 * reset commands are cross-linked via u.sibling so ipr_bus_reset_done
 * can tell whether the abort already completed.
 */
3027 * ipr_abort_timeout - An abort task has timed out
3028 * @ipr_cmd: ipr command struct
3030 * This function handles when an abort task times out. If this
3031 * happens we issue a bus reset since we have resources tied
3032 * up that must be freed before returning to the midlayer.
3037 static void ipr_abort_timeout(struct ipr_cmnd *ipr_cmd)
3039 struct ipr_cmnd *reset_cmd;
3040 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
3041 struct ipr_cmd_pkt *cmd_pkt;
3042 unsigned long lock_flags = 0;
3045 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
/* Abort already finished, or a full reset is in progress — bail. */
3046 if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
3047 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3051 ipr_sdev_err(ipr_cmd->u.sdev, "Abort timed out. Resetting bus\n");
3052 reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
3053 ipr_cmd->u.sibling = reset_cmd;
3054 reset_cmd->u.sibling = ipr_cmd;
3055 reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
3056 cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
3057 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
3058 cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
3059 cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;
3061 ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
3062 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/*
 * ipr_cancel_op(): abort a single outstanding op. Finds the command in
 * the pending queue, then sends a blocking IPR_ABORT_TASK carrying the
 * target IOARCB's PCI address in cdb[2..5] (big-endian byte order).
 * A BUS_WAS_RESET / SYNC_REQUIRED IOASC means the abort timed out and
 * the bus-reset escalation fired.
 */
3067 * ipr_cancel_op - Cancel specified op
3068 * @scsi_cmd: scsi command struct
3070 * This function cancels specified op.
3075 static int ipr_cancel_op(struct scsi_cmnd * scsi_cmd)
3077 struct ipr_cmnd *ipr_cmd;
3078 struct ipr_ioa_cfg *ioa_cfg;
3079 struct ipr_resource_entry *res;
3080 struct ipr_cmd_pkt *cmd_pkt;
3081 u32 ioasc, ioarcb_addr;
3085 ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
3086 res = scsi_cmd->device->hostdata;
3088 if (!res || (!ipr_is_gscsi(res) && !ipr_is_vset_device(res)))
/* Locate the pending op and point its done at the EH completion. */
3091 list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
3092 if (ipr_cmd->scsi_cmd == scsi_cmd) {
3093 ipr_cmd->done = ipr_scsi_eh_done;
3102 ioarcb_addr = be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr);
3104 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
3105 ipr_cmd->ioarcb.res_handle = res->cfgte.res_handle;
3106 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
3107 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
3108 cmd_pkt->cdb[0] = IPR_ABORT_TASK;
/* Encode the IOARCB address of the op being aborted, MSB first. */
3109 cmd_pkt->cdb[2] = (ioarcb_addr >> 24) & 0xff;
3110 cmd_pkt->cdb[3] = (ioarcb_addr >> 16) & 0xff;
3111 cmd_pkt->cdb[4] = (ioarcb_addr >> 8) & 0xff;
3112 cmd_pkt->cdb[5] = ioarcb_addr & 0xff;
3113 ipr_cmd->u.sdev = scsi_cmd->device;
3115 ipr_sdev_err(scsi_cmd->device, "Aborting command: %02X\n", scsi_cmd->cmnd[0]);
3116 ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_ABORT_TASK_TIMEOUT);
3117 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3120 * If the abort task timed out and we sent a bus reset, we will get
3121 * one the following responses to the abort
3123 if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
3128 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3129 res->needs_sync_complete = 1;
3132 return (IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS);
/*
 * ipr_eh_abort(): SCSI EH abort handler — guards against reset-in-
 * progress / dead adapter / already-destroyed device, then delegates
 * to ipr_cancel_op.
 */
3136 * ipr_eh_abort - Abort a single op
3137 * @scsi_cmd: scsi command struct
3142 static int ipr_eh_abort(struct scsi_cmnd * scsi_cmd)
3144 struct ipr_ioa_cfg *ioa_cfg;
3147 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
3149 /* If we are currently going through reset/reload, return failed. This will force the
3150 mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
3151 reset to complete */
3152 if (ioa_cfg->in_reset_reload)
3154 if (ioa_cfg->ioa_is_dead)
3156 if (!scsi_cmd->device->hostdata)
3160 return ipr_cancel_op(scsi_cmd);
/*
 * ipr_handle_other_interrupt(): handle non-HRRQ interrupt causes.
 * A transition-to-operational interrupt resumes the in-flight reset
 * job; anything else is treated as a fatal IOA error — record unit
 * check, request a dump if one is pending, and initiate a reset.
 */
3164 * ipr_handle_other_interrupt - Handle "other" interrupts
3165 * @ioa_cfg: ioa config struct
3166 * @int_reg: interrupt register
3169 * IRQ_NONE / IRQ_HANDLED
3171 static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
3172 volatile u32 int_reg)
3174 irqreturn_t rc = IRQ_HANDLED;
3176 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
3177 /* Mask the interrupt */
3178 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
3180 /* Clear the interrupt */
3181 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.clr_interrupt_reg);
/* Read back to flush the posted write before continuing. */
3182 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
/* Adapter is now operational: resume the reset state machine. */
3184 list_del(&ioa_cfg->reset_cmd->queue);
3185 del_timer(&ioa_cfg->reset_cmd->timer);
3186 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
3188 if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
3189 ioa_cfg->ioa_unit_checked = 1;
3191 dev_err(&ioa_cfg->pdev->dev,
3192 "Permanent IOA failure. 0x%08X\n", int_reg);
3194 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
3195 ioa_cfg->sdt_state = GET_DUMP;
3197 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
3198 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
/*
 * ipr_isr(): interrupt service routine. Drains the host request
 * response queue (HRRQ): each entry whose toggle bit matches ours is a
 * completed command; look it up by response handle, run its done
 * function, and advance (wrapping flips the toggle bit). Falls through
 * to ipr_handle_other_interrupt for non-HRRQ causes.
 */
3205 * ipr_isr - Interrupt service routine
3207 * @devp: pointer to ioa config struct
3208 * @regs: pt_regs struct
3211 * IRQ_NONE / IRQ_HANDLED
3213 static irqreturn_t ipr_isr(int irq, void *devp, struct pt_regs *regs)
3215 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
3216 unsigned long lock_flags = 0;
3217 volatile u32 int_reg, int_mask_reg;
3220 struct ipr_cmnd *ipr_cmd;
3221 irqreturn_t rc = IRQ_NONE;
3223 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3225 /* If interrupts are disabled, ignore the interrupt */
3226 if (!ioa_cfg->allow_interrupts) {
3227 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3231 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
3232 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
3234 /* If an interrupt on the adapter did not occur, ignore it */
3235 if (unlikely((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0)) {
3236 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Consume HRRQ entries while the toggle bit says they are ours. */
3243 while ((be32_to_cpu(*ioa_cfg->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
3244 ioa_cfg->toggle_bit) {
3246 cmd_index = (be32_to_cpu(*ioa_cfg->hrrq_curr) &
3247 IPR_HRRQ_REQ_RESP_HANDLE_MASK) >> IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
/* Bogus handle from the IOA is unrecoverable: dump + reset. */
3249 if (unlikely(cmd_index >= IPR_NUM_CMD_BLKS)) {
3250 ioa_cfg->errors_logged++;
3251 dev_err(&ioa_cfg->pdev->dev, "Invalid response handle from IOA\n");
3253 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
3254 ioa_cfg->sdt_state = GET_DUMP;
3256 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3257 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3261 ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
3263 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3265 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
3267 list_del(&ipr_cmd->queue);
3268 del_timer(&ipr_cmd->timer);
3269 ipr_cmd->done(ipr_cmd);
/* Advance the HRRQ cursor; wrap flips the expected toggle bit. */
3273 if (ioa_cfg->hrrq_curr < ioa_cfg->hrrq_end) {
3274 ioa_cfg->hrrq_curr++;
3276 ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
3277 ioa_cfg->toggle_bit ^= 1u;
3281 if (ipr_cmd != NULL) {
3282 /* Clear the PCI interrupt */
3283 writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg);
3284 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
3289 if (unlikely(rc == IRQ_NONE))
3290 rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
3292 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/*
 * ipr_build_ioadl(): DMA-map the midlayer buffer and build the IOA data
 * descriptor list (IOADL). Two paths: scatter/gather (use_sg set) maps
 * the sglist and emits one descriptor per segment; otherwise a single
 * pci_map_single covers the whole buffer. Direction selects the
 * read/write length and IOADL-length fields in the IOARCB; the last
 * descriptor is tagged IPR_IOADL_FLAGS_LAST.
 */
3297 * ipr_build_ioadl - Build a scatter/gather list and map the buffer
3298 * @ioa_cfg: ioa config struct
3299 * @ipr_cmd: ipr command struct
3302 * 0 on success / -1 on failure
3304 static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
3305 struct ipr_cmnd *ipr_cmd)
3308 struct scatterlist *sglist;
3310 u32 ioadl_flags = 0;
3311 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
3312 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3313 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
3315 length = scsi_cmd->request_bufflen;
/* --- scatter/gather path --- */
3320 if (scsi_cmd->use_sg) {
3321 ipr_cmd->dma_use_sg = pci_map_sg(ioa_cfg->pdev,
3322 scsi_cmd->request_buffer,
3324 scsi_cmd->sc_data_direction);
3326 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
3327 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
3328 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3329 ioarcb->write_data_transfer_length = cpu_to_be32(length);
3330 ioarcb->write_ioadl_len =
3331 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
3332 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
3333 ioadl_flags = IPR_IOADL_FLAGS_READ;
3334 ioarcb->read_data_transfer_length = cpu_to_be32(length);
3335 ioarcb->read_ioadl_len =
3336 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
3339 sglist = scsi_cmd->request_buffer;
/* One IOADL descriptor per mapped SG segment (big-endian fields). */
3341 for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3342 ioadl[i].flags_and_data_len =
3343 cpu_to_be32(ioadl_flags | sg_dma_len(&sglist[i]));
3345 cpu_to_be32(sg_dma_address(&sglist[i]));
/* Mark the final descriptor so the IOA knows where the list ends. */
3348 if (likely(ipr_cmd->dma_use_sg)) {
3349 ioadl[i-1].flags_and_data_len |=
3350 cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3353 dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
/* --- single-buffer path --- */
3355 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
3356 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
3357 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3358 ioarcb->write_data_transfer_length = cpu_to_be32(length);
3359 ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
3360 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
3361 ioadl_flags = IPR_IOADL_FLAGS_READ;
3362 ioarcb->read_data_transfer_length = cpu_to_be32(length);
3363 ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
3366 ipr_cmd->dma_handle = pci_map_single(ioa_cfg->pdev,
3367 scsi_cmd->request_buffer, length,
3368 scsi_cmd->sc_data_direction);
3370 if (likely(!pci_dma_mapping_error(ipr_cmd->dma_handle))) {
3371 ipr_cmd->dma_use_sg = 1;
3372 ioadl[0].flags_and_data_len =
3373 cpu_to_be32(ioadl_flags | length | IPR_IOADL_FLAGS_LAST);
3374 ioadl[0].address = cpu_to_be32(ipr_cmd->dma_handle);
3377 dev_err(&ioa_cfg->pdev->dev, "pci_map_single failed!\n");
/*
 * ipr_get_task_attributes(): map the midlayer's SPI queue-tag message
 * (simple / head-of-queue / ordered) onto the corresponding IPR task
 * attribute flags; untagged if no tag message is generated.
 */
3384 * ipr_get_task_attributes - Translate SPI Q-Tag to task attributes
3385 * @scsi_cmd: scsi command struct
3390 static u8 ipr_get_task_attributes(struct scsi_cmnd *scsi_cmd)
3393 u8 rc = IPR_FLAGS_LO_UNTAGGED_TASK;
3395 if (scsi_populate_tag_msg(scsi_cmd, tag)) {
3397 case MSG_SIMPLE_TAG:
3398 rc = IPR_FLAGS_LO_SIMPLE_TASK;
/* MSG_HEAD_TAG case label elided in this extract. */
3401 rc = IPR_FLAGS_LO_HEAD_OF_Q_TASK;
3403 case MSG_ORDERED_TAG:
3404 rc = IPR_FLAGS_LO_ORDERED_TASK;
/*
 * ipr_erp_done(): final step of device ERP. If the request sense
 * itself failed, flag DID_ERROR; otherwise copy the gathered sense
 * data into the scsi_cmd. Either way, unmap DMA, recycle the command
 * block, and complete the op to the midlayer.
 */
3413 * ipr_erp_done - Process completion of ERP for a device
3414 * @ipr_cmd: ipr command struct
3416 * This function copies the sense buffer into the scsi_cmd
3417 * struct and pushes the scsi_done function.
3422 static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
3424 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
3425 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
3426 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
3427 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3429 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
3430 scsi_cmd->result |= (DID_ERROR << 16);
3431 ipr_sdev_err(scsi_cmd->device,
3432 "Request Sense failed with IOASC: 0x%08X\n", ioasc);
3434 memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
3435 SCSI_SENSE_BUFFERSIZE);
/* Device went through ERP: require a sync-complete before new I/O. */
3439 res->needs_sync_complete = 1;
3440 ipr_unmap_sglist(ioa_cfg, ipr_cmd);
3441 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3442 scsi_cmd->scsi_done(scsi_cmd);
/*
 * ipr_reinit_ipr_cmnd_for_erp(): scrub an in-flight command block so it
 * can be reused for an ERP request — zero the command packet and all
 * transfer-length / IOADL-length / residual fields.
 */
3446 * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
3447 * @ipr_cmd: ipr command struct
3452 static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
3454 struct ipr_ioarcb *ioarcb;
3455 struct ipr_ioasa *ioasa;
3457 ioarcb = &ipr_cmd->ioarcb;
3458 ioasa = &ipr_cmd->ioasa;
3460 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
3461 ioarcb->write_data_transfer_length = 0;
3462 ioarcb->read_data_transfer_length = 0;
3463 ioarcb->write_ioadl_len = 0;
3464 ioarcb->read_ioadl_len = 0;
3466 ioasa->residual_data_len = 0;
/*
 * ipr_erp_request_sense(): second ERP stage — reuse the command block
 * to issue REQUEST SENSE into the pre-allocated DMA sense buffer, then
 * continue at ipr_erp_done. SYNC_OVERRIDE lets it bypass a pending
 * sync-complete; NO_ULEN_CHK tolerates short sense data.
 */
3470 * ipr_erp_request_sense - Send request sense to a device
3471 * @ipr_cmd: ipr command struct
3473 * This function sends a request sense to a device as a result
3474 * of a check condition.
3479 static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
3481 struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
3483 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
3485 cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
3486 cmd_pkt->cdb[0] = REQUEST_SENSE;
3487 cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
3488 cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
3489 cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
3490 cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);
/* Single read descriptor pointing at the command's DMA sense buffer. */
3492 ipr_cmd->ioadl[0].flags_and_data_len =
3493 cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | SCSI_SENSE_BUFFERSIZE);
3494 ipr_cmd->ioadl[0].address =
3495 cpu_to_be32(ipr_cmd->sense_buffer_dma);
3497 ipr_cmd->ioarcb.read_ioadl_len =
3498 cpu_to_be32(sizeof(struct ipr_ioadl_desc));
3499 ipr_cmd->ioarcb.read_data_transfer_length =
3500 cpu_to_be32(SCSI_SENSE_BUFFERSIZE);
/* Driver-side timeout is double the IOA-side timeout above. */
3502 ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
3503 IPR_REQUEST_SENSE_TIMEOUT * 2);
/*
 * ipr_erp_cancel_all(): first ERP stage for a check condition — send
 * IPR_CANCEL_ALL_REQUESTS to recover ops dropped by QERR=1 devices,
 * then chain to ipr_erp_request_sense.
 */
3507 * ipr_erp_cancel_all - Send cancel all to a device
3508 * @ipr_cmd: ipr command struct
3510 * This function sends a cancel all to a device to clear the
3511 * queue. If we are running TCQ on the device, QERR is set to 1,
3512 * which means all outstanding ops have been dropped on the floor.
3513 * Cancel all will return them to us.
3518 static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
3520 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
3521 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
3522 struct ipr_cmd_pkt *cmd_pkt;
3526 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
3528 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
3529 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
3530 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
3532 ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
3533 IPR_CANCEL_ALL_TIMEOUT);
/*
 * ipr_dump_ioasa(): log the IOASA for a failed op, subject to the
 * configured log level (skip entirely below default level; at
 * sub-maximum level skip errors the IOA already logged or that the
 * error table marks as not loggable). For GPDD failures also decode
 * the device end state and bus phase.
 */
3537 * ipr_dump_ioasa - Dump contents of IOASA
3538 * @ioa_cfg: ioa config struct
3539 * @ipr_cmd: ipr command struct
3541 * This function is invoked by the interrupt handler when ops
3542 * fail. It will log the IOASA if appropriate. Only called
3548 static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
3549 struct ipr_cmnd *ipr_cmd)
3554 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
3555 u32 *ioasa_data = (u32 *)ioasa;
3558 ioasc = be32_to_cpu(ioasa->ioasc) & IPR_IOASC_IOASC_MASK;
3563 if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
3566 error_index = ipr_get_error(ioasc);
3568 if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
3569 /* Don't log an error if the IOA already logged one */
3570 if (ioasa->ilid != 0)
3573 if (ipr_error_table[error_index].log_ioasa == 0)
3577 ipr_sdev_err(ipr_cmd->scsi_cmd->device, "%s\n",
3578 ipr_error_table[error_index].error);
3580 if ((ioasa->u.gpdd.end_state <= ARRAY_SIZE(ipr_gpdd_dev_end_states)) &&
3581 (ioasa->u.gpdd.bus_phase <= ARRAY_SIZE(ipr_gpdd_dev_bus_phases))) {
3582 ipr_sdev_err(ipr_cmd->scsi_cmd->device,
3583 "Device End state: %s Phase: %s\n",
3584 ipr_gpdd_dev_end_states[ioasa->u.gpdd.end_state],
3585 ipr_gpdd_dev_bus_phases[ioasa->u.gpdd.bus_phase]);
/* Clamp the dump length to the size of our IOASA struct. */
3588 if (sizeof(struct ipr_ioasa) < be16_to_cpu(ioasa->ret_stat_len))
3589 data_len = sizeof(struct ipr_ioasa);
3591 data_len = be16_to_cpu(ioasa->ret_stat_len);
3593 ipr_err("IOASA Dump:\n");
/* NOTE(review): bound is data_len / 4 (word count) but the step is
 * i += 4 while each iteration prints 4 words — this appears to dump
 * only a quarter of the IOASA; later upstream versions changed the
 * bound to data_len / 16 semantics. Confirm against current tree. */
3595 for (i = 0; i < data_len / 4; i += 4) {
3596 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
3597 be32_to_cpu(ioasa_data[i]),
3598 be32_to_cpu(ioasa_data[i+1]),
3599 be32_to_cpu(ioasa_data[i+2]),
3600 be32_to_cpu(ioasa_data[i+3]));
/*
 * ipr_gen_sense(): synthesize SCSI sense data in the scsi_cmd's sense
 * buffer from the IOASA. Volume sets with a 64-bit failing LBA get
 * descriptor-format sense (0x72) carrying the full 8-byte LBA; all
 * other cases get fixed-format sense (0x70) with key/ASC/ASCQ from
 * the IOASC, plus field-pointer or failing-LBA information where the
 * IOASC provides it. Driver-generated IOASCs are skipped.
 */
3605 * ipr_gen_sense - Generate SCSI sense data from an IOASA
3607 * @sense_buf: sense data buffer
3612 static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
3615 u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
3616 struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
3617 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
3618 u32 ioasc = be32_to_cpu(ioasa->ioasc);
3620 memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
3622 if (ioasc >= IPR_FIRST_DRIVER_IOASC)
3625 ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;
/* 64-bit failing LBA on a volume set: descriptor-format sense. */
3627 if (ipr_is_vset_device(res) &&
3628 ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
3629 ioasa->u.vset.failing_lba_hi != 0) {
3630 sense_buf[0] = 0x72;
3631 sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
3632 sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
3633 sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);
3637 sense_buf[9] = 0x0A;
3638 sense_buf[10] = 0x80;
/* High 32 bits of the failing LBA, big-endian into bytes 12..15. */
3640 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);
3642 sense_buf[12] = (failing_lba & 0xff000000) >> 24;
3643 sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
3644 sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
3645 sense_buf[15] = failing_lba & 0x000000ff;
/* Low 32 bits into bytes 16..19. */
3647 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
3649 sense_buf[16] = (failing_lba & 0xff000000) >> 24;
3650 sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
3651 sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
3652 sense_buf[19] = failing_lba & 0x000000ff;
/* Fixed-format sense for everything else. */
3654 sense_buf[0] = 0x70;
3655 sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
3656 sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
3657 sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);
3659 /* Illegal request */
3660 if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
3661 (be32_to_cpu(ioasa->ioasc_specific) & IPR_FIELD_POINTER_VALID) ) {
3662 sense_buf[7] = 10; /* additional length */
3664 /* IOARCB was in error */
3665 if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
3666 sense_buf[15] = 0xC0;
3667 else /* Parameter data was invalid */
3668 sense_buf[15] = 0x80;
/* Field pointer bytes (elided assignment targets in this extract). */
3671 ((IPR_FIELD_POINTER_MASK &
3672 be32_to_cpu(ioasa->ioasc_specific)) >> 8) & 0xff;
3674 (IPR_FIELD_POINTER_MASK &
3675 be32_to_cpu(ioasa->ioasc_specific)) & 0xff;
3677 if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
3678 if (ipr_is_vset_device(res))
3679 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
3681 failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);
3683 sense_buf[0] |= 0x80; /* Or in the Valid bit */
3684 sense_buf[3] = (failing_lba & 0xff000000) >> 24;
3685 sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
3686 sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
3687 sense_buf[6] = failing_lba & 0x000000ff;
3690 sense_buf[7] = 6; /* additional length */
/*
 * ipr_erp_start(): decide how to handle a failed SCSI op. Generates
 * sense data, then maps the IOASC onto a midlayer result (and, for a
 * check condition on a real device, kicks off the cancel-all ->
 * request-sense ERP chain instead of completing immediately).
 */
3696 * ipr_erp_start - Process an error response for a SCSI op
3697 * @ioa_cfg: ioa config struct
3698 * @ipr_cmd: ipr command struct
3700 * This function determines whether or not to initiate ERP
3701 * on the affected device.
3706 static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
3707 struct ipr_cmnd *ipr_cmd)
3709 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
3710 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
3711 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
/* No resource entry: complete through the EH done path (guard elided). */
3714 ipr_scsi_eh_done(ipr_cmd);
3718 if (ipr_is_gscsi(res))
3719 ipr_dump_ioasa(ioa_cfg, ipr_cmd);
3721 ipr_gen_sense(ipr_cmd);
3723 switch (ioasc & IPR_IOASC_IOASC_MASK) {
3724 case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
3725 scsi_cmd->result |= (DID_ERROR << 16);
3727 case IPR_IOASC_IR_RESOURCE_HANDLE:
3728 scsi_cmd->result |= (DID_NO_CONNECT << 16);
3730 case IPR_IOASC_HW_SEL_TIMEOUT:
3731 scsi_cmd->result |= (DID_NO_CONNECT << 16);
3732 res->needs_sync_complete = 1;
3734 case IPR_IOASC_SYNC_REQUIRED:
3736 res->needs_sync_complete = 1;
3737 scsi_cmd->result |= (DID_IMM_RETRY << 16);
3739 case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
3740 scsi_cmd->result |= (DID_PASSTHROUGH << 16);
3742 case IPR_IOASC_BUS_WAS_RESET:
3743 case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
3745 * Report the bus reset and ask for a retry. The device
3746 * will give CC/UA the next command.
3748 if (!res->resetting_device)
3749 scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
3750 scsi_cmd->result |= (DID_ERROR << 16);
3751 res->needs_sync_complete = 1;
3753 case IPR_IOASC_HW_DEV_BUS_STATUS:
3754 scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
/* Check condition: start ERP instead of completing now. */
3755 if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
3756 ipr_erp_cancel_all(ipr_cmd);
3760 case IPR_IOASC_NR_INIT_CMD_REQUIRED:
/* Default: generic error; non-VSET devices also need a sync. */
3763 scsi_cmd->result |= (DID_ERROR << 16);
3764 if (!ipr_is_vset_device(res))
3765 res->needs_sync_complete = 1;
3769 ipr_unmap_sglist(ioa_cfg, ipr_cmd);
3770 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3771 scsi_cmd->scsi_done(scsi_cmd);
3775 * ipr_scsi_done - mid-layer done function
3776 * @ipr_cmd: ipr command struct
3778 * This function is invoked by the interrupt handler for
3779 * ops generated by the SCSI mid-layer
3784 static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
3786 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
3787 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
3788 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3790 scsi_cmd->resid = be32_to_cpu(ipr_cmd->ioasa.residual_data_len);
3792 if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
3793 ipr_unmap_sglist(ioa_cfg, ipr_cmd);
3794 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3795 scsi_cmd->scsi_done(scsi_cmd);
3797 ipr_erp_start(ioa_cfg, ipr_cmd);
3801 * ipr_save_ioafp_mode_select - Save adapters mode select data
3802 * @ioa_cfg: ioa config struct
3803 * @scsi_cmd: scsi command struct
3805 * This function saves mode select data for the adapter to
3806 * use following an adapter reset.
3809 * 0 on success / SCSI_MLQUEUE_HOST_BUSY on failure
3811 static int ipr_save_ioafp_mode_select(struct ipr_ioa_cfg *ioa_cfg,
3812 struct scsi_cmnd *scsi_cmd)
3814 if (!ioa_cfg->saved_mode_pages) {
3815 ioa_cfg->saved_mode_pages = kmalloc(sizeof(struct ipr_mode_pages),
3817 if (!ioa_cfg->saved_mode_pages) {
3818 dev_err(&ioa_cfg->pdev->dev,
3819 "IOA mode select buffer allocation failed\n");
3820 return SCSI_MLQUEUE_HOST_BUSY;
3824 memcpy(ioa_cfg->saved_mode_pages, scsi_cmd->buffer, scsi_cmd->cmnd[4]);
3825 ioa_cfg->saved_mode_page_len = scsi_cmd->cmnd[4];
/**
 * ipr_queuecommand - Queue a mid-layer request
 * @scsi_cmd:	scsi command struct
 * @done:	done function
 *
 * This function queues a request generated by the mid-layer.
 *
 * Return value:
 *	0 on success
 *	SCSI_MLQUEUE_DEVICE_BUSY if device is busy
 *	SCSI_MLQUEUE_HOST_BUSY if host is busy
 **/
static int ipr_queuecommand(struct scsi_cmnd *scsi_cmd,
			    void (*done) (struct scsi_cmnd *))
	struct ipr_ioa_cfg *ioa_cfg;
	struct ipr_resource_entry *res;
	struct ipr_ioarcb *ioarcb;
	struct ipr_cmnd *ipr_cmd;

	scsi_cmd->scsi_done = done;
	ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
	res = scsi_cmd->device->hostdata;
	scsi_cmd->result = (DID_OK << 16);

	/*
	 * We are currently blocking all devices due to a host reset
	 * We have told the host to stop giving us new requests, but
	 * ERP ops don't count. FIXME
	 */
	if (unlikely(!ioa_cfg->allow_cmds))
		return SCSI_MLQUEUE_HOST_BUSY;

	/*
	 * FIXME - Create scsi_set_host_offline interface
	 *  and the ioa_is_dead check can be removed
	 */
	if (unlikely(ioa_cfg->ioa_is_dead || !res)) {
		/* Dead adapter or unknown device: complete immediately */
		memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
		scsi_cmd->result = (DID_NO_CONNECT << 16);
		scsi_cmd->scsi_done(scsi_cmd);

	/* Claim a command block and build the request control block */
	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
	ioarcb = &ipr_cmd->ioarcb;
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);

	memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
	ipr_cmd->scsi_cmd = scsi_cmd;
	ioarcb->res_handle = res->cfgte.res_handle;
	ipr_cmd->done = ipr_scsi_done;
	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_PHYS_LOC(res->cfgte.res_addr));

	if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
		/* No underflow means underlength checking can be skipped */
		if (scsi_cmd->underflow == 0)
			ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;

		/* First op to the device after an ERP event */
		if (res->needs_sync_complete) {
			ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
			res->needs_sync_complete = 0;

		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
		ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
		ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
		ioarcb->cmd_pkt.flags_lo |= ipr_get_task_attributes(scsi_cmd);

	/* Vendor-specific opcodes (>= 0xC0) to non-GSCSI are IOA commands */
	if (!ipr_is_gscsi(res) && scsi_cmd->cmnd[0] >= 0xC0)
		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;

	/* Capture adapter MODE SELECT data for replay after a reset */
	if (ipr_is_ioa_resource(res) && scsi_cmd->cmnd[0] == MODE_SELECT)
		rc = ipr_save_ioafp_mode_select(ioa_cfg, scsi_cmd);

	if (likely(rc == 0))
		rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);

	if (likely(rc == 0)) {
		/* Hand the op to the adapter by writing its IOARCB address */
		writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
		       ioa_cfg->regs.ioarrin_reg);
		/* Could not build the op: recycle the block, ask for retry */
		list_move_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
		return SCSI_MLQUEUE_HOST_BUSY;
3921 * ipr_info - Get information about the card/driver
3922 * @scsi_host: scsi host struct
3925 * pointer to buffer with description string
3927 static const char * ipr_ioa_info(struct Scsi_Host *host)
3929 static char buffer[512];
3930 struct ipr_ioa_cfg *ioa_cfg;
3931 unsigned long lock_flags = 0;
3933 ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;
3935 spin_lock_irqsave(host->host_lock, lock_flags);
3936 sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
3937 spin_unlock_irqrestore(host->host_lock, lock_flags);
/* SCSI mid-layer host template for this driver.
 * NOTE(review): some initializer members (e.g. .name) are not visible in
 * this excerpt -- confirm against the full source. */
static struct scsi_host_template driver_template = {
	.module = THIS_MODULE,
	.info = ipr_ioa_info,			/* adapter description string */
	.queuecommand = ipr_queuecommand,	/* mid-layer request entry point */
	.eh_abort_handler = ipr_eh_abort,
	.eh_device_reset_handler = ipr_eh_dev_reset,
	.eh_host_reset_handler = ipr_eh_host_reset,
	.slave_alloc = ipr_slave_alloc,
	.slave_configure = ipr_slave_configure,
	.slave_destroy = ipr_slave_destroy,
	.bios_param = ipr_biosparam,
	.can_queue = IPR_MAX_COMMANDS,		/* max outstanding commands */
	.sg_tablesize = IPR_MAX_SGLIST,
	.max_sectors = IPR_MAX_SECTORS,
	.cmd_per_lun = IPR_MAX_CMD_PER_LUN,
	.use_clustering = ENABLE_CLUSTERING,
	.shost_attrs = ipr_ioa_attrs,		/* sysfs adapter attributes */
	.sdev_attrs = ipr_dev_attrs,		/* sysfs device attributes */
	.proc_name = IPR_NAME
#ifdef CONFIG_PPC_PSERIES
/* PVR values of processors on which early Gemstone adapters misbehave.
 * NOTE(review): the array contents are not visible in this excerpt. */
static const u16 ipr_blocked_processors[] = {
/**
 * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
 * @ioa_cfg:	ioa cfg struct
 *
 * Adapters that use Gemstone revision < 3.1 do not work reliably on
 * certain pSeries hardware. This function determines if the given
 * adapter is in one of these configurations or not.
 *
 * Return value:
 *	1 if adapter is not supported / 0 if adapter is supported
 **/
static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
	/* Only type 0x5702 adapters are potentially affected */
	if (ioa_cfg->type == 0x5702) {
		if (pci_read_config_byte(ioa_cfg->pdev, PCI_REVISION_ID,
					 &rev_id) == PCIBIOS_SUCCESSFUL) {
			/* Blocked only when running on a listed processor */
			for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++){
				if (__is_processor(ipr_blocked_processors[i]))
/* Non-pSeries builds: every adapter is considered supported */
#define ipr_invalid_adapter(ioa_cfg) 0
4011 * ipr_ioa_bringdown_done - IOA bring down completion.
4012 * @ipr_cmd: ipr command struct
4014 * This function processes the completion of an adapter bring down.
4015 * It wakes any reset sleepers.
4020 static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
4022 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4025 ioa_cfg->in_reset_reload = 0;
4026 ioa_cfg->reset_retries = 0;
4027 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4028 wake_up_all(&ioa_cfg->reset_wait_q);
4030 spin_unlock_irq(ioa_cfg->host->host_lock);
4031 scsi_unblock_requests(ioa_cfg->host);
4032 spin_lock_irq(ioa_cfg->host->host_lock);
4035 return IPR_RC_JOB_RETURN;
/**
 * ipr_ioa_reset_done - IOA reset completion.
 * @ipr_cmd:	ipr command struct
 *
 * This function processes the completion of an adapter reset.
 * It schedules any necessary mid-layer add/removes and
 * wakes any reset sleepers.
 *
 * Return value:
 *	IPR_RC_JOB_RETURN
 **/
static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_resource_entry *res;
	struct ipr_hostrcb *hostrcb, *temp;

	ioa_cfg->in_reset_reload = 0;
	ioa_cfg->allow_cmds = 1;
	ioa_cfg->reset_cmd = NULL;

	/* Kick the worker if any device needs a mid-layer add/remove */
	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if (ioa_cfg->allow_ml_add_del && (res->add_to_ml || res->del_from_ml)) {
			schedule_work(&ioa_cfg->work_q);

	/* Re-post the free host-controlled async messages: the first
	 * IPR_NUM_LOG_HCAMS as log-data HCAMs, the rest as config-change */
	list_for_each_entry_safe(hostrcb, temp, &ioa_cfg->hostrcb_free_q, queue) {
		list_del(&hostrcb->queue);
		if (i++ < IPR_NUM_LOG_HCAMS)
			ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
			ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);

	dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");

	ioa_cfg->reset_retries = 0;
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
	wake_up_all(&ioa_cfg->reset_wait_q);

	/* Drop the host lock while poking the mid-layer */
	spin_unlock_irq(ioa_cfg->host->host_lock);
	scsi_unblock_requests(ioa_cfg->host);
	spin_lock_irq(ioa_cfg->host->host_lock);

	/* A new reset may have started while the lock was dropped */
	if (!ioa_cfg->allow_cmds)
		scsi_block_requests(ioa_cfg->host);

	return IPR_RC_JOB_RETURN;
4095 * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
4096 * @supported_dev: supported device struct
4097 * @vpids: vendor product id struct
4102 static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
4103 struct ipr_std_inq_vpids *vpids)
4105 memset(supported_dev, 0, sizeof(struct ipr_supported_device));
4106 memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
4107 supported_dev->num_records = 1;
4108 supported_dev->data_length =
4109 cpu_to_be16(sizeof(struct ipr_supported_device));
4110 supported_dev->reserved = 0;
/**
 * ipr_set_supported_devs - Send Set Supported Devices for a device
 * @ipr_cmd:	ipr command struct
 *
 * This function send a Set Supported Devices to the adapter -- one
 * AF DASD resource per job-step invocation, resuming the walk of the
 * used resource queue where the previous invocation stopped.
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_resource_entry *res = ipr_cmd->u.res;

	/* Assume this is the last device; overridden below if not */
	ipr_cmd->job_step = ipr_ioa_reset_done;

	list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
		/* Only AF DASD devices get a Set Supported Devices */
		if (!ipr_is_af_dasd_device(res))

		ipr_cmd->u.res = res;
		ipr_set_sup_dev_dflt(supp_dev, &res->cfgte.std_inq_data.vpids);

		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;

		/* Transfer length goes in CDB bytes 7-8 (big-endian) */
		ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
		ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
		ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;

		/* Single outbound IOADL element describing the buffer */
		ioadl->flags_and_data_len = cpu_to_be32(IPR_IOADL_FLAGS_WRITE_LAST |
							sizeof(struct ipr_supported_device));
		ioadl->address = cpu_to_be32(ioa_cfg->vpd_cbs_dma +
					     offsetof(struct ipr_misc_cbs, supp_dev));
		ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
		ioarcb->write_data_transfer_length =
			cpu_to_be32(sizeof(struct ipr_supported_device));

		ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
			   IPR_SET_SUP_DEVICE_TIMEOUT);

		/* Come back here for the next AF DASD device */
		ipr_cmd->job_step = ipr_set_supported_devs;
		return IPR_RC_JOB_RETURN;

	return IPR_RC_JOB_CONTINUE;
/**
 * ipr_get_mode_page - Locate specified mode page
 * @mode_pages:	mode page buffer
 * @page_code:	page code to find
 * @len:	minimum required length for mode page
 *
 * Return value:
 *	pointer to mode page / NULL on failure
 **/
static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
			       u32 page_code, u32 len)
	struct ipr_mode_page_hdr *mode_hdr;

	if (!mode_pages || (mode_pages->hdr.length == 0))

	/* Usable data length: total minus the 4-byte mode parameter
	 * header and any block descriptors */
	length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
	mode_hdr = (struct ipr_mode_page_hdr *)
		(mode_pages->data + mode_pages->hdr.block_desc_len);

	if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
		/* Matching page: accept it only if long enough for caller */
		if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))

	/* Step to the next mode page in the buffer */
	page_length = (sizeof(struct ipr_mode_page_hdr) +
		       mode_hdr->page_length);
	length -= page_length;
	mode_hdr = (struct ipr_mode_page_hdr *)
		((unsigned long)mode_hdr + page_length);
4205 * ipr_check_term_power - Check for term power errors
4206 * @ioa_cfg: ioa config struct
4207 * @mode_pages: IOAFP mode pages buffer
4209 * Check the IOAFP's mode page 28 for term power errors
4214 static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
4215 struct ipr_mode_pages *mode_pages)
4219 struct ipr_dev_bus_entry *bus;
4220 struct ipr_mode_page28 *mode_page;
4222 mode_page = ipr_get_mode_page(mode_pages, 0x28,
4223 sizeof(struct ipr_mode_page28));
4225 entry_length = mode_page->entry_length;
4227 bus = mode_page->bus;
4229 for (i = 0; i < mode_page->num_entries; i++) {
4230 if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
4231 dev_err(&ioa_cfg->pdev->dev,
4232 "Term power is absent on scsi bus %d\n",
4236 bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
4241 * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
4242 * @ioa_cfg: ioa config struct
4244 * Looks through the config table checking for SES devices. If
4245 * the SES device is in the SES table indicating a maximum SCSI
4246 * bus speed, the speed is limited for the bus.
4251 static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
4256 for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
4257 max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
4258 ioa_cfg->bus_attr[i].bus_width);
4260 if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
4261 ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
4266 * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
4267 * @ioa_cfg: ioa config struct
4268 * @mode_pages: mode page 28 buffer
4270 * Updates mode page 28 based on driver configuration
4275 static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
4276 struct ipr_mode_pages *mode_pages)
4278 int i, entry_length;
4279 struct ipr_dev_bus_entry *bus;
4280 struct ipr_bus_attributes *bus_attr;
4281 struct ipr_mode_page28 *mode_page;
4283 mode_page = ipr_get_mode_page(mode_pages, 0x28,
4284 sizeof(struct ipr_mode_page28));
4286 entry_length = mode_page->entry_length;
4288 /* Loop for each device bus entry */
4289 for (i = 0, bus = mode_page->bus;
4290 i < mode_page->num_entries;
4291 i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
4292 if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
4293 dev_err(&ioa_cfg->pdev->dev,
4294 "Invalid resource address reported: 0x%08X\n",
4295 IPR_GET_PHYS_LOC(bus->res_addr));
4299 bus_attr = &ioa_cfg->bus_attr[i];
4300 bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
4301 bus->bus_width = bus_attr->bus_width;
4302 bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
4303 bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
4304 if (bus_attr->qas_enabled)
4305 bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
4307 bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
4312 * ipr_build_mode_select - Build a mode select command
4313 * @ipr_cmd: ipr command struct
4314 * @res_handle: resource handle to send command to
4315 * @parm: Byte 2 of Mode Sense command
4316 * @dma_addr: DMA buffer address
4317 * @xfer_len: data transfer length
4322 static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
4323 u32 res_handle, u8 parm, u32 dma_addr,
4326 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
4327 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4329 ioarcb->res_handle = res_handle;
4330 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
4331 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
4332 ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
4333 ioarcb->cmd_pkt.cdb[1] = parm;
4334 ioarcb->cmd_pkt.cdb[4] = xfer_len;
4336 ioadl->flags_and_data_len =
4337 cpu_to_be32(IPR_IOADL_FLAGS_WRITE_LAST | xfer_len);
4338 ioadl->address = cpu_to_be32(dma_addr);
4339 ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
4340 ioarcb->write_data_transfer_length = cpu_to_be32(xfer_len);
/**
 * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
 * @ipr_cmd:	ipr command struct
 *
 * This function sets up the SCSI bus attributes and sends
 * a Mode Select for Page 28 to activate them.
 *
 * Return value:
 *	IPR_RC_JOB_RETURN
 **/
static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;

	if (ioa_cfg->saved_mode_pages) {
		/* Replay mode select data captured before the reset */
		memcpy(mode_pages, ioa_cfg->saved_mode_pages,
		       ioa_cfg->saved_mode_page_len);
		length = ioa_cfg->saved_mode_page_len;
		/* First-time setup: derive the bus attributes to send */
		ipr_scsi_bus_speed_limit(ioa_cfg);
		ipr_check_term_power(ioa_cfg, mode_pages);
		ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
		length = mode_pages->hdr.length + 1;
		/* Mode data length must be zero in outgoing MODE SELECT data */
		mode_pages->hdr.length = 0;

	ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
			      ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),

	/* Next job step: Set Supported Devices, starting from the head
	 * of the used resource queue */
	ipr_cmd->job_step = ipr_set_supported_devs;
	ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
				    struct ipr_resource_entry, queue);

	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);

	return IPR_RC_JOB_RETURN;
4387 * ipr_build_mode_sense - Builds a mode sense command
4388 * @ipr_cmd: ipr command struct
4389 * @res: resource entry struct
4390 * @parm: Byte 2 of mode sense command
4391 * @dma_addr: DMA address of mode sense buffer
4392 * @xfer_len: Size of DMA buffer
4397 static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
4399 u8 parm, u32 dma_addr, u8 xfer_len)
4401 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
4402 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4404 ioarcb->res_handle = res_handle;
4405 ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
4406 ioarcb->cmd_pkt.cdb[2] = parm;
4407 ioarcb->cmd_pkt.cdb[4] = xfer_len;
4408 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
4410 ioadl->flags_and_data_len =
4411 cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | xfer_len);
4412 ioadl->address = cpu_to_be32(dma_addr);
4413 ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
4414 ioarcb->read_data_transfer_length = cpu_to_be32(xfer_len);
4418 * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
4419 * @ipr_cmd: ipr command struct
4421 * This function send a Page 28 mode sense to the IOA to
4422 * retrieve SCSI bus attributes.
4427 static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
4429 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4432 ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
4433 0x28, ioa_cfg->vpd_cbs_dma +
4434 offsetof(struct ipr_misc_cbs, mode_pages),
4435 sizeof(struct ipr_mode_pages));
4437 ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
4439 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
4442 return IPR_RC_JOB_RETURN;
/**
 * ipr_init_res_table - Initialize the resource table
 * @ipr_cmd:	ipr command struct
 *
 * This function looks through the existing resource table, comparing
 * it with the config table. This function will take care of old/new
 * devices and schedule adding/removing them from the mid-layer
 * as appropriate.
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE
 **/
static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_resource_entry *res, *temp;
	struct ipr_config_table_entry *cfgte;

	if (ioa_cfg->cfg_table->hdr.flags & IPR_UCODE_DOWNLOAD_REQ)
		dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");

	/* Park all currently-known resources on a local list */
	list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
		list_move_tail(&res->queue, &old_res);

	for (i = 0; i < ioa_cfg->cfg_table->hdr.num_entries; i++) {
		cfgte = &ioa_cfg->cfg_table->dev[i];

		/* Match by resource address: a hit means the device survived */
		list_for_each_entry_safe(res, temp, &old_res, queue) {
			if (!memcmp(&res->cfgte.res_addr,
				    &cfgte->res_addr, sizeof(cfgte->res_addr))) {
				list_move_tail(&res->queue, &ioa_cfg->used_res_q);

		if (list_empty(&ioa_cfg->free_res_q)) {
			dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");

		/* New device: claim a free resource entry for it */
		res = list_entry(ioa_cfg->free_res_q.next,
				 struct ipr_resource_entry, queue);
		list_move_tail(&res->queue, &ioa_cfg->used_res_q);
		ipr_init_res_entry(res);

		/* Refresh the cached config table entry in all cases */
		memcpy(&res->cfgte, cfgte, sizeof(struct ipr_config_table_entry));

	/* Anything still on old_res disappeared from the config table */
	list_for_each_entry_safe(res, temp, &old_res, queue) {
		res->del_from_ml = 1;
		list_move_tail(&res->queue, &ioa_cfg->used_res_q);
		list_move_tail(&res->queue, &ioa_cfg->free_res_q);

	ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;

	return IPR_RC_JOB_CONTINUE;
4519 * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
4520 * @ipr_cmd: ipr command struct
4522 * This function sends a Query IOA Configuration command
4523 * to the adapter to retrieve the IOA configuration table.
4528 static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
4530 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4531 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4532 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
4533 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
4536 dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
4537 ucode_vpd->major_release, ucode_vpd->card_type,
4538 ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
4539 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
4540 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
4542 ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
4543 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_config_table) >> 8) & 0xff;
4544 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_config_table) & 0xff;
4546 ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
4547 ioarcb->read_data_transfer_length =
4548 cpu_to_be32(sizeof(struct ipr_config_table));
4550 ioadl->address = cpu_to_be32(ioa_cfg->cfg_table_dma);
4551 ioadl->flags_and_data_len =
4552 cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | sizeof(struct ipr_config_table));
4554 ipr_cmd->job_step = ipr_init_res_table;
4556 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
4559 return IPR_RC_JOB_RETURN;
4563 * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
4564 * @ipr_cmd: ipr command struct
4566 * This utility function sends an inquiry to the adapter.
4571 static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
4572 u32 dma_addr, u8 xfer_len)
4574 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4575 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
4578 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
4579 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
4581 ioarcb->cmd_pkt.cdb[0] = INQUIRY;
4582 ioarcb->cmd_pkt.cdb[1] = flags;
4583 ioarcb->cmd_pkt.cdb[2] = page;
4584 ioarcb->cmd_pkt.cdb[4] = xfer_len;
4586 ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
4587 ioarcb->read_data_transfer_length = cpu_to_be32(xfer_len);
4589 ioadl->address = cpu_to_be32(dma_addr);
4590 ioadl->flags_and_data_len =
4591 cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | xfer_len);
4593 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
4598 * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
4599 * @ipr_cmd: ipr command struct
4601 * This function sends a Page 3 inquiry to the adapter
4602 * to retrieve software VPD information.
4605 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
4607 static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
4609 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4614 /* Grab the type out of the VPD and store it away */
4615 memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
4617 ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);
4619 ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
4621 ipr_ioafp_inquiry(ipr_cmd, 1, 3,
4622 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
4623 sizeof(struct ipr_inquiry_page3));
4626 return IPR_RC_JOB_RETURN;
4630 * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
4631 * @ipr_cmd: ipr command struct
4633 * This function sends a standard inquiry to the adapter.
4638 static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
4640 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4643 ipr_cmd->job_step = ipr_ioafp_page3_inquiry;
4645 ipr_ioafp_inquiry(ipr_cmd, 0, 0,
4646 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
4647 sizeof(struct ipr_ioa_vpd));
4650 return IPR_RC_JOB_RETURN;
4654 * ipr_ioafp_indentify_hrrq - Send Identify Host RRQ.
4655 * @ipr_cmd: ipr command struct
4657 * This function send an Identify Host Request Response Queue
4658 * command to establish the HRRQ with the adapter.
4663 static int ipr_ioafp_indentify_hrrq(struct ipr_cmnd *ipr_cmd)
4665 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4666 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4669 dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
4671 ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
4672 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
4674 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
4675 ioarcb->cmd_pkt.cdb[2] =
4676 ((u32) ioa_cfg->host_rrq_dma >> 24) & 0xff;
4677 ioarcb->cmd_pkt.cdb[3] =
4678 ((u32) ioa_cfg->host_rrq_dma >> 16) & 0xff;
4679 ioarcb->cmd_pkt.cdb[4] =
4680 ((u32) ioa_cfg->host_rrq_dma >> 8) & 0xff;
4681 ioarcb->cmd_pkt.cdb[5] =
4682 ((u32) ioa_cfg->host_rrq_dma) & 0xff;
4683 ioarcb->cmd_pkt.cdb[7] =
4684 ((sizeof(u32) * IPR_NUM_CMD_BLKS) >> 8) & 0xff;
4685 ioarcb->cmd_pkt.cdb[8] =
4686 (sizeof(u32) * IPR_NUM_CMD_BLKS) & 0xff;
4688 ipr_cmd->job_step = ipr_ioafp_std_inquiry;
4690 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
4693 return IPR_RC_JOB_RETURN;
4697 * ipr_reset_timer_done - Adapter reset timer function
4698 * @ipr_cmd: ipr command struct
4700 * Description: This function is used in adapter reset processing
4701 * for timing events. If the reset_cmd pointer in the IOA
4702 * config struct is not this adapter's we are doing nested
4703 * resets and fail_all_ops will take care of freeing the
4709 static void ipr_reset_timer_done(struct ipr_cmnd *ipr_cmd)
4711 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4712 unsigned long lock_flags = 0;
4714 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4716 if (ioa_cfg->reset_cmd == ipr_cmd) {
4717 list_del(&ipr_cmd->queue);
4718 ipr_cmd->done(ipr_cmd);
4721 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4725 * ipr_reset_start_timer - Start a timer for adapter reset job
4726 * @ipr_cmd: ipr command struct
4727 * @timeout: timeout value
4729 * Description: This function is used in adapter reset processing
4730 * for timing events. If the reset_cmd pointer in the IOA
4731 * config struct is not this adapter's we are doing nested
4732 * resets and fail_all_ops will take care of freeing the
4738 static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
4739 unsigned long timeout)
4741 list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q);
4742 ipr_cmd->done = ipr_reset_ioa_job;
4744 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
4745 ipr_cmd->timer.expires = jiffies + timeout;
4746 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_reset_timer_done;
4747 add_timer(&ipr_cmd->timer);
4751 * ipr_init_ioa_mem - Initialize ioa_cfg control block
4752 * @ioa_cfg: ioa cfg struct
4757 static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
4759 memset(ioa_cfg->host_rrq, 0, sizeof(u32) * IPR_NUM_CMD_BLKS);
4761 /* Initialize Host RRQ pointers */
4762 ioa_cfg->hrrq_start = ioa_cfg->host_rrq;
4763 ioa_cfg->hrrq_end = &ioa_cfg->host_rrq[IPR_NUM_CMD_BLKS - 1];
4764 ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
4765 ioa_cfg->toggle_bit = 1;
4767 /* Zero out config table */
4768 memset(ioa_cfg->cfg_table, 0, sizeof(struct ipr_config_table));
/**
 * ipr_reset_enable_ioa - Enable the IOA following a reset.
 * @ipr_cmd:	ipr command struct
 *
 * This function reinitializes some control blocks and
 * enables destructive diagnostics on the adapter.
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	volatile u32 int_reg;

	ipr_cmd->job_step = ipr_ioafp_indentify_hrrq;
	ipr_init_ioa_mem(ioa_cfg);

	ioa_cfg->allow_interrupts = 1;
	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);

	/* Adapter already transitioned to operational: just unmask the
	 * error/HRRQ interrupts and continue the job immediately */
	if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
		writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
		       ioa_cfg->regs.clr_interrupt_mask_reg);
		int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
		return IPR_RC_JOB_CONTINUE;

	/* Enable destructive diagnostics on IOA */
	writel(IPR_DOORBELL, ioa_cfg->regs.set_uproc_interrupt_reg);

	writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg);
	/* readback presumably flushes the posted write -- confirm */
	int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);

	dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");

	/* Arm a timeout in case the adapter never becomes operational */
	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
	ipr_cmd->timer.expires = jiffies + IPR_OPERATIONAL_TIMEOUT;
	ipr_cmd->timer.function = (void (*)(unsigned long))ipr_timeout;
	add_timer(&ipr_cmd->timer);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);

	return IPR_RC_JOB_RETURN;
4819 * ipr_reset_wait_for_dump - Wait for a dump to timeout.
4820 * @ipr_cmd: ipr command struct
4822 * This function is invoked when an adapter dump has run out
4823 * of processing time.
4826 * IPR_RC_JOB_CONTINUE
4828 static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
4830 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4832 if (ioa_cfg->sdt_state == GET_DUMP)
4833 ioa_cfg->sdt_state = ABORT_DUMP;
4835 ipr_cmd->job_step = ipr_reset_alert;
4837 return IPR_RC_JOB_CONTINUE;
4841 * ipr_unit_check_no_data - Log a unit check/no data error log
4842 * @ioa_cfg: ioa config struct
4844 * Logs an error indicating the adapter unit checked, but for some
4845 * reason, we were unable to fetch the unit check buffer.
4850 static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
4852 ioa_cfg->errors_logged++;
4853 dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
/**
 * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
 * @ioa_cfg:	ioa config struct
 *
 * Fetches the unit check buffer from the adapter by clocking the data
 * through the mailbox register.
 *
 * Return value:
 *	none
 **/
static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
	unsigned long mailbox;
	struct ipr_hostrcb *hostrcb;
	struct ipr_uc_sdt sdt;

	mailbox = readl(ioa_cfg->ioa_mailbox);

	/* Mailbox must point at a format 2 SDT or there is nothing usable */
	if (!ipr_sdt_is_fmt2(mailbox)) {
		ipr_unit_check_no_data(ioa_cfg);

	/* Pull the SDT header/first entry out of adapter memory */
	memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
	rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (u32 *) &sdt,
					(sizeof(struct ipr_uc_sdt)) / sizeof(u32));

	if (rc || (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE) ||
	    !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY)) {
		ipr_unit_check_no_data(ioa_cfg);

	/* Find length of the first sdt entry (UC buffer) */
	length = (be32_to_cpu(sdt.entry[0].end_offset) -
		  be32_to_cpu(sdt.entry[0].bar_str_offset)) & IPR_FMT2_MBX_ADDR_MASK;

	/* Borrow a free hostrcb to hold the fetched error data */
	hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
			     struct ipr_hostrcb, queue);
	list_del(&hostrcb->queue);
	memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));

	rc = ipr_get_ldump_data_section(ioa_cfg,
					be32_to_cpu(sdt.entry[0].bar_str_offset),
					(u32 *)&hostrcb->hcam,
					min(length, (int)sizeof(hostrcb->hcam)) / sizeof(u32));

	/* Log the data if the fetch worked, else note the failure */
	ipr_handle_log_data(ioa_cfg, hostrcb);
	ipr_unit_check_no_data(ioa_cfg);

	list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
/**
 * ipr_reset_restore_cfg_space - Restore PCI config space.
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function restores the saved PCI config space of
 * the adapter, fails all outstanding ops back to the callers, and
 * fetches the dump/unit check if applicable to this reset.
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	rc = pci_restore_state(ioa_cfg->pdev, ioa_cfg->pci_cfg_buf);

	if (rc != PCIBIOS_SUCCESSFUL) {
		/* Config space inaccessible: fail this reset stage */
		ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
		return IPR_RC_JOB_CONTINUE;

	if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
		ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
		return IPR_RC_JOB_CONTINUE;

	/* Everything outstanding at reset time is failed back to callers */
	ipr_fail_all_ops(ioa_cfg);

	if (ioa_cfg->ioa_unit_checked) {
		/* Fetch the unit check buffer, then re-alert the adapter */
		ioa_cfg->ioa_unit_checked = 0;
		ipr_get_unit_check_buffer(ioa_cfg);
		ipr_cmd->job_step = ipr_reset_alert;
		ipr_reset_start_timer(ipr_cmd, 0);
		return IPR_RC_JOB_RETURN;

	if (ioa_cfg->in_ioa_bringdown) {
		ipr_cmd->job_step = ipr_ioa_bringdown_done;
		ipr_cmd->job_step = ipr_reset_enable_ioa;

		/* A dump was requested: give it time before continuing */
		if (GET_DUMP == ioa_cfg->sdt_state) {
			ipr_reset_start_timer(ipr_cmd, IPR_DUMP_TIMEOUT);
			ipr_cmd->job_step = ipr_reset_wait_for_dump;
			schedule_work(&ioa_cfg->work_q);
			return IPR_RC_JOB_RETURN;

	return IPR_RC_JOB_CONTINUE;
4969  * ipr_reset_start_bist - Run BIST on the adapter.
4970  * @ipr_cmd:	ipr command struct
4972  * Description: This function runs BIST on the adapter, then delays 2 seconds.
4975  * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
4977 static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
4979 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
/* Kick off PCI Built-In Self Test; this is what actually resets the IOA. */
4983 	rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
4985 	if (rc != PCIBIOS_SUCCESSFUL) {
/* Config write failed -- flag a PCI access error and let the job router
 * handle it synchronously. */
4986 		ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
4987 		rc = IPR_RC_JOB_CONTINUE;
/* NOTE(review): the else line is elided in this extract; on success we wait
 * IPR_WAIT_FOR_BIST_TIMEOUT before restoring config space. */
4989 		ipr_cmd->job_step = ipr_reset_restore_cfg_space;
4990 		ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
4991 		rc = IPR_RC_JOB_RETURN;
4999 * ipr_reset_allowed - Query whether or not IOA can be reset
5000 * @ioa_cfg: ioa config struct
5003 * 0 if reset not allowed / non-zero if reset is allowed
5005 static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
5007 volatile u32 temp_reg;
5009 temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5010 return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
5014  * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
5015  * @ipr_cmd:	ipr command struct
5017  * Description: This function waits for adapter permission to run BIST,
5018  * then runs BIST. If the adapter does not give permission after a
5019  * reasonable time, we will reset the adapter anyway. The impact of
5020  * resetting the adapter without warning the adapter is the risk of
5021  * losing the persistent error log on the adapter. If the adapter is
5022  * reset while it is writing to the flash on the adapter, the flash
5023  * segment will have bad ECC and be zeroed.
5026  * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5028 static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
5030 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5031 	int rc = IPR_RC_JOB_RETURN;
5033 	if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
/* Not yet allowed and budget remains: burn one poll interval and re-arm the
 * timer to retry this same job step. */
5034 		ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
5035 		ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
/* Either permission was granted or we timed out -- proceed to BIST anyway
 * (see risk note in the header comment above). */
5037 		ipr_cmd->job_step = ipr_reset_start_bist;
5038 		rc = IPR_RC_JOB_CONTINUE;
5045  * ipr_reset_alert - Alert the adapter of a pending reset
5046  * @ipr_cmd:	ipr command struct
5048  * Description: This function alerts the adapter that it will be reset.
5049  * If memory space is not currently enabled, proceed directly
5050  * to running BIST on the adapter. The timer must always be started
5051  * so we guarantee we do not run BIST from ipr_isr.
5056 static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
5058 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
/* Only touch MMIO if PCI memory space is enabled; otherwise the writel
 * below would not reach the adapter. */
5063 	rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);
5065 	if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
5066 		ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
/* Warn the adapter so it can quiesce flash writes before the reset. */
5067 		writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg);
5068 		ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
/* NOTE(review): else line elided -- without memory space we skip the alert
 * and go straight to BIST. */
5070 		ipr_cmd->job_step = ipr_reset_start_bist;
/* Always defer via the timer so BIST never runs from interrupt context. */
5073 	ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
5074 	ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
5077 	return IPR_RC_JOB_RETURN;
5081  * ipr_reset_ucode_download_done - Microcode download completion
5082  * @ipr_cmd:	ipr command struct
5084  * Description: This function unmaps the microcode download buffer.
5087  * IPR_RC_JOB_CONTINUE
5089 static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
5091 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5092 	struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
/* Undo the DMA mapping created by ipr_map_ucode_buffer() in
 * ipr_reset_ucode_download(). */
5094 	pci_unmap_sg(ioa_cfg->pdev, sglist->scatterlist,
5095 		     sglist->num_sg, DMA_TO_DEVICE);
/* Microcode is downloaded; next step of the reset job is to alert/reset
 * the adapter so the new image takes effect. */
5097 	ipr_cmd->job_step = ipr_reset_alert;
5098 	return IPR_RC_JOB_CONTINUE;
5102  * ipr_reset_ucode_download - Download microcode to the adapter
5103  * @ipr_cmd:	ipr command struct
5105  * Description: This function checks to see if it there is microcode
5106  * to download to the adapter. If there is, a download is performed.
5109  * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5111 static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
5113 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5114 	struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
/* No pending microcode image: skip straight to the reset-alert step. */
5117 	ipr_cmd->job_step = ipr_reset_alert;
5120 	return IPR_RC_JOB_CONTINUE;
/* Build a SCSI WRITE BUFFER (download-and-save mode) addressed to the IOA
 * itself; cdb[6..8] carry the 24-bit big-endian transfer length. */
5122 	ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
5123 	ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
5124 	ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
5125 	ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
5126 	ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
5127 	ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
5128 	ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;
5130 	if (ipr_map_ucode_buffer(ipr_cmd, sglist, sglist->buffer_len)) {
5131 		dev_err(&ioa_cfg->pdev->dev,
5132 			"Failed to map microcode download buffer\n");
/* Mapping failed: fall through to the next job step without downloading. */
5133 		return IPR_RC_JOB_CONTINUE;
/* On completion, ipr_reset_ucode_download_done() unmaps the buffer. */
5136 	ipr_cmd->job_step = ipr_reset_ucode_download_done;
5138 	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
5139 		   IPR_WRITE_BUFFER_TIMEOUT);
5142 	return IPR_RC_JOB_RETURN;
5146  * ipr_reset_shutdown_ioa - Shutdown the adapter
5147  * @ipr_cmd:	ipr command struct
5149  * Description: This function issues an adapter shutdown of the
5150  * specified type to the specified adapter as part of the
5151  * adapter reset job.
5154  * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5156 static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
5158 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5159 	enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
5160 	unsigned long timeout;
5161 	int rc = IPR_RC_JOB_CONTINUE;
/* Only send a shutdown command if one was requested and the adapter is
 * still alive enough to process it. */
5164 	if (shutdown_type != IPR_SHUTDOWN_NONE && !ioa_cfg->ioa_is_dead) {
5165 		ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
5166 		ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
5167 		ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
5168 		ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;
/* Timeout scales with how much work the shutdown type implies (cache
 * flush vs. abbreviated). */
5170 		if (shutdown_type == IPR_SHUTDOWN_ABBREV)
5171 			timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;
5172 		else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
5173 			timeout = IPR_INTERNAL_TIMEOUT;
5175 			timeout = IPR_SHUTDOWN_TIMEOUT;
5177 		ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);
5179 		rc = IPR_RC_JOB_RETURN;
5180 		ipr_cmd->job_step = ipr_reset_ucode_download;
/* NOTE(review): else line elided -- with no shutdown to issue we go
 * directly to the reset-alert step. */
5182 		ipr_cmd->job_step = ipr_reset_alert;
5189  * ipr_reset_ioa_job - Adapter reset job
5190  * @ipr_cmd:	ipr command struct
5192  * Description: This function is the job router for the adapter reset job.
5197 static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
/* scratch is preserved across ipr_reinit_ipr_cmnd() below so job steps can
 * carry state from one step to the next. */
5200 	unsigned long scratch = ipr_cmd->u.scratch;
5201 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5204 	ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
5206 	if (ioa_cfg->reset_cmd != ipr_cmd) {
5208 		 * We are doing nested adapter resets and this is
5209 		 * not the current reset job.
5211 		list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5215 	if (IPR_IOASC_SENSE_KEY(ioasc)) {
/* The previous job step's command failed: log it and restart the whole
 * reset from scratch. */
5216 		dev_err(&ioa_cfg->pdev->dev,
5217 			"0x%02X failed with IOASC: 0x%08X\n",
5218 			ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
5220 		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5221 		list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
/* Drive consecutive synchronous job steps; a step returning
 * IPR_RC_JOB_RETURN resumes asynchronously (timer or command completion). */
5225 	ipr_reinit_ipr_cmnd(ipr_cmd);
5226 	ipr_cmd->u.scratch = scratch;
5227 	rc = ipr_cmd->job_step(ipr_cmd);
5228 	} while(rc == IPR_RC_JOB_CONTINUE);
5232  * _ipr_initiate_ioa_reset - Initiate an adapter reset
5233  * @ioa_cfg:		ioa config struct
5234  * @job_step:		first job step of reset job
5235  * @shutdown_type:	shutdown type
5237  * Description: This function will initiate the reset of the given adapter
5238  * starting at the selected job step.
5239  * If the caller needs to wait on the completion of the reset,
5240  * the caller must sleep on the reset_wait_q.
5245 static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
5246 				    int (*job_step) (struct ipr_cmnd *),
5247 				    enum ipr_shutdown_type shutdown_type)
5249 	struct ipr_cmnd *ipr_cmd;
/* Gate off new commands from the midlayer for the duration of the reset;
 * in_reset_reload is cleared (and reset_wait_q woken) when the job ends. */
5251 	ioa_cfg->in_reset_reload = 1;
5252 	ioa_cfg->allow_cmds = 0;
5253 	scsi_block_requests(ioa_cfg->host);
5255 	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
/* reset_cmd identifies the current reset job so nested resets can be
 * detected in ipr_reset_ioa_job(). */
5256 	ioa_cfg->reset_cmd = ipr_cmd;
5257 	ipr_cmd->job_step = job_step;
5258 	ipr_cmd->u.shutdown_type = shutdown_type;
5260 	ipr_reset_ioa_job(ipr_cmd);
5264  * ipr_initiate_ioa_reset - Initiate an adapter reset
5265  * @ioa_cfg:		ioa config struct
5266  * @shutdown_type:	shutdown type
5268  * Description: This function will initiate the reset of the given adapter.
5269  * If the caller needs to wait on the completion of the reset,
5270  * the caller must sleep on the reset_wait_q.
5275 static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
5276 				   enum ipr_shutdown_type shutdown_type)
/* A dead adapter is never reset again. */
5278 	if (ioa_cfg->ioa_is_dead)
/* A reset arriving while a dump is being fetched aborts the dump. */
5281 	if (ioa_cfg->in_reset_reload && ioa_cfg->sdt_state == GET_DUMP)
5282 		ioa_cfg->sdt_state = ABORT_DUMP;
5284 	if (ioa_cfg->reset_retries++ > IPR_NUM_RESET_RELOAD_RETRIES) {
/* Recovery has failed repeatedly: take the adapter offline for good. */
5285 		dev_err(&ioa_cfg->pdev->dev,
5286 			"IOA taken offline - error recovery failed\n");
5288 		ioa_cfg->reset_retries = 0;
5289 		ioa_cfg->ioa_is_dead = 1;
5291 		if (ioa_cfg->in_ioa_bringdown) {
5292 			ioa_cfg->reset_cmd = NULL;
5293 			ioa_cfg->in_reset_reload = 0;
5294 			ipr_fail_all_ops(ioa_cfg);
5295 			wake_up_all(&ioa_cfg->reset_wait_q);
/* scsi_unblock_requests() may recurse into the queuecommand path, so the
 * host lock must be dropped around it. */
5297 			spin_unlock_irq(ioa_cfg->host->host_lock);
5298 			scsi_unblock_requests(ioa_cfg->host);
5299 			spin_lock_irq(ioa_cfg->host->host_lock);
/* Not already in bringdown: convert this reset into a bringdown with no
 * shutdown command (adapter is beyond responding). */
5302 		ioa_cfg->in_ioa_bringdown = 1;
5303 		shutdown_type = IPR_SHUTDOWN_NONE;
5307 	_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
5312  * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
5313  * @ioa_cfg:	ioa cfg struct
5315  * Description: This is the second phase of adapter intialization
5316  * This function takes care of initilizing the adapter to the point
5317  * where it can accept new commands.
5320  * 0 on sucess / -EIO on failure
5322 static int __devinit ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
5325 	unsigned long host_lock_flags = 0;
5328 	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
5329 	dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
/* Run the reset job starting at enable-IOA (no shutdown needed on first
 * bring-up), then sleep until the job clears in_reset_reload. */
5330 	_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa, IPR_SHUTDOWN_NONE);
5332 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
5333 	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5334 	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
5336 	if (ioa_cfg->ioa_is_dead) {
/* NOTE(review): error-path bodies are elided in this extract; both
 * branches presumably set rc to a failure code (-EIO per the header). */
5338 	} else if (ipr_invalid_adapter(ioa_cfg)) {
5342 		dev_err(&ioa_cfg->pdev->dev,
5343 			"Adapter not supported in this hardware configuration.\n");
5346 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
5353  * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
5354  * @ioa_cfg:	ioa config struct
5359 static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
/* Safe to call on a partially-built set: each slot is checked for NULL,
 * so ipr_alloc_cmd_blks() can use this for cleanup on mid-loop failure. */
5363 	for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
5364 		if (ioa_cfg->ipr_cmnd_list[i])
5365 			pci_pool_free(ioa_cfg->ipr_cmd_pool,
5366 				      ioa_cfg->ipr_cmnd_list[i],
5367 				      ioa_cfg->ipr_cmnd_list_dma[i]);
5369 		ioa_cfg->ipr_cmnd_list[i] = NULL;
5372 	if (ioa_cfg->ipr_cmd_pool)
5373 		pci_pool_destroy (ioa_cfg->ipr_cmd_pool);
/* Clear the pointer so a double call is harmless. */
5375 	ioa_cfg->ipr_cmd_pool = NULL;
5379  * ipr_free_mem - Frees memory allocated for an adapter
5380  * @ioa_cfg:	ioa cfg struct
5385 static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
/* Release everything ipr_alloc_mem() obtained, in reverse-ish order:
 * resource table, VPD buffers, command blocks, host RRQ, config table,
 * HCAM buffers, dump area, mode pages and trace buffer. */
5389 	kfree(ioa_cfg->res_entries);
5390 	pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_misc_cbs),
5391 			    ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
5392 	ipr_free_cmd_blks(ioa_cfg);
5393 	pci_free_consistent(ioa_cfg->pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
5394 			    ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
5395 	pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_config_table),
5397 			    ioa_cfg->cfg_table_dma);
5399 	for (i = 0; i < IPR_NUM_HCAMS; i++) {
5400 		pci_free_consistent(ioa_cfg->pdev,
5401 				    sizeof(struct ipr_hostrcb),
5402 				    ioa_cfg->hostrcb[i],
5403 				    ioa_cfg->hostrcb_dma[i]);
5406 	ipr_free_dump(ioa_cfg);
/* kfree(NULL) is a no-op, so unallocated optional buffers are fine here. */
5407 	kfree(ioa_cfg->saved_mode_pages);
5408 	kfree(ioa_cfg->trace);
5412  * ipr_free_all_resources - Free all allocated resources for an adapter.
5413  * @ipr_cmd:	ipr command struct
5415  * This function frees all allocated resources for the
5416  * specified adapter.
5421 static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
/* Tear down in reverse order of acquisition in ipr_probe_ioa():
 * IRQ, MMIO mapping, memory region, driver memory, then the host. */
5424 	free_irq(ioa_cfg->pdev->irq, ioa_cfg);
5425 	iounmap((void *) ioa_cfg->hdw_dma_regs);
5426 	release_mem_region(ioa_cfg->hdw_dma_regs_pci,
5427 			   pci_resource_len(ioa_cfg->pdev, 0));
5428 	ipr_free_mem(ioa_cfg);
/* Drops the reference taken by scsi_host_alloc(); ioa_cfg lives in the
 * host's hostdata, so it must not be touched after this. */
5429 	scsi_host_put(ioa_cfg->host);
5434  * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
5435  * @ioa_cfg:	ioa config struct
5438  * 0 on success / -ENOMEM on allocation failure
5440 static int __devinit ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
5442 	struct ipr_cmnd *ipr_cmd;
5443 	struct ipr_ioarcb *ioarcb;
/* Command blocks are DMAed by the adapter, hence a PCI pool with 8-byte
 * alignment rather than plain kmalloc. */
5447 	ioa_cfg->ipr_cmd_pool = pci_pool_create (IPR_NAME, ioa_cfg->pdev,
5448 						 sizeof(struct ipr_cmnd), 8, 0);
5450 	if (!ioa_cfg->ipr_cmd_pool)
5453 	for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
5454 		ipr_cmd = pci_pool_alloc (ioa_cfg->ipr_cmd_pool, SLAB_KERNEL, &dma_addr);
/* On failure, ipr_free_cmd_blks() releases whatever was allocated so far. */
5457 			ipr_free_cmd_blks(ioa_cfg);
5461 		memset(ipr_cmd, 0, sizeof(*ipr_cmd));
5462 		ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
5463 		ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;
/* Pre-compute all the bus addresses embedded in the IOARCB once, so the
 * hot path never recomputes them: the IOADL (scatter list), the IOASA
 * (status area) and the sense buffer all live inside the same block. */
5465 		ioarcb = &ipr_cmd->ioarcb;
5466 		ioarcb->ioarcb_host_pci_addr = cpu_to_be32(dma_addr);
5467 		ioarcb->host_response_handle = cpu_to_be32(i << 2);
5468 		ioarcb->write_ioadl_addr =
5469 			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioadl));
5470 		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
5471 		ioarcb->ioasa_host_pci_addr =
5472 			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioasa));
5473 		ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
5474 		ipr_cmd->cmd_index = i;
5475 		ipr_cmd->ioa_cfg = ioa_cfg;
5476 		ipr_cmd->sense_buffer_dma = dma_addr +
5477 			offsetof(struct ipr_cmnd, sense_buffer);
5479 		list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5486  * ipr_alloc_mem - Allocate memory for an adapter
5487  * @ioa_cfg: ioa config struct
5490  * 0 on success / non-zero for error
5492 static int __devinit ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
/* Resource entries track every physical device the adapter reports;
 * plain kernel memory, never DMAed. */
5497 	ioa_cfg->res_entries = kmalloc(sizeof(struct ipr_resource_entry) *
5498 				       IPR_MAX_PHYSICAL_DEVS, GFP_KERNEL);
5500 	if (!ioa_cfg->res_entries)
5503 	memset(ioa_cfg->res_entries, 0,
5504 	       sizeof(struct ipr_resource_entry) * IPR_MAX_PHYSICAL_DEVS);
5506 	for (i = 0; i < IPR_MAX_PHYSICAL_DEVS; i++)
5507 		list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
/* The remaining buffers are shared with the adapter over DMA, so they use
 * pci_alloc_consistent(). NOTE(review): goto-cleanup labels for the error
 * paths are elided in this extract; ipr_free_mem() at the bottom is the
 * visible unwind. */
5509 	ioa_cfg->vpd_cbs = pci_alloc_consistent(ioa_cfg->pdev,
5510 						sizeof(struct ipr_misc_cbs),
5511 						&ioa_cfg->vpd_cbs_dma);
5513 	if (!ioa_cfg->vpd_cbs)
5516 	if (ipr_alloc_cmd_blks(ioa_cfg))
5519 	ioa_cfg->host_rrq = pci_alloc_consistent(ioa_cfg->pdev,
5520 						 sizeof(u32) * IPR_NUM_CMD_BLKS,
5521 						 &ioa_cfg->host_rrq_dma);
5523 	if (!ioa_cfg->host_rrq)
5526 	ioa_cfg->cfg_table = pci_alloc_consistent(ioa_cfg->pdev,
5527 						  sizeof(struct ipr_config_table),
5528 						  &ioa_cfg->cfg_table_dma);
5530 	if (!ioa_cfg->cfg_table)
5533 	for (i = 0; i < IPR_NUM_HCAMS; i++) {
5534 		ioa_cfg->hostrcb[i] = pci_alloc_consistent(ioa_cfg->pdev,
5535 							   sizeof(struct ipr_hostrcb),
5536 							   &ioa_cfg->hostrcb_dma[i]);
5538 		if (!ioa_cfg->hostrcb[i])
5541 		memset(ioa_cfg->hostrcb[i], 0, sizeof(struct ipr_hostrcb));
/* Each HCAM buffer records the bus address of its own hcam payload so it
 * can be handed to the adapter directly from the hostrcb. */
5542 		ioa_cfg->hostrcb[i]->hostrcb_dma =
5543 			ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
5544 		list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
5547 	ioa_cfg->trace = kmalloc(sizeof(struct ipr_trace_entry) *
5548 				 IPR_NUM_TRACE_ENTRIES, GFP_KERNEL);
5550 	if (!ioa_cfg->trace)
5553 	memset(ioa_cfg->trace, 0,
5554 	       sizeof(struct ipr_trace_entry) * IPR_NUM_TRACE_ENTRIES);
5560 	ipr_free_mem(ioa_cfg);
5567 * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
5568 * @ioa_cfg: ioa config struct
5573 static void __devinit ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
5577 for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
5578 ioa_cfg->bus_attr[i].bus = i;
5579 ioa_cfg->bus_attr[i].qas_enabled = 0;
5580 ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
5581 if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
5582 ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
5584 ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
5589  * ipr_init_ioa_cfg - Initialize IOA config struct
5590  * @ioa_cfg:	ioa config struct
5591  * @host:		scsi host struct
5592  * @pdev:		PCI dev struct
5597 static void __devinit ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
5598 				       struct Scsi_Host *host, struct pci_dev *pdev)
5600 	ioa_cfg->host = host;
5601 	ioa_cfg->pdev = pdev;
5602 	ioa_cfg->log_level = ipr_log_level;
/* Eye-catcher strings make key structures easy to locate in a raw
 * memory/adapter dump. */
5603 	sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
5604 	sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
5605 	sprintf(ioa_cfg->ipr_free_label, IPR_FREEQ_LABEL);
5606 	sprintf(ioa_cfg->ipr_pending_label, IPR_PENDQ_LABEL);
5607 	sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
5608 	sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
5609 	sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
5610 	sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);
5612 	INIT_LIST_HEAD(&ioa_cfg->free_q);
5613 	INIT_LIST_HEAD(&ioa_cfg->pending_q);
5614 	INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
5615 	INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
5616 	INIT_LIST_HEAD(&ioa_cfg->free_res_q);
5617 	INIT_LIST_HEAD(&ioa_cfg->used_res_q);
5618 	INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread, ioa_cfg);
5619 	init_waitqueue_head(&ioa_cfg->reset_wait_q);
5620 	ioa_cfg->sdt_state = INACTIVE;
5622 	ipr_initialize_bus_attr(ioa_cfg);
5624 	host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
5625 	host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
5626 	host->max_channel = IPR_MAX_BUS_TO_SCAN;
5627 	host->unique_id = host->host_no;
5628 	host->max_cmd_len = IPR_MAX_CDB_LEN;
5629 	pci_set_drvdata(pdev, ioa_cfg);
/* The chip config holds register offsets; copy it and convert every
 * offset into an absolute ioremapped address by adding the mapping base
 * (hdw_dma_regs must already be set by the caller). */
5631 	memcpy(&ioa_cfg->regs, &ioa_cfg->chip_cfg->regs, sizeof(ioa_cfg->regs));
5633 	ioa_cfg->regs.set_interrupt_mask_reg += ioa_cfg->hdw_dma_regs;
5634 	ioa_cfg->regs.clr_interrupt_mask_reg += ioa_cfg->hdw_dma_regs;
5635 	ioa_cfg->regs.sense_interrupt_mask_reg += ioa_cfg->hdw_dma_regs;
5636 	ioa_cfg->regs.clr_interrupt_reg += ioa_cfg->hdw_dma_regs;
5637 	ioa_cfg->regs.sense_interrupt_reg += ioa_cfg->hdw_dma_regs;
5638 	ioa_cfg->regs.ioarrin_reg += ioa_cfg->hdw_dma_regs;
5639 	ioa_cfg->regs.sense_uproc_interrupt_reg += ioa_cfg->hdw_dma_regs;
5640 	ioa_cfg->regs.set_uproc_interrupt_reg += ioa_cfg->hdw_dma_regs;
5641 	ioa_cfg->regs.clr_uproc_interrupt_reg += ioa_cfg->hdw_dma_regs;
5645  * ipr_probe_ioa - Allocates memory and does first stage of initialization
5646  * @pdev:		PCI device struct
5647  * @dev_id:		PCI device id struct
5650  * 0 on success / non-zero on failure
5652 static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
5653 				   const struct pci_device_id *dev_id)
5655 	struct ipr_ioa_cfg *ioa_cfg;
5656 	struct Scsi_Host *host;
5657 	unsigned long ipr_regs, ipr_regs_pci;
5658 	u32 rc = PCIBIOS_SUCCESSFUL;
5662 	if ((rc = pci_enable_device(pdev))) {
5663 		dev_err(&pdev->dev, "Cannot enable adapter\n");
5667 	dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);
/* ioa_cfg is carved out of the Scsi_Host's hostdata, so its lifetime is
 * tied to the host reference count. */
5669 	host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));
5672 		dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
5676 	ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
5677 	memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
/* driver_data from the pci_device_id carries the per-chip register layout
 * (see ipr_pci_table). */
5679 	ioa_cfg->chip_cfg = (const struct ipr_chip_cfg_t *)dev_id->driver_data;
5681 	ipr_regs_pci = pci_resource_start(pdev, 0);
5683 	if (!request_mem_region(ipr_regs_pci,
5684 				pci_resource_len(pdev, 0), IPR_NAME)) {
5686 			"Couldn't register memory range of registers\n");
5687 		scsi_host_put(host);
5691 	ipr_regs = (unsigned long)ioremap(ipr_regs_pci,
5692 					  pci_resource_len(pdev, 0));
5696 			"Couldn't map memory range of registers\n");
5697 		release_mem_region(ipr_regs_pci, pci_resource_len(pdev, 0));
5698 		scsi_host_put(host);
5702 	ioa_cfg->hdw_dma_regs = ipr_regs;
5703 	ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
5704 	ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;
5706 	ipr_init_ioa_cfg(ioa_cfg, host, pdev);
5708 	pci_set_master(pdev);
/* This adapter only supports 32-bit DMA addressing. */
5709 	rc = pci_set_dma_mask(pdev, 0xffffffff);
5711 	if (rc != PCIBIOS_SUCCESSFUL) {
5712 		dev_err(&pdev->dev, "Failed to set PCI DMA mask\n");
5717 	rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
5718 				   ioa_cfg->chip_cfg->cache_line_size);
5720 	if (rc != PCIBIOS_SUCCESSFUL) {
5721 		dev_err(&pdev->dev, "Write of cache line size failed\n");
5726 	/* Save away PCI config space for use following IOA reset */
5727 	rc = pci_save_state(pdev, ioa_cfg->pci_cfg_buf);
5729 	if (rc != PCIBIOS_SUCCESSFUL) {
5730 		dev_err(&pdev->dev, "Failed to save PCI config space\n");
5735 	if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
5738 	if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
5741 	if ((rc = ipr_alloc_mem(ioa_cfg)))
/* Keep the transition-to-operational interrupt unmasked so the bring-up
 * sequence can observe the adapter coming ready. */
5744 	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
5745 	rc = request_irq(pdev->irq, ipr_isr, SA_SHIRQ, IPR_NAME, ioa_cfg);
5748 		dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
5753 	spin_lock(&ipr_driver_lock);
5754 	list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
5755 	spin_unlock(&ipr_driver_lock);
/* NOTE(review): goto labels for the unwind path below are elided in this
 * extract; the visible cleanup releases memory, mapping, region and host. */
5761 	dev_err(&pdev->dev, "Couldn't allocate enough memory for device driver!\n");
5763 	ipr_free_mem(ioa_cfg);
5765 	iounmap((void *) ipr_regs);
5766 	release_mem_region(ipr_regs_pci, pci_resource_len(pdev, 0));
5767 	scsi_host_put(host);
5773 * ipr_scan_vsets - Scans for VSET devices
5774 * @ioa_cfg: ioa config struct
5776 * Description: Since the VSET resources do not follow SAM in that we can have
5777 * sparse LUNs with no LUN 0, we have to scan for these ourselves.
5782 static void ipr_scan_vsets(struct ipr_ioa_cfg *ioa_cfg)
5786 for (target = 0; target < IPR_MAX_NUM_TARGETS_PER_BUS; target++)
5787 for (lun = 0; lun < IPR_MAX_NUM_VSET_LUNS_PER_TARGET; lun++ )
5788 scsi_add_device(ioa_cfg->host, IPR_VSET_BUS, target, lun);
5792  * ipr_initiate_ioa_bringdown - Bring down an adapter
5793  * @ioa_cfg:		ioa config struct
5794  * @shutdown_type:	shutdown type
5796  * Description: This function will initiate bringing down the adapter.
5797  * This consists of issuing an IOA shutdown to the adapter
5798  * to flush the cache, and running BIST.
5799  * If the caller needs to wait on the completion of the reset,
5800  * the caller must sleep on the reset_wait_q.
5805 static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
5806 				       enum ipr_shutdown_type shutdown_type)
/* Bringdown cancels any dump that was still pending. */
5809 	if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
5810 		ioa_cfg->sdt_state = ABORT_DUMP;
/* Fresh retry budget: a bringdown must not inherit counts from earlier
 * recovery attempts. in_ioa_bringdown routes the reset job to
 * ipr_ioa_bringdown_done instead of re-enabling the adapter. */
5811 	ioa_cfg->reset_retries = 0;
5812 	ioa_cfg->in_ioa_bringdown = 1;
5813 	ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
5818  * __ipr_remove - Remove a single adapter
5819  * @pdev:	pci device struct
5821  * Adapter hot plug remove entry point.
5826 static void __ipr_remove(struct pci_dev *pdev)
5828 	unsigned long host_lock_flags = 0;
5829 	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
5832 	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
5833 	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
/* Drop the lock while sleeping for the bringdown to complete; the reset
 * job wakes reset_wait_q when in_reset_reload clears. */
5835 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
5836 	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5837 	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
/* Remove this adapter from the driver's global adapter list. */
5839 	spin_lock(&ipr_driver_lock);
5840 	list_del(&ioa_cfg->queue);
5841 	spin_unlock(&ipr_driver_lock);
5843 	if (ioa_cfg->sdt_state == ABORT_DUMP)
5844 		ioa_cfg->sdt_state = WAIT_FOR_DUMP;
5845 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
5847 	ipr_free_all_resources(ioa_cfg);
5853  * ipr_remove - IOA hot plug remove entry point
5854  * @pdev:	pci device struct
5856  * Adapter hot plug remove entry point.
5861 static void ipr_remove(struct pci_dev *pdev)
5863 	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
/* Stop accepting new commands and let any queued worker items finish
 * before tearing down sysfs files and the host. */
5867 	ioa_cfg->allow_cmds = 0;
5868 	flush_scheduled_work();
5869 	ipr_remove_trace_file(&ioa_cfg->host->shost_classdev.kobj,
5871 	ipr_remove_dump_file(&ioa_cfg->host->shost_classdev.kobj,
5873 	scsi_remove_host(ioa_cfg->host);
/* NOTE(review): trailing call to __ipr_remove(pdev) is elided in this
 * extract -- confirm against the full source. */
5881  * ipr_probe - Adapter hot plug add entry point
5884  * 0 on success / non-zero on failure
5886 static int __devinit ipr_probe(struct pci_dev *pdev,
5887 			       const struct pci_device_id *dev_id)
5889 	struct ipr_ioa_cfg *ioa_cfg;
/* Phase 1: allocate and map everything (ipr_probe_ioa), then phase 2:
 * bring the adapter to an operational state (ipr_probe_ioa_part2). */
5892 	rc = ipr_probe_ioa(pdev, dev_id);
5897 	ioa_cfg = pci_get_drvdata(pdev);
5898 	rc = ipr_probe_ioa_part2(ioa_cfg);
5905 	rc = scsi_add_host(ioa_cfg->host, &pdev->dev);
/* sysfs trace/dump files are best-effort but failure unwinds the host
 * registration to keep state consistent. */
5912 	rc = ipr_create_trace_file(&ioa_cfg->host->shost_classdev.kobj,
5916 		scsi_remove_host(ioa_cfg->host);
5921 	rc = ipr_create_dump_file(&ioa_cfg->host->shost_classdev.kobj,
5925 		ipr_remove_trace_file(&ioa_cfg->host->shost_classdev.kobj,
5927 		scsi_remove_host(ioa_cfg->host);
/* Scan normal devices, then VSETs (sparse LUNs), then expose the IOA
 * itself as a device; finally allow hotplug add/del via the worker. */
5932 	scsi_scan_host(ioa_cfg->host);
5933 	ipr_scan_vsets(ioa_cfg);
5934 	scsi_add_device(ioa_cfg->host, IPR_IOA_BUS, IPR_IOA_TARGET, IPR_IOA_LUN);
5935 	ioa_cfg->allow_ml_add_del = 1;
5936 	schedule_work(&ioa_cfg->work_q);
5941  * ipr_shutdown - Shutdown handler.
5942  * @dev:	device struct
5944  * This function is invoked upon system shutdown/reboot. It will issue
5945  * an adapter shutdown to the adapter to flush the write cache.
5950 static void ipr_shutdown(struct device *dev)
5952 	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(to_pci_dev(dev));
5953 	unsigned long lock_flags = 0;
/* Kick off a normal bringdown under the host lock, then sleep (lock
 * dropped) until the reset job signals completion -- guarantees the write
 * cache is flushed before the system powers off. */
5955 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5956 	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
5957 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5958 	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
/* PCI IDs this driver binds to. driver_data points at the per-chip
 * register layout in ipr_chip_cfg[] (index 0: Gemstone-based boards,
 * index 1: Snipe). Matching is by subsystem vendor/device so distinct
 * IBM cards sharing the same Mylex chip are told apart. */
5961 static struct pci_device_id ipr_pci_table[] __devinitdata = {
5962 	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
5963 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702,
5964 		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
5965 	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
5966 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E,
5967 		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
5968 	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
5969 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703,
5970 		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
5971 	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
5972 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D,
5973 		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
5974 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
5975 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780,
5976 		0, 0, (kernel_ulong_t)&ipr_chip_cfg[1] },
5979 MODULE_DEVICE_TABLE(pci, ipr_pci_table);
/* PCI driver registration: ties the ID table to the hotplug entry points.
 * NOTE(review): .name and .probe initializers are elided in this extract. */
5981 static struct pci_driver ipr_driver = {
5983 	.id_table = ipr_pci_table,
5985 	.remove = ipr_remove,
5987 	.shutdown = ipr_shutdown,
5992 * ipr_init - Module entry point
5995 * 0 on success / non-zero on failure
5997 static int __init ipr_init(void)
5999 ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
6000 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);
6002 pci_register_driver(&ipr_driver);
6008  * ipr_exit - Module unload
6010  * Module unload entry point.
6015 static void __exit ipr_exit(void)
/* Unregistering triggers ipr_remove() for every bound adapter. */
6017 	pci_unregister_driver(&ipr_driver);
6020 module_init(ipr_init);
6021 module_exit(ipr_exit);