2 * ipr.c -- driver for IBM Power Linux RAID adapters
4 * Written By: Brian King, IBM Corporation
6 * Copyright (C) 2003, 2004 IBM Corporation
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
27 * This driver is used to control the following SCSI adapters:
29 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
31 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
32 * PCI-X Dual Channel Ultra 320 SCSI Adapter
33 * PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
34 * Embedded SCSI adapter on p615 and p655 systems
36 * Supported Hardware Features:
37 * - Ultra 320 SCSI controller
38 * - PCI-X host interface
39 * - Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
40 * - Non-Volatile Write Cache
41 * - Supports attachment of non-RAID disks, tape, and optical devices
42 * - RAID Levels 0, 5, 10
44 * - Background Parity Checking
45 * - Background Data Scrubbing
46 * - Ability to increase the capacity of an existing RAID 5 disk array
50 * - Tagged command queuing
51 * - Adapter microcode download
53 * - SCSI device hot plug
57 #include <linux/config.h>
59 #include <linux/init.h>
60 #include <linux/types.h>
61 #include <linux/errno.h>
62 #include <linux/kernel.h>
63 #include <linux/ioport.h>
64 #include <linux/delay.h>
65 #include <linux/pci.h>
66 #include <linux/wait.h>
67 #include <linux/spinlock.h>
68 #include <linux/sched.h>
69 #include <linux/interrupt.h>
70 #include <linux/blkdev.h>
71 #include <linux/firmware.h>
72 #include <linux/module.h>
73 #include <linux/moduleparam.h>
76 #include <asm/processor.h>
77 #include <scsi/scsi.h>
78 #include <scsi/scsi_host.h>
79 #include <scsi/scsi_tcq.h>
80 #include <scsi/scsi_eh.h>
81 #include <scsi/scsi_cmnd.h>
82 #include <scsi/scsi_request.h>
/* All adapter instances managed by this driver.  NOTE(review): list
 * appears to be protected by ipr_driver_lock below -- confirm at use sites. */
static struct list_head ipr_ioa_head = LIST_HEAD_INIT(ipr_ioa_head);
/* Module parameters; see the module_param_named()/MODULE_PARM_DESC() calls below. */
static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
static unsigned int ipr_max_speed = 1;		/* 0=80 MB/s, 1=U160, 2=U320 */
static int ipr_testmode = 0;			/* DANGEROUS: allow unsupported configs */
static spinlock_t ipr_driver_lock = SPIN_LOCK_UNLOCKED;
/* This table describes the differences between DMA controller chips:
 * PCI cache line size plus the MMIO (BAR-relative) offsets of each
 * chip's interrupt and inbound-doorbell (IOARRIN) registers. */
static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
	/* First chip variant.  NOTE(review): offsets are raw values from the
	 * chip specification -- verify against hardware docs before changing. */
	.cache_line_size = 0x20,
	.set_interrupt_mask_reg = 0x0022C,
	.clr_interrupt_mask_reg = 0x00230,
	.sense_interrupt_mask_reg = 0x0022C,
	.clr_interrupt_reg = 0x00228,
	.sense_interrupt_reg = 0x00224,
	.ioarrin_reg = 0x00404,
	.sense_uproc_interrupt_reg = 0x00214,
	.set_uproc_interrupt_reg = 0x00214,
	.clr_uproc_interrupt_reg = 0x00218
	/* Second chip variant: same register set at different offsets */
	.cache_line_size = 0x20,
	.set_interrupt_mask_reg = 0x00288,
	.clr_interrupt_mask_reg = 0x0028C,
	.sense_interrupt_mask_reg = 0x00288,
	.clr_interrupt_reg = 0x00284,
	.sense_interrupt_reg = 0x00280,
	.ioarrin_reg = 0x00504,
	.sense_uproc_interrupt_reg = 0x00290,
	.set_uproc_interrupt_reg = 0x00290,
	.clr_uproc_interrupt_reg = 0x00294
/* SCSI back-plane transfer rates, indexed by the ipr_max_speed parameter */
static int ipr_max_bus_speeds [] = {
	IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE

MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
/* Parameters use perm 0: settable only at module load, not via sysfs */
module_param_named(max_speed, ipr_max_speed, uint, 0);
MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
module_param_named(log_level, ipr_log_level, uint, 0);
MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
module_param_named(testmode, ipr_testmode, int, 0);
MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
MODULE_LICENSE("GPL");
MODULE_VERSION(IPR_DRIVER_VERSION);
143 static const char *ipr_gpdd_dev_end_states[] = {
145 "Terminated by host",
146 "Terminated by device reset",
147 "Terminated by bus reset",
149 "Command not started"
152 static const char *ipr_gpdd_dev_bus_phases[] = {
166 /* A constant array of IOASCs/URCs/Error Messages */
168 struct ipr_error_table_t ipr_error_table[] = {
170 "8155: An unknown error was received"},
172 "Soft underlength error"},
174 "Command to be cancelled not found"},
176 "Qualified success"},
178 "FFFE: Soft device bus error recovered by the IOA"},
180 "FFF9: Device sector reassign successful"},
182 "FFF7: Media error recovered by device rewrite procedures"},
184 "7001: IOA sector reassignment successful"},
186 "FFF9: Soft media error. Sector reassignment recommended"},
188 "FFF7: Media error recovered by IOA rewrite procedures"},
190 "FF3D: Soft PCI bus error recovered by the IOA"},
192 "FFF6: Device hardware error recovered by the IOA"},
194 "FFF6: Device hardware error recovered by the device"},
196 "FF3D: Soft IOA error recovered by the IOA"},
198 "FFFA: Undefined device response recovered by the IOA"},
200 "FFF6: Device bus error, message or command phase"},
202 "FFF6: Failure prediction threshold exceeded"},
204 "8009: Impending cache battery pack failure"},
206 "34FF: Disk device format in progress"},
208 "Synchronization required"},
210 "No ready, IOA shutdown"},
212 "3020: Storage subsystem configuration error"},
214 "FFF5: Medium error, data unreadable, recommend reassign"},
216 "7000: Medium error, data unreadable, do not reassign"},
218 "FFF3: Disk media format bad"},
220 "3002: Addressed device failed to respond to selection"},
222 "3100: Device bus error"},
224 "3109: IOA timed out a device command"},
226 "3120: SCSI bus is not operational"},
228 "9000: IOA reserved area data check"},
230 "9001: IOA reserved area invalid data pattern"},
232 "9002: IOA reserved area LRC error"},
234 "102E: Out of alternate sectors for disk storage"},
236 "FFF4: Data transfer underlength error"},
238 "FFF4: Data transfer overlength error"},
240 "3400: Logical unit failure"},
242 "FFF4: Device microcode is corrupt"},
244 "8150: PCI bus error"},
246 "Unsupported device bus message received"},
248 "FFF4: Disk device problem"},
250 "8150: Permanent IOA failure"},
252 "3010: Disk device returned wrong response to IOA"},
254 "8151: IOA microcode error"},
256 "Device bus status error"},
258 "8157: IOA error requiring IOA reset to recover"},
260 "Message reject received from the device"},
262 "8008: A permanent cache battery pack failure occurred"},
264 "9090: Disk unit has been modified after the last known status"},
266 "9081: IOA detected device error"},
268 "9082: IOA detected device error"},
270 "3110: Device bus error, message or command phase"},
272 "9091: Incorrect hardware configuration change has been detected"},
274 "FFF4: Command to logical unit failed"},
276 "Illegal request, invalid request type or request packet"},
278 "Illegal request, invalid resource handle"},
280 "Illegal request, invalid field in parameter list"},
282 "Illegal request, parameter not supported"},
284 "Illegal request, parameter value invalid"},
286 "Illegal request, command sequence error"},
288 "9031: Array protection temporarily suspended, protection resuming"},
290 "9040: Array protection temporarily suspended, protection resuming"},
292 "FFFB: SCSI bus was reset"},
294 "FFFE: SCSI bus transition to single ended"},
296 "FFFE: SCSI bus transition to LVD"},
298 "FFFB: SCSI bus was reset by another initiator"},
300 "3029: A device replacement has occurred"},
302 "9051: IOA cache data exists for a missing or failed device"},
304 "9025: Disk unit is not supported at its physical location"},
306 "3020: IOA detected a SCSI bus configuration error"},
308 "3150: SCSI bus configuration error"},
310 "9041: Array protection temporarily suspended"},
312 "9030: Array no longer protected due to missing or failed disk unit"},
314 "Failure due to other device"},
316 "9008: IOA does not support functions expected by devices"},
318 "9010: Cache data associated with attached devices cannot be found"},
320 "9011: Cache data belongs to devices other than those attached"},
322 "9020: Array missing 2 or more devices with only 1 device present"},
324 "9021: Array missing 2 or more devices with 2 or more devices present"},
326 "9022: Exposed array is missing a required device"},
328 "9023: Array member(s) not at required physical locations"},
330 "9024: Array not functional due to present hardware configuration"},
332 "9026: Array not functional due to present hardware configuration"},
334 "9027: Array is missing a device and parity is out of sync"},
336 "9028: Maximum number of arrays already exist"},
338 "9050: Required cache data cannot be located for a disk unit"},
340 "9052: Cache data exists for a device that has been modified"},
342 "9054: IOA resources not available due to previous problems"},
344 "9092: Disk unit requires initialization before use"},
346 "9029: Incorrect hardware configuration change has been detected"},
348 "9060: One or more disk pairs are missing from an array"},
350 "9061: One or more disks are missing from an array"},
352 "9062: One or more disks are missing from an array"},
354 "9063: Maximum number of functional arrays has been exceeded"},
356 "Aborted command, invalid descriptor"},
358 "Command terminated by host"}
/*
 * Known SES (SCSI Enclosure Services) back-planes and the maximum SCSI
 * bus speed (MB/s) each supports.  In the middle column, an 'X' marks a
 * product-ID byte that must match; other characters (e.g. '*') appear to
 * be wildcard positions -- see ipr_find_ses_entry() for the comparison.
 */
static const struct ipr_ses_table_entry ipr_ses_table[] = {
	{ "2104-DL1        ", "XXXXXXXXXXXXXXXX", 80 },
	{ "2104-TL1        ", "XXXXXXXXXXXXXXXX", 80 },
	{ "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
	{ "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
	{ "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
	{ "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
	{ "2104-DU3        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "2104-TU3        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "St  V1S2        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBPD4M  PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "VSBPD1H   U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
378 * Function Prototypes
380 static int ipr_reset_alert(struct ipr_cmnd *);
381 static void ipr_process_ccn(struct ipr_cmnd *);
382 static void ipr_process_error(struct ipr_cmnd *);
383 static void ipr_reset_ioa_job(struct ipr_cmnd *);
384 static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
385 enum ipr_shutdown_type);
387 #ifdef CONFIG_SCSI_IPR_TRACE
389 * ipr_trc_hook - Add a trace entry to the driver trace
390 * @ipr_cmd: ipr command struct
392 * @add_data: additional data
397 static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
398 u8 type, u32 add_data)
400 struct ipr_trace_entry *trace_entry;
401 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
403 trace_entry = &ioa_cfg->trace[ioa_cfg->trace_index++];
404 trace_entry->time = jiffies;
405 trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
406 trace_entry->type = type;
407 trace_entry->cmd_index = ipr_cmd->cmd_index;
408 trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
409 trace_entry->u.add_data = add_data;
412 #define ipr_trc_hook(ipr_cmd, type, add_data) do { } while(0)
416 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
417 * @ipr_cmd: ipr command struct
422 static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
424 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
425 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
427 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
428 ioarcb->write_data_transfer_length = 0;
429 ioarcb->read_data_transfer_length = 0;
430 ioarcb->write_ioadl_len = 0;
431 ioarcb->read_ioadl_len = 0;
433 ioasa->residual_data_len = 0;
435 ipr_cmd->scsi_cmd = NULL;
436 ipr_cmd->sense_buffer[0] = 0;
437 ipr_cmd->dma_use_sg = 0;
441 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
442 * @ipr_cmd: ipr command struct
447 static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
449 ipr_reinit_ipr_cmnd(ipr_cmd);
450 ipr_cmd->u.scratch = 0;
451 init_timer(&ipr_cmd->timer);
455 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
456 * @ioa_cfg: ioa config struct
459 * pointer to ipr command struct
462 struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
464 struct ipr_cmnd *ipr_cmd;
466 ipr_cmd = list_entry(ioa_cfg->free_q.next, struct ipr_cmnd, queue);
467 list_del(&ipr_cmd->queue);
468 ipr_init_ipr_cmnd(ipr_cmd);
474 * ipr_unmap_sglist - Unmap scatterlist if mapped
475 * @ioa_cfg: ioa config struct
476 * @ipr_cmd: ipr command struct
481 static void ipr_unmap_sglist(struct ipr_ioa_cfg *ioa_cfg,
482 struct ipr_cmnd *ipr_cmd)
484 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
486 if (ipr_cmd->dma_use_sg) {
487 if (scsi_cmd->use_sg > 0) {
488 pci_unmap_sg(ioa_cfg->pdev, scsi_cmd->request_buffer,
490 scsi_cmd->sc_data_direction);
492 pci_unmap_single(ioa_cfg->pdev, ipr_cmd->dma_handle,
493 scsi_cmd->request_bufflen,
494 scsi_cmd->sc_data_direction);
500 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
501 * @ioa_cfg: ioa config struct
502 * @clr_ints: interrupts to clear
504 * This function masks all interrupts on the adapter, then clears the
505 * interrupts specified in the mask
510 static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
513 volatile u32 int_reg;
515 /* Stop new interrupts */
516 ioa_cfg->allow_interrupts = 0;
518 /* Set interrupt mask to stop all new interrupts */
519 writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);
521 /* Clear any pending interrupts */
522 writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg);
523 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
527 * ipr_save_pcix_cmd_reg - Save PCI-X command register
528 * @ioa_cfg: ioa config struct
531 * 0 on success / -EIO on failure
533 static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
535 int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
537 if (pcix_cmd_reg == 0) {
538 dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
542 if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg,
543 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
544 dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
548 ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
553 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
554 * @ioa_cfg: ioa config struct
557 * 0 on success / -EIO on failure
559 static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
561 int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
564 if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg,
565 ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
566 dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
570 dev_err(&ioa_cfg->pdev->dev,
571 "Failed to setup PCI-X command register\n");
579 * ipr_scsi_eh_done - mid-layer done function for aborted ops
580 * @ipr_cmd: ipr command struct
582 * This function is invoked by the interrupt handler for
583 * ops generated by the SCSI mid-layer which are being aborted.
588 static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
590 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
591 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
593 scsi_cmd->result |= (DID_ERROR << 16);
595 ipr_unmap_sglist(ioa_cfg, ipr_cmd);
596 scsi_cmd->scsi_done(scsi_cmd);
597 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
601 * ipr_fail_all_ops - Fails all outstanding ops.
602 * @ioa_cfg: ioa config struct
604 * This function fails all outstanding ops.
609 static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
611 struct ipr_cmnd *ipr_cmd, *temp;
614 list_for_each_entry_safe(ipr_cmd, temp, &ioa_cfg->pending_q, queue) {
615 list_del(&ipr_cmd->queue);
617 ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
618 ipr_cmd->ioasa.ilid = cpu_to_be32(IPR_DRIVER_ILID);
620 if (ipr_cmd->scsi_cmd)
621 ipr_cmd->done = ipr_scsi_eh_done;
623 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, IPR_IOASC_IOA_WAS_RESET);
624 del_timer(&ipr_cmd->timer);
625 ipr_cmd->done(ipr_cmd);
632 * ipr_do_req - Send driver initiated requests.
633 * @ipr_cmd: ipr command struct
634 * @done: done function
635 * @timeout_func: timeout function
636 * @timeout: timeout value
638 * This function sends the specified command to the adapter with the
639 * timeout given. The done function is invoked on command completion.
644 static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
645 void (*done) (struct ipr_cmnd *),
646 void (*timeout_func) (struct ipr_cmnd *), u32 timeout)
648 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
650 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
652 ipr_cmd->done = done;
654 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
655 ipr_cmd->timer.expires = jiffies + timeout;
656 ipr_cmd->timer.function = (void (*)(unsigned long))timeout_func;
658 add_timer(&ipr_cmd->timer);
660 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);
663 writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
664 ioa_cfg->regs.ioarrin_reg);
668 * ipr_internal_cmd_done - Op done function for an internally generated op.
669 * @ipr_cmd: ipr command struct
671 * This function is the op done function for an internally generated,
672 * blocking op. It simply wakes the sleeping thread.
677 static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
679 if (ipr_cmd->u.sibling)
680 ipr_cmd->u.sibling = NULL;
682 complete(&ipr_cmd->completion);
686 * ipr_send_blocking_cmd - Send command and sleep on its completion.
687 * @ipr_cmd: ipr command struct
688 * @timeout_func: function to invoke if command times out
694 static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
695 void (*timeout_func) (struct ipr_cmnd *ipr_cmd),
698 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
700 init_completion(&ipr_cmd->completion);
701 ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);
703 spin_unlock_irq(ioa_cfg->host->host_lock);
704 wait_for_completion(&ipr_cmd->completion);
705 spin_lock_irq(ioa_cfg->host->host_lock);
709 * ipr_send_hcam - Send an HCAM to the adapter.
710 * @ioa_cfg: ioa config struct
712 * @hostrcb: hostrcb struct
714 * This function will send a Host Controlled Async command to the adapter.
715 * If HCAMs are currently not allowed to be issued to the adapter, it will
716 * place the hostrcb on the free queue.
721 static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
722 struct ipr_hostrcb *hostrcb)
724 struct ipr_cmnd *ipr_cmd;
725 struct ipr_ioarcb *ioarcb;
727 if (ioa_cfg->allow_cmds) {
728 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
729 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
730 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);
732 ipr_cmd->u.hostrcb = hostrcb;
733 ioarcb = &ipr_cmd->ioarcb;
735 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
736 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
737 ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
738 ioarcb->cmd_pkt.cdb[1] = type;
739 ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
740 ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;
742 ioarcb->read_data_transfer_length = cpu_to_be32(sizeof(hostrcb->hcam));
743 ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
744 ipr_cmd->ioadl[0].flags_and_data_len =
745 cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | sizeof(hostrcb->hcam));
746 ipr_cmd->ioadl[0].address = cpu_to_be32(hostrcb->hostrcb_dma);
748 if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
749 ipr_cmd->done = ipr_process_ccn;
751 ipr_cmd->done = ipr_process_error;
753 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);
756 writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
757 ioa_cfg->regs.ioarrin_reg);
759 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
764 * ipr_init_res_entry - Initialize a resource entry struct.
765 * @res: resource entry struct
static void ipr_init_res_entry(struct ipr_resource_entry *res)
	/* Initialize a freshly allocated resource entry: mark it as needing
	 * a sync-complete before first use, clear the mid-layer bookkeeping
	 * flags, and give it the default queue depth.
	 * NOTE(review): res->sdev is not reset here, yet
	 * ipr_handle_config_change() tests !res->sdev on recycled entries --
	 * confirm the sdev pointer is cleared somewhere on this path. */
	res->needs_sync_complete = 1;
	res->del_from_ml = 0;
	res->resetting_device = 0;
	res->qdepth = IPR_MAX_CMD_PER_LUN;
783 * ipr_handle_config_change - Handle a config change from the adapter
784 * @ioa_cfg: ioa config struct
static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
				     struct ipr_hostrcb *hostrcb)
	/* Apply a Configuration Change Notification: locate (or allocate)
	 * the resource entry for the reported device address, refresh its
	 * cached config-table data, and schedule mid-layer add/remove work.
	 * The hostrcb is re-posted to the adapter when processing is done. */
	struct ipr_resource_entry *res = NULL;
	struct ipr_config_table_entry *cfgte;

	cfgte = &hostrcb->hcam.u.ccn.cfgte;

	/* Look for an existing resource at the reported res_addr */
	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if (!memcmp(&res->cfgte.res_addr, &cfgte->res_addr,
			    sizeof(cfgte->res_addr))) {
	/* No match: take a free entry; if none remain, just give the
	 * hostrcb back to the adapter. */
	if (list_empty(&ioa_cfg->free_res_q)) {
		ipr_send_hcam(ioa_cfg,
			      IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
	res = list_entry(ioa_cfg->free_res_q.next,
			 struct ipr_resource_entry, queue);

	list_del(&res->queue);
	ipr_init_res_entry(res);
	list_add_tail(&res->queue, &ioa_cfg->used_res_q);

	/* Cache the adapter's new config-table entry for this resource */
	memcpy(&res->cfgte, cfgte, sizeof(struct ipr_config_table_entry));

	if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
		/* Device removed: detach any scsi_device and have the worker
		 * delete it from the mid-layer, then recycle the entry. */
		res->sdev->hostdata = NULL;
		res->del_from_ml = 1;
		if (ioa_cfg->allow_ml_add_del)
			schedule_work(&ioa_cfg->work_q);
		list_move_tail(&res->queue, &ioa_cfg->free_res_q);
	} else if (!res->sdev) {
		/* New/changed device with no scsi_device yet: schedule a
		 * mid-layer scan/add. */
		if (ioa_cfg->allow_ml_add_del)
			schedule_work(&ioa_cfg->work_q);

	/* Hand the hostrcb back so the adapter can raise the next CCN */
	ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
843 * ipr_process_ccn - Op done function for a CCN.
844 * @ipr_cmd: ipr command struct
846 * This function is the op done function for a configuration
847 * change notification host controlled async from the adapter.
852 static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
854 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
855 struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
856 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
858 list_del(&hostrcb->queue);
859 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
862 if (ioasc != IPR_IOASC_IOA_WAS_RESET)
863 dev_err(&ioa_cfg->pdev->dev,
864 "Host RCB failed with IOASC: 0x%08X\n", ioasc);
866 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
868 ipr_handle_config_change(ioa_cfg, hostrcb);
873 * ipr_log_vpd - Log the passed VPD to the error log.
874 * @vpids: vendor/product id struct
875 * @serial_num: serial number string
880 static void ipr_log_vpd(struct ipr_std_inq_vpids *vpids, u8 *serial_num)
882 char buffer[max_t(int, sizeof(struct ipr_std_inq_vpids),
883 IPR_SERIAL_NUM_LEN) + 1];
885 memcpy(buffer, vpids, sizeof(struct ipr_std_inq_vpids));
886 buffer[sizeof(struct ipr_std_inq_vpids)] = '\0';
887 ipr_err("Vendor/Product ID: %s\n", buffer);
889 memcpy(buffer, serial_num, IPR_SERIAL_NUM_LEN);
890 buffer[IPR_SERIAL_NUM_LEN] = '\0';
891 ipr_err(" Serial Number: %s\n", buffer);
895 * ipr_log_cache_error - Log a cache error.
896 * @ioa_cfg: ioa config struct
897 * @hostrcb: hostrcb struct
902 static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
903 struct ipr_hostrcb *hostrcb)
905 struct ipr_hostrcb_type_02_error *error =
906 &hostrcb->hcam.u.error.u.type_02_error;
908 ipr_err("-----Current Configuration-----\n");
909 ipr_err("Cache Directory Card Information:\n");
910 ipr_log_vpd(&error->ioa_vpids, error->ioa_sn);
911 ipr_err("Adapter Card Information:\n");
912 ipr_log_vpd(&error->cfc_vpids, error->cfc_sn);
914 ipr_err("-----Expected Configuration-----\n");
915 ipr_err("Cache Directory Card Information:\n");
916 ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpids,
917 error->ioa_last_attached_to_cfc_sn);
918 ipr_err("Adapter Card Information:\n");
919 ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpids,
920 error->cfc_last_attached_to_ioa_sn);
922 ipr_err("Additional IOA Data: %08X %08X %08X\n",
923 be32_to_cpu(error->ioa_data[0]),
924 be32_to_cpu(error->ioa_data[1]),
925 be32_to_cpu(error->ioa_data[2]));
929 * ipr_log_config_error - Log a configuration error.
930 * @ioa_cfg: ioa config struct
931 * @hostrcb: hostrcb struct
static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
				 struct ipr_hostrcb *hostrcb)
	/* Dump an overlay-3 (device configuration) error: one block of
	 * vendor/product/serial data per logged device error, followed by
	 * that device's raw IOA data words. */
	int errors_logged, i;
	struct ipr_hostrcb_device_data_entry *dev_entry;
	struct ipr_hostrcb_type_03_error *error;

	error = &hostrcb->hcam.u.error.u.type_03_error;
	errors_logged = be32_to_cpu(error->errors_logged);

	ipr_err("Device Errors Detected/Logged: %d/%d\n",
		be32_to_cpu(error->errors_detected), errors_logged);

	dev_entry = error->dev_entry;

	for (i = 0; i < errors_logged; i++, dev_entry++) {
		/* A bus number out of range marks a missing device */
		if (dev_entry->dev_res_addr.bus >= IPR_MAX_NUM_BUSES) {
			ipr_err("Device %d: missing\n", i + 1);
		ipr_err("Device %d: %d:%d:%d:%d\n", i + 1,
			ioa_cfg->host->host_no, dev_entry->dev_res_addr.bus,
			dev_entry->dev_res_addr.target, dev_entry->dev_res_addr.lun);

		ipr_log_vpd(&dev_entry->dev_vpids, dev_entry->dev_sn);

		ipr_err("-----New Device Information-----\n");
		ipr_log_vpd(&dev_entry->new_dev_vpids, dev_entry->new_dev_sn);

		ipr_err("Cache Directory Card Information:\n");
		ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpids,
			    dev_entry->ioa_last_with_dev_sn);

		ipr_err("Adapter Card Information:\n");
		ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpids,
			    dev_entry->cfc_last_with_dev_sn);

		ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
			be32_to_cpu(dev_entry->ioa_data[0]),
			be32_to_cpu(dev_entry->ioa_data[1]),
			be32_to_cpu(dev_entry->ioa_data[2]),
			be32_to_cpu(dev_entry->ioa_data[3]),
			be32_to_cpu(dev_entry->ioa_data[4]));
984 * ipr_log_array_error - Log an array configuration error.
985 * @ioa_cfg: ioa config struct
986 * @hostrcb: hostrcb struct
static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_hostrcb *hostrcb)
	/* Dump an overlay-4/6 (array configuration) error: the array's
	 * protection level and location, then current vs. expected location
	 * for each member whose serial number is in use. */
	struct ipr_hostrcb_type_04_error *error;
	struct ipr_hostrcb_array_data_entry *array_entry;
	u8 zero_sn[IPR_SERIAL_NUM_LEN];

	/* An all-'0' serial number marks an unused member slot */
	memset(zero_sn, '0', IPR_SERIAL_NUM_LEN);

	error = &hostrcb->hcam.u.error.u.type_04_error;

	ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
		error->protection_level,
		ioa_cfg->host->host_no,
		error->last_func_vset_res_addr.bus,
		error->last_func_vset_res_addr.target,
		error->last_func_vset_res_addr.lun);

	array_entry = error->array_member;

	/* NOTE(review): 18 looks like the maximum member count for these
	 * overlays -- confirm against struct ipr_hostrcb_type_04_error. */
	for (i = 0; i < 18; i++) {
		if (!memcmp(array_entry->serial_num, zero_sn, IPR_SERIAL_NUM_LEN))

		if (error->exposed_mode_adn == i) {
			ipr_err("Exposed Array Member %d:\n", i);
		ipr_err("Array Member %d:\n", i);

		ipr_log_vpd(&array_entry->vpids, array_entry->serial_num);

		if (array_entry->dev_res_addr.bus >= IPR_MAX_NUM_BUSES) {
			ipr_err("Current Location: unknown\n");
		ipr_err("Current Location: %d:%d:%d:%d\n",
			ioa_cfg->host->host_no,
			array_entry->dev_res_addr.bus,
			array_entry->dev_res_addr.target,
			array_entry->dev_res_addr.lun);

		/* NOTE(review): this guard also tests dev_res_addr, yet the
		 * message below prints expected_dev_res_addr -- looks like a
		 * copy/paste slip; should this test expected_dev_res_addr.bus? */
		if (array_entry->dev_res_addr.bus >= IPR_MAX_NUM_BUSES) {
			ipr_err("Expected Location: unknown\n");
		ipr_err("Expected Location: %d:%d:%d:%d\n",
			ioa_cfg->host->host_no,
			array_entry->expected_dev_res_addr.bus,
			array_entry->expected_dev_res_addr.target,
			array_entry->expected_dev_res_addr.lun);

	/* Continue with the second bank of member entries */
	array_entry = error->array_member2;
1058 * ipr_log_generic_error - Log an adapter error.
1059 * @ioa_cfg: ioa config struct
1060 * @hostrcb: hostrcb struct
1065 static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
1066 struct ipr_hostrcb *hostrcb)
1069 int ioa_data_len = be32_to_cpu(hostrcb->hcam.length);
1071 if (ioa_data_len == 0)
1074 ipr_err("IOA Error Data:\n");
1075 ipr_err("Offset 0 1 2 3 4 5 6 7 8 9 A B C D E F\n");
1077 for (i = 0; i < ioa_data_len / 4; i += 4) {
1078 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
1079 be32_to_cpu(hostrcb->hcam.u.raw.data[i]),
1080 be32_to_cpu(hostrcb->hcam.u.raw.data[i+1]),
1081 be32_to_cpu(hostrcb->hcam.u.raw.data[i+2]),
1082 be32_to_cpu(hostrcb->hcam.u.raw.data[i+3]));
1087 * ipr_get_error - Find the specfied IOASC in the ipr_error_table.
1090 * This function will return the index of into the ipr_error_table
1091 * for the specified IOASC. If the IOASC is not in the table,
1092 * 0 will be returned, which points to the entry used for unknown errors.
1095 * index into the ipr_error_table
1097 static u32 ipr_get_error(u32 ioasc)
1101 for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
1102 if (ipr_error_table[i].ioasc == ioasc)
1109 * ipr_handle_log_data - Log an adapter error.
1110 * @ioa_cfg: ioa config struct
1111 * @hostrcb: hostrcb struct
1113 * This function logs an adapter error to the system.
1118 static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
1119 struct ipr_hostrcb *hostrcb)
1124 if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
1127 if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
1128 dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");
1130 ioasc = be32_to_cpu(hostrcb->hcam.u.error.failing_dev_ioasc);
1132 if (ioasc == IPR_IOASC_BUS_WAS_RESET ||
1133 ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER) {
1134 /* Tell the midlayer we had a bus reset so it will handle the UA properly */
1135 scsi_report_bus_reset(ioa_cfg->host,
1136 hostrcb->hcam.u.error.failing_dev_res_addr.bus);
1139 error_index = ipr_get_error(ioasc);
1141 if (!ipr_error_table[error_index].log_hcam)
1144 if (ipr_is_device(&hostrcb->hcam.u.error.failing_dev_res_addr)) {
1145 ipr_res_err(ioa_cfg, hostrcb->hcam.u.error.failing_dev_res_addr,
1146 "%s\n", ipr_error_table[error_index].error);
1148 dev_err(&ioa_cfg->pdev->dev, "%s\n",
1149 ipr_error_table[error_index].error);
1152 /* Set indication we have logged an error */
1153 ioa_cfg->errors_logged++;
1155 if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
1158 switch (hostrcb->hcam.overlay_id) {
1159 case IPR_HOST_RCB_OVERLAY_ID_1:
1160 ipr_log_generic_error(ioa_cfg, hostrcb);
1162 case IPR_HOST_RCB_OVERLAY_ID_2:
1163 ipr_log_cache_error(ioa_cfg, hostrcb);
1165 case IPR_HOST_RCB_OVERLAY_ID_3:
1166 ipr_log_config_error(ioa_cfg, hostrcb);
1168 case IPR_HOST_RCB_OVERLAY_ID_4:
1169 case IPR_HOST_RCB_OVERLAY_ID_6:
1170 ipr_log_array_error(ioa_cfg, hostrcb);
1172 case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
1173 ipr_log_generic_error(ioa_cfg, hostrcb);
1176 dev_err(&ioa_cfg->pdev->dev,
1177 "Unknown error received. Overlay ID: %d\n",
1178 hostrcb->hcam.overlay_id);
1184 * ipr_process_error - Op done function for an adapter error log.
1185 * @ipr_cmd: ipr command struct
1187 * This function is the op done function for an error log host
1188 * controlled async from the adapter. It will log the error and
1189 * send the HCAM back to the adapter.
1194 static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
1196 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1197 struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
1198 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
1200 list_del(&hostrcb->queue);
1201 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
1204 ipr_handle_log_data(ioa_cfg, hostrcb);
1205 } else if (ioasc != IPR_IOASC_IOA_WAS_RESET) {
1206 dev_err(&ioa_cfg->pdev->dev,
1207 "Host RCB failed with IOASC: 0x%08X\n", ioasc);
1210 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
1214 * ipr_timeout - An internally generated op has timed out.
1215 * @ipr_cmd: ipr command struct
1217 * This function blocks host requests and initiates an
/*
 * Timeout handler for an internally generated adapter op. Logs the
 * timeout, promotes a pending dump request to GET_DUMP, and initiates
 * an adapter reset unless another reset already owns the command.
 * Runs with the host lock taken locally.
 */
1223 static void ipr_timeout(struct ipr_cmnd *ipr_cmd)
1225 	unsigned long lock_flags = 0;
1226 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1229 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1231 	ioa_cfg->errors_logged++;
1232 	dev_err(&ioa_cfg->pdev->dev,
1233 	"Adapter being reset due to command timeout.\n");
	/* If a dump was requested, collect it as part of this reset. */
1235 	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
1236 	ioa_cfg->sdt_state = GET_DUMP;
	/* Only reset if no reset is in progress, or if this command IS the
	   in-flight reset command that timed out. */
1238 	if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
1239 	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
1241 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1246 * ipr_reset_reload - Reset/Reload the IOA
1247 * @ioa_cfg: ioa config struct
1248 * @shutdown_type: shutdown type
1250 * This function resets the adapter and re-initializes it.
1251 * This function assumes that all new host commands have been stopped.
/*
 * Reset the adapter and wait for the reset/reload to complete.
 * Caller must hold the host lock; it is dropped while sleeping on
 * reset_wait_q and re-acquired afterwards (note the unconditional
 * spin_unlock_irq/spin_lock_irq pairing below).
 */
1255 static int ipr_reset_reload(struct ipr_ioa_cfg *ioa_cfg,
1256 	enum ipr_shutdown_type shutdown_type)
	/* Don't stack a second reset on top of one already running. */
1258 	if (!ioa_cfg->in_reset_reload)
1259 	ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
1261 	spin_unlock_irq(ioa_cfg->host->host_lock);
1262 	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
1263 	spin_lock_irq(ioa_cfg->host->host_lock);
1265 	/* If we got hit with a host reset while we were already resetting
1266 	the adapter for some reason, and the reset failed. */
1267 	if (ioa_cfg->ioa_is_dead) {
1276 * ipr_find_ses_entry - Find matching SES in SES table
1277 * @res: resource entry struct of SES
1280 * pointer to SES table entry / NULL on failure
/*
 * Look up the SES table entry matching this resource's inquiry
 * product id. compare_product_id_byte[] appears to act as a mask:
 * positions marked 'X' must byte-compare against product_id[]
 * (other positions presumably match unconditionally — the non-'X'
 * branch is elided in this excerpt; TODO confirm).
 */
1282 static const struct ipr_ses_table_entry *
1283 ipr_find_ses_entry(struct ipr_resource_entry *res)
1286 	const struct ipr_ses_table_entry *ste = ipr_ses_table;
1288 	for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
1289 	for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
1290 	if (ste->compare_product_id_byte[j] == 'X') {
1291 	if (res->cfgte.std_inq_data.vpids.product_id[j] == ste->product_id[j])
	/* A full-length match selects this table entry. */
1299 	if (matches == IPR_PROD_ID_LEN)
1307 * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
1308 * @ioa_cfg: ioa config struct
1310 * @bus_width: bus width
1313 * SCSI bus speed in units of 100KHz, 1600 is 160 MHz
1314 * For a 2-byte wide SCSI bus, the maximum transfer speed is
1315 * twice the maximum transfer rate (e.g. for a wide enabled bus,
1316 * max 160MHz = max 320MB/sec).
/*
 * Compute the maximum SCSI transfer rate for a bus, starting from the
 * width-based maximum and clamping it by the speed limit of any SES
 * device found on that bus in the SES table.
 * Returns the speed in units of 100KHz (1600 == 160 MHz).
 */
1318 static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
1320 	struct ipr_resource_entry *res;
1321 	const struct ipr_ses_table_entry *ste;
1322 	u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);
1324 	/* Loop through each config table entry in the config table buffer */
1325 	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
	/* Only SES devices on the requested bus can impose a limit. */
1326 	if (!(IPR_IS_SES_DEVICE(res->cfgte.std_inq_data)))
1329 	if (bus != res->cfgte.res_addr.bus)
1332 	if (!(ste = ipr_find_ses_entry(res)))
	/* Scale the table's limit by the bus width (in bytes). */
1335 	max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
1338 	return max_xfer_rate;
1342 * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
1343 * @ioa_cfg: ioa config struct
1344 * @max_delay: max delay in micro-seconds to wait
1346 * Waits for an IODEBUG ACK from the IOA, doing busy looping.
1349 * 0 on success / other on failure
/*
 * Busy-wait for the IOA to raise IO_DEBUG_ACKNOWLEDGE in the
 * interrupt register, up to max_delay microseconds.
 * Used by the LDUMP handshake; returns 0 on ack (timeout return is
 * elided in this excerpt).
 */
1351 static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
1353 	volatile u32 pcii_reg;
1356 	/* Read interrupt reg until IOA signals IO Debug Acknowledge */
1357 	while (delay < max_delay) {
1358 	pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
1360 	if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
1363 	/* udelay cannot be used if delay is more than a few milliseconds */
1364 	if ((delay / 1000) > MAX_UDELAY_MS)
1365 	mdelay(delay / 1000);
1375 * ipr_get_ldump_data_section - Dump IOA memory
1376 * @ioa_cfg: ioa config struct
1377 * @start_addr: adapter address to dump
1378 * @dest: destination kernel buffer
1379 * @length_in_words: length to dump in 4 byte words
1382 * 0 on success / -EIO on failure
/*
 * Dump a section of IOA memory via the mailbox LDUMP handshake:
 * raise reset-alert + io-debug-alert, write the start address to the
 * mailbox, then read one 32-bit word per IO-debug-ack cycle into
 * dest. Words are stored with cpu_to_be32() so the dump buffer is
 * big-endian regardless of host byte order.
 * Returns 0 on success / -EIO on failure (error returns elided here).
 */
1384 static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
1386 	u32 *dest, u32 length_in_words)
1388 	volatile u32 temp_pcii_reg;
1391 	/* Write IOA interrupt reg starting LDUMP state */
1392 	writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
1393 	ioa_cfg->regs.set_uproc_interrupt_reg);
1395 	/* Wait for IO debug acknowledge */
1396 	if (ipr_wait_iodbg_ack(ioa_cfg,
1397 	IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
1398 	dev_err(&ioa_cfg->pdev->dev,
1399 	"IOA dump long data transfer timeout\n");
1403 	/* Signal LDUMP interlocked - clear IO debug ack */
1404 	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
1405 	ioa_cfg->regs.clr_interrupt_reg);
1407 	/* Write Mailbox with starting address */
1408 	writel(start_addr, ioa_cfg->ioa_mailbox);
1410 	/* Signal address valid - clear IOA Reset alert */
1411 	writel(IPR_UPROCI_RESET_ALERT,
1412 	ioa_cfg->regs.clr_uproc_interrupt_reg);
	/* One handshake cycle per 32-bit word of dump data. */
1414 	for (i = 0; i < length_in_words; i++) {
1415 	/* Wait for IO debug acknowledge */
1416 	if (ipr_wait_iodbg_ack(ioa_cfg,
1417 	IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
1418 	dev_err(&ioa_cfg->pdev->dev,
1419 	"IOA dump short data transfer timeout\n");
1423 	/* Read data from mailbox and increment destination pointer */
1424 	*dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
1427 	/* For all but the last word of data, signal data received */
1428 	if (i < (length_in_words - 1)) {
1429 	/* Signal dump data received - Clear IO debug Ack */
1430 	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
1431 	ioa_cfg->regs.clr_interrupt_reg);
1435 	/* Signal end of block transfer. Set reset alert then clear IO debug ack */
1436 	writel(IPR_UPROCI_RESET_ALERT,
1437 	ioa_cfg->regs.set_uproc_interrupt_reg);
1439 	writel(IPR_UPROCI_IO_DEBUG_ALERT,
1440 	ioa_cfg->regs.clr_uproc_interrupt_reg);
1442 	/* Signal dump data received - Clear IO debug Ack */
1443 	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
1444 	ioa_cfg->regs.clr_interrupt_reg);
1446 	/* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
1447 	while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
1449 	readl(ioa_cfg->regs.sense_uproc_interrupt_reg);
1451 	if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
1461 #ifdef CONFIG_SCSI_IPR_DUMP
1463 * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
1464 * @ioa_cfg: ioa config struct
1465 * @pci_address: adapter address
1466 * @length: length of data to copy
1468 * Copy data from PCI adapter to kernel buffer.
1469 * Note: length MUST be a 4 byte multiple
1471 * 0 on success / other on failure
/*
 * Copy `length` bytes of adapter memory at pci_address into the dump's
 * page list, allocating GFP_ATOMIC pages on demand and capping the
 * overall dump at IPR_MAX_IOA_DUMP_SIZE. Each hardware read is done
 * under the host lock; the copy aborts early if sdt_state becomes
 * ABORT_DUMP. Returns the number of bytes actually copied.
 * Note: length MUST be a 4 byte multiple.
 */
1473 static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
1474 	unsigned long pci_address, u32 length)
1476 	int bytes_copied = 0;
1477 	int cur_len, rc, rem_len, rem_page_len;
1479 	unsigned long lock_flags = 0;
1480 	struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;
1482 	while (bytes_copied < length &&
1483 	(ioa_dump->hdr.len + bytes_copied) < IPR_MAX_IOA_DUMP_SIZE) {
	/* Start a fresh page when the current one is full (or first pass). */
1484 	if (ioa_dump->page_offset >= PAGE_SIZE ||
1485 	ioa_dump->page_offset == 0) {
1486 	page = (u32 *)__get_free_page(GFP_ATOMIC);
	/* Allocation failure: return what we managed to copy so far. */
1490 	return bytes_copied;
1493 	ioa_dump->page_offset = 0;
1494 	ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
1495 	ioa_dump->next_page_index++;
	/* Otherwise continue filling the most recent page. */
1497 	page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];
	/* Chunk size = min(bytes left overall, space left in this page). */
1499 	rem_len = length - bytes_copied;
1500 	rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
1501 	cur_len = min(rem_len, rem_page_len);
1503 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1504 	if (ioa_cfg->sdt_state == ABORT_DUMP) {
1507 	rc = ipr_get_ldump_data_section(ioa_cfg,
1508 	pci_address + bytes_copied,
1509 	&page[ioa_dump->page_offset / 4],
1510 	(cur_len / sizeof(u32)));
1512 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1515 	ioa_dump->page_offset += cur_len;
1516 	bytes_copied += cur_len;
1524 	return bytes_copied;
1528 * ipr_init_dump_entry_hdr - Initialize a dump entry header.
1529 * @hdr: dump entry header struct
/*
 * Initialize a dump entry header with the common eye-catcher,
 * data offset, and success status; callers fill in len/type/id.
 */
1534 static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
1536 	hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
1538 	hdr->offset = sizeof(*hdr);
1539 	hdr->status = IPR_DUMP_STATUS_SUCCESS;
1543 * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
1544 * @ioa_cfg: ioa config struct
1545 * @driver_dump: driver dump struct
/*
 * Add the adapter-type entry to the driver dump: records the IOA type
 * and a packed 32-bit firmware version built from the page-3 inquiry
 * VPD (major | card_type | minor[0] | minor[1]).
 */
1550 static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
1551 	struct ipr_driver_dump *driver_dump)
1553 	struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
1555 	ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
	/* len counts only the payload, not the entry header itself. */
1556 	driver_dump->ioa_type_entry.hdr.len =
1557 	sizeof(struct ipr_dump_ioa_type_entry) -
1558 	sizeof(struct ipr_dump_entry_header);
1559 	driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
1560 	driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
1561 	driver_dump->ioa_type_entry.type = ioa_cfg->type;
1562 	driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
1563 	(ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
1564 	ucode_vpd->minor_release[1];
1565 	driver_dump->hdr.num_entries++;
1569 * ipr_dump_version_data - Fill in the driver version in the dump.
1570 * @ioa_cfg: ioa config struct
1571 * @driver_dump: driver dump struct
/*
 * Add the driver-version entry (ASCII IPR_DRIVER_VERSION string)
 * to the driver dump and bump the entry count.
 */
1576 static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
1577 	struct ipr_driver_dump *driver_dump)
1579 	ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
1580 	driver_dump->version_entry.hdr.len =
1581 	sizeof(struct ipr_dump_version_entry) -
1582 	sizeof(struct ipr_dump_entry_header);
1583 	driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
1584 	driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
1585 	strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
1586 	driver_dump->hdr.num_entries++;
1590 * ipr_dump_trace_data - Fill in the IOA trace in the dump.
1591 * @ioa_cfg: ioa config struct
1592 * @driver_dump: driver dump struct
/*
 * Add the binary IOA trace buffer (IPR_TRACE_SIZE bytes copied from
 * ioa_cfg->trace) to the driver dump and bump the entry count.
 */
1597 static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
1598 	struct ipr_driver_dump *driver_dump)
1600 	ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
1601 	driver_dump->trace_entry.hdr.len =
1602 	sizeof(struct ipr_dump_trace_entry) -
1603 	sizeof(struct ipr_dump_entry_header);
1604 	driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
1605 	driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
1606 	memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
1607 	driver_dump->hdr.num_entries++;
1611 * ipr_dump_location_data - Fill in the IOA location in the dump.
1612 * @ioa_cfg: ioa config struct
1613 * @driver_dump: driver dump struct
/*
 * Add the adapter's PCI location (the pdev bus_id string) as an
 * ASCII entry in the driver dump and bump the entry count.
 */
1618 static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
1619 	struct ipr_driver_dump *driver_dump)
1621 	ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
1622 	driver_dump->location_entry.hdr.len =
1623 	sizeof(struct ipr_dump_location_entry) -
1624 	sizeof(struct ipr_dump_entry_header);
1625 	driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
1626 	driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
1627 	strcpy(driver_dump->location_entry.location, ioa_cfg->pdev->dev.bus_id);
1628 	driver_dump->hdr.num_entries++;
1632 * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
1633 * @ioa_cfg: ioa config struct
1634 * @dump: dump struct
/*
 * Collect a full dump: driver metadata entries (version, location,
 * type, trace) followed by IOA memory. The adapter's mailbox points
 * at a format-2 Smart Dump Table (SDT) whose entries give the address
 * ranges to fetch via ipr_sdt_copy(). The host lock is held while
 * talking to the hardware for the SDT, then dropped for the bulk copy.
 * On completion (or failure) sdt_state advances to DUMP_OBTAINED.
 */
1639 static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
1641 	unsigned long start_addr, sdt_word;
1642 	unsigned long lock_flags = 0;
1643 	struct ipr_driver_dump *driver_dump = &dump->driver_dump;
1644 	struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
1645 	u32 num_entries, start_off, end_off;
1646 	u32 bytes_to_copy, bytes_copied, rc;
1647 	struct ipr_sdt *sdt;
1652 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	/* Another path may have changed state; only proceed on GET_DUMP. */
1654 	if (ioa_cfg->sdt_state != GET_DUMP) {
1655 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	/* Mailbox holds the SDT address while the IOA is in dump state. */
1659 	start_addr = readl(ioa_cfg->ioa_mailbox);
1661 	if (!ipr_sdt_is_fmt2(start_addr)) {
1662 	dev_err(&ioa_cfg->pdev->dev,
1663 	"Invalid dump table format: %lx\n", start_addr);
1664 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1668 	dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");
1670 	driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;
1672 	/* Initialize the overall dump header */
1673 	driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
1674 	driver_dump->hdr.num_entries = 1;
1675 	driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
1676 	driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
1677 	driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
1678 	driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;
	/* Driver-side entries: each helper bumps hdr.num_entries. */
1680 	ipr_dump_version_data(ioa_cfg, driver_dump);
1681 	ipr_dump_location_data(ioa_cfg, driver_dump);
1682 	ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
1683 	ipr_dump_trace_data(ioa_cfg, driver_dump);
1685 	/* Update dump_header */
1686 	driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);
1688 	/* IOA Dump entry */
1689 	ipr_init_dump_entry_hdr(&ioa_dump->hdr);
1690 	ioa_dump->format = IPR_SDT_FMT2;
1691 	ioa_dump->hdr.len = 0;
1692 	ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
1693 	ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;
1695 	/* First entries in sdt are actually a list of dump addresses and
1696 	lengths to gather the real dump data. sdt represents the pointer
1697 	to the ioa generated dump table. Dump data will be extracted based
1698 	on entries in this table */
1699 	sdt = &ioa_dump->sdt;
1701 	rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (u32 *)sdt,
1702 	sizeof(struct ipr_sdt) / sizeof(u32));
1704 	/* Smart Dump table is ready to use and the first entry is valid */
1705 	if (rc || (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE)) {
1706 	dev_err(&ioa_cfg->pdev->dev,
1707 	"Dump of IOA failed. Dump table not valid: %d, %X.\n",
1708 	rc, be32_to_cpu(sdt->hdr.state));
1709 	driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
1710 	ioa_cfg->sdt_state = DUMP_OBTAINED;
1711 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1715 	num_entries = be32_to_cpu(sdt->hdr.num_entries_used);
	/* Clamp to the table size we actually fetched. */
1717 	if (num_entries > IPR_NUM_SDT_ENTRIES)
1718 	num_entries = IPR_NUM_SDT_ENTRIES;
1720 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1722 	for (i = 0; i < num_entries; i++) {
	/* Stop once the accumulated dump hits the size cap. */
1723 	if (ioa_dump->hdr.len > IPR_MAX_IOA_DUMP_SIZE) {
1724 	driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
1728 	if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
1729 	sdt_word = be32_to_cpu(sdt->entry[i].bar_str_offset);
1730 	start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
1731 	end_off = be32_to_cpu(sdt->entry[i].end_offset);
1733 	if (ipr_sdt_is_fmt2(sdt_word) && sdt_word) {
1734 	bytes_to_copy = end_off - start_off;
	/* Oversized entries are skipped and marked invalid. */
1735 	if (bytes_to_copy > IPR_MAX_IOA_DUMP_SIZE) {
1736 	sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
1740 	/* Copy data from adapter to driver buffers */
1741 	bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
1744 	ioa_dump->hdr.len += bytes_copied;
	/* A short copy means the section was truncated. */
1746 	if (bytes_copied != bytes_to_copy) {
1747 	driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
1754 	dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");
1756 	/* Update dump_header */
1757 	driver_dump->hdr.len += ioa_dump->hdr.len;
1759 	ioa_cfg->sdt_state = DUMP_OBTAINED;
/* No-op stub used when dump support (CONFIG_SCSI_IPR_DUMP) is compiled out. */
1764 #define ipr_get_ioa_dump(ioa_cfg, dump) do { } while(0)
1768 * ipr_worker_thread - Worker thread
1769 * @data: ioa config struct
1771 * Called at task level from a work thread. This function takes care
1772 * of adding and removing device from the mid-layer as configuration
1773 * changes are detected by the adapter.
/*
 * Work-queue handler: if a dump was requested, take it (holding a
 * kobject reference so the dump cannot vanish mid-copy) and reset the
 * adapter afterwards; otherwise reconcile mid-layer device state with
 * the resource queue — removing devices flagged del_from_ml and adding
 * ones flagged add_to_ml. The host lock is dropped around every call
 * into the SCSI mid-layer (scsi_remove_device/scsi_add_device sleep).
 */
1778 static void ipr_worker_thread(void *data)
1780 	unsigned long lock_flags;
1781 	struct ipr_resource_entry *res;
1782 	struct scsi_device *sdev;
1783 	struct ipr_dump *dump;
1784 	struct ipr_ioa_cfg *ioa_cfg = data;
1785 	u8 bus, target, lun;
1789 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1791 	if (ioa_cfg->sdt_state == GET_DUMP) {
1792 	dump = ioa_cfg->dump;
	/* kobject_get() pins the dump; bail if it is gone or dying. */
1793 	if (!dump || !kobject_get(&dump->kobj)) {
1794 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1797 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1798 	ipr_get_ioa_dump(ioa_cfg, dump);
1799 	kobject_put(&dump->kobj);
1801 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	/* Reset the IOA now that the dump has been captured. */
1802 	if (ioa_cfg->sdt_state == DUMP_OBTAINED)
1803 	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
1804 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	/* Defer config changes while commands are blocked or add/del disallowed. */
1811 	if (!ioa_cfg->allow_cmds || !ioa_cfg->allow_ml_add_del) {
1812 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1816 	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
1817 	if (res->del_from_ml && res->sdev) {
	/* scsi_device_get() returns 0 on success here. */
1820 	if (!scsi_device_get(sdev)) {
1822 	list_move_tail(&res->queue, &ioa_cfg->free_res_q);
1823 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1824 	scsi_remove_device(sdev);
1825 	scsi_device_put(sdev);
1826 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1833 	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
1834 	if (res->add_to_ml) {
	/* Snapshot the address before dropping the lock. */
1835 	bus = res->cfgte.res_addr.bus;
1836 	target = res->cfgte.res_addr.target;
1837 	lun = res->cfgte.res_addr.lun;
1838 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1839 	scsi_add_device(ioa_cfg->host, bus, target, lun);
1840 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1845 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1849 #ifdef CONFIG_SCSI_IPR_TRACE
1851 * ipr_read_trace - Dump the adapter trace
1852 * @kobj: kobject struct
1855 * @count: buffer size
1858 * number of bytes printed to buffer
/*
 * sysfs binary-attribute read: copy a window of the adapter trace
 * buffer (IPR_TRACE_SIZE bytes) into buf under the host lock.
 * Returns the number of bytes copied (return elided in this excerpt).
 */
1860 static ssize_t ipr_read_trace(struct kobject *kobj, char *buf,
1861 	loff_t off, size_t count)
1863 	struct class_device *cdev = container_of(kobj,struct class_device,kobj);
1864 	struct Scsi_Host *shost = class_to_shost(cdev);
1865 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
1866 	unsigned long lock_flags = 0;
1867 	int size = IPR_TRACE_SIZE;
1868 	char *src = (char *)ioa_cfg->trace;
	/* Clamp reads that run past the end of the trace buffer. */
1872 	if (off + count > size) {
1877 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1878 	memcpy(buf, &src[off], count);
1879 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* sysfs binary attribute exposing the adapter trace buffer ("trace"). */
1883 static struct bin_attribute ipr_trace_attr = {
1889 	.read = ipr_read_trace,
1894 * ipr_show_fw_version - Show the firmware version
1895 * @class_dev: class device struct
1899 * number of bytes printed to buffer
/*
 * sysfs show: format the firmware version from page-3 inquiry VPD
 * as 8 hex digits (major, card type, minor[0], minor[1]).
 */
1901 static ssize_t ipr_show_fw_version(struct class_device *class_dev, char *buf)
1903 	struct Scsi_Host *shost = class_to_shost(class_dev);
1904 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
1905 	struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
1906 	unsigned long lock_flags = 0;
1909 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1910 	len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
1911 	ucode_vpd->major_release, ucode_vpd->card_type,
1912 	ucode_vpd->minor_release[0],
1913 	ucode_vpd->minor_release[1]);
1914 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Read-only sysfs attribute: "fw_version". */
1918 static struct class_device_attribute ipr_fw_version_attr = {
1920 	.name =		"fw_version",
1923 	.show = ipr_show_fw_version,
1927 * ipr_show_log_level - Show the adapter's error logging level
1928 * @class_dev: class device struct
1932 * number of bytes printed to buffer
/* sysfs show: print the adapter's current error logging level. */
1934 static ssize_t ipr_show_log_level(struct class_device *class_dev, char *buf)
1936 	struct Scsi_Host *shost = class_to_shost(class_dev);
1937 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
1938 	unsigned long lock_flags = 0;
1941 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1942 	len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
1943 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1948 * ipr_store_log_level - Change the adapter's error logging level
1949 * @class_dev: class device struct
1953 * number of bytes printed to buffer
/*
 * sysfs store: parse the buffer as a decimal integer and set the
 * adapter's error logging level under the host lock.
 */
1955 static ssize_t ipr_store_log_level(struct class_device *class_dev,
1956 	const char *buf, size_t count)
1958 	struct Scsi_Host *shost = class_to_shost(class_dev);
1959 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
1960 	unsigned long lock_flags = 0;
1962 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1963 	ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
1964 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Read/write sysfs attribute: "log_level". */
1968 static struct class_device_attribute ipr_log_level_attr = {
1970 	.name =		"log_level",
1971 	.mode =		S_IRUGO | S_IWUSR,
1973 	.show = ipr_show_log_level,
1974 	.store = ipr_store_log_level
1978 * ipr_store_diagnostics - IOA Diagnostics interface
1979 * @class_dev: class_device struct
1981 * @count: buffer size
1983 * This function will reset the adapter and wait a reasonable
1984 * amount of time for any errors that the adapter might log.
1987 * count on success / other on failure
/*
 * sysfs store: run adapter diagnostics by issuing a normal-shutdown
 * reset, waiting for the reset/reload to finish, then waiting ~1s for
 * any errors to be logged. Requires CAP_SYS_ADMIN.
 * Returns count on success / other on failure (returns elided here).
 */
1989 static ssize_t ipr_store_diagnostics(struct class_device *class_dev,
1990 	const char *buf, size_t count)
1992 	struct Scsi_Host *shost = class_to_shost(class_dev);
1993 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
1994 	unsigned long lock_flags = 0;
1997 	if (!capable(CAP_SYS_ADMIN))
	/* Let any reset already in flight complete first. */
2000 	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2001 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2002 	ioa_cfg->errors_logged = 0;
2003 	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2005 	if (ioa_cfg->in_reset_reload) {
2006 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2007 	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2009 	/* Wait for a second for any errors to be logged */
2010 	schedule_timeout(HZ);
2012 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2016 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	/* Errors logged during the window mean the diagnostics failed. */
2017 	if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
2019 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Write-only sysfs attribute: "run_diagnostics". */
2024 static struct class_device_attribute ipr_diagnostics_attr = {
2026 	.name =		"run_diagnostics",
2029 	.store = ipr_store_diagnostics
2033 * ipr_store_reset_adapter - Reset the adapter
2034 * @class_dev: class_device struct
2036 * @count: buffer size
2038 * This function will reset the adapter.
2041 * count on success / other on failure
/*
 * sysfs store: trigger a normal-shutdown adapter reset (unless one
 * is already in progress) and wait for it to complete.
 * Requires CAP_SYS_ADMIN.
 */
2043 static ssize_t ipr_store_reset_adapter(struct class_device *class_dev,
2044 	const char *buf, size_t count)
2046 	struct Scsi_Host *shost = class_to_shost(class_dev);
2047 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2048 	unsigned long lock_flags;
2051 	if (!capable(CAP_SYS_ADMIN))
2054 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2055 	if (!ioa_cfg->in_reset_reload)
2056 	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2057 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2058 	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
/* Write-only sysfs attribute: "reset_host". */
2063 static struct class_device_attribute ipr_ioa_reset_attr = {
2065 	.name =		"reset_host",
2068 	.store = ipr_store_reset_adapter
2072 * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
2073 * @buf_len: buffer length
2075 * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
2076 * list to use for microcode download
2079 * pointer to sglist / NULL on failure
/*
 * Allocate a scatter/gather buffer for microcode download: choose a
 * per-element page order so the image fits in at most IPR_MAX_SGLIST
 * elements, then allocate that many page groups. On a mid-way
 * allocation failure everything allocated so far is freed.
 * Returns the sglist pointer / NULL on failure.
 */
2081 static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
2083 	int sg_size, order, bsize_elem, num_elem, i, j;
2084 	struct ipr_sglist *sglist;
2085 	struct scatterlist *scatterlist;
2088 	/* Get the minimum size per scatter/gather element */
2089 	sg_size = buf_len / (IPR_MAX_SGLIST - 1);
2091 	/* Get the actual size per element */
2092 	order = get_order(sg_size);
2094 	/* Determine the actual number of bytes per element */
2095 	bsize_elem = PAGE_SIZE * (1 << order);
2097 	/* Determine the actual number of sg entries needed */
2098 	if (buf_len % bsize_elem)
2099 	num_elem = (buf_len / bsize_elem) + 1;
2101 	num_elem = buf_len / bsize_elem;
2103 	/* Allocate a scatter/gather list for the DMA */
	/* ipr_sglist embeds one scatterlist, hence the (num_elem - 1). */
2104 	sglist = kmalloc(sizeof(struct ipr_sglist) +
2105 	(sizeof(struct scatterlist) * (num_elem - 1)),
2108 	if (sglist == NULL) {
2113 	memset(sglist, 0, sizeof(struct ipr_sglist) +
2114 	(sizeof(struct scatterlist) * (num_elem - 1)));
2116 	scatterlist = sglist->scatterlist;
2118 	sglist->order = order;
2119 	sglist->num_sg = num_elem;
2121 	/* Allocate a bunch of sg elements */
2122 	for (i = 0; i < num_elem; i++) {
2123 	page = alloc_pages(GFP_KERNEL, order);
2127 	/* Free up what we already allocated */
2128 	for (j = i - 1; j >= 0; j--)
2129 	__free_pages(scatterlist[j].page, order);
2134 	scatterlist[i].page = page;
2141 * ipr_free_ucode_buffer - Frees a microcode download buffer
2142 * @p_dnld: scatter/gather list pointer
2144 * Free a DMA'able ucode download buffer previously allocated with
2145 * ipr_alloc_ucode_buffer
/*
 * Free every page group in a ucode sglist previously allocated by
 * ipr_alloc_ucode_buffer (the sglist struct itself is freed on a
 * line elided from this excerpt).
 */
2150 static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
2154 	for (i = 0; i < sglist->num_sg; i++)
2155 	__free_pages(sglist->scatterlist[i].page, sglist->order);
2161 * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
2162 * @sglist: scatter/gather list pointer
2163 * @buffer: buffer pointer
2164 * @len: buffer length
2166 * Copy a microcode image from a user buffer into a buffer allocated by
2167 * ipr_alloc_ucode_buffer
2170 * 0 on success / other on failure
/*
 * Copy a microcode image from `buffer` into the sglist pages: full
 * bsize_elem-sized chunks first, then the remainder into the next
 * element. Pages are accessed via kmap/kunmap and each element's
 * length is set to the bytes stored in it.
 * Returns 0 on success / other on failure.
 */
2172 static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
2173 	u8 *buffer, u32 len)
2175 	int bsize_elem, i, result = 0;
2176 	struct scatterlist *scatterlist;
2179 	/* Determine the actual number of bytes per element */
2180 	bsize_elem = PAGE_SIZE * (1 << sglist->order);
2182 	scatterlist = sglist->scatterlist;
2184 	for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
2185 	kaddr = kmap(scatterlist[i].page);
2186 	memcpy(kaddr, buffer, bsize_elem);
2187 	kunmap(scatterlist[i].page);
2189 	scatterlist[i].length = bsize_elem;
	/* Copy the tail that doesn't fill a whole element. */
2197 	if (len % bsize_elem) {
2198 	kaddr = kmap(scatterlist[i].page);
2199 	memcpy(kaddr, buffer, len % bsize_elem);
2200 	kunmap(scatterlist[i].page);
2202 	scatterlist[i].length = len % bsize_elem;
2205 	sglist->buffer_len = len;
2210 * ipr_map_ucode_buffer - Map a microcode download buffer
2211 * @ipr_cmd: ipr command struct
2212 * @sglist: scatter/gather list
2213 * @len: total length of download buffer
2215 * Maps a microcode download scatter/gather list for DMA and
2219 * 0 on success / -EIO on failure
/*
 * DMA-map the ucode sglist and build the command's IOADL (IOA data
 * list): one write descriptor per mapped sg element, with the final
 * descriptor flagged LAST. Returns 0 on success / -EIO if pci_map_sg
 * mapped nothing (returns elided in this excerpt).
 */
2221 static int ipr_map_ucode_buffer(struct ipr_cmnd *ipr_cmd,
2222 	struct ipr_sglist *sglist, int len)
2224 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2225 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
2226 	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
2227 	struct scatterlist *scatterlist = sglist->scatterlist;
2230 	ipr_cmd->dma_use_sg = pci_map_sg(ioa_cfg->pdev, scatterlist,
2231 	sglist->num_sg, DMA_TO_DEVICE);
	/* Host-to-adapter transfer: mark the command as a write. */
2233 	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
2234 	ioarcb->write_data_transfer_length = cpu_to_be32(len);
2235 	ioarcb->write_ioadl_len =
2236 	cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
2238 	for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
2239 	ioadl[i].flags_and_data_len =
2240 	cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
2242 	cpu_to_be32(sg_dma_address(&scatterlist[i]));
	/* Flag the final descriptor so the IOA knows where the list ends. */
2245 	if (likely(ipr_cmd->dma_use_sg)) {
2246 	ioadl[i-1].flags_and_data_len |=
2247 	cpu_to_be32(IPR_IOADL_FLAGS_LAST);
2250 	dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
2258 * ipr_store_update_fw - Update the firmware on the adapter
2259 * @class_dev: class_device struct
2261 * @count: buffer size
2263 * This function will update the firmware on the adapter.
2266 * count on success / other on failure
/*
 * sysfs store: update adapter firmware. The buffer names a firmware
 * file; it is fetched via request_firmware(), validated against the
 * adapter's card type, copied into a DMA-able sglist, published in
 * ioa_cfg->ucode_sglist, and downloaded by initiating a reset. Waits
 * for the reset/reload to finish, then frees everything.
 * Requires CAP_SYS_ADMIN. Returns count on success / other on failure.
 */
2268 static ssize_t ipr_store_update_fw(struct class_device *class_dev,
2269 	const char *buf, size_t count)
2271 	struct Scsi_Host *shost = class_to_shost(class_dev);
2272 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2273 	struct ipr_ucode_image_header *image_hdr;
2274 	const struct firmware *fw_entry;
2275 	struct ipr_sglist *sglist;
2276 	unsigned long lock_flags;
2279 	int len, result, dnld_size;
2281 	if (!capable(CAP_SYS_ADMIN))
	/* Copy the filename and strip the trailing newline from sysfs. */
2284 	len = snprintf(fname, 99, "%s", buf);
2285 	fname[len-1] = '\0';
2287 	if(request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
2288 	dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
2292 	image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;
	/* Reject images whose header overruns the file or whose card type
	   doesn't match this adapter (a zero VPD card_type skips the check). */
2294 	if (be32_to_cpu(image_hdr->header_length) > fw_entry->size ||
2295 	(ioa_cfg->vpd_cbs->page3_data.card_type &&
2296 	ioa_cfg->vpd_cbs->page3_data.card_type != image_hdr->card_type)) {
2297 	dev_err(&ioa_cfg->pdev->dev, "Invalid microcode buffer\n");
2298 	release_firmware(fw_entry);
	/* Payload starts after the image header. */
2302 	src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
2303 	dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
2304 	sglist = ipr_alloc_ucode_buffer(dnld_size);
2307 	dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
2308 	release_firmware(fw_entry);
2312 	result = ipr_copy_ucode_buffer(sglist, src, dnld_size);
2315 	dev_err(&ioa_cfg->pdev->dev,
2316 	"Microcode buffer copy to DMA buffer failed\n");
2317 	ipr_free_ucode_buffer(sglist);
2318 	release_firmware(fw_entry);
2322 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	/* Only one download may be in flight at a time. */
2324 	if (ioa_cfg->ucode_sglist) {
2325 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2326 	dev_err(&ioa_cfg->pdev->dev,
2327 	"Microcode download already in progress\n");
2328 	ipr_free_ucode_buffer(sglist);
2329 	release_firmware(fw_entry);
	/* The reset path performs the actual download from ucode_sglist. */
2333 	ioa_cfg->ucode_sglist = sglist;
2334 	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2335 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2336 	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2338 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2339 	ioa_cfg->ucode_sglist = NULL;
2340 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2342 	ipr_free_ucode_buffer(sglist);
2343 	release_firmware(fw_entry);
/* Write-only sysfs attribute: "update_fw". */
2348 static struct class_device_attribute ipr_update_fw_attr = {
2350 	.name =		"update_fw",
2353 	.store = ipr_store_update_fw
/* Per-host sysfs attributes registered with the SCSI mid-layer. */
2356 static struct class_device_attribute *ipr_ioa_attrs[] = {
2357 	&ipr_fw_version_attr,
2358 	&ipr_log_level_attr,
2359 	&ipr_diagnostics_attr,
2360 	&ipr_ioa_reset_attr,
2361 	&ipr_update_fw_attr,
2365 #ifdef CONFIG_SCSI_IPR_DUMP
2367 * ipr_read_dump - Dump the adapter
2368 * @kobj: kobject struct
2371 * @count: buffer size
2374 * number of bytes printed to buffer
/*
 * sysfs binary read of a completed adapter dump. The dump image is
 * presented as three consecutive regions and off/count are walked
 * through them in order: (1) the driver_dump struct, (2) the ioa_dump
 * header up to its ioa_data member, (3) the page array ioa_data[].
 * Holds a kobject reference on the dump for the duration of the copy.
 * Requires CAP_SYS_ADMIN; only valid when sdt_state == DUMP_OBTAINED.
 */
2376 static ssize_t ipr_read_dump(struct kobject *kobj, char *buf,
2377 	loff_t off, size_t count)
2379 	struct class_device *cdev = container_of(kobj,struct class_device,kobj);
2380 	struct Scsi_Host *shost = class_to_shost(cdev);
2381 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2382 	struct ipr_dump *dump;
2383 	unsigned long lock_flags = 0;
2388 	if (!capable(CAP_SYS_ADMIN))
2391 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2392 	dump = ioa_cfg->dump;
2394 	if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump || !kobject_get(&dump->kobj)) {
2395 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2399 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	/* Reads past the end of the dump return nothing. */
2401 	if (off > dump->driver_dump.hdr.len) {
2402 	kobject_put(&dump->kobj);
2406 	if (off + count > dump->driver_dump.hdr.len) {
2407 	count = dump->driver_dump.hdr.len - off;
	/* Region 1: the driver dump header and entries. */
2411 	if (count && off < sizeof(dump->driver_dump)) {
2412 	if (off + count > sizeof(dump->driver_dump))
2413 	len = sizeof(dump->driver_dump) - off;
2416 	src = (u8 *)&dump->driver_dump + off;
2417 	memcpy(buf, src, len);
2423 	off -= sizeof(dump->driver_dump);
	/* Region 2: the ioa_dump struct up to its page array. */
2425 	if (count && off < offsetof(struct ipr_ioa_dump, ioa_data)) {
2426 	if (off + count > offsetof(struct ipr_ioa_dump, ioa_data))
2427 	len = offsetof(struct ipr_ioa_dump, ioa_data) - off;
2430 	src = (u8 *)&dump->ioa_dump + off;
2431 	memcpy(buf, src, len);
2437 	off -= offsetof(struct ipr_ioa_dump, ioa_data);
	/* Region 3: paged IOA data — never copy across a page boundary. */
2440 	if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
2441 	len = PAGE_ALIGN(off) - off;
2444 	src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
2445 	src += off & ~PAGE_MASK;
2446 	memcpy(buf, src, len);
2452 	kobject_put(&dump->kobj);
2457 * ipr_release_dump - Free adapter dump memory
2458 * @kobj: kobject struct
/*
 * kobject release callback: runs when the last reference to the dump
 * is dropped. Detaches the dump from the config struct, resets the
 * dump state machine, and frees every captured data page.
 */
2463 static void ipr_release_dump(struct kobject *kobj)
2465 	struct ipr_dump *dump = container_of(kobj,struct ipr_dump,kobj);
2466 	struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
2467 	unsigned long lock_flags = 0;
2471 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2472 	ioa_cfg->dump = NULL;
2473 	ioa_cfg->sdt_state = INACTIVE;
2474 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2476 	for (i = 0; i < dump->ioa_dump.next_page_index; i++)
2477 	free_page((unsigned long) dump->ioa_dump.ioa_data[i]);
/* kobject type for dump objects; release frees the dump's pages. */
2483 static struct kobj_type ipr_dump_kobj_type = {
2484 	.release = ipr_release_dump,
2488 * ipr_alloc_dump - Prepare for adapter dump
2489 * @ioa_cfg: ioa config struct
2492 * 0 on success / other on failure
2494 static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
2496 struct ipr_dump *dump;
2497 unsigned long lock_flags = 0;
/* Allocate the dump control structure. The NULL-check branch is elided
 * in this excerpt; the ipr_err below is its failure message. */
2500 dump = kmalloc(sizeof(struct ipr_dump), GFP_KERNEL);
2503 ipr_err("Dump memory allocation failed\n");
/* Zero the dump and set up its kobject so ipr_release_dump runs when
 * the last reference is dropped. */
2507 memset(dump, 0, sizeof(struct ipr_dump));
2508 kobject_init(&dump->kobj);
2509 dump->kobj.ktype = &ipr_dump_kobj_type;
2510 dump->ioa_cfg = ioa_cfg;
2512 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
/* Only one dump may be outstanding: bail if SDT state is not INACTIVE. */
2514 if (INACTIVE != ioa_cfg->sdt_state) {
2515 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2520 ioa_cfg->dump = dump;
2521 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
/* If the adapter is already dead and no dump has been taken yet, kick
 * the work queue to collect one immediately. */
2522 if (ioa_cfg->ioa_is_dead && !ioa_cfg->dump_taken) {
2523 ioa_cfg->dump_taken = 1;
2524 schedule_work(&ioa_cfg->work_q);
2526 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2533 * ipr_free_dump - Free adapter dump memory
2534 * @ioa_cfg: ioa config struct
2537 * 0 on success / other on failure
2539 static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
2541 struct ipr_dump *dump;
2542 unsigned long lock_flags = 0;
2546 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2547 dump = ioa_cfg->dump;
/* No dump outstanding: drop the lock and return (guard condition and
 * return are elided in this excerpt). */
2549 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2553 ioa_cfg->dump = NULL;
2554 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Drop our reference; ipr_release_dump frees the memory once any
 * concurrent sysfs readers release theirs. */
2556 kobject_put(&dump->kobj);
2563 * ipr_write_dump - Setup dump state of adapter
2564 * @kobj: kobject struct
2567 * @count: buffer size
2570 * number of bytes printed to buffer
/* sysfs bin-attribute write handler: writing '0' releases a taken dump;
 * presumably '1' arms one (that branch's condition is elided here). */
2572 static ssize_t ipr_write_dump(struct kobject *kobj, char *buf,
2573 loff_t off, size_t count)
2575 struct class_device *cdev = container_of(kobj,struct class_device,kobj);
2576 struct Scsi_Host *shost = class_to_shost(cdev);
2577 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
/* Dump control is a privileged operation. */
2580 if (!capable(CAP_SYS_ADMIN))
2584 rc = ipr_alloc_dump(ioa_cfg);
2585 else if (buf[0] == '0')
2586 rc = ipr_free_dump(ioa_cfg);
/* sysfs binary attribute exposing the adapter dump; owner read/write
 * only, backed by ipr_read_dump / ipr_write_dump. */
2596 static struct bin_attribute ipr_dump_attr = {
2599 .mode = S_IRUSR | S_IWUSR,
2602 .read = ipr_read_dump,
2603 .write = ipr_write_dump
/* Stub used when dump support is compiled out: freeing is a no-op.
 * NOTE(review): the trailing ';' after the function body is a stray
 * empty declaration and could be dropped. */
2606 static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; };
2610 * ipr_store_queue_depth - Change the device's queue depth
2611 * @dev: device struct
2615 * number of bytes printed to buffer
2617 static ssize_t ipr_store_queue_depth(struct device *dev,
2618 const char *buf, size_t count)
2620 struct scsi_device *sdev = to_scsi_device(dev);
2621 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
2622 struct ipr_resource_entry *res;
/* Parse the user-supplied depth; simple_strtoul does no error
 * reporting, so non-numeric input yields 0. */
2623 int qdepth = simple_strtoul(buf, NULL, 10);
2625 unsigned long lock_flags = 0;
2626 ssize_t len = -ENXIO;
2628 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2629 res = (struct ipr_resource_entry *)sdev->hostdata;
2631 res->qdepth = qdepth;
/* Ordered tags only for generic SCSI devices with TCQ active. */
2633 if (ipr_is_gscsi(res) && res->tcq_active)
2634 tagged = MSG_ORDERED_TAG;
/* Apply the new depth to the mid-layer outside the host lock. */
2639 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2640 scsi_adjust_queue_depth(sdev, tagged, qdepth);
/* sysfs "queue_depth" device attribute, owner read/write. */
2644 static struct device_attribute ipr_queue_depth_attr = {
2646 .name = "queue_depth",
2647 .mode = S_IRUSR | S_IWUSR,
2649 .store = ipr_store_queue_depth
2653 * ipr_show_tcq_enable - Show if the device is enabled for tcqing
2654 * @dev: device struct
2658 * number of bytes printed to buffer
2660 static ssize_t ipr_show_tcq_enable(struct device *dev, char *buf)
2662 struct scsi_device *sdev = to_scsi_device(dev);
2663 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
2664 struct ipr_resource_entry *res;
2665 unsigned long lock_flags = 0;
/* -ENXIO is returned if the device has no resource entry attached. */
2666 ssize_t len = -ENXIO;
2668 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2669 res = (struct ipr_resource_entry *)sdev->hostdata;
2671 len = snprintf(buf, PAGE_SIZE, "%d\n", res->tcq_active);
2672 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2677 * ipr_store_tcq_enable - Change the device's TCQing state
2678 * @dev: device struct
2682 * number of bytes printed to buffer
2684 static ssize_t ipr_store_tcq_enable(struct device *dev,
2685 const char *buf, size_t count)
2687 struct scsi_device *sdev = to_scsi_device(dev);
2688 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
2689 struct ipr_resource_entry *res;
2690 unsigned long lock_flags = 0;
2691 int tcq_active = simple_strtoul(buf, NULL, 10);
2692 int qdepth = IPR_MAX_CMD_PER_LUN;
2694 ssize_t len = -ENXIO;
2696 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2698 res = (struct ipr_resource_entry *)sdev->hostdata;
/* Disabling TCQ: fall back to the device's untagged depth. */
2701 res->tcq_active = 0;
2702 qdepth = res->qdepth;
/* Enabling TCQ is only allowed for generic SCSI devices that report
 * tagged-queuing support. */
2704 if (ipr_is_gscsi(res) && sdev->tagged_supported) {
2706 tagged = MSG_ORDERED_TAG;
2707 res->tcq_active = 1;
/* Requested TCQ on an ineligible device: error path (body elided). */
2711 } else if (tcq_active) {
/* Apply the (possibly changed) tag type and depth outside the lock. */
2716 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2717 scsi_adjust_queue_depth(sdev, tagged, qdepth);
/* sysfs "tcq_enable" device attribute, owner read/write. */
2721 static struct device_attribute ipr_tcqing_attr = {
2723 .name = "tcq_enable",
2724 .mode = S_IRUSR | S_IWUSR,
2726 .store = ipr_store_tcq_enable,
2727 .show = ipr_show_tcq_enable
2731 * ipr_show_adapter_handle - Show the adapter's resource handle for this device
2732 * @dev: device struct
2736 * number of bytes printed to buffer
2738 static ssize_t ipr_show_adapter_handle(struct device *dev, char *buf)
2740 struct scsi_device *sdev = to_scsi_device(dev);
2741 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
2742 struct ipr_resource_entry *res;
2743 unsigned long lock_flags = 0;
/* -ENXIO if no resource entry is attached to this sdev. */
2744 ssize_t len = -ENXIO;
2746 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2747 res = (struct ipr_resource_entry *)sdev->hostdata;
/* NOTE(review): res_handle is printed raw with %08X — presumably
 * big-endian on-wire; confirm whether a be32_to_cpu is intended. */
2749 len = snprintf(buf, PAGE_SIZE, "%08X\n", res->cfgte.res_handle);
2750 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* sysfs read-only "adapter_handle" device attribute. */
2754 static struct device_attribute ipr_adapter_handle_attr = {
2756 .name = "adapter_handle",
2759 .show = ipr_show_adapter_handle
/* Per-device sysfs attributes registered with the SCSI mid-layer. */
2762 static struct device_attribute *ipr_dev_attrs[] = {
2763 &ipr_queue_depth_attr,
2765 &ipr_adapter_handle_attr,
2770 * ipr_biosparam - Return the HSC mapping
2771 * @sdev: scsi device struct
2772 * @block_device: block device pointer
2773 * @capacity: capacity of the device
2774 * @parm: Array containing returned HSC values.
2776 * This function generates the HSC parms that fdisk uses.
2777 * We want to make sure we return something that places partitions
2778 * on 4k boundaries for best performance with the IOA.
2783 static int ipr_biosparam(struct scsi_device *sdev,
2784 struct block_device *block_device,
2785 sector_t capacity, int *parm)
2787 int heads, sectors, cylinders;
/* Fixed 128-head x 32-sector geometry: cylinders = capacity / 4096,
 * which keeps fdisk partition boundaries 4k-aligned. The parm[0]/parm[1]
 * assignments are elided in this excerpt. */
2792 cylinders = capacity;
2793 sector_div(cylinders, (128 * 32));
2798 parm[2] = cylinders;
2804 * ipr_slave_destroy - Unconfigure a SCSI device
2805 * @sdev: scsi device struct
2810 static void ipr_slave_destroy(struct scsi_device *sdev)
2812 struct ipr_resource_entry *res;
2813 struct ipr_ioa_cfg *ioa_cfg;
2814 unsigned long lock_flags = 0;
2816 ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
/* Break the sdev <-> resource-entry link under the host lock so no
 * further commands resolve a stale resource. */
2818 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2819 res = (struct ipr_resource_entry *) sdev->hostdata;
2821 sdev->hostdata = NULL;
2824 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2828 * ipr_slave_configure - Configure a SCSI device
2829 * @sdev: scsi device struct
2831 * This function configures the specified scsi device.
2836 static int ipr_slave_configure(struct scsi_device *sdev)
2838 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
2839 struct ipr_resource_entry *res;
2840 unsigned long lock_flags = 0;
2842 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2843 res = sdev->hostdata;
/* Advanced-function DASD is presented to the mid-layer as RAID. */
2845 if (ipr_is_af_dasd_device(res))
2846 sdev->type = TYPE_RAID;
/* Force SCSI-level 4 for AF DASD and the IOA resource itself. */
2847 if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res))
2848 sdev->scsi_level = 4;
/* Volume sets get a longer read/write timeout. */
2849 if (ipr_is_vset_device(res))
2850 sdev->timeout = IPR_VSET_RW_TIMEOUT;
2852 sdev->allow_restart = 1;
2853 scsi_adjust_queue_depth(sdev, 0, res->qdepth);
2855 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2860 * ipr_slave_alloc - Prepare for commands to a device.
2861 * @sdev: scsi device struct
2863 * This function saves a pointer to the resource entry
2864 * in the scsi device struct if the device exists. We
2865 * can then use this pointer in ipr_queuecommand when
2866 * handling new commands.
2871 static int ipr_slave_alloc(struct scsi_device *sdev)
2873 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
2874 struct ipr_resource_entry *res;
2875 unsigned long lock_flags;
2877 sdev->hostdata = NULL;
2879 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
/* Find the resource entry matching this sdev's bus/target/lun. */
2881 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
2882 if ((res->cfgte.res_addr.bus == sdev->channel) &&
2883 (res->cfgte.res_addr.target == sdev->id) &&
2884 (res->cfgte.res_addr.lun == sdev->lun)) {
/* Cache the match and request a sync-complete on first use. */
2888 sdev->hostdata = res;
2889 res->needs_sync_complete = 1;
2894 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2900 * ipr_eh_host_reset - Reset the host adapter
2901 * @scsi_cmd: scsi command struct
2906 static int ipr_eh_host_reset(struct scsi_cmnd * scsi_cmd)
2908 struct ipr_ioa_cfg *ioa_cfg;
2912 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
2914 dev_err(&ioa_cfg->pdev->dev,
2915 "Adapter being reset as a result of error recovery.\n");
/* If user space armed a dump, collect it as part of this reset. */
2917 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2918 ioa_cfg->sdt_state = GET_DUMP;
2920 rc = ipr_reset_reload(ioa_cfg, IPR_SHUTDOWN_ABBREV);
2927 * ipr_eh_dev_reset - Reset the device
2928 * @scsi_cmd: scsi command struct
2930 * This function issues a device reset to the affected device.
2931 * A LUN reset will be sent to the device first. If that does
2932 * not work, a target reset will be sent.
2937 static int ipr_eh_dev_reset(struct scsi_cmnd * scsi_cmd)
2939 struct ipr_cmnd *ipr_cmd;
2940 struct ipr_ioa_cfg *ioa_cfg;
2941 struct ipr_resource_entry *res;
2942 struct ipr_cmd_pkt *cmd_pkt;
2946 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
2947 res = scsi_cmd->device->hostdata;
/* Device resets are only issued to generic SCSI and volume-set
 * resources; anything else fails immediately. */
2949 if (!res || (!ipr_is_gscsi(res) && !ipr_is_vset_device(res)))
2953 * If we are currently going through reset/reload, return failed. This will force the
2954 * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
2957 if (ioa_cfg->in_reset_reload)
2959 if (ioa_cfg->ioa_is_dead)
/* Redirect completions of any op already pending on this resource so
 * they are handled as EH completions rather than normal done paths. */
2962 list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
2963 if (ipr_cmd->ioarcb.res_handle == res->cfgte.res_handle) {
2964 if (ipr_cmd->scsi_cmd)
2965 ipr_cmd->done = ipr_scsi_eh_done;
2969 res->resetting_device = 1;
2971 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
/* Build and send a blocking IOA-level RESET_DEVICE command. */
2973 ipr_cmd->ioarcb.res_handle = res->cfgte.res_handle;
2974 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
2975 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
2976 cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
2978 ipr_sdev_err(scsi_cmd->device, "Resetting device\n");
2979 ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
2981 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
2983 res->resetting_device = 0;
2985 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
/* Any sense key in the IOASC means the reset itself failed. */
2988 return (IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS);
2992 * ipr_bus_reset_done - Op done function for bus reset.
2993 * @ipr_cmd: ipr command struct
2995 * This function is the op done function for a bus reset
3000 static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
3002 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
3003 struct ipr_resource_entry *res;
/* Tell the mid-layer which bus was reset by matching the reset's
 * resource handle against the used-resource list. */
3006 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3007 if (!memcmp(&res->cfgte.res_handle, &ipr_cmd->ioarcb.res_handle,
3008 sizeof(res->cfgte.res_handle))) {
3009 scsi_report_bus_reset(ioa_cfg->host, res->cfgte.res_addr.bus);
3015 * If abort has not completed, indicate the reset has, else call the
3016 * abort's done function to wake the sleeping eh thread
3018 if (ipr_cmd->u.sibling->u.sibling)
3019 ipr_cmd->u.sibling->u.sibling = NULL;
3021 ipr_cmd->u.sibling->done(ipr_cmd->u.sibling);
3023 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3028 * ipr_abort_timeout - An abort task has timed out
3029 * @ipr_cmd: ipr command struct
3031 * This function handles when an abort task times out. If this
3032 * happens we issue a bus reset since we have resources tied
3033 * up that must be freed before returning to the midlayer.
3038 static void ipr_abort_timeout(struct ipr_cmnd *ipr_cmd)
3040 struct ipr_cmnd *reset_cmd;
3041 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
3042 struct ipr_cmd_pkt *cmd_pkt;
3043 unsigned long lock_flags = 0;
3046 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
/* Bail if the abort already completed or a full adapter reset is in
 * progress — nothing left for the timeout to do. */
3047 if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
3048 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3052 ipr_sdev_err(ipr_cmd->u.sdev, "Abort timed out. Resetting bus\n");
3053 reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
/* Cross-link abort and reset so ipr_bus_reset_done can wake the
 * sleeping EH thread when the reset finishes. */
3054 ipr_cmd->u.sibling = reset_cmd;
3055 reset_cmd->u.sibling = ipr_cmd;
3056 reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
3057 cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
3058 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
3059 cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
3060 cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;
3062 ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
3063 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3068 * ipr_cancel_op - Cancel specified op
3069 * @scsi_cmd: scsi command struct
3071 * This function cancels specified op.
3076 static int ipr_cancel_op(struct scsi_cmnd * scsi_cmd)
3078 struct ipr_cmnd *ipr_cmd;
3079 struct ipr_ioa_cfg *ioa_cfg;
3080 struct ipr_resource_entry *res;
3081 struct ipr_cmd_pkt *cmd_pkt;
3082 u32 ioasc, ioarcb_addr;
3086 ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
3087 res = scsi_cmd->device->hostdata;
/* Only generic SCSI and volume-set resources can have ops aborted. */
3089 if (!res || (!ipr_is_gscsi(res) && !ipr_is_vset_device(res)))
/* Locate the pending ipr command wrapping this scsi command and
 * redirect its completion to the EH done handler. */
3092 list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
3093 if (ipr_cmd->scsi_cmd == scsi_cmd) {
3094 ipr_cmd->done = ipr_scsi_eh_done;
/* The IOARCB's PCI address identifies the op to abort in the CDB. */
3103 ioarcb_addr = be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr);
3105 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
3106 ipr_cmd->ioarcb.res_handle = res->cfgte.res_handle;
3107 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
3108 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
3109 cmd_pkt->cdb[0] = IPR_ABORT_TASK;
3110 cmd_pkt->cdb[2] = (ioarcb_addr >> 24) & 0xff;
3111 cmd_pkt->cdb[3] = (ioarcb_addr >> 16) & 0xff;
3112 cmd_pkt->cdb[4] = (ioarcb_addr >> 8) & 0xff;
3113 cmd_pkt->cdb[5] = ioarcb_addr & 0xff;
3114 ipr_cmd->u.sdev = scsi_cmd->device;
3116 ipr_sdev_err(scsi_cmd->device, "Aborting command: %02X\n", scsi_cmd->cmnd[0]);
3117 ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_ABORT_TASK_TIMEOUT);
3118 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3121 * If the abort task timed out and we sent a bus reset, we will get
3122 * one the following responses to the abort
3124 if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
3129 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3130 res->needs_sync_complete = 1;
3133 return (IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS);
3137 * ipr_eh_abort - Abort a single op
3138 * @scsi_cmd: scsi command struct
3143 static int ipr_eh_abort(struct scsi_cmnd * scsi_cmd)
3145 struct ipr_ioa_cfg *ioa_cfg;
3148 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
3150 /* If we are currently going through reset/reload, return failed. This will force the
3151 mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
3152 reset to complete */
3153 if (ioa_cfg->in_reset_reload)
3155 if (ioa_cfg->ioa_is_dead)
/* Nothing to abort if the device was never configured. */
3157 if (!scsi_cmd->device->hostdata)
3161 return ipr_cancel_op(scsi_cmd);
3165 * ipr_handle_other_interrupt - Handle "other" interrupts
3166 * @ioa_cfg: ioa config struct
3167 * @int_reg: interrupt register
3170 * IRQ_NONE / IRQ_HANDLED
3172 static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
3173 volatile u32 int_reg)
3175 irqreturn_t rc = IRQ_HANDLED;
/* Adapter transitioned to operational: ack the interrupt and resume
 * the in-flight reset job. */
3177 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
3178 /* Mask the interrupt */
3179 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
3181 /* Clear the interrupt */
3182 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.clr_interrupt_reg);
3183 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
3185 list_del(&ioa_cfg->reset_cmd->queue);
3186 del_timer(&ioa_cfg->reset_cmd->timer);
3187 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
/* Otherwise the adapter hit an error: record a unit check, log it,
 * optionally capture a dump, and kick off a full reset. */
3189 if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
3190 ioa_cfg->ioa_unit_checked = 1;
3192 dev_err(&ioa_cfg->pdev->dev,
3193 "Permanent IOA failure. 0x%08X\n", int_reg);
3195 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
3196 ioa_cfg->sdt_state = GET_DUMP;
3198 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
3199 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3206 * ipr_isr - Interrupt service routine
3208 * @devp: pointer to ioa config struct
3209 * @regs: pt_regs struct
3212 * IRQ_NONE / IRQ_HANDLED
3214 static irqreturn_t ipr_isr(int irq, void *devp, struct pt_regs *regs)
3216 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
3217 unsigned long lock_flags = 0;
3218 volatile u32 int_reg, int_mask_reg;
3221 struct ipr_cmnd *ipr_cmd;
3222 irqreturn_t rc = IRQ_NONE;
3224 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3226 /* If interrupts are disabled, ignore the interrupt */
3227 if (!ioa_cfg->allow_interrupts) {
3228 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3232 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
3233 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
3235 /* If an interrupt on the adapter did not occur, ignore it */
3236 if (unlikely((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0)) {
3237 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Drain the host request/response queue: the toggle bit tells us which
 * entries are new for this pass. */
3244 while ((be32_to_cpu(*ioa_cfg->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
3245 ioa_cfg->toggle_bit) {
3247 cmd_index = (be32_to_cpu(*ioa_cfg->hrrq_curr) &
3248 IPR_HRRQ_REQ_RESP_HANDLE_MASK) >> IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
/* A handle outside the command-block table means the adapter is
 * confused: log it, optionally dump, and reset the adapter. */
3250 if (unlikely(cmd_index >= IPR_NUM_CMD_BLKS)) {
3251 ioa_cfg->errors_logged++;
3252 dev_err(&ioa_cfg->pdev->dev, "Invalid response handle from IOA\n");
3254 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
3255 ioa_cfg->sdt_state = GET_DUMP;
3257 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3258 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3262 ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
3264 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3266 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
/* Complete the op: off the pending list, cancel its timer, invoke
 * its done callback. */
3268 list_del(&ipr_cmd->queue);
3269 del_timer(&ipr_cmd->timer);
3270 ipr_cmd->done(ipr_cmd);
/* Advance the HRRQ cursor, wrapping and flipping the toggle bit. */
3274 if (ioa_cfg->hrrq_curr < ioa_cfg->hrrq_end) {
3275 ioa_cfg->hrrq_curr++;
3277 ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
3278 ioa_cfg->toggle_bit ^= 1u;
/* We completed at least one op: ack the HRRQ-updated interrupt and
 * re-read the interrupt register to look for more work. */
3282 if (ipr_cmd != NULL) {
3283 /* Clear the PCI interrupt */
3284 writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg);
3285 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
3290 if (unlikely(rc == IRQ_NONE))
3291 rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
3293 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3298 * ipr_build_ioadl - Build a scatter/gather list and map the buffer
3299 * @ioa_cfg: ioa config struct
3300 * @ipr_cmd: ipr command struct
3303 * 0 on success / -1 on failure
3305 static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
3306 struct ipr_cmnd *ipr_cmd)
3309 struct scatterlist *sglist;
3311 u32 ioadl_flags = 0;
3312 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
3313 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3314 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
3316 length = scsi_cmd->request_bufflen;
/* Scatter/gather path: DMA-map the whole sg list, then mirror it into
 * the adapter's IOADL descriptors. */
3321 if (scsi_cmd->use_sg) {
3322 ipr_cmd->dma_use_sg = pci_map_sg(ioa_cfg->pdev,
3323 scsi_cmd->request_buffer,
3325 scsi_cmd->sc_data_direction);
/* Direction determines which length/IOADL-len fields are filled. */
3327 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
3328 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
3329 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3330 ioarcb->write_data_transfer_length = cpu_to_be32(length);
3331 ioarcb->write_ioadl_len =
3332 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
3333 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
3334 ioadl_flags = IPR_IOADL_FLAGS_READ;
3335 ioarcb->read_data_transfer_length = cpu_to_be32(length);
3336 ioarcb->read_ioadl_len =
3337 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
3340 sglist = scsi_cmd->request_buffer;
3342 for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3343 ioadl[i].flags_and_data_len =
3344 cpu_to_be32(ioadl_flags | sg_dma_len(&sglist[i]));
3346 cpu_to_be32(sg_dma_address(&sglist[i]));
/* Tag the final descriptor so the adapter knows the list end. */
3349 if (likely(ipr_cmd->dma_use_sg)) {
3350 ioadl[i-1].flags_and_data_len |=
3351 cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3354 dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
/* Single-buffer path: one pci_map_single and a single IOADL entry. */
3356 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
3357 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
3358 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3359 ioarcb->write_data_transfer_length = cpu_to_be32(length);
3360 ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
3361 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
3362 ioadl_flags = IPR_IOADL_FLAGS_READ;
3363 ioarcb->read_data_transfer_length = cpu_to_be32(length);
3364 ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
3367 ipr_cmd->dma_handle = pci_map_single(ioa_cfg->pdev,
3368 scsi_cmd->request_buffer, length,
3369 scsi_cmd->sc_data_direction);
3371 if (likely(!pci_dma_mapping_error(ipr_cmd->dma_handle))) {
3372 ipr_cmd->dma_use_sg = 1;
3373 ioadl[0].flags_and_data_len =
3374 cpu_to_be32(ioadl_flags | length | IPR_IOADL_FLAGS_LAST);
3375 ioadl[0].address = cpu_to_be32(ipr_cmd->dma_handle);
3378 dev_err(&ioa_cfg->pdev->dev, "pci_map_single failed!\n");
3385 * ipr_get_task_attributes - Translate SPI Q-Tag to task attributes
3386 * @scsi_cmd: scsi command struct
3391 static u8 ipr_get_task_attributes(struct scsi_cmnd *scsi_cmd)
/* Defaults to untagged if the mid-layer supplies no tag message. */
3394 u8 rc = IPR_FLAGS_LO_UNTAGGED_TASK;
3396 if (scsi_populate_tag_msg(scsi_cmd, tag)) {
3398 case MSG_SIMPLE_TAG:
3399 rc = IPR_FLAGS_LO_SIMPLE_TASK;
/* head-of-queue case label elided in this excerpt */
3402 rc = IPR_FLAGS_LO_HEAD_OF_Q_TASK;
3404 case MSG_ORDERED_TAG:
3405 rc = IPR_FLAGS_LO_ORDERED_TASK;
3414 * ipr_erp_done - Process completion of ERP for a device
3415 * @ipr_cmd: ipr command struct
3417 * This function copies the sense buffer into the scsi_cmd
3418 * struct and pushes the scsi_done function.
3423 static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
3425 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
3426 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
3427 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
3428 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
/* Request Sense itself failed: report a driver error; otherwise copy
 * the gathered sense data back to the mid-layer's buffer. */
3430 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
3431 scsi_cmd->result |= (DID_ERROR << 16);
3432 ipr_sdev_err(scsi_cmd->device,
3433 "Request Sense failed with IOASC: 0x%08X\n", ioasc);
3435 memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
3436 SCSI_SENSE_BUFFERSIZE);
3440 res->needs_sync_complete = 1;
/* Release DMA mappings, recycle the command block, complete the op. */
3443 ipr_unmap_sglist(ioa_cfg, ipr_cmd);
3444 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3445 scsi_cmd->scsi_done(scsi_cmd);
3449 * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
3450 * @ipr_cmd: ipr command struct
3455 static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
3457 struct ipr_ioarcb *ioarcb;
3458 struct ipr_ioasa *ioasa;
3460 ioarcb = &ipr_cmd->ioarcb;
3461 ioasa = &ipr_cmd->ioasa;
/* Clear the command packet and all transfer-length bookkeeping so the
 * block can be reused for an ERP request without stale state. */
3463 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
3464 ioarcb->write_data_transfer_length = 0;
3465 ioarcb->read_data_transfer_length = 0;
3466 ioarcb->write_ioadl_len = 0;
3467 ioarcb->read_ioadl_len = 0;
3469 ioasa->residual_data_len = 0;
3473 * ipr_erp_request_sense - Send request sense to a device
3474 * @ipr_cmd: ipr command struct
3476 * This function sends a request sense to a device as a result
3477 * of a check condition.
3482 static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
3484 struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
3485 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
/* Prior ERP step (cancel all) failed: finish ERP without sensing. */
3487 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
3488 ipr_erp_done(ipr_cmd);
3492 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
/* Build a REQUEST SENSE CDB targeting the driver's DMA-able sense
 * buffer; sync-override so it bypasses queued ops. */
3494 cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
3495 cmd_pkt->cdb[0] = REQUEST_SENSE;
3496 cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
3497 cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
3498 cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
3499 cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);
3501 ipr_cmd->ioadl[0].flags_and_data_len =
3502 cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | SCSI_SENSE_BUFFERSIZE);
3503 ipr_cmd->ioadl[0].address =
3504 cpu_to_be32(ipr_cmd->sense_buffer_dma);
3506 ipr_cmd->ioarcb.read_ioadl_len =
3507 cpu_to_be32(sizeof(struct ipr_ioadl_desc));
3508 ipr_cmd->ioarcb.read_data_transfer_length =
3509 cpu_to_be32(SCSI_SENSE_BUFFERSIZE);
/* Double the timeout here versus the CDB timeout sent to the IOA. */
3511 ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
3512 IPR_REQUEST_SENSE_TIMEOUT * 2);
3516 * ipr_erp_cancel_all - Send cancel all to a device
3517 * @ipr_cmd: ipr command struct
3519 * This function sends a cancel all to a device to clear the
3520 * queue. If we are running TCQ on the device, QERR is set to 1,
3521 * which means all outstanding ops have been dropped on the floor.
3522 * Cancel all will return them to us.
3527 static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
3529 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
3530 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
3531 struct ipr_cmd_pkt *cmd_pkt;
3535 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
/* Issue an IOA-level CANCEL ALL; on completion, ERP continues with a
 * Request Sense for the original check condition. */
3537 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
3538 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
3539 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
3541 ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
3542 IPR_CANCEL_ALL_TIMEOUT);
3546 * ipr_dump_ioasa - Dump contents of IOASA
3547 * @ioa_cfg: ioa config struct
3548 * @ipr_cmd: ipr command struct
3550 * This function is invoked by the interrupt handler when ops
3551 * fail. It will log the IOASA if appropriate. Only called
3557 static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
3558 struct ipr_cmnd *ipr_cmd)
3563 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
3564 u32 *ioasa_data = (u32 *)ioasa;
3567 ioasc = be32_to_cpu(ioasa->ioasc) & IPR_IOASC_IOASC_MASK;
/* Honor the adapter's configured log level before doing any work. */
3572 if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
3575 error_index = ipr_get_error(ioasc);
3577 if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
3578 /* Don't log an error if the IOA already logged one */
3579 if (ioasa->ilid != 0)
3582 if (ipr_error_table[error_index].log_ioasa == 0)
3586 ipr_sdev_err(ipr_cmd->scsi_cmd->device, "%s\n",
3587 ipr_error_table[error_index].error);
/* For GPDD errors, also report the device end state and bus phase.
 * NOTE(review): '<=' against ARRAY_SIZE permits an index one past the
 * last element — looks like it should be '<'; confirm table sizes. */
3589 if ((ioasa->u.gpdd.end_state <= ARRAY_SIZE(ipr_gpdd_dev_end_states)) &&
3590 (ioasa->u.gpdd.bus_phase <= ARRAY_SIZE(ipr_gpdd_dev_bus_phases))) {
3591 ipr_sdev_err(ipr_cmd->scsi_cmd->device,
3592 "Device End state: %s Phase: %s\n",
3593 ipr_gpdd_dev_end_states[ioasa->u.gpdd.end_state],
3594 ipr_gpdd_dev_bus_phases[ioasa->u.gpdd.bus_phase]);
/* Clamp the hex dump to the size of the IOASA structure. */
3597 if (sizeof(struct ipr_ioasa) < be16_to_cpu(ioasa->ret_stat_len))
3598 data_len = sizeof(struct ipr_ioasa);
3600 data_len = be16_to_cpu(ioasa->ret_stat_len);
3602 ipr_err("IOASA Dump:\n");
3604 for (i = 0; i < data_len / 4; i += 4) {
3605 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
3606 be32_to_cpu(ioasa_data[i]),
3607 be32_to_cpu(ioasa_data[i+1]),
3608 be32_to_cpu(ioasa_data[i+2]),
3609 be32_to_cpu(ioasa_data[i+3]));
3614 * ipr_gen_sense - Generate SCSI sense data from an IOASA
3616 * @sense_buf: sense data buffer
3621 static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
3624 u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
3625 struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
3626 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
3627 u32 ioasc = be32_to_cpu(ioasa->ioasc);
3629 memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
/* Driver-generated IOASCs carry no device sense to translate. */
3631 if (ioasc >= IPR_FIRST_DRIVER_IOASC)
3634 ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;
/* Volume set with a 64-bit failing LBA: build descriptor-format
 * (0x72) sense so the full 8-byte LBA fits. */
3636 if (ipr_is_vset_device(res) &&
3637 ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
3638 ioasa->u.vset.failing_lba_hi != 0) {
3639 sense_buf[0] = 0x72;
3640 sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
3641 sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
3642 sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);
3646 sense_buf[9] = 0x0A;
3647 sense_buf[10] = 0x80;
3649 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);
3651 sense_buf[12] = (failing_lba & 0xff000000) >> 24;
3652 sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
3653 sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
3654 sense_buf[15] = failing_lba & 0x000000ff;
3656 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
3658 sense_buf[16] = (failing_lba & 0xff000000) >> 24;
3659 sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
3660 sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
3661 sense_buf[19] = failing_lba & 0x000000ff;
/* Otherwise build fixed-format (0x70) sense from the IOASC fields. */
3663 sense_buf[0] = 0x70;
3664 sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
3665 sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
3666 sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);
3668 /* Illegal request */
3669 if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
3670 (be32_to_cpu(ioasa->ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
3671 sense_buf[7] = 10; /* additional length */
3673 /* IOARCB was in error */
3674 if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
3675 sense_buf[15] = 0xC0;
3676 else /* Parameter data was invalid */
3677 sense_buf[15] = 0x80;
/* Field pointer bytes (sense key specific information). */
3680 ((IPR_FIELD_POINTER_MASK &
3681 be32_to_cpu(ioasa->ioasc_specific)) >> 8) & 0xff;
3683 (IPR_FIELD_POINTER_MASK &
3684 be32_to_cpu(ioasa->ioasc_specific)) & 0xff;
3686 if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
3687 if (ipr_is_vset_device(res))
3688 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
3690 failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);
3692 sense_buf[0] |= 0x80; /* Or in the Valid bit */
3693 sense_buf[3] = (failing_lba & 0xff000000) >> 24;
3694 sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
3695 sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
3696 sense_buf[6] = failing_lba & 0x000000ff;
3699 sense_buf[7] = 6; /* additional length */
3705 * ipr_erp_start - Process an error response for a SCSI op
3706 * @ioa_cfg: ioa config struct
3707 * @ipr_cmd: ipr command struct
3709 * This function determines whether or not to initiate ERP
3710 * on the affected device.
3715 static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
3716 struct ipr_cmnd *ipr_cmd)
3718 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
3719 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
3720 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
/* No resource entry: route straight to the EH done path (guard
 * condition elided in this excerpt). */
3723 ipr_scsi_eh_done(ipr_cmd);
3727 if (ipr_is_gscsi(res))
3728 ipr_dump_ioasa(ioa_cfg, ipr_cmd);
3730 ipr_gen_sense(ipr_cmd);
/* Map the masked IOASC to a mid-layer result / ERP action. */
3732 switch (ioasc & IPR_IOASC_IOASC_MASK) {
3733 case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
3734 scsi_cmd->result |= (DID_ERROR << 16);
3736 case IPR_IOASC_IR_RESOURCE_HANDLE:
3737 scsi_cmd->result |= (DID_NO_CONNECT << 16);
3739 case IPR_IOASC_HW_SEL_TIMEOUT:
3740 scsi_cmd->result |= (DID_NO_CONNECT << 16);
3741 res->needs_sync_complete = 1;
3743 case IPR_IOASC_SYNC_REQUIRED:
3745 res->needs_sync_complete = 1;
3746 scsi_cmd->result |= (DID_IMM_RETRY << 16);
3748 case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
3749 scsi_cmd->result |= (DID_PASSTHROUGH << 16);
3751 case IPR_IOASC_BUS_WAS_RESET:
3752 case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
3754 * Report the bus reset and ask for a retry. The device
3755 * will give CC/UA the next command.
3757 if (!res->resetting_device)
3758 scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
3759 scsi_cmd->result |= (DID_ERROR << 16);
3760 res->needs_sync_complete = 1;
3762 case IPR_IOASC_HW_DEV_BUS_STATUS:
3763 scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
/* Check condition: start the cancel-all -> request-sense ERP chain
 * instead of completing here. */
3764 if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
3765 ipr_erp_cancel_all(ipr_cmd);
3768 res->needs_sync_complete = 1;
3770 case IPR_IOASC_NR_INIT_CMD_REQUIRED:
/* default: generic error; vset devices skip the sync-complete. */
3773 scsi_cmd->result |= (DID_ERROR << 16);
3774 if (!ipr_is_vset_device(res))
3775 res->needs_sync_complete = 1;
3779 ipr_unmap_sglist(ioa_cfg, ipr_cmd);
3780 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3781 scsi_cmd->scsi_done(scsi_cmd);
/**
 * ipr_scsi_done - mid-layer done function
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer
 *
 * Return value:
 *	nothing
 *
 * NOTE(review): excerpt appears elided (braces/else missing); code verbatim.
 **/
static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);

	/* Propagate the residual byte count reported in the IOASA */
	scsi_cmd->resid = be32_to_cpu(ipr_cmd->ioasa.residual_data_len);

	/* Success (no sense key): unmap DMA, free the command block,
	 * complete the op.  Any sense key routes to error recovery. */
	if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
		ipr_unmap_sglist(ioa_cfg, ipr_cmd);
		list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
		scsi_cmd->scsi_done(scsi_cmd);

	ipr_erp_start(ioa_cfg, ipr_cmd);
/**
 * ipr_save_ioafp_mode_select - Save adapters mode select data
 * @ioa_cfg:	ioa config struct
 * @scsi_cmd:	scsi command struct
 *
 * This function saves mode select data for the adapter to
 * use following an adapter reset.
 *
 * Return value:
 *	0 on success / SCSI_MLQUEUE_HOST_BUSY on failure
 *
 * NOTE(review): excerpt appears elided (kmalloc flags arg, braces, return
 * missing); code kept verbatim, comments only added.
 **/
static int ipr_save_ioafp_mode_select(struct ipr_ioa_cfg *ioa_cfg,
				      struct scsi_cmnd *scsi_cmd)
	/* Lazily allocate the save buffer on the first IOA mode select */
	if (!ioa_cfg->saved_mode_pages) {
		ioa_cfg->saved_mode_pages = kmalloc(sizeof(struct ipr_mode_pages),
		if (!ioa_cfg->saved_mode_pages) {
			dev_err(&ioa_cfg->pdev->dev,
				"IOA mode select buffer allocation failed\n");
			return SCSI_MLQUEUE_HOST_BUSY;

	/* CDB byte 4 of a MODE SELECT(6) is the parameter list length */
	memcpy(ioa_cfg->saved_mode_pages, scsi_cmd->buffer, scsi_cmd->cmnd[4]);
	ioa_cfg->saved_mode_page_len = scsi_cmd->cmnd[4];
/**
 * ipr_queuecommand - Queue a mid-layer request
 * @scsi_cmd:	scsi command struct
 * @done:	done function
 *
 * This function queues a request generated by the mid-layer.
 *
 * Return value:
 *	0 on success
 *	SCSI_MLQUEUE_DEVICE_BUSY if device is busy
 *	SCSI_MLQUEUE_HOST_BUSY if host is busy
 *
 * NOTE(review): excerpt appears elided (rc declaration, some returns and
 * braces missing); code kept verbatim, comments only added.
 **/
static int ipr_queuecommand(struct scsi_cmnd *scsi_cmd,
			    void (*done) (struct scsi_cmnd *))
	struct ipr_ioa_cfg *ioa_cfg;
	struct ipr_resource_entry *res;
	struct ipr_ioarcb *ioarcb;
	struct ipr_cmnd *ipr_cmd;

	scsi_cmd->scsi_done = done;
	ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
	res = scsi_cmd->device->hostdata;
	scsi_cmd->result = (DID_OK << 16);

	/*
	 * We are currently blocking all devices due to a host reset
	 * We have told the host to stop giving us new requests, but
	 * ERP ops don't count. FIXME
	 */
	if (unlikely(!ioa_cfg->allow_cmds))
		return SCSI_MLQUEUE_HOST_BUSY;

	/*
	 * FIXME - Create scsi_set_host_offline interface
	 *  and the ioa_is_dead check can be removed
	 */
	if (unlikely(ioa_cfg->ioa_is_dead || !res)) {
		/* Dead adapter or unknown device: fail with no-connect */
		memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
		scsi_cmd->result = (DID_NO_CONNECT << 16);
		scsi_cmd->scsi_done(scsi_cmd);

	/* Take a free command block and stage it on the pending queue */
	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
	ioarcb = &ipr_cmd->ioarcb;
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);

	memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
	ipr_cmd->scsi_cmd = scsi_cmd;
	ioarcb->res_handle = res->cfgte.res_handle;
	ipr_cmd->done = ipr_scsi_done;
	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_PHYS_LOC(res->cfgte.res_addr));

	/* Request flags apply only to generic SCSI and volume-set devices */
	if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
		if (scsi_cmd->underflow == 0)
			ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;

		/* First op after ERP on this device carries a sync complete */
		if (res->needs_sync_complete) {
			ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
			res->needs_sync_complete = 0;

		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
		ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
		ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
		ioarcb->cmd_pkt.flags_lo |= ipr_get_task_attributes(scsi_cmd);

	/* Vendor-unique CDB opcodes (>= 0xC0) to non-GSCSI devices are
	 * routed as IOA commands rather than SCSI CDBs */
	if (!ipr_is_gscsi(res) && scsi_cmd->cmnd[0] >= 0xC0)
		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;

	/* Mode selects to the IOA itself are saved for replay after reset */
	if (ipr_is_ioa_resource(res) && scsi_cmd->cmnd[0] == MODE_SELECT)
		rc = ipr_save_ioafp_mode_select(ioa_cfg, scsi_cmd);

	if (likely(rc == 0))
		rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);

	if (likely(rc == 0)) {
		/* Writing the IOARCB bus address to IOARRIN hands the op
		 * to the adapter */
		writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
		       ioa_cfg->regs.ioarrin_reg);

	/* Failure path: return the command block and ask for a retry */
	list_move_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
	return SCSI_MLQUEUE_HOST_BUSY;
3931 * ipr_info - Get information about the card/driver
3932 * @scsi_host: scsi host struct
3935 * pointer to buffer with description string
3937 static const char * ipr_ioa_info(struct Scsi_Host *host)
3939 static char buffer[512];
3940 struct ipr_ioa_cfg *ioa_cfg;
3941 unsigned long lock_flags = 0;
3943 ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;
3945 spin_lock_irqsave(host->host_lock, lock_flags);
3946 sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
3947 spin_unlock_irqrestore(host->host_lock, lock_flags);
/*
 * SCSI mid-layer host template. Supplies the driver's queueing entry
 * point, error-handler callbacks, device (slave) lifecycle hooks,
 * sysfs attributes, and queue-depth/scatter-gather limits.
 * NOTE(review): closing "};" is elided in this excerpt.
 */
static struct scsi_host_template driver_template = {
	.module = THIS_MODULE,
	.info = ipr_ioa_info,
	.queuecommand = ipr_queuecommand,
	.eh_abort_handler = ipr_eh_abort,
	.eh_device_reset_handler = ipr_eh_dev_reset,
	.eh_host_reset_handler = ipr_eh_host_reset,
	.slave_alloc = ipr_slave_alloc,
	.slave_configure = ipr_slave_configure,
	.slave_destroy = ipr_slave_destroy,
	.bios_param = ipr_biosparam,
	.can_queue = IPR_MAX_COMMANDS,
	.sg_tablesize = IPR_MAX_SGLIST,
	.max_sectors = IPR_MAX_SECTORS,
	.cmd_per_lun = IPR_MAX_CMD_PER_LUN,
	.use_clustering = ENABLE_CLUSTERING,
	.shost_attrs = ipr_ioa_attrs,
	.sdev_attrs = ipr_dev_attrs,
	.proc_name = IPR_NAME
3975 #ifdef CONFIG_PPC_PSERIES
3976 static const u16 ipr_blocked_processors[] = {
/**
 * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
 * @ioa_cfg:	ioa cfg struct
 *
 * Adapters that use Gemstone revision < 3.1 do not work reliably on
 * certain pSeries hardware. This function determines if the given
 * adapter is in one of these confgurations or not.
 *
 * Return value:
 *	1 if adapter is not supported / 0 if adapter is supported
 *
 * NOTE(review): excerpt heavily elided (locals, returns, braces, #else
 * missing); code kept verbatim, comments only added.
 **/
static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
	/* Only type 0x5702 adapters can be affected; revision decides */
	if (ioa_cfg->type == 0x5702) {
		if (pci_read_config_byte(ioa_cfg->pdev, PCI_REVISION_ID,
					 &rev_id) == PCIBIOS_SUCCESSFUL) {
			/* Check the running CPU against the blocked list */
			for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++){
				if (__is_processor(ipr_blocked_processors[i]))
/* Non-pSeries builds have no blocked adapter/processor combinations */
#define ipr_invalid_adapter(ioa_cfg) 0
/**
 * ipr_ioa_bringdown_done - IOA bring down completion.
 * @ipr_cmd:	ipr command struct
 *
 * This function processes the completion of an adapter bring down.
 * It wakes any reset sleepers.
 *
 * Return value:
 *	IPR_RC_JOB_RETURN
 **/
static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	/* Reset/reload is over: recycle the command and wake any waiters */
	ioa_cfg->in_reset_reload = 0;
	ioa_cfg->reset_retries = 0;
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
	wake_up_all(&ioa_cfg->reset_wait_q);

	/* Drop the host lock around scsi_unblock_requests */
	spin_unlock_irq(ioa_cfg->host->host_lock);
	scsi_unblock_requests(ioa_cfg->host);
	spin_lock_irq(ioa_cfg->host->host_lock);

	return IPR_RC_JOB_RETURN;
/**
 * ipr_ioa_reset_done - IOA reset completion.
 * @ipr_cmd:	ipr command struct
 *
 * This function processes the completion of an adapter reset.
 * It schedules any necessary mid-layer add/removes and
 * wakes any reset sleepers.
 *
 * Return value:
 *	IPR_RC_JOB_RETURN
 *
 * NOTE(review): excerpt appears elided (i declaration, braces, else
 * missing); code kept verbatim, comments only added.
 **/
static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_resource_entry *res;
	struct ipr_hostrcb *hostrcb, *temp;

	ioa_cfg->in_reset_reload = 0;
	ioa_cfg->allow_cmds = 1;
	ioa_cfg->reset_cmd = NULL;

	/* Kick the worker thread for devices needing mid-layer add/remove */
	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if (ioa_cfg->allow_ml_add_del && (res->add_to_ml || res->del_from_ml)) {
			schedule_work(&ioa_cfg->work_q);

	/* Re-arm the HCAMs: the first IPR_NUM_LOG_HCAMS free buffers go out
	 * as log-data HCAMs, the rest as config-change HCAMs */
	list_for_each_entry_safe(hostrcb, temp, &ioa_cfg->hostrcb_free_q, queue) {
		list_del(&hostrcb->queue);
		if (i++ < IPR_NUM_LOG_HCAMS)
			ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);

		ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);

	dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");

	ioa_cfg->reset_retries = 0;
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
	wake_up_all(&ioa_cfg->reset_wait_q);

	/* Drop the host lock around scsi_unblock_requests */
	spin_unlock_irq(ioa_cfg->host->host_lock);
	scsi_unblock_requests(ioa_cfg->host);
	spin_lock_irq(ioa_cfg->host->host_lock);

	/* A nested reset may have cleared allow_cmds while unlocked */
	if (!ioa_cfg->allow_cmds)
		scsi_block_requests(ioa_cfg->host);

	return IPR_RC_JOB_RETURN;
4105 * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
4106 * @supported_dev: supported device struct
4107 * @vpids: vendor product id struct
4112 static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
4113 struct ipr_std_inq_vpids *vpids)
4115 memset(supported_dev, 0, sizeof(struct ipr_supported_device));
4116 memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
4117 supported_dev->num_records = 1;
4118 supported_dev->data_length =
4119 cpu_to_be16(sizeof(struct ipr_supported_device));
4120 supported_dev->reserved = 0;
/**
 * ipr_set_supported_devs - Send Set Supported Devices for a device
 * @ipr_cmd:	ipr command struct
 *
 * This function send a Set Supported Devices to the adapter
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 *
 * NOTE(review): excerpt appears elided (continue statement and braces
 * missing); code kept verbatim, comments only added.
 **/
static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_resource_entry *res = ipr_cmd->u.res;

	/* Once all AF DASD are processed, the reset job completes */
	ipr_cmd->job_step = ipr_ioa_reset_done;

	/* Resume the walk from the resource saved in ipr_cmd->u.res */
	list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
		/* Only advanced-function DASD require this command */
		if (!ipr_is_af_dasd_device(res))

		ipr_cmd->u.res = res;
		ipr_set_sup_dev_dflt(supp_dev, &res->cfgte.std_inq_data.vpids);

		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;

		ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
		/* CDB bytes 7/8 carry the big-endian parameter length */
		ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
		ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;

		/* One write IOADL entry covers the whole supported-device buffer */
		ioadl->flags_and_data_len = cpu_to_be32(IPR_IOADL_FLAGS_WRITE_LAST |
							sizeof(struct ipr_supported_device));
		ioadl->address = cpu_to_be32(ioa_cfg->vpd_cbs_dma +
					     offsetof(struct ipr_misc_cbs, supp_dev));
		ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
		ioarcb->write_data_transfer_length =
			cpu_to_be32(sizeof(struct ipr_supported_device));

		ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
			   IPR_SET_SUP_DEVICE_TIMEOUT);

		/* Re-enter this step for the next resource on completion */
		ipr_cmd->job_step = ipr_set_supported_devs;
		return IPR_RC_JOB_RETURN;

	return IPR_RC_JOB_CONTINUE;
/**
 * ipr_get_mode_page - Locate specified mode page
 * @mode_pages:	mode page buffer
 * @page_code:	page code to find
 * @len:		minimum required length for mode page
 *
 * Return value:
 *	pointer to mode page / NULL on failure
 *
 * NOTE(review): excerpt appears elided (loop construct, return statements,
 * local declarations missing); code kept verbatim, comments only added.
 **/
static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
			       u32 page_code, u32 len)
	struct ipr_mode_page_hdr *mode_hdr;

	/* Missing or empty buffer: nothing to search */
	if (!mode_pages || (mode_pages->hdr.length == 0))

	/* hdr.length excludes itself; subtract the 4-byte mode parameter
	 * header remainder and the block descriptors to get page data len */
	length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
	mode_hdr = (struct ipr_mode_page_hdr *)
		(mode_pages->data + mode_pages->hdr.block_desc_len);

	/* Match on page code, and require the page to be long enough */
	if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
		if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))

	/* Advance to the next page header */
	page_length = (sizeof(struct ipr_mode_page_hdr) +
		       mode_hdr->page_length);
	length -= page_length;
	mode_hdr = (struct ipr_mode_page_hdr *)
		((unsigned long)mode_hdr + page_length);
4215 * ipr_check_term_power - Check for term power errors
4216 * @ioa_cfg: ioa config struct
4217 * @mode_pages: IOAFP mode pages buffer
4219 * Check the IOAFP's mode page 28 for term power errors
4224 static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
4225 struct ipr_mode_pages *mode_pages)
4229 struct ipr_dev_bus_entry *bus;
4230 struct ipr_mode_page28 *mode_page;
4232 mode_page = ipr_get_mode_page(mode_pages, 0x28,
4233 sizeof(struct ipr_mode_page28));
4235 entry_length = mode_page->entry_length;
4237 bus = mode_page->bus;
4239 for (i = 0; i < mode_page->num_entries; i++) {
4240 if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
4241 dev_err(&ioa_cfg->pdev->dev,
4242 "Term power is absent on scsi bus %d\n",
4246 bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
4251 * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
4252 * @ioa_cfg: ioa config struct
4254 * Looks through the config table checking for SES devices. If
4255 * the SES device is in the SES table indicating a maximum SCSI
4256 * bus speed, the speed is limited for the bus.
4261 static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
4266 for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
4267 max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
4268 ioa_cfg->bus_attr[i].bus_width);
4270 if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
4271 ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
4276 * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
4277 * @ioa_cfg: ioa config struct
4278 * @mode_pages: mode page 28 buffer
4280 * Updates mode page 28 based on driver configuration
4285 static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
4286 struct ipr_mode_pages *mode_pages)
4288 int i, entry_length;
4289 struct ipr_dev_bus_entry *bus;
4290 struct ipr_bus_attributes *bus_attr;
4291 struct ipr_mode_page28 *mode_page;
4293 mode_page = ipr_get_mode_page(mode_pages, 0x28,
4294 sizeof(struct ipr_mode_page28));
4296 entry_length = mode_page->entry_length;
4298 /* Loop for each device bus entry */
4299 for (i = 0, bus = mode_page->bus;
4300 i < mode_page->num_entries;
4301 i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
4302 if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
4303 dev_err(&ioa_cfg->pdev->dev,
4304 "Invalid resource address reported: 0x%08X\n",
4305 IPR_GET_PHYS_LOC(bus->res_addr));
4309 bus_attr = &ioa_cfg->bus_attr[i];
4310 bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
4311 bus->bus_width = bus_attr->bus_width;
4312 bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
4313 bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
4314 if (bus_attr->qas_enabled)
4315 bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
4317 bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
/**
 * ipr_build_mode_select - Build a mode select command
 * @ipr_cmd:	ipr command struct
 * @res_handle:	resource handle to send command to
 * @parm:	Byte 2 of Mode Sense command
 * @dma_addr:	DMA buffer address
 * @xfer_len:	data transfer length
 *
 * Return value:
 *	none
 *
 * NOTE(review): excerpt appears elided (final xfer_len parameter and braces
 * missing from the signature); code kept verbatim, comments only added.
 **/
static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
				  u32 res_handle, u8 parm, u32 dma_addr,
	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;

	/* MODE SELECT(6) CDB: byte 1 = parm, byte 4 = parameter list length */
	ioarcb->res_handle = res_handle;
	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
	ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
	ioarcb->cmd_pkt.cdb[1] = parm;
	ioarcb->cmd_pkt.cdb[4] = xfer_len;

	/* A single write IOADL entry describes the whole parameter buffer */
	ioadl->flags_and_data_len =
		cpu_to_be32(IPR_IOADL_FLAGS_WRITE_LAST | xfer_len);
	ioadl->address = cpu_to_be32(dma_addr);
	ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
	ioarcb->write_data_transfer_length = cpu_to_be32(xfer_len);
/**
 * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
 * @ipr_cmd:	ipr command struct
 *
 * This function sets up the SCSI bus attributes and sends
 * a Mode Select for Page 28 to activate them.
 *
 * Return value:
 *	IPR_RC_JOB_RETURN
 *
 * NOTE(review): excerpt appears elided (length declaration, else branch,
 * final argument of ipr_build_mode_select missing); code kept verbatim.
 **/
static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;

	/* Replay a previously saved user mode select if one exists */
	if (ioa_cfg->saved_mode_pages) {
		memcpy(mode_pages, ioa_cfg->saved_mode_pages,
		       ioa_cfg->saved_mode_page_len);
		length = ioa_cfg->saved_mode_page_len;

	/* Otherwise edit the freshly sensed page 28 in place */
	ipr_scsi_bus_speed_limit(ioa_cfg);
	ipr_check_term_power(ioa_cfg, mode_pages);
	ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
	length = mode_pages->hdr.length + 1;
	/* Mode data length byte is reserved on MODE SELECT */
	mode_pages->hdr.length = 0;

	ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
			      ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),

	/* Next step: advertise supported AF devices, starting at list head */
	ipr_cmd->job_step = ipr_set_supported_devs;
	ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
				    struct ipr_resource_entry, queue);

	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);

	return IPR_RC_JOB_RETURN;
/**
 * ipr_build_mode_sense - Builds a mode sense command
 * @ipr_cmd:	ipr command struct
 * @res:	resource entry struct
 * @parm:	Byte 2 of mode sense command
 * @dma_addr:	DMA address of mode sense buffer
 * @xfer_len:	Size of DMA buffer
 *
 * Return value:
 *	none
 *
 * NOTE(review): excerpt appears elided (res_handle parameter line and
 * braces missing from the signature); code kept verbatim, comments only.
 **/
static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
				 u8 parm, u32 dma_addr, u8 xfer_len)
	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;

	/* MODE SENSE(6) CDB: byte 2 = page code, byte 4 = allocation length */
	ioarcb->res_handle = res_handle;
	ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
	ioarcb->cmd_pkt.cdb[2] = parm;
	ioarcb->cmd_pkt.cdb[4] = xfer_len;
	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;

	/* One read IOADL entry receives the whole mode page buffer */
	ioadl->flags_and_data_len =
		cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | xfer_len);
	ioadl->address = cpu_to_be32(dma_addr);
	ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
	ioarcb->read_data_transfer_length = cpu_to_be32(xfer_len);
/**
 * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
 * @ipr_cmd:	ipr command struct
 *
 * This function send a Page 28 mode sense to the IOA to
 * retrieve SCSI bus attributes.
 *
 * Return value:
 *	IPR_RC_JOB_RETURN
 **/
static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	/* Sense page 0x28 into the shared VPD control-block DMA area */
	ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
			     0x28, ioa_cfg->vpd_cbs_dma +
			     offsetof(struct ipr_misc_cbs, mode_pages),
			     sizeof(struct ipr_mode_pages));

	/* On completion, modify and select the page back to the IOA */
	ipr_cmd->job_step = ipr_ioafp_mode_select_page28;

	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);

	return IPR_RC_JOB_RETURN;
/**
 * ipr_init_res_table - Initialize the resource table
 * @ipr_cmd:	ipr command struct
 *
 * This function looks through the existing resource table, comparing
 * it with the config table. This function will take care of old/new
 * devices and schedule adding/removing them from the mid-layer
 * as appropriate.
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE
 *
 * NOTE(review): excerpt appears elided (i/found/old_res declarations,
 * branches, braces missing); code kept verbatim, comments only added.
 **/
static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_resource_entry *res, *temp;
	struct ipr_config_table_entry *cfgte;

	if (ioa_cfg->cfg_table->hdr.flags & IPR_UCODE_DOWNLOAD_REQ)
		dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");

	/* Park every currently known resource on a local list */
	list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
		list_move_tail(&res->queue, &old_res);

	/* Walk the freshly fetched config table */
	for (i = 0; i < ioa_cfg->cfg_table->hdr.num_entries; i++) {
		cfgte = &ioa_cfg->cfg_table->dev[i];

		/* Match by resource address: a known device stays in use */
		list_for_each_entry_safe(res, temp, &old_res, queue) {
			if (!memcmp(&res->cfgte.res_addr,
				    &cfgte->res_addr, sizeof(cfgte->res_addr))) {
				list_move_tail(&res->queue, &ioa_cfg->used_res_q);

		if (list_empty(&ioa_cfg->free_res_q)) {
			dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");

		/* New device: claim a free entry and initialize it */
		res = list_entry(ioa_cfg->free_res_q.next,
				 struct ipr_resource_entry, queue);
		list_move_tail(&res->queue, &ioa_cfg->used_res_q);
		ipr_init_res_entry(res);

		/* Refresh the cached config table entry either way */
		memcpy(&res->cfgte, cfgte, sizeof(struct ipr_config_table_entry));

	/* Anything left on old_res no longer appears in the config table:
	 * schedule mid-layer removal or return the entry to the free list */
	list_for_each_entry_safe(res, temp, &old_res, queue) {
		res->del_from_ml = 1;
		list_move_tail(&res->queue, &ioa_cfg->used_res_q);

		list_move_tail(&res->queue, &ioa_cfg->free_res_q);

	ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;

	return IPR_RC_JOB_CONTINUE;
/**
 * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a Query IOA Configuration command
 * to the adapter to retrieve the IOA configuration table.
 *
 * Return value:
 *	IPR_RC_JOB_RETURN
 **/
static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
	struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;

	/* Log the firmware level obtained by the preceding page 3 inquiry */
	dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
		 ucode_vpd->major_release, ucode_vpd->card_type,
		 ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
	ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);

	/* CDB bytes 7/8 carry the big-endian allocation length */
	ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
	ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_config_table) >> 8) & 0xff;
	ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_config_table) & 0xff;

	/* Single read IOADL entry receives the whole config table */
	ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
	ioarcb->read_data_transfer_length =
		cpu_to_be32(sizeof(struct ipr_config_table));

	ioadl->address = cpu_to_be32(ioa_cfg->cfg_table_dma);
	ioadl->flags_and_data_len =
		cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | sizeof(struct ipr_config_table));

	/* On completion, reconcile the table with known resources */
	ipr_cmd->job_step = ipr_init_res_table;

	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);

	return IPR_RC_JOB_RETURN;
/**
 * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
 * @ipr_cmd:	ipr command struct
 * @flags:	INQUIRY CDB byte 1 (EVPD flag)
 * @page:	VPD page code (CDB byte 2)
 * @dma_addr:	DMA address of the receive buffer
 * @xfer_len:	allocation length
 *
 * This utility function sends an inquiry to the adapter.
 *
 * Return value:
 *	none
 **/
static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
			      u32 dma_addr, u8 xfer_len)
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;

	/* Build an INQUIRY CDB aimed at the IOA's own resource handle */
	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
	ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);

	ioarcb->cmd_pkt.cdb[0] = INQUIRY;
	ioarcb->cmd_pkt.cdb[1] = flags;
	ioarcb->cmd_pkt.cdb[2] = page;
	ioarcb->cmd_pkt.cdb[4] = xfer_len;

	/* One read IOADL entry receives the inquiry data */
	ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
	ioarcb->read_data_transfer_length = cpu_to_be32(xfer_len);

	ioadl->address = cpu_to_be32(dma_addr);
	ioadl->flags_and_data_len =
		cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | xfer_len);

	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
/**
 * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a Page 3 inquiry to the adapter
 * to retrieve software VPD information.
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 *
 * NOTE(review): excerpt appears elided (declaration of the local type
 * buffer missing); code kept verbatim, comments only added.
 **/
static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	/* Grab the type out of the VPD and store it away */
	memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
	/* Product id encodes the adapter type as four hex digits */
	ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);

	ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;

	/* EVPD=1, page 3 (software VPD / firmware level) */
	ipr_ioafp_inquiry(ipr_cmd, 1, 3,
			  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
			  sizeof(struct ipr_inquiry_page3));

	return IPR_RC_JOB_RETURN;
/**
 * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a standard inquiry to the adapter.
 *
 * Return value:
 *	IPR_RC_JOB_RETURN
 **/
static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	/* On completion, the page 3 inquiry step runs next */
	ipr_cmd->job_step = ipr_ioafp_page3_inquiry;

	/* EVPD=0, page 0: standard inquiry data into the IOA VPD buffer */
	ipr_ioafp_inquiry(ipr_cmd, 0, 0,
			  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
			  sizeof(struct ipr_ioa_vpd));

	return IPR_RC_JOB_RETURN;
/**
 * ipr_ioafp_indentify_hrrq - Send Identify Host RRQ.
 * @ipr_cmd:	ipr command struct
 *
 * This function send an Identify Host Request Response Queue
 * command to establish the HRRQ with the adapter.
 *
 * Return value:
 *	IPR_RC_JOB_RETURN
 **/
static int ipr_ioafp_indentify_hrrq(struct ipr_cmnd *ipr_cmd)
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;

	dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");

	ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
	ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);

	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
	/* CDB bytes 2-5: HRRQ DMA address, most significant byte first */
	ioarcb->cmd_pkt.cdb[2] =
		((u32) ioa_cfg->host_rrq_dma >> 24) & 0xff;
	ioarcb->cmd_pkt.cdb[3] =
		((u32) ioa_cfg->host_rrq_dma >> 16) & 0xff;
	ioarcb->cmd_pkt.cdb[4] =
		((u32) ioa_cfg->host_rrq_dma >> 8) & 0xff;
	ioarcb->cmd_pkt.cdb[5] =
		((u32) ioa_cfg->host_rrq_dma) & 0xff;
	/* CDB bytes 7-8: queue size in bytes, big-endian */
	ioarcb->cmd_pkt.cdb[7] =
		((sizeof(u32) * IPR_NUM_CMD_BLKS) >> 8) & 0xff;
	ioarcb->cmd_pkt.cdb[8] =
		(sizeof(u32) * IPR_NUM_CMD_BLKS) & 0xff;

	/* Standard inquiry follows once the HRRQ is established */
	ipr_cmd->job_step = ipr_ioafp_std_inquiry;

	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);

	return IPR_RC_JOB_RETURN;
4707 * ipr_reset_timer_done - Adapter reset timer function
4708 * @ipr_cmd: ipr command struct
4710 * Description: This function is used in adapter reset processing
4711 * for timing events. If the reset_cmd pointer in the IOA
4712 * config struct is not this adapter's we are doing nested
4713 * resets and fail_all_ops will take care of freeing the
4719 static void ipr_reset_timer_done(struct ipr_cmnd *ipr_cmd)
4721 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4722 unsigned long lock_flags = 0;
4724 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4726 if (ioa_cfg->reset_cmd == ipr_cmd) {
4727 list_del(&ipr_cmd->queue);
4728 ipr_cmd->done(ipr_cmd);
4731 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/**
 * ipr_reset_start_timer - Start a timer for adapter reset job
 * @ipr_cmd:	ipr command struct
 * @timeout:	timeout value
 *
 * Description: This function is used in adapter reset processing
 * for timing events. If the reset_cmd pointer in the IOA
 * config struct is not this adapter's we are doing nested
 * resets and fail_all_ops will take care of freeing the
 * command block.
 *
 * Return value:
 *	none
 **/
static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
				  unsigned long timeout)
	/* Park the command on the pending queue; the timer resumes the
	 * reset job when it fires */
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q);
	ipr_cmd->done = ipr_reset_ioa_job;

	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
	ipr_cmd->timer.expires = jiffies + timeout;
	ipr_cmd->timer.function = (void (*)(unsigned long))ipr_reset_timer_done;
	add_timer(&ipr_cmd->timer);
4761 * ipr_init_ioa_mem - Initialize ioa_cfg control block
4762 * @ioa_cfg: ioa cfg struct
4767 static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
4769 memset(ioa_cfg->host_rrq, 0, sizeof(u32) * IPR_NUM_CMD_BLKS);
4771 /* Initialize Host RRQ pointers */
4772 ioa_cfg->hrrq_start = ioa_cfg->host_rrq;
4773 ioa_cfg->hrrq_end = &ioa_cfg->host_rrq[IPR_NUM_CMD_BLKS - 1];
4774 ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
4775 ioa_cfg->toggle_bit = 1;
4777 /* Zero out config table */
4778 memset(ioa_cfg->cfg_table, 0, sizeof(struct ipr_config_table));
/**
 * ipr_reset_enable_ioa - Enable the IOA following a reset.
 * @ipr_cmd:	ipr command struct
 *
 * This function reinitializes some control blocks and
 * enables destructive diagnostics on the adapter.
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	volatile u32 int_reg;

	ipr_cmd->job_step = ipr_ioafp_indentify_hrrq;
	ipr_init_ioa_mem(ioa_cfg);

	ioa_cfg->allow_interrupts = 1;
	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);

	/* Already operational: unmask error/HRRQ interrupts and continue
	 * the job synchronously */
	if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
		writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
		       ioa_cfg->regs.clr_interrupt_mask_reg);
		/* Read back to flush the posted write */
		int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
		return IPR_RC_JOB_CONTINUE;

	/* Enable destructive diagnostics on IOA */
	writel(IPR_DOORBELL, ioa_cfg->regs.set_uproc_interrupt_reg);

	writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg);
	int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);

	dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");

	/* Arm a watchdog for the transition to operational */
	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
	ipr_cmd->timer.expires = jiffies + IPR_OPERATIONAL_TIMEOUT;
	ipr_cmd->timer.function = (void (*)(unsigned long))ipr_timeout;
	ipr_cmd->done = ipr_reset_ioa_job;
	add_timer(&ipr_cmd->timer);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);

	return IPR_RC_JOB_RETURN;
4830 * ipr_reset_wait_for_dump - Wait for a dump to timeout.
4831 * @ipr_cmd: ipr command struct
4833 * This function is invoked when an adapter dump has run out
4834 * of processing time.
4837 * IPR_RC_JOB_CONTINUE
4839 static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
4841 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4843 if (ioa_cfg->sdt_state == GET_DUMP)
4844 ioa_cfg->sdt_state = ABORT_DUMP;
4846 ipr_cmd->job_step = ipr_reset_alert;
4848 return IPR_RC_JOB_CONTINUE;
4852 * ipr_unit_check_no_data - Log a unit check/no data error log
4853 * @ioa_cfg: ioa config struct
4855 * Logs an error indicating the adapter unit checked, but for some
4856 * reason, we were unable to fetch the unit check buffer.
4861 static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
4863 ioa_cfg->errors_logged++;
4864 dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
/**
 * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
 * @ioa_cfg:	ioa config struct
 *
 * Fetches the unit check buffer from the adapter by clocking the data
 * through the mailbox register.
 *
 * Return value:
 *	nothing
 *
 * NOTE(review): excerpt appears elided (length/rc declarations and early
 * returns missing); code kept verbatim, comments only added.
 **/
static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
	unsigned long mailbox;
	struct ipr_hostrcb *hostrcb;
	struct ipr_uc_sdt sdt;

	mailbox = readl(ioa_cfg->ioa_mailbox);

	/* The mailbox must point at a format-2 SDT or there is no buffer */
	if (!ipr_sdt_is_fmt2(mailbox)) {
		ipr_unit_check_no_data(ioa_cfg);

	/* Pull the SDT header out of adapter memory */
	memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
	rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (u32 *) &sdt,
					(sizeof(struct ipr_uc_sdt)) / sizeof(u32));

	/* Table must be ready and entry 0 valid, else give up */
	if (rc || (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE) ||
	    !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY)) {
		ipr_unit_check_no_data(ioa_cfg);

	/* Find length of the first sdt entry (UC buffer) */
	length = (be32_to_cpu(sdt.entry[0].end_offset) -
		  be32_to_cpu(sdt.entry[0].bar_str_offset)) & IPR_FMT2_MBX_ADDR_MASK;

	/* Borrow a free hostrcb to hold the fetched log data */
	hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
			     struct ipr_hostrcb, queue);
	list_del(&hostrcb->queue);
	memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));

	rc = ipr_get_ldump_data_section(ioa_cfg,
					be32_to_cpu(sdt.entry[0].bar_str_offset),
					(u32 *)&hostrcb->hcam,
					min(length, (int)sizeof(hostrcb->hcam)) / sizeof(u32));

	ipr_handle_log_data(ioa_cfg, hostrcb);

	ipr_unit_check_no_data(ioa_cfg);

	/* Return the borrowed hostrcb */
	list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
/**
 * ipr_reset_restore_cfg_space - Restore PCI config space.
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function restores the saved PCI config space of
 * the adapter, fails all outstanding ops back to the callers, and
 * fetches the dump/unit check if applicable to this reset.
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 *
 * NOTE(review): excerpt appears elided (rc declaration, some braces
 * missing); code kept verbatim, comments only added.
 **/
static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	rc = pci_restore_state(ioa_cfg->pdev, ioa_cfg->pci_cfg_buf);

	/* A PCI access failure is reported through the command's IOASC */
	if (rc != PCIBIOS_SUCCESSFUL) {
		ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
		return IPR_RC_JOB_CONTINUE;

	if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
		ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
		return IPR_RC_JOB_CONTINUE;

	/* Everything the adapter had in flight is lost; fail it back */
	ipr_fail_all_ops(ioa_cfg);

	/* Unit check: fetch the buffer, then re-alert the adapter */
	if (ioa_cfg->ioa_unit_checked) {
		ioa_cfg->ioa_unit_checked = 0;
		ipr_get_unit_check_buffer(ioa_cfg);
		ipr_cmd->job_step = ipr_reset_alert;
		ipr_reset_start_timer(ipr_cmd, 0);
		return IPR_RC_JOB_RETURN;

	if (ioa_cfg->in_ioa_bringdown) {
		ipr_cmd->job_step = ipr_ioa_bringdown_done;

	ipr_cmd->job_step = ipr_reset_enable_ioa;

	/* A pending dump request defers re-enable until the dump is done */
	if (GET_DUMP == ioa_cfg->sdt_state) {
		ipr_reset_start_timer(ipr_cmd, IPR_DUMP_TIMEOUT);
		ipr_cmd->job_step = ipr_reset_wait_for_dump;
		schedule_work(&ioa_cfg->work_q);
		return IPR_RC_JOB_RETURN;

	return IPR_RC_JOB_CONTINUE;
4980 * ipr_reset_start_bist - Run BIST on the adapter.
4981 * @ipr_cmd: ipr command struct
4983 * Description: This function runs BIST on the adapter, then delays 2 seconds.
4986 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
4988 static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
4990 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
/* Starting PCI built-in self test is what actually resets the IOA. */
4994 rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START)
4996 if (rc != PCIBIOS_SUCCESSFUL) {
4997 ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
4998 rc = IPR_RC_JOB_CONTINUE;
/* presumably the else arm (line elided in this listing): BIST was
 * started — wait for it before restoring config space. */
5000 ipr_cmd->job_step = ipr_reset_restore_cfg_space;
5001 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
5002 rc = IPR_RC_JOB_RETURN;
5010 * ipr_reset_allowed - Query whether or not IOA can be reset
5011 * @ioa_cfg: ioa config struct
5014 * 0 if reset not allowed / non-zero if reset is allowed
5016 static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
5018 volatile u32 temp_reg;
5020 temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5021 return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
5025 * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
5026 * @ipr_cmd: ipr command struct
5028 * Description: This function waits for adapter permission to run BIST,
5029 * then runs BIST. If the adapter does not give permission after a
5030 * reasonable time, we will reset the adapter anyway. The impact of
5031 * resetting the adapter without warning the adapter is the risk of
5032 * losing the persistent error log on the adapter. If the adapter is
5033 * reset while it is writing to the flash on the adapter, the flash
5034 * segment will have bad ECC and be zeroed.
5037 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5039 static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
5041 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5042 int rc = IPR_RC_JOB_RETURN;
/* Poll: while the IOA still reports a critical operation and the wait
 * budget (u.time_left) is not exhausted, re-arm the timer and retry. */
5044 if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
5045 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
5046 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
/* presumably the else arm (line elided in this listing): permission
 * granted or budget expired — reset anyway, accepting the flash risk
 * described above. */
5048 ipr_cmd->job_step = ipr_reset_start_bist;
5049 rc = IPR_RC_JOB_CONTINUE;
5056 * ipr_reset_alert - Alert the adapter of a pending reset
5057 * @ipr_cmd: ipr command struct
5059 * Description: This function alerts the adapter that it will be reset.
5060 * If memory space is not currently enabled, proceed directly
5061 * to running BIST on the adapter. The timer must always be started
5062 * so we guarantee we do not run BIST from ipr_isr.
5067 static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
5069 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
/* The reset-alert MMIO write below only works if memory space is
 * enabled in the PCI command register. */
5074 rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);
5076 if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
5077 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
5078 writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg);
5079 ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
/* presumably the else arm (line elided in this listing): no memory
 * space — skip the alert and go straight to BIST. */
5081 ipr_cmd->job_step = ipr_reset_start_bist;
/* Timer is always started so the next step runs from timer context,
 * never from the interrupt handler (see description above). */
5084 ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
5085 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
5088 return IPR_RC_JOB_RETURN;
5092 * ipr_reset_ucode_download_done - Microcode download completion
5093 * @ipr_cmd: ipr command struct
5095 * Description: This function unmaps the microcode download buffer.
5098 * IPR_RC_JOB_CONTINUE
5100 static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
5102 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5103 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
5105 pci_unmap_sg(ioa_cfg->pdev, sglist->scatterlist,
5106 sglist->num_sg, DMA_TO_DEVICE);
5108 ipr_cmd->job_step = ipr_reset_alert;
5109 return IPR_RC_JOB_CONTINUE;
5113 * ipr_reset_ucode_download - Download microcode to the adapter
5114 * @ipr_cmd: ipr command struct
5116 * Description: This function checks to see if there is microcode
5117 * to download to the adapter. If there is, a download is performed.
5120 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5122 static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
5124 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5125 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
/* Next step after (or instead of) the download is the reset alert. */
5128 ipr_cmd->job_step = ipr_reset_alert;
/* presumably guarded by a "no microcode staged" check (line elided in
 * this listing) — nothing to download, continue the job chain. */
5131 return IPR_RC_JOB_CONTINUE;
/* Build a SCSI WRITE BUFFER (download microcode and save) CDB, with
 * the 24-bit transfer length in CDB bytes 6..8 (big endian). */
5133 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
5134 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
5135 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
5136 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
5137 ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
5138 ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
5139 ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;
5141 if (ipr_map_ucode_buffer(ipr_cmd, sglist, sglist->buffer_len)) {
5142 dev_err(&ioa_cfg->pdev->dev,
5143 "Failed to map microcode download buffer\n");
5144 return IPR_RC_JOB_CONTINUE;
/* Issue the download; completion unmaps the buffer and resumes the
 * reset job. */
5147 ipr_cmd->job_step = ipr_reset_ucode_download_done;
5149 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
5150 IPR_WRITE_BUFFER_TIMEOUT);
5153 return IPR_RC_JOB_RETURN;
5157 * ipr_reset_shutdown_ioa - Shutdown the adapter
5158 * @ipr_cmd: ipr command struct
5160 * Description: This function issues an adapter shutdown of the
5161 * specified type to the specified adapter as part of the
5162 * adapter reset job.
5165 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5167 static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
5169 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5170 enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
5171 unsigned long timeout;
5172 int rc = IPR_RC_JOB_CONTINUE;
/* Only send a shutdown if one was requested and the IOA is alive;
 * a dead adapter cannot process the command. */
5175 if (shutdown_type != IPR_SHUTDOWN_NONE && !ioa_cfg->ioa_is_dead) {
5176 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
5177 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
5178 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
5179 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;
/* Timeout scales with how much work the shutdown type implies. */
5181 if (shutdown_type == IPR_SHUTDOWN_ABBREV)
5182 timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;
5183 else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
5184 timeout = IPR_INTERNAL_TIMEOUT;
/* presumably a trailing else (line elided in this listing). */
5186 timeout = IPR_SHUTDOWN_TIMEOUT;
5188 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);
5190 rc = IPR_RC_JOB_RETURN;
5191 ipr_cmd->job_step = ipr_reset_ucode_download;
/* presumably the else arm (line elided in this listing): no shutdown
 * needed — skip straight to the reset alert step. */
5193 ipr_cmd->job_step = ipr_reset_alert;
5200 * ipr_reset_ioa_job - Adapter reset job
5201 * @ipr_cmd: ipr command struct
5203 * Description: This function is the job router for the adapter reset job.
5208 static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
5211 unsigned long scratch = ipr_cmd->u.scratch;
5212 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
/* Body of a do/while loop (see line 5239): drive job_step functions
 * until one returns IPR_RC_JOB_RETURN to wait for an async event. */
5215 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
5217 if (ioa_cfg->reset_cmd != ipr_cmd) {
5219 * We are doing nested adapter resets and this is
5220 * not the current reset job.
5222 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5226 if (IPR_IOASC_SENSE_KEY(ioasc)) {
5227 dev_err(&ioa_cfg->pdev->dev,
5228 "0x%02X failed with IOASC: 0x%08X\n",
5229 ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
/* A failed step triggers a fresh adapter reset and releases this
 * command block. */
5231 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5232 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
/* Recycle the command block for the next step, preserving the
 * scratch state across the re-init. */
5236 ipr_reinit_ipr_cmnd(ipr_cmd);
5237 ipr_cmd->u.scratch = scratch;
5238 rc = ipr_cmd->job_step(ipr_cmd);
5239 } while(rc == IPR_RC_JOB_CONTINUE);
5243 * _ipr_initiate_ioa_reset - Initiate an adapter reset
5244 * @ioa_cfg: ioa config struct
5245 * @job_step: first job step of reset job
5246 * @shutdown_type: shutdown type
5248 * Description: This function will initiate the reset of the given adapter
5249 * starting at the selected job step.
5250 * If the caller needs to wait on the completion of the reset,
5251 * the caller must sleep on the reset_wait_q.
5256 static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
5257 int (*job_step) (struct ipr_cmnd *),
5258 enum ipr_shutdown_type shutdown_type)
5260 struct ipr_cmnd *ipr_cmd;
/* Stop new commands from entering the adapter while it resets. */
5262 ioa_cfg->in_reset_reload = 1;
5263 ioa_cfg->allow_cmds = 0;
5264 scsi_block_requests(ioa_cfg->host);
/* Dedicate one command block to the reset; reset_cmd also lets
 * ipr_reset_ioa_job detect nested resets. */
5266 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5267 ioa_cfg->reset_cmd = ipr_cmd;
5268 ipr_cmd->job_step = job_step;
5269 ipr_cmd->u.shutdown_type = shutdown_type;
/* Run the job state machine starting at the requested step. */
5271 ipr_reset_ioa_job(ipr_cmd);
5275 * ipr_initiate_ioa_reset - Initiate an adapter reset
5276 * @ioa_cfg: ioa config struct
5277 * @shutdown_type: shutdown type
5279 * Description: This function will initiate the reset of the given adapter.
5280 * If the caller needs to wait on the completion of the reset,
5281 * the caller must sleep on the reset_wait_q.
5286 static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
5287 enum ipr_shutdown_type shutdown_type)
/* Nothing to do for an adapter already marked dead. */
5289 if (ioa_cfg->ioa_is_dead)
/* A reset issued mid-dump aborts the dump collection. */
5292 if (ioa_cfg->in_reset_reload && ioa_cfg->sdt_state == GET_DUMP)
5293 ioa_cfg->sdt_state = ABORT_DUMP;
/* Too many back-to-back reset attempts: give up and offline the IOA. */
5295 if (ioa_cfg->reset_retries++ > IPR_NUM_RESET_RELOAD_RETRIES) {
5296 dev_err(&ioa_cfg->pdev->dev,
5297 "IOA taken offline - error recovery failed\n");
5299 ioa_cfg->reset_retries = 0;
5300 ioa_cfg->ioa_is_dead = 1;
5302 if (ioa_cfg->in_ioa_bringdown) {
5303 ioa_cfg->reset_cmd = NULL;
5304 ioa_cfg->in_reset_reload = 0;
5305 ipr_fail_all_ops(ioa_cfg);
5306 wake_up_all(&ioa_cfg->reset_wait_q);
/* Drop the host lock around scsi_unblock_requests since it may
 * re-enter the request submission path. */
5308 spin_unlock_irq(ioa_cfg->host->host_lock);
5309 scsi_unblock_requests(ioa_cfg->host);
5310 spin_lock_irq(ioa_cfg->host->host_lock);
/* presumably the else arm (line elided in this listing): force a
 * bringdown with no shutdown command, since the IOA is dead. */
5313 ioa_cfg->in_ioa_bringdown = 1;
5314 shutdown_type = IPR_SHUTDOWN_NONE;
/* Normal path: start the reset job at the shutdown step. */
5318 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
5323 * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
5324 * @ioa_cfg: ioa cfg struct
5326 * Description: This is the second phase of adapter initialization
5327 * This function takes care of initializing the adapter to the point
5328 * where it can accept new commands.
5331 * 0 on success / -EIO on failure
5333 static int __devinit ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
5336 unsigned long host_lock_flags = 0;
/* Start the enable-IOA reset job, then sleep (with the host lock
 * dropped) until the reset/reload completes. */
5339 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
5340 dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
5341 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa, IPR_SHUTDOWN_NONE);
5343 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
5344 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5345 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
5347 if (ioa_cfg->ioa_is_dead) {
5349 } else if (ipr_invalid_adapter(ioa_cfg)) {
/* Unsupported adapter/platform combination — report and fail. */
5353 dev_err(&ioa_cfg->pdev->dev,
5354 "Adapter not supported in this hardware configuration.\n");
5357 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
5364 * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
5365 * @ioa_cfg: ioa config struct
5370 static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
/* Return every allocated command block to the DMA pool, then destroy
 * the pool itself. */
5374 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
5375 if (ioa_cfg->ipr_cmnd_list[i])
5376 pci_pool_free(ioa_cfg->ipr_cmd_pool,
5377 ioa_cfg->ipr_cmnd_list[i],
5378 ioa_cfg->ipr_cmnd_list_dma[i]);
/* NULL the slot so a repeat call (partial-alloc unwind) is harmless. */
5380 ioa_cfg->ipr_cmnd_list[i] = NULL;
5383 if (ioa_cfg->ipr_cmd_pool)
5384 pci_pool_destroy (ioa_cfg->ipr_cmd_pool);
5386 ioa_cfg->ipr_cmd_pool = NULL;
5390 * ipr_free_mem - Frees memory allocated for an adapter
5391 * @ioa_cfg: ioa cfg struct
5396 static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
/* Release all DMA-consistent buffers and kernel memory owned by the
 * adapter (counterpart of ipr_alloc_mem). */
5400 kfree(ioa_cfg->res_entries);
5401 pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_misc_cbs),
5402 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
5403 ipr_free_cmd_blks(ioa_cfg);
5404 pci_free_consistent(ioa_cfg->pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
5405 ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
5406 pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_config_table),
5408 ioa_cfg->cfg_table_dma);
5410 for (i = 0; i < IPR_NUM_HCAMS; i++) {
5411 pci_free_consistent(ioa_cfg->pdev,
5412 sizeof(struct ipr_hostrcb),
5413 ioa_cfg->hostrcb[i],
5414 ioa_cfg->hostrcb_dma[i]);
5417 ipr_free_dump(ioa_cfg);
5418 kfree(ioa_cfg->saved_mode_pages);
5419 kfree(ioa_cfg->trace);
5423 * ipr_free_all_resources - Free all allocated resources for an adapter.
5424 * @ioa_cfg: ioa config struct
5426 * This function frees all allocated resources for the
5427 * specified adapter.
5432 static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
/* Quiesce the IRQ first, then unmap/release MMIO and free memory. */
5435 free_irq(ioa_cfg->pdev->irq, ioa_cfg);
5436 iounmap((void *) ioa_cfg->hdw_dma_regs);
5437 release_mem_region(ioa_cfg->hdw_dma_regs_pci,
5438 pci_resource_len(ioa_cfg->pdev, 0));
5439 ipr_free_mem(ioa_cfg);
/* Must be last: ioa_cfg lives in the Scsi_Host's hostdata, so
 * dropping the host reference may free ioa_cfg itself. */
5440 scsi_host_put(ioa_cfg->host);
5445 * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
5446 * @ioa_cfg: ioa config struct
5449 * 0 on success / -ENOMEM on allocation failure
5451 static int __devinit ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
5453 struct ipr_cmnd *ipr_cmd;
5454 struct ipr_ioarcb *ioarcb;
/* Command blocks come from a PCI DMA pool, 8-byte aligned. */
5458 ioa_cfg->ipr_cmd_pool = pci_pool_create (IPR_NAME, ioa_cfg->pdev,
5459 sizeof(struct ipr_cmnd), 8, 0);
5461 if (!ioa_cfg->ipr_cmd_pool)
5464 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
5465 ipr_cmd = pci_pool_alloc (ioa_cfg->ipr_cmd_pool, SLAB_KERNEL, &dma_addr);
/* On a partial allocation failure, release everything so far. */
5468 ipr_free_cmd_blks(ioa_cfg);
5472 memset(ipr_cmd, 0, sizeof(*ipr_cmd));
5473 ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
5474 ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;
/* Pre-compute the 32-bit bus addresses the adapter will use for this
 * block's IOARCB, IOADLs, IOASA and sense buffer. */
5476 ioarcb = &ipr_cmd->ioarcb;
5477 ioarcb->ioarcb_host_pci_addr = cpu_to_be32(dma_addr);
5478 ioarcb->host_response_handle = cpu_to_be32(i << 2);
5479 ioarcb->write_ioadl_addr =
5480 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioadl));
5481 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
5482 ioarcb->ioasa_host_pci_addr =
5483 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioasa));
5484 ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
5485 ipr_cmd->cmd_index = i;
5486 ipr_cmd->ioa_cfg = ioa_cfg;
5487 ipr_cmd->sense_buffer_dma = dma_addr +
5488 offsetof(struct ipr_cmnd, sense_buffer);
/* Every block starts on the free queue. */
5490 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5497 * ipr_alloc_mem - Allocate memory for an adapter
5498 * @ioa_cfg: ioa config struct
5501 * 0 on success / non-zero for error
5503 static int __devinit ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
/* Resource entry table: plain kernel memory, zeroed, all entries
 * initially on the free queue. */
5508 ioa_cfg->res_entries = kmalloc(sizeof(struct ipr_resource_entry) *
5509 IPR_MAX_PHYSICAL_DEVS, GFP_KERNEL);
5511 if (!ioa_cfg->res_entries)
5514 memset(ioa_cfg->res_entries, 0,
5515 sizeof(struct ipr_resource_entry) * IPR_MAX_PHYSICAL_DEVS);
5517 for (i = 0; i < IPR_MAX_PHYSICAL_DEVS; i++)
5518 list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
/* Miscellaneous control blocks (VPD etc.) must be DMA-able. */
5520 ioa_cfg->vpd_cbs = pci_alloc_consistent(ioa_cfg->pdev,
5521 sizeof(struct ipr_misc_cbs),
5522 &ioa_cfg->vpd_cbs_dma);
5524 if (!ioa_cfg->vpd_cbs)
5527 if (ipr_alloc_cmd_blks(ioa_cfg))
/* Host receive-request queue: one u32 slot per command block. */
5530 ioa_cfg->host_rrq = pci_alloc_consistent(ioa_cfg->pdev,
5531 sizeof(u32) * IPR_NUM_CMD_BLKS,
5532 &ioa_cfg->host_rrq_dma);
5534 if (!ioa_cfg->host_rrq)
5537 ioa_cfg->cfg_table = pci_alloc_consistent(ioa_cfg->pdev,
5538 sizeof(struct ipr_config_table),
5539 &ioa_cfg->cfg_table_dma);
5541 if (!ioa_cfg->cfg_table)
/* HCAM (host controlled async message) buffers, one per HCAM. */
5544 for (i = 0; i < IPR_NUM_HCAMS; i++) {
5545 ioa_cfg->hostrcb[i] = pci_alloc_consistent(ioa_cfg->pdev,
5546 sizeof(struct ipr_hostrcb),
5547 &ioa_cfg->hostrcb_dma[i]);
5549 if (!ioa_cfg->hostrcb[i])
5552 memset(ioa_cfg->hostrcb[i], 0, sizeof(struct ipr_hostrcb));
5553 ioa_cfg->hostrcb[i]->hostrcb_dma =
5554 ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
5555 list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
5558 ioa_cfg->trace = kmalloc(sizeof(struct ipr_trace_entry) *
5559 IPR_NUM_TRACE_ENTRIES, GFP_KERNEL);
5561 if (!ioa_cfg->trace)
5564 memset(ioa_cfg->trace, 0,
5565 sizeof(struct ipr_trace_entry) * IPR_NUM_TRACE_ENTRIES);
/* Error unwind path (goto labels elided in this listing). */
5571 ipr_free_mem(ioa_cfg);
5578 * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
5579 * @ioa_cfg: ioa config struct
5584 static void __devinit ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
5588 for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
5589 ioa_cfg->bus_attr[i].bus = i;
5590 ioa_cfg->bus_attr[i].qas_enabled = 0;
5591 ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
/* Honor the module's max-speed parameter if it is a valid index into
 * the speed table; otherwise fall back to the U160 rate. */
5592 if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
5593 ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
5595 ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
5600 * ipr_init_ioa_cfg - Initialize IOA config struct
5601 * @ioa_cfg: ioa config struct
5602 * @host: scsi host struct
5603 * @pdev: PCI dev struct
5608 static void __devinit ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
5609 struct Scsi_Host *host, struct pci_dev *pdev)
5611 ioa_cfg->host = host;
5612 ioa_cfg->pdev = pdev;
5613 ioa_cfg->log_level = ipr_log_level;
/* Eye-catcher label strings make the config struct and its major
 * sub-areas easy to locate in a raw memory dump. */
5614 sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
5615 sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
5616 sprintf(ioa_cfg->ipr_free_label, IPR_FREEQ_LABEL);
5617 sprintf(ioa_cfg->ipr_pending_label, IPR_PENDQ_LABEL);
5618 sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
5619 sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
5620 sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
5621 sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);
/* Empty queues and wait queue used throughout the driver. */
5623 INIT_LIST_HEAD(&ioa_cfg->free_q);
5624 INIT_LIST_HEAD(&ioa_cfg->pending_q);
5625 INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
5626 INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
5627 INIT_LIST_HEAD(&ioa_cfg->free_res_q);
5628 INIT_LIST_HEAD(&ioa_cfg->used_res_q);
5629 INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread, ioa_cfg);
5630 init_waitqueue_head(&ioa_cfg->reset_wait_q);
5631 ioa_cfg->sdt_state = INACTIVE;
5633 ipr_initialize_bus_attr(ioa_cfg);
5635 host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
5636 host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
5637 host->max_channel = IPR_MAX_BUS_TO_SCAN;
5638 host->unique_id = host->host_no;
5639 host->max_cmd_len = IPR_MAX_CDB_LEN;
5640 pci_set_drvdata(pdev, ioa_cfg);
/* Convert the chip's register offsets into usable addresses by
 * adding the mapped MMIO base. */
5642 memcpy(&ioa_cfg->regs, &ioa_cfg->chip_cfg->regs, sizeof(ioa_cfg->regs));
5644 ioa_cfg->regs.set_interrupt_mask_reg += ioa_cfg->hdw_dma_regs;
5645 ioa_cfg->regs.clr_interrupt_mask_reg += ioa_cfg->hdw_dma_regs;
5646 ioa_cfg->regs.sense_interrupt_mask_reg += ioa_cfg->hdw_dma_regs;
5647 ioa_cfg->regs.clr_interrupt_reg += ioa_cfg->hdw_dma_regs;
5648 ioa_cfg->regs.sense_interrupt_reg += ioa_cfg->hdw_dma_regs;
5649 ioa_cfg->regs.ioarrin_reg += ioa_cfg->hdw_dma_regs;
5650 ioa_cfg->regs.sense_uproc_interrupt_reg += ioa_cfg->hdw_dma_regs;
5651 ioa_cfg->regs.set_uproc_interrupt_reg += ioa_cfg->hdw_dma_regs;
5652 ioa_cfg->regs.clr_uproc_interrupt_reg += ioa_cfg->hdw_dma_regs;
5656 * ipr_probe_ioa - Allocates memory and does first stage of initialization
5657 * @pdev: PCI device struct
5658 * @dev_id: PCI device id struct
5661 * 0 on success / non-zero on failure
5663 static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
5664 const struct pci_device_id *dev_id)
5666 struct ipr_ioa_cfg *ioa_cfg;
5667 struct Scsi_Host *host;
5668 unsigned long ipr_regs, ipr_regs_pci;
5669 u32 rc = PCIBIOS_SUCCESSFUL;
5673 if ((rc = pci_enable_device(pdev))) {
5674 dev_err(&pdev->dev, "Cannot enable adapter\n");
5678 dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);
/* The ioa_cfg is carved out of the Scsi_Host's hostdata allocation. */
5680 host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));
5683 dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
5687 ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
5688 memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
/* Per-chip register/config layout selected by the PCI match entry. */
5690 ioa_cfg->chip_cfg = (const struct ipr_chip_cfg_t *)dev_id->driver_data;
5692 ipr_regs_pci = pci_resource_start(pdev, 0);
5694 if (!request_mem_region(ipr_regs_pci,
5695 pci_resource_len(pdev, 0), IPR_NAME)) {
5697 "Couldn't register memory range of registers\n");
5698 scsi_host_put(host);
5702 ipr_regs = (unsigned long)ioremap(ipr_regs_pci,
5703 pci_resource_len(pdev, 0));
5707 "Couldn't map memory range of registers\n");
5708 release_mem_region(ipr_regs_pci, pci_resource_len(pdev, 0));
5709 scsi_host_put(host);
5713 ioa_cfg->hdw_dma_regs = ipr_regs;
5714 ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
5715 ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;
5717 ipr_init_ioa_cfg(ioa_cfg, host, pdev);
5719 pci_set_master(pdev);
/* 32-bit DMA mask: the IOARCB address fields are 32-bit (see the
 * cpu_to_be32 address setup in ipr_alloc_cmd_blks). */
5720 rc = pci_set_dma_mask(pdev, 0xffffffff);
5722 if (rc != PCIBIOS_SUCCESSFUL) {
5723 dev_err(&pdev->dev, "Failed to set PCI DMA mask\n");
5728 rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
5729 ioa_cfg->chip_cfg->cache_line_size);
5731 if (rc != PCIBIOS_SUCCESSFUL) {
5732 dev_err(&pdev->dev, "Write of cache line size failed\n");
5737 /* Save away PCI config space for use following IOA reset */
5738 rc = pci_save_state(pdev, ioa_cfg->pci_cfg_buf);
5740 if (rc != PCIBIOS_SUCCESSFUL) {
5741 dev_err(&pdev->dev, "Failed to save PCI config space\n");
5746 if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
5749 if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
5752 if ((rc = ipr_alloc_mem(ioa_cfg)))
/* Mask everything except the transition-to-operational interrupt
 * before requesting the (shared) IRQ. */
5755 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
5756 rc = request_irq(pdev->irq, ipr_isr, SA_SHIRQ, IPR_NAME, ioa_cfg);
5759 dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
/* Register this adapter on the driver's global list. */
5764 spin_lock(&ipr_driver_lock);
5765 list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
5766 spin_unlock(&ipr_driver_lock);
/* Error unwind path (goto labels elided in this listing). */
5772 dev_err(&pdev->dev, "Couldn't allocate enough memory for device driver!\n");
5774 ipr_free_mem(ioa_cfg);
5776 iounmap((void *) ipr_regs);
5777 release_mem_region(ipr_regs_pci, pci_resource_len(pdev, 0));
5778 scsi_host_put(host);
5784 * ipr_scan_vsets - Scans for VSET devices
5785 * @ioa_cfg: ioa config struct
5787 * Description: Since the VSET resources do not follow SAM in that we can have
5788 * sparse LUNs with no LUN 0, we have to scan for these ourselves.
5793 static void ipr_scan_vsets(struct ipr_ioa_cfg *ioa_cfg)
/* Probe every target/LUN combination on the VSET bus explicitly;
 * the generic scan would stop at a target with no LUN 0. */
5797 for (target = 0; target < IPR_MAX_NUM_TARGETS_PER_BUS; target++)
5798 for (lun = 0; lun < IPR_MAX_NUM_VSET_LUNS_PER_TARGET; lun++ )
5799 scsi_add_device(ioa_cfg->host, IPR_VSET_BUS, target, lun);
5803 * ipr_initiate_ioa_bringdown - Bring down an adapter
5804 * @ioa_cfg: ioa config struct
5805 * @shutdown_type: shutdown type
5807 * Description: This function will initiate bringing down the adapter.
5808 * This consists of issuing an IOA shutdown to the adapter
5809 * to flush the cache, and running BIST.
5810 * If the caller needs to wait on the completion of the reset,
5811 * the caller must sleep on the reset_wait_q.
5816 static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
5817 enum ipr_shutdown_type shutdown_type)
/* Abort any dump still waiting to be fetched; bringdown wins. */
5820 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
5821 ioa_cfg->sdt_state = ABORT_DUMP;
/* Fresh retry budget, then run the reset path in bringdown mode. */
5822 ioa_cfg->reset_retries = 0;
5823 ioa_cfg->in_ioa_bringdown = 1;
5824 ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
5829 * __ipr_remove - Remove a single adapter
5830 * @pdev: pci device struct
5832 * Adapter hot plug remove entry point.
5837 static void __ipr_remove(struct pci_dev *pdev)
5839 unsigned long host_lock_flags = 0;
5840 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
/* Bring the adapter down cleanly, then sleep (with the lock dropped)
 * until the reset/reload job finishes. */
5843 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
5844 ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
5846 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
5847 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5848 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
/* Unlink from the driver's global adapter list. */
5850 spin_lock(&ipr_driver_lock);
5851 list_del(&ioa_cfg->queue);
5852 spin_unlock(&ipr_driver_lock);
5854 if (ioa_cfg->sdt_state == ABORT_DUMP)
5855 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
5856 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
5858 ipr_free_all_resources(ioa_cfg);
5864 * ipr_remove - IOA hot plug remove entry point
5865 * @pdev: pci device struct
5867 * Adapter hot plug remove entry point.
5872 static void ipr_remove(struct pci_dev *pdev)
5874 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
/* Stop accepting commands and let queued worker items drain before
 * tearing down the sysfs files and the SCSI host. */
5878 ioa_cfg->allow_cmds = 0;
5879 flush_scheduled_work();
5880 ipr_remove_trace_file(&ioa_cfg->host->shost_classdev.kobj,
5882 ipr_remove_dump_file(&ioa_cfg->host->shost_classdev.kobj,
5884 scsi_remove_host(ioa_cfg->host);
5892 * ipr_probe - Adapter hot plug add entry point
5895 * 0 on success / non-zero on failure
5897 static int __devinit ipr_probe(struct pci_dev *pdev,
5898 const struct pci_device_id *dev_id)
5900 struct ipr_ioa_cfg *ioa_cfg;
/* Two-phase probe: allocate/map (part 1), then initialize the IOA to
 * the point where it accepts commands (part 2). */
5903 rc = ipr_probe_ioa(pdev, dev_id);
5908 ioa_cfg = pci_get_drvdata(pdev);
5909 rc = ipr_probe_ioa_part2(ioa_cfg);
5916 rc = scsi_add_host(ioa_cfg->host, &pdev->dev);
/* sysfs trace/dump attribute files; each failure unwinds the host. */
5923 rc = ipr_create_trace_file(&ioa_cfg->host->shost_classdev.kobj,
5927 scsi_remove_host(ioa_cfg->host);
5932 rc = ipr_create_dump_file(&ioa_cfg->host->shost_classdev.kobj,
5936 ipr_remove_trace_file(&ioa_cfg->host->shost_classdev.kobj,
5938 scsi_remove_host(ioa_cfg->host);
/* Scan normal devices, then VSETs (sparse LUNs), then the IOA's own
 * resource handle. */
5943 scsi_scan_host(ioa_cfg->host);
5944 ipr_scan_vsets(ioa_cfg);
5945 scsi_add_device(ioa_cfg->host, IPR_IOA_BUS, IPR_IOA_TARGET, IPR_IOA_LUN);
5946 ioa_cfg->allow_ml_add_del = 1;
5947 schedule_work(&ioa_cfg->work_q);
5952 * ipr_shutdown - Shutdown handler.
5953 * @dev: device struct
5955 * This function is invoked upon system shutdown/reboot. It will issue
5956 * an adapter shutdown to the adapter to flush the write cache.
5961 static void ipr_shutdown(struct device *dev)
5963 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(to_pci_dev(dev));
5964 unsigned long lock_flags = 0;
/* Issue a normal bringdown, then block until the reset job finishes
 * so the write cache is flushed before power-off. */
5966 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5967 ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
5968 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5969 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
/* PCI IDs this driver binds to; the driver_data member selects the
 * per-chip register/config layout in ipr_chip_cfg[]. */
5972 static struct pci_device_id ipr_pci_table[] __devinitdata = {
5973 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
5974 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702,
5975 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
5976 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
5977 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E,
5978 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
5979 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
5980 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703,
5981 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
5982 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
5983 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D,
5984 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
5985 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
5986 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780,
5987 0, 0, (kernel_ulong_t)&ipr_chip_cfg[1] },
5990 MODULE_DEVICE_TABLE(pci, ipr_pci_table);
/* PCI driver glue (some initializer members elided in this listing). */
5992 static struct pci_driver ipr_driver = {
5994 .id_table = ipr_pci_table,
5996 .remove = ipr_remove,
5998 .shutdown = ipr_shutdown,
6003 * ipr_init - Module entry point
6006 * 0 on success / non-zero on failure
6008 static int __init ipr_init(void)
6010 ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
6011 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);
6013 pci_register_driver(&ipr_driver);
6019 * ipr_exit - Module unload
6021 * Module unload entry point.
6026 static void __exit ipr_exit(void)
/* Unregistering the PCI driver invokes ipr_remove for each adapter
 * still bound to it. */
6028 pci_unregister_driver(&ipr_driver);
6031 module_init(ipr_init);
6032 module_exit(ipr_exit);