2 * ipr.c -- driver for IBM Power Linux RAID adapters
4 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
6 * Copyright (C) 2003, 2004 IBM Corporation
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
27 * This driver is used to control the following SCSI adapters:
29 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
31 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
32 * PCI-X Dual Channel Ultra 320 SCSI Adapter
33 * PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
34 * Embedded SCSI adapter on p615 and p655 systems
36 * Supported Hardware Features:
37 * - Ultra 320 SCSI controller
38 * - PCI-X host interface
39 * - Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
40 * - Non-Volatile Write Cache
41 * - Supports attachment of non-RAID disks, tape, and optical devices
42 * - RAID Levels 0, 5, 10
44 * - Background Parity Checking
45 * - Background Data Scrubbing
46 * - Ability to increase the capacity of an existing RAID 5 disk array
50 * - Tagged command queuing
51 * - Adapter microcode download
53 * - SCSI device hot plug
57 #include <linux/config.h>
59 #include <linux/init.h>
60 #include <linux/types.h>
61 #include <linux/errno.h>
62 #include <linux/kernel.h>
63 #include <linux/ioport.h>
64 #include <linux/delay.h>
65 #include <linux/pci.h>
66 #include <linux/wait.h>
67 #include <linux/spinlock.h>
68 #include <linux/sched.h>
69 #include <linux/interrupt.h>
70 #include <linux/blkdev.h>
71 #include <linux/firmware.h>
72 #include <linux/module.h>
73 #include <linux/moduleparam.h>
76 #include <asm/processor.h>
77 #include <scsi/scsi.h>
78 #include <scsi/scsi_host.h>
79 #include <scsi/scsi_tcq.h>
80 #include <scsi/scsi_eh.h>
81 #include <scsi/scsi_cmnd.h>
82 #include <scsi/scsi_request.h>
/* Driver-global state and module-parameter backing variables. */
static struct list_head ipr_ioa_head = LIST_HEAD_INIT(ipr_ioa_head);	/* list of all probed adapters */
static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;	/* error-log verbosity (module param "log_level") */
static unsigned int ipr_max_speed = 1;	/* bus speed index: 0=80MB/s, 1=U160, 2=U320 (module param "max_speed") */
static int ipr_testmode = 0;	/* allow unsupported configurations (module param "testmode") */
static spinlock_t ipr_driver_lock = SPIN_LOCK_UNLOCKED;	/* presumably guards ipr_ioa_head -- confirm */
94 /* This table describes the differences between DMA controller chips */
static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
	{ /* Gemstone and Citrine */
		.cache_line_size = 0x20,
		/* MMIO register offsets for this chip family */
		.set_interrupt_mask_reg = 0x0022C,
		.clr_interrupt_mask_reg = 0x00230,
		.sense_interrupt_mask_reg = 0x0022C,
		.clr_interrupt_reg = 0x00228,
		.sense_interrupt_reg = 0x00224,
		.ioarrin_reg = 0x00404,
		.sense_uproc_interrupt_reg = 0x00214,
		.set_uproc_interrupt_reg = 0x00214,
		.clr_uproc_interrupt_reg = 0x00218
		/* Second supported chip family -- same fields, different offsets */
		.cache_line_size = 0x20,
		.set_interrupt_mask_reg = 0x00288,
		.clr_interrupt_mask_reg = 0x0028C,
		.sense_interrupt_mask_reg = 0x00288,
		.clr_interrupt_reg = 0x00284,
		.sense_interrupt_reg = 0x00280,
		.ioarrin_reg = 0x00504,
		.sense_uproc_interrupt_reg = 0x00290,
		.set_uproc_interrupt_reg = 0x00290,
		.clr_uproc_interrupt_reg = 0x00294
/* Maps the "max_speed" module parameter (0-2) to SCSI rate constants. */
static int ipr_max_bus_speeds [] = {
	IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
/* Module metadata and tunable parameters (sysfs perm 0 = not writable). */
MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
module_param_named(max_speed, ipr_max_speed, uint, 0);
MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
module_param_named(log_level, ipr_log_level, uint, 0);
MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
module_param_named(testmode, ipr_testmode, int, 0);
MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
MODULE_LICENSE("GPL");
MODULE_VERSION(IPR_DRIVER_VERSION);
/* Printable end-state strings for GPDD (general purpose device) ops. */
static const char *ipr_gpdd_dev_end_states[] = {
	"Terminated by host",
	"Terminated by device reset",
	"Terminated by bus reset",
	"Command not started"
/* Printable SCSI bus phase strings for GPDD error reporting. */
static const char *ipr_gpdd_dev_bus_phases[] = {
166 /* A constant array of IOASCs/URCs/Error Messages */
168 struct ipr_error_table_t ipr_error_table[] = {
170 "8155: An unknown error was received"},
172 "Soft underlength error"},
174 "Command to be cancelled not found"},
176 "Qualified success"},
178 "FFFE: Soft device bus error recovered by the IOA"},
180 "FFF9: Device sector reassign successful"},
182 "FFF7: Media error recovered by device rewrite procedures"},
184 "7001: IOA sector reassignment successful"},
186 "FFF9: Soft media error. Sector reassignment recommended"},
188 "FFF7: Media error recovered by IOA rewrite procedures"},
190 "FF3D: Soft PCI bus error recovered by the IOA"},
192 "FFF6: Device hardware error recovered by the IOA"},
194 "FFF6: Device hardware error recovered by the device"},
196 "FF3D: Soft IOA error recovered by the IOA"},
198 "FFFA: Undefined device response recovered by the IOA"},
200 "FFF6: Device bus error, message or command phase"},
202 "FFF6: Failure prediction threshold exceeded"},
204 "8009: Impending cache battery pack failure"},
206 "34FF: Disk device format in progress"},
208 "Synchronization required"},
210 "No ready, IOA shutdown"},
212 "Not ready, IOA has been shutdown"},
214 "3020: Storage subsystem configuration error"},
216 "FFF5: Medium error, data unreadable, recommend reassign"},
218 "7000: Medium error, data unreadable, do not reassign"},
220 "FFF3: Disk media format bad"},
222 "3002: Addressed device failed to respond to selection"},
224 "3100: Device bus error"},
226 "3109: IOA timed out a device command"},
228 "3120: SCSI bus is not operational"},
230 "9000: IOA reserved area data check"},
232 "9001: IOA reserved area invalid data pattern"},
234 "9002: IOA reserved area LRC error"},
236 "102E: Out of alternate sectors for disk storage"},
238 "FFF4: Data transfer underlength error"},
240 "FFF4: Data transfer overlength error"},
242 "3400: Logical unit failure"},
244 "FFF4: Device microcode is corrupt"},
246 "8150: PCI bus error"},
248 "Unsupported device bus message received"},
250 "FFF4: Disk device problem"},
252 "8150: Permanent IOA failure"},
254 "3010: Disk device returned wrong response to IOA"},
256 "8151: IOA microcode error"},
258 "Device bus status error"},
260 "8157: IOA error requiring IOA reset to recover"},
262 "Message reject received from the device"},
264 "8008: A permanent cache battery pack failure occurred"},
266 "9090: Disk unit has been modified after the last known status"},
268 "9081: IOA detected device error"},
270 "9082: IOA detected device error"},
272 "3110: Device bus error, message or command phase"},
274 "9091: Incorrect hardware configuration change has been detected"},
276 "FFF4: Command to logical unit failed"},
278 "Illegal request, invalid request type or request packet"},
280 "Illegal request, invalid resource handle"},
282 "Illegal request, invalid field in parameter list"},
284 "Illegal request, parameter not supported"},
286 "Illegal request, parameter value invalid"},
288 "Illegal request, command sequence error"},
290 "9031: Array protection temporarily suspended, protection resuming"},
292 "9040: Array protection temporarily suspended, protection resuming"},
294 "FFFB: SCSI bus was reset"},
296 "FFFE: SCSI bus transition to single ended"},
298 "FFFE: SCSI bus transition to LVD"},
300 "FFFB: SCSI bus was reset by another initiator"},
302 "3029: A device replacement has occurred"},
304 "9051: IOA cache data exists for a missing or failed device"},
306 "9025: Disk unit is not supported at its physical location"},
308 "3020: IOA detected a SCSI bus configuration error"},
310 "3150: SCSI bus configuration error"},
312 "9041: Array protection temporarily suspended"},
314 "9030: Array no longer protected due to missing or failed disk unit"},
316 "Failure due to other device"},
318 "9008: IOA does not support functions expected by devices"},
320 "9010: Cache data associated with attached devices cannot be found"},
322 "9011: Cache data belongs to devices other than those attached"},
324 "9020: Array missing 2 or more devices with only 1 device present"},
326 "9021: Array missing 2 or more devices with 2 or more devices present"},
328 "9022: Exposed array is missing a required device"},
330 "9023: Array member(s) not at required physical locations"},
332 "9024: Array not functional due to present hardware configuration"},
334 "9026: Array not functional due to present hardware configuration"},
336 "9027: Array is missing a device and parity is out of sync"},
338 "9028: Maximum number of arrays already exist"},
340 "9050: Required cache data cannot be located for a disk unit"},
342 "9052: Cache data exists for a device that has been modified"},
344 "9054: IOA resources not available due to previous problems"},
346 "9092: Disk unit requires initialization before use"},
348 "9029: Incorrect hardware configuration change has been detected"},
350 "9060: One or more disk pairs are missing from an array"},
352 "9061: One or more disks are missing from an array"},
354 "9062: One or more disks are missing from an array"},
356 "9063: Maximum number of functional arrays has been exceeded"},
358 "Aborted command, invalid descriptor"},
360 "Command terminated by host"}
/* Known SCSI enclosures.  compare_product_id_byte: 'X' means the byte
 * must match product_id (see ipr_find_ses_entry()); '*' appears to be
 * a don't-care wildcard -- confirm against ipr_find_ses_entry's else
 * branch.  Last field: maximum bus speed for the enclosure (MB/s). */
static const struct ipr_ses_table_entry ipr_ses_table[] = {
	{ "2104-DL1        ", "XXXXXXXXXXXXXXXX", 80 },
	{ "2104-TL1        ", "XXXXXXXXXXXXXXXX", 80 },
	{ "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
	{ "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
	{ "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
	{ "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
	{ "2104-DU3        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "2104-TU3        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "St  V1S2        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBPD4M  PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "VSBPD1H   U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
380 * Function Prototypes
/* Forward declarations for routines referenced before their definitions. */
static int ipr_reset_alert(struct ipr_cmnd *);
static void ipr_process_ccn(struct ipr_cmnd *);
static void ipr_process_error(struct ipr_cmnd *);
static void ipr_reset_ioa_job(struct ipr_cmnd *);
static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
				   enum ipr_shutdown_type);
389 #ifdef CONFIG_SCSI_IPR_TRACE
391 * ipr_trc_hook - Add a trace entry to the driver trace
392 * @ipr_cmd: ipr command struct
394 * @add_data: additional data
static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
			 u8 type, u32 add_data)
	struct ipr_trace_entry *trace_entry;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	/* Grab the next slot; no explicit wrap visible here -- presumably
	 * the index type or a mask sized to the trace array provides
	 * wraparound.  TODO confirm against struct ipr_ioa_cfg. */
	trace_entry = &ioa_cfg->trace[ioa_cfg->trace_index++];
	trace_entry->time = jiffies;
	trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];	/* CDB opcode of the traced op */
	trace_entry->type = type;
	trace_entry->cmd_index = ipr_cmd->cmd_index;
	trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
	trace_entry->u.add_data = add_data;
/* No-op stub used when CONFIG_SCSI_IPR_TRACE is not set (see #ifdef above). */
#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while(0)
418 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
419 * @ipr_cmd: ipr command struct
static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
	/* Clear the request packet and all data-transfer bookkeeping so
	 * the command block can be reissued without reallocating it. */
	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
	ioarcb->write_data_transfer_length = 0;
	ioarcb->read_data_transfer_length = 0;
	ioarcb->write_ioadl_len = 0;
	ioarcb->read_ioadl_len = 0;
	ioasa->residual_data_len = 0;
	ipr_cmd->scsi_cmd = NULL;
	ipr_cmd->sense_buffer[0] = 0;	/* invalidate any stale sense data */
	ipr_cmd->dma_use_sg = 0;
443 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
444 * @ipr_cmd: ipr command struct
static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
	/* Full (re)initialization: reuse the reinit path, then reset the
	 * fields that only matter for a freshly allocated command. */
	ipr_reinit_ipr_cmnd(ipr_cmd);
	ipr_cmd->u.scratch = 0;
	ipr_cmd->sibling = NULL;
	init_timer(&ipr_cmd->timer);	/* timer armed later by ipr_do_req() */
458 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
459 * @ioa_cfg: ioa config struct
462 * pointer to ipr command struct
struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
	struct ipr_cmnd *ipr_cmd;
	/* NOTE(review): no empty-list check -- callers presumably guarantee
	 * free_q is never exhausted (fixed-size command pool). */
	ipr_cmd = list_entry(ioa_cfg->free_q.next, struct ipr_cmnd, queue);
	list_del(&ipr_cmd->queue);
	ipr_init_ipr_cmnd(ipr_cmd);
477 * ipr_unmap_sglist - Unmap scatterlist if mapped
478 * @ioa_cfg: ioa config struct
479 * @ipr_cmd: ipr command struct
static void ipr_unmap_sglist(struct ipr_ioa_cfg *ioa_cfg,
			     struct ipr_cmnd *ipr_cmd)
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
	/* Only undo the DMA mapping if one was actually set up. */
	if (ipr_cmd->dma_use_sg) {
		if (scsi_cmd->use_sg > 0) {
			/* scatter/gather mapping */
			pci_unmap_sg(ioa_cfg->pdev, scsi_cmd->request_buffer,
				     scsi_cmd->sc_data_direction);
			/* single-buffer mapping (non-sg path) */
			pci_unmap_single(ioa_cfg->pdev, ipr_cmd->dma_handle,
					 scsi_cmd->request_bufflen,
					 scsi_cmd->sc_data_direction);
503 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
504 * @ioa_cfg: ioa config struct
505 * @clr_ints: interrupts to clear
507 * This function masks all interrupts on the adapter, then clears the
508 * interrupts specified in the mask
static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
	volatile u32 int_reg;

	/* Stop new interrupts */
	ioa_cfg->allow_interrupts = 0;

	/* Set interrupt mask to stop all new interrupts */
	writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);

	/* Clear any pending interrupts */
	writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg);
	/* Read back -- presumably flushes the posted writes to the adapter. */
	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
530 * ipr_save_pcix_cmd_reg - Save PCI-X command register
531 * @ioa_cfg: ioa config struct
534 * 0 on success / -EIO on failure
static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	/* No PCI-X capability -> nothing to save. */
	if (pcix_cmd_reg == 0) {
		dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");

	if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg,
				 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
		dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");

	/* Force data parity error recovery and relaxed ordering on restore. */
	ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
556 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
557 * @ioa_cfg: ioa config struct
560 * 0 on success / -EIO on failure
static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	/* Restore the value captured by ipr_save_pcix_cmd_reg(). */
	if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg,
				  ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
		dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
		/* second failure path (presumably the missing-capability case) */
		dev_err(&ioa_cfg->pdev->dev,
			"Failed to setup PCI-X command register\n");
582 * ipr_scsi_eh_done - mid-layer done function for aborted ops
583 * @ipr_cmd: ipr command struct
585 * This function is invoked by the interrupt handler for
586 * ops generated by the SCSI mid-layer which are being aborted.
static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;

	/* Report the aborted op to the midlayer as a host error. */
	scsi_cmd->result |= (DID_ERROR << 16);

	ipr_unmap_sglist(ioa_cfg, ipr_cmd);
	scsi_cmd->scsi_done(scsi_cmd);	/* complete the op to the SCSI midlayer */
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);	/* recycle the command block */
604 * ipr_fail_all_ops - Fails all outstanding ops.
605 * @ioa_cfg: ioa config struct
607 * This function fails all outstanding ops.
static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
	struct ipr_cmnd *ipr_cmd, *temp;

	/* Walk the pending queue with the _safe variant since each entry
	 * is removed (and possibly requeued by its done handler). */
	list_for_each_entry_safe(ipr_cmd, temp, &ioa_cfg->pending_q, queue) {
		list_del(&ipr_cmd->queue);

		/* Fabricate a "was reset" IOASA so done handlers see a failure. */
		ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
		ipr_cmd->ioasa.ilid = cpu_to_be32(IPR_DRIVER_ILID);

		/* Midlayer ops get the EH completion path. */
		if (ipr_cmd->scsi_cmd)
			ipr_cmd->done = ipr_scsi_eh_done;

		ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, IPR_IOASC_IOA_WAS_RESET);
		del_timer(&ipr_cmd->timer);	/* op won't time out anymore */
		ipr_cmd->done(ipr_cmd);
635 * ipr_do_req - Send driver initiated requests.
636 * @ipr_cmd: ipr command struct
637 * @done: done function
638 * @timeout_func: timeout function
639 * @timeout: timeout value
641 * This function sends the specified command to the adapter with the
642 * timeout given. The done function is invoked on command completion.
static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
		       void (*done) (struct ipr_cmnd *),
		       void (*timeout_func) (struct ipr_cmnd *), u32 timeout)
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	/* Track the op so a reset can fail it (see ipr_fail_all_ops). */
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);

	ipr_cmd->done = done;

	/* Arm the per-op timeout before handing the op to hardware. */
	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
	ipr_cmd->timer.expires = jiffies + timeout;
	ipr_cmd->timer.function = (void (*)(unsigned long))timeout_func;

	add_timer(&ipr_cmd->timer);

	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);

	/* Writing the IOARCB's bus address to IOARRIN starts the op. */
	writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
	       ioa_cfg->regs.ioarrin_reg);
671 * ipr_internal_cmd_done - Op done function for an internally generated op.
672 * @ipr_cmd: ipr command struct
674 * This function is the op done function for an internally generated,
675 * blocking op. It simply wakes the sleeping thread.
static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
	/* Paired ops: detach the sibling; otherwise wake the thread
	 * sleeping in ipr_send_blocking_cmd(). */
	if (ipr_cmd->sibling)
		ipr_cmd->sibling = NULL;
		complete(&ipr_cmd->completion);
689 * ipr_send_blocking_cmd - Send command and sleep on its completion.
690 * @ipr_cmd: ipr command struct
691 * @timeout_func: function to invoke if command times out
static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
				  void (*timeout_func) (struct ipr_cmnd *ipr_cmd),
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	init_completion(&ipr_cmd->completion);
	ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);

	/* Caller holds host_lock; drop it while sleeping so the interrupt
	 * handler can complete the op, then reacquire before returning. */
	spin_unlock_irq(ioa_cfg->host->host_lock);
	wait_for_completion(&ipr_cmd->completion);
	spin_lock_irq(ioa_cfg->host->host_lock);
712 * ipr_send_hcam - Send an HCAM to the adapter.
713 * @ioa_cfg: ioa config struct
715 * @hostrcb: hostrcb struct
717 * This function will send a Host Controlled Async command to the adapter.
718 * If HCAMs are currently not allowed to be issued to the adapter, it will
719 * place the hostrcb on the free queue.
static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
			  struct ipr_hostrcb *hostrcb)
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioarcb *ioarcb;

	if (ioa_cfg->allow_cmds) {
		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
		list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);

		ipr_cmd->u.hostrcb = hostrcb;
		ioarcb = &ipr_cmd->ioarcb;

		/* Build the HCAM CDB: opcode, subtype, 16-bit buffer length
		 * in bytes 7/8 (big-endian within the CDB). */
		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
		ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
		ioarcb->cmd_pkt.cdb[1] = type;
		ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
		ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;

		/* Single IOADL element describing the hostrcb DMA buffer. */
		ioarcb->read_data_transfer_length = cpu_to_be32(sizeof(hostrcb->hcam));
		ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
		ipr_cmd->ioadl[0].flags_and_data_len =
			cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | sizeof(hostrcb->hcam));
		ipr_cmd->ioadl[0].address = cpu_to_be32(hostrcb->hostrcb_dma);

		/* Config-change vs. error-log completion handlers. */
		if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
			ipr_cmd->done = ipr_process_ccn;
			ipr_cmd->done = ipr_process_error;

		ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);

		/* Hand the op to the adapter. */
		writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
		       ioa_cfg->regs.ioarrin_reg);
		/* HCAMs not allowed: park the hostrcb on the free queue. */
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
767 * ipr_init_res_entry - Initialize a resource entry struct.
768 * @res: resource entry struct
static void ipr_init_res_entry(struct ipr_resource_entry *res)
	/* Fresh resource: force a sync-complete before first use and reset
	 * midlayer bookkeeping flags. */
	res->needs_sync_complete = 1;
	res->del_from_ml = 0;
	res->resetting_device = 0;
	res->qdepth = IPR_MAX_CMD_PER_LUN;	/* default queue depth */
786 * ipr_handle_config_change - Handle a config change from the adapter
787 * @ioa_cfg: ioa config struct
static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
				     struct ipr_hostrcb *hostrcb)
	struct ipr_resource_entry *res = NULL;
	struct ipr_config_table_entry *cfgte;

	cfgte = &hostrcb->hcam.u.ccn.cfgte;

	/* Look for an existing resource at the reported address. */
	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if (!memcmp(&res->cfgte.res_addr, &cfgte->res_addr,
			    sizeof(cfgte->res_addr))) {

	/* Not found: allocate a resource entry from the free pool; if the
	 * pool is empty, just re-arm the HCAM and bail. */
	if (list_empty(&ioa_cfg->free_res_q)) {
		ipr_send_hcam(ioa_cfg,
			      IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,

	res = list_entry(ioa_cfg->free_res_q.next,
			 struct ipr_resource_entry, queue);

	list_del(&res->queue);
	ipr_init_res_entry(res);
	list_add_tail(&res->queue, &ioa_cfg->used_res_q);

	/* Refresh our copy of the config table entry. */
	memcpy(&res->cfgte, cfgte, sizeof(struct ipr_config_table_entry));

	if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
		/* Device removed: detach sdev and schedule midlayer removal. */
		res->sdev->hostdata = NULL;
		res->del_from_ml = 1;
		if (ioa_cfg->allow_ml_add_del)
			schedule_work(&ioa_cfg->work_q);
		list_move_tail(&res->queue, &ioa_cfg->free_res_q);
	} else if (!res->sdev) {
		/* New device with no sdev yet: schedule midlayer add. */
		if (ioa_cfg->allow_ml_add_del)
			schedule_work(&ioa_cfg->work_q);

	/* Re-arm the config-change HCAM with this hostrcb. */
	ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
846 * ipr_process_ccn - Op done function for a CCN.
847 * @ipr_cmd: ipr command struct
849 * This function is the op done function for a configuration
850 * change notification host controlled async from the adapter.
static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);

	list_del(&hostrcb->queue);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);	/* recycle the cmd block */

	/* IOA_WAS_RESET is expected during reset; only log other failures,
	 * then re-arm the HCAM.  On success, process the change instead. */
	if (ioasc != IPR_IOASC_IOA_WAS_RESET)
		dev_err(&ioa_cfg->pdev->dev,
			"Host RCB failed with IOASC: 0x%08X\n", ioasc);

	ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
	ipr_handle_config_change(ioa_cfg, hostrcb);
876 * ipr_log_vpd - Log the passed VPD to the error log.
877 * @vpids: vendor/product id struct
878 * @serial_num: serial number string
static void ipr_log_vpd(struct ipr_std_inq_vpids *vpids, u8 *serial_num)
	/* Scratch buffer large enough for the longest of the two strings
	 * printed below (vendor+product, then serial). */
	char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
		    + IPR_SERIAL_NUM_LEN];

	/* Vendor and product ids are fixed-width, not NUL-terminated. */
	memcpy(buffer, vpids->vendor_id, IPR_VENDOR_ID_LEN);
	memcpy(buffer + IPR_VENDOR_ID_LEN, vpids->product_id,
	buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
	ipr_err("Vendor/Product ID: %s\n", buffer);

	/* Reuse the buffer for the serial number. */
	memcpy(buffer, serial_num, IPR_SERIAL_NUM_LEN);
	buffer[IPR_SERIAL_NUM_LEN] = '\0';
	ipr_err("    Serial Number: %s\n", buffer);
900 * ipr_log_cache_error - Log a cache error.
901 * @ioa_cfg: ioa config struct
902 * @hostrcb: hostrcb struct
static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_hostrcb *hostrcb)
	struct ipr_hostrcb_type_02_error *error =
		&hostrcb->hcam.u.error.u.type_02_error;

	/* Dump current vs. expected IOA/cache pairing so mismatched cache
	 * cards can be identified from the log. */
	ipr_err("-----Current Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_vpd(&error->ioa_vpids, error->ioa_sn);
	ipr_err("Adapter Card Information:\n");
	ipr_log_vpd(&error->cfc_vpids, error->cfc_sn);

	ipr_err("-----Expected Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpids,
		    error->ioa_last_attached_to_cfc_sn);
	ipr_err("Adapter Card Information:\n");
	ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpids,
		    error->cfc_last_attached_to_ioa_sn);

	ipr_err("Additional IOA Data: %08X %08X %08X\n",
		be32_to_cpu(error->ioa_data[0]),
		be32_to_cpu(error->ioa_data[1]),
		be32_to_cpu(error->ioa_data[2]));
934 * ipr_log_config_error - Log a configuration error.
935 * @ioa_cfg: ioa config struct
936 * @hostrcb: hostrcb struct
static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
				 struct ipr_hostrcb *hostrcb)
	int errors_logged, i;
	struct ipr_hostrcb_device_data_entry *dev_entry;
	struct ipr_hostrcb_type_03_error *error;

	error = &hostrcb->hcam.u.error.u.type_03_error;
	errors_logged = be32_to_cpu(error->errors_logged);

	ipr_err("Device Errors Detected/Logged: %d/%d\n",
		be32_to_cpu(error->errors_detected), errors_logged);

	dev_entry = error->dev_entry;

	/* One block of output per logged device error. */
	for (i = 0; i < errors_logged; i++, dev_entry++) {

		/* A bus number past the supported range marks a missing device. */
		if (dev_entry->dev_res_addr.bus >= IPR_MAX_NUM_BUSES) {
			ipr_err("Device %d: missing\n", i + 1);
			ipr_err("Device %d: %d:%d:%d:%d\n", i + 1,
				ioa_cfg->host->host_no, dev_entry->dev_res_addr.bus,
				dev_entry->dev_res_addr.target, dev_entry->dev_res_addr.lun);

		ipr_log_vpd(&dev_entry->dev_vpids, dev_entry->dev_sn);

		ipr_err("-----New Device Information-----\n");
		ipr_log_vpd(&dev_entry->new_dev_vpids, dev_entry->new_dev_sn);

		ipr_err("Cache Directory Card Information:\n");
		ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpids,
			    dev_entry->ioa_last_with_dev_sn);

		ipr_err("Adapter Card Information:\n");
		ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpids,
			    dev_entry->cfc_last_with_dev_sn);

		ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
			be32_to_cpu(dev_entry->ioa_data[0]),
			be32_to_cpu(dev_entry->ioa_data[1]),
			be32_to_cpu(dev_entry->ioa_data[2]),
			be32_to_cpu(dev_entry->ioa_data[3]),
			be32_to_cpu(dev_entry->ioa_data[4]));
989 * ipr_log_array_error - Log an array configuration error.
990 * @ioa_cfg: ioa config struct
991 * @hostrcb: hostrcb struct
996 static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
997 struct ipr_hostrcb *hostrcb)
1000 struct ipr_hostrcb_type_04_error *error;
1001 struct ipr_hostrcb_array_data_entry *array_entry;
1002 u8 zero_sn[IPR_SERIAL_NUM_LEN];
1004 memset(zero_sn, '0', IPR_SERIAL_NUM_LEN);
1006 error = &hostrcb->hcam.u.error.u.type_04_error;
1010 ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1011 error->protection_level,
1012 ioa_cfg->host->host_no,
1013 error->last_func_vset_res_addr.bus,
1014 error->last_func_vset_res_addr.target,
1015 error->last_func_vset_res_addr.lun);
1019 array_entry = error->array_member;
1021 for (i = 0; i < 18; i++) {
1022 if (!memcmp(array_entry->serial_num, zero_sn, IPR_SERIAL_NUM_LEN))
1025 if (error->exposed_mode_adn == i) {
1026 ipr_err("Exposed Array Member %d:\n", i);
1028 ipr_err("Array Member %d:\n", i);
1031 ipr_log_vpd(&array_entry->vpids, array_entry->serial_num);
1033 if (array_entry->dev_res_addr.bus >= IPR_MAX_NUM_BUSES) {
1034 ipr_err("Current Location: unknown\n");
1036 ipr_err("Current Location: %d:%d:%d:%d\n",
1037 ioa_cfg->host->host_no,
1038 array_entry->dev_res_addr.bus,
1039 array_entry->dev_res_addr.target,
1040 array_entry->dev_res_addr.lun);
1043 if (array_entry->dev_res_addr.bus >= IPR_MAX_NUM_BUSES) {
1044 ipr_err("Expected Location: unknown\n");
1046 ipr_err("Expected Location: %d:%d:%d:%d\n",
1047 ioa_cfg->host->host_no,
1048 array_entry->expected_dev_res_addr.bus,
1049 array_entry->expected_dev_res_addr.target,
1050 array_entry->expected_dev_res_addr.lun);
1056 array_entry = error->array_member2;
1063 * ipr_log_generic_error - Log an adapter error.
1064 * @ioa_cfg: ioa config struct
1065 * @hostrcb: hostrcb struct
static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
				  struct ipr_hostrcb *hostrcb)
	int ioa_data_len = be32_to_cpu(hostrcb->hcam.length);

	/* Nothing to dump. */
	if (ioa_data_len == 0)

	ipr_err("IOA Error Data:\n");
	ipr_err("Offset    0 1 2 3  4 5 6 7  8 9 A B  C D E F\n");

	/* Hex dump, four 32-bit words per line; offset printed in bytes.
	 * NOTE(review): a trailing partial group of words (when the word
	 * count is not a multiple of 4) is printed from beyond len -- the
	 * hostrcb buffer presumably covers it; confirm. */
	for (i = 0; i < ioa_data_len / 4; i += 4) {
		ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
			be32_to_cpu(hostrcb->hcam.u.raw.data[i]),
			be32_to_cpu(hostrcb->hcam.u.raw.data[i+1]),
			be32_to_cpu(hostrcb->hcam.u.raw.data[i+2]),
			be32_to_cpu(hostrcb->hcam.u.raw.data[i+3]));
1092 * ipr_get_error - Find the specfied IOASC in the ipr_error_table.
1095 * This function will return the index of into the ipr_error_table
1096 * for the specified IOASC. If the IOASC is not in the table,
1097 * 0 will be returned, which points to the entry used for unknown errors.
1100 * index into the ipr_error_table
static u32 ipr_get_error(u32 ioasc)
	/* Linear scan; entry 0 is the catch-all for unknown IOASCs. */
	for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
		if (ipr_error_table[i].ioasc == ioasc)
1114 * ipr_handle_log_data - Log an adapter error.
1115 * @ioa_cfg: ioa config struct
1116 * @hostrcb: hostrcb struct
1118 * This function logs an adapter error to the system.
static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_hostrcb *hostrcb)
	/* Only error-log notifications are handled here. */
	if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)

	if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
		dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");

	ioasc = be32_to_cpu(hostrcb->hcam.u.error.failing_dev_ioasc);

	if (ioasc == IPR_IOASC_BUS_WAS_RESET ||
	    ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER) {
		/* Tell the midlayer we had a bus reset so it will handle the UA properly */
		scsi_report_bus_reset(ioa_cfg->host,
				      hostrcb->hcam.u.error.failing_dev_res_addr.bus);

	error_index = ipr_get_error(ioasc);

	/* Table entry says this IOASC should not be logged. */
	if (!ipr_error_table[error_index].log_hcam)

	/* Prefix the message with the failing device when one is named. */
	if (ipr_is_device(&hostrcb->hcam.u.error.failing_dev_res_addr)) {
		ipr_res_err(ioa_cfg, hostrcb->hcam.u.error.failing_dev_res_addr,
			    "%s\n", ipr_error_table[error_index].error);
		dev_err(&ioa_cfg->pdev->dev, "%s\n",
			ipr_error_table[error_index].error);

	/* Set indication we have logged an error */
	ioa_cfg->errors_logged++;

	/* Below the default log level, skip the detailed overlay dump. */
	if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)

	/* Dispatch on the overlay format carried by this hostrcb. */
	switch (hostrcb->hcam.overlay_id) {
	case IPR_HOST_RCB_OVERLAY_ID_1:
		ipr_log_generic_error(ioa_cfg, hostrcb);
	case IPR_HOST_RCB_OVERLAY_ID_2:
		ipr_log_cache_error(ioa_cfg, hostrcb);
	case IPR_HOST_RCB_OVERLAY_ID_3:
		ipr_log_config_error(ioa_cfg, hostrcb);
	case IPR_HOST_RCB_OVERLAY_ID_4:
	case IPR_HOST_RCB_OVERLAY_ID_6:
		ipr_log_array_error(ioa_cfg, hostrcb);
	case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
		ipr_log_generic_error(ioa_cfg, hostrcb);
		dev_err(&ioa_cfg->pdev->dev,
			"Unknown error received. Overlay ID: %d\n",
			hostrcb->hcam.overlay_id);
1189 * ipr_process_error - Op done function for an adapter error log.
1190 * @ipr_cmd: ipr command struct
1192 * This function is the op done function for an error log host
1193 * controlled async from the adapter. It will log the error and
1194 * send the HCAM back to the adapter.
static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);

	list_del(&hostrcb->queue);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);	/* recycle the cmd block */

	/* Success: log the adapter error; failure: report it unless the
	 * HCAM was killed by an (expected) adapter reset. */
	ipr_handle_log_data(ioa_cfg, hostrcb);
	} else if (ioasc != IPR_IOASC_IOA_WAS_RESET) {
		dev_err(&ioa_cfg->pdev->dev,
			"Host RCB failed with IOASC: 0x%08X\n", ioasc);

	/* Re-arm the error-log HCAM with this hostrcb. */
	ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
1219 * ipr_timeout - An internally generated op has timed out.
1220 * @ipr_cmd: ipr command struct
1222 * This function blocks host requests and initiates an
static void ipr_timeout(struct ipr_cmnd *ipr_cmd)
	unsigned long lock_flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	/* Timer callback context: must take the host lock ourselves. */
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	ioa_cfg->errors_logged++;
	dev_err(&ioa_cfg->pdev->dev,
		"Adapter being reset due to command timeout.\n");

	/* If a dump was requested, collect it as part of this reset. */
	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
		ioa_cfg->sdt_state = GET_DUMP;

	/* Kick off a reset unless one is already in flight (except when the
	 * timed-out op IS the reset op itself). */
	if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1251 * ipr_reset_reload - Reset/Reload the IOA
1252 * @ioa_cfg: ioa config struct
1253 * @shutdown_type: shutdown type
1255 * This function resets the adapter and re-initializes it.
1256 * This function assumes that all new host commands have been stopped.
1260 static int ipr_reset_reload(struct ipr_ioa_cfg *ioa_cfg,
1261 enum ipr_shutdown_type shutdown_type)
/* Caller is expected to hold the host lock; avoid a double reset if one
 * is already in flight */
1263 if (!ioa_cfg->in_reset_reload)
1264 ipr_initiate_ioa_reset(ioa_cfg, shutdown_type)
/* Drop the lock while sleeping for reset completion, then reacquire */
1266 spin_unlock_irq(ioa_cfg->host->host_lock);
1267 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
1268 spin_lock_irq(ioa_cfg->host->host_lock);
1270 /* If we got hit with a host reset while we were already resetting
1271 the adapter for some reason, and the reset failed. */
1272 if (ioa_cfg->ioa_is_dead) {
1281 * ipr_find_ses_entry - Find matching SES in SES table
1282 * @res: resource entry struct of SES
1285 * pointer to SES table entry / NULL on failure
1287 static const struct ipr_ses_table_entry *
1288 ipr_find_ses_entry(struct ipr_resource_entry *res)
1291 const struct ipr_ses_table_entry *ste = ipr_ses_table;
/* Scan the static SES table, comparing the product ID byte-by-byte */
1293 for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
1294 for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
/* An 'X' in the compare mask requires an exact byte match; other mask
 * values presumably count as automatic matches — elided here, confirm
 * against the full source */
1295 if (ste->compare_product_id_byte[j] == 'X') {
1296 if (res->cfgte.std_inq_data.vpids.product_id[j] == ste->product_id[j])
/* All IPR_PROD_ID_LEN bytes matched: this table entry is the one */
1304 if (matches == IPR_PROD_ID_LEN)
1312 * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
1313 * @ioa_cfg: ioa config struct
1315 * @bus_width: bus width
1318 * SCSI bus speed in units of 100KHz, 1600 is 160 MHz
1319 * For a 2-byte wide SCSI bus, the maximum transfer speed is
1320 * twice the maximum transfer rate (e.g. for a wide enabled bus,
1321 * max 160MHz = max 320MB/sec).
1323 static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
1325 struct ipr_resource_entry *res;
1326 const struct ipr_ses_table_entry *ste;
/* Start from the theoretical maximum for this bus width; SES entries on
 * the bus may clamp it below */
1327 u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);
1329 /* Loop through each config table entry in the config table buffer */
1330 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
/* Only SES (enclosure) devices on the requested bus influence the limit */
1331 if (!(IPR_IS_SES_DEVICE(res->cfgte.std_inq_data)))
1334 if (bus != res->cfgte.res_addr.bus)
1337 if (!(ste = ipr_find_ses_entry(res)))
/* Convert the table's MB/sec-style limit into 100KHz units scaled by
 * bus width in bytes */
1340 max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
1343 return max_xfer_rate;
1347 * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
1348 * @ioa_cfg: ioa config struct
1349 * @max_delay: max delay in micro-seconds to wait
1351 * Waits for an IODEBUG ACK from the IOA, doing busy looping.
1354 * 0 on success / other on failure
1356 static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
/* volatile: value comes from an MMIO read and must not be cached */
1358 volatile u32 pcii_reg;
1361 /* Read interrupt reg until IOA signals IO Debug Acknowledge */
1362 while (delay < max_delay) {
1363 pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
1365 if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
1368 /* udelay cannot be used if delay is more than a few milliseconds */
1369 if ((delay / 1000) > MAX_UDELAY_MS)
1370 mdelay(delay / 1000);
1380 * ipr_get_ldump_data_section - Dump IOA memory
1381 * @ioa_cfg: ioa config struct
1382 * @start_addr: adapter address to dump
1383 * @dest: destination kernel buffer
1384 * @length_in_words: length to dump in 4 byte words
1387 * 0 on success / -EIO on failure
1389 static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
1391 u32 *dest, u32 length_in_words)
/* volatile: register snapshot from MMIO, must be re-read each poll */
1393 volatile u32 temp_pcii_reg;
1396 /* Write IOA interrupt reg starting LDUMP state */
1397 writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
1398 ioa_cfg->regs.set_uproc_interrupt_reg);
1400 /* Wait for IO debug acknowledge */
1401 if (ipr_wait_iodbg_ack(ioa_cfg,
1402 IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
1403 dev_err(&ioa_cfg->pdev->dev,
1404 "IOA dump long data transfer timeout\n");
1408 /* Signal LDUMP interlocked - clear IO debug ack */
1409 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
1410 ioa_cfg->regs.clr_interrupt_reg);
1412 /* Write Mailbox with starting address */
1413 writel(start_addr, ioa_cfg->ioa_mailbox);
1415 /* Signal address valid - clear IOA Reset alert */
1416 writel(IPR_UPROCI_RESET_ALERT,
1417 ioa_cfg->regs.clr_uproc_interrupt_reg);
/* Handshake loop: one 32-bit word per ack/clear cycle with the IOA */
1419 for (i = 0; i < length_in_words; i++) {
1420 /* Wait for IO debug acknowledge */
1421 if (ipr_wait_iodbg_ack(ioa_cfg,
1422 IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
1423 dev_err(&ioa_cfg->pdev->dev,
1424 "IOA dump short data transfer timeout\n");
1428 /* Read data from mailbox and increment destination pointer */
1429 *dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
1432 /* For all but the last word of data, signal data received */
1433 if (i < (length_in_words - 1)) {
1434 /* Signal dump data received - Clear IO debug Ack */
1435 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
1436 ioa_cfg->regs.clr_interrupt_reg);
1440 /* Signal end of block transfer. Set reset alert then clear IO debug ack */
1441 writel(IPR_UPROCI_RESET_ALERT,
1442 ioa_cfg->regs.set_uproc_interrupt_reg);
1444 writel(IPR_UPROCI_IO_DEBUG_ALERT,
1445 ioa_cfg->regs.clr_uproc_interrupt_reg);
1447 /* Signal dump data received - Clear IO debug Ack */
1448 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
1449 ioa_cfg->regs.clr_interrupt_reg);
1451 /* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
1452 while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
1454 readl(ioa_cfg->regs.sense_uproc_interrupt_reg);
1456 if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
1466 #ifdef CONFIG_SCSI_IPR_DUMP
1468 * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
1469 * @ioa_cfg: ioa config struct
1470 * @pci_address: adapter address
1471 * @length: length of data to copy
1473 * Copy data from PCI adapter to kernel buffer.
1474 * Note: length MUST be a 4 byte multiple
1476 * 0 on success / other on failure
1478 static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
1479 unsigned long pci_address, u32 length)
1481 int bytes_copied = 0;
1482 int cur_len, rc, rem_len, rem_page_len;
1484 unsigned long lock_flags = 0;
1485 struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;
/* Copy until the request is satisfied or the overall dump size cap hit */
1487 while (bytes_copied < length &&
1488 (ioa_dump->hdr.len + bytes_copied) < IPR_MAX_IOA_DUMP_SIZE) {
/* Grab a fresh page when the current one is full (or on first pass);
 * GFP_ATOMIC because this path cannot sleep for allocation */
1489 if (ioa_dump->page_offset >= PAGE_SIZE ||
1490 ioa_dump->page_offset == 0) {
1491 page = (u32 *)__get_free_page(GFP_ATOMIC);
/* Allocation failure: return what was copied so far rather than error */
1495 return bytes_copied;
1498 ioa_dump->page_offset = 0;
1499 ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
1500 ioa_dump->next_page_index++;
/* Otherwise continue filling the most recently allocated page */
1502 page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];
/* Copy the smaller of: bytes left overall vs. room left in this page */
1504 rem_len = length - bytes_copied;
1505 rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
1506 cur_len = min(rem_len, rem_page_len);
/* Hold the host lock across the hardware read; bail out if the dump
 * was aborted while we were copying */
1508 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1509 if (ioa_cfg->sdt_state == ABORT_DUMP) {
1512 rc = ipr_get_ldump_data_section(ioa_cfg,
1513 pci_address + bytes_copied,
1514 &page[ioa_dump->page_offset / 4],
1515 (cur_len / sizeof(u32)));
1517 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1520 ioa_dump->page_offset += cur_len;
1521 bytes_copied += cur_len;
1529 return bytes_copied;
1533 * ipr_init_dump_entry_hdr - Initialize a dump entry header.
1534 * @hdr: dump entry header struct
1539 static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
/* Stamp the common fields every dump entry header shares; callers fill
 * in len, data_type and id afterwards */
1541 hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
/* Data begins immediately after the header itself */
1543 hdr->offset = sizeof(*hdr);
1544 hdr->status = IPR_DUMP_STATUS_SUCCESS;
1548 * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
1549 * @ioa_cfg: ioa config struct
1550 * @driver_dump: driver dump struct
1555 static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
1556 struct ipr_driver_dump *driver_dump)
/* Firmware version comes from the cached inquiry page 3 VPD */
1558 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
1560 ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
/* Entry length excludes the header itself */
1561 driver_dump->ioa_type_entry.hdr.len =
1562 sizeof(struct ipr_dump_ioa_type_entry) -
1563 sizeof(struct ipr_dump_entry_header);
1564 driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
1565 driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
1566 driver_dump->ioa_type_entry.type = ioa_cfg->type;
/* Pack major/card-type/minor bytes into one 32-bit firmware version */
1567 driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
1568 (ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
1569 ucode_vpd->minor_release[1];
1570 driver_dump->hdr.num_entries++;
1574 * ipr_dump_version_data - Fill in the driver version in the dump.
1575 * @ioa_cfg: ioa config struct
1576 * @driver_dump: driver dump struct
1581 static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
1582 struct ipr_driver_dump *driver_dump)
1584 ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
/* Entry length excludes the header itself */
1585 driver_dump->version_entry.hdr.len =
1586 sizeof(struct ipr_dump_version_entry) -
1587 sizeof(struct ipr_dump_entry_header);
1588 driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
1589 driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
/* IPR_DRIVER_VERSION is a compile-time string; assumed to fit the
 * version field — sized together at build time */
1590 strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
1591 driver_dump->hdr.num_entries++;
1595 * ipr_dump_trace_data - Fill in the IOA trace in the dump.
1596 * @ioa_cfg: ioa config struct
1597 * @driver_dump: driver dump struct
1602 static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
1603 struct ipr_driver_dump *driver_dump)
1605 ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
/* Entry length excludes the header itself */
1606 driver_dump->trace_entry.hdr.len =
1607 sizeof(struct ipr_dump_trace_entry) -
1608 sizeof(struct ipr_dump_entry_header);
1609 driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
1610 driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
/* Snapshot the driver's in-memory trace buffer into the dump */
1611 memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
1612 driver_dump->hdr.num_entries++;
1616 * ipr_dump_location_data - Fill in the IOA location in the dump.
1617 * @ioa_cfg: ioa config struct
1618 * @driver_dump: driver dump struct
1623 static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
1624 struct ipr_driver_dump *driver_dump)
1626 ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
/* Entry length excludes the header itself */
1627 driver_dump->location_entry.hdr.len =
1628 sizeof(struct ipr_dump_location_entry) -
1629 sizeof(struct ipr_dump_entry_header);
1630 driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
1631 driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
/* Record the PCI device's bus id string as the adapter's location */
1632 strcpy(driver_dump->location_entry.location, ioa_cfg->pdev->dev.bus_id);
1633 driver_dump->hdr.num_entries++;
1637 * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
1638 * @ioa_cfg: ioa config struct
1639 * @dump: dump struct
1644 static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
1646 unsigned long start_addr, sdt_word;
1647 unsigned long lock_flags = 0;
1648 struct ipr_driver_dump *driver_dump = &dump->driver_dump;
1649 struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
1650 u32 num_entries, start_off, end_off;
1651 u32 bytes_to_copy, bytes_copied, rc;
1652 struct ipr_sdt *sdt;
1657 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
/* Only proceed when the adapter is actually in the GET_DUMP state */
1659 if (ioa_cfg->sdt_state != GET_DUMP) {
1660 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* The mailbox holds the adapter-side address of the smart dump table */
1664 start_addr = readl(ioa_cfg->ioa_mailbox);
1666 if (!ipr_sdt_is_fmt2(start_addr)) {
1667 dev_err(&ioa_cfg->pdev->dev,
1668 "Invalid dump table format: %lx\n", start_addr);
1669 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1673 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");
1675 driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;
1677 /* Initialize the overall dump header */
1678 driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
1679 driver_dump->hdr.num_entries = 1;
1680 driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
1681 driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
1682 driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
1683 driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;
/* Driver-side entries: version, location, adapter type, trace buffer */
1685 ipr_dump_version_data(ioa_cfg, driver_dump);
1686 ipr_dump_location_data(ioa_cfg, driver_dump);
1687 ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
1688 ipr_dump_trace_data(ioa_cfg, driver_dump);
1690 /* Update dump_header */
1691 driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);
1693 /* IOA Dump entry */
1694 ipr_init_dump_entry_hdr(&ioa_dump->hdr);
1695 ioa_dump->format = IPR_SDT_FMT2;
1696 ioa_dump->hdr.len = 0;
1697 ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
1698 ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;
1700 /* First entries in sdt are actually a list of dump addresses and
1701 lengths to gather the real dump data. sdt represents the pointer
1702 to the ioa generated dump table. Dump data will be extracted based
1703 on entries in this table */
1704 sdt = &ioa_dump->sdt;
1706 rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (u32 *)sdt,
1707 sizeof(struct ipr_sdt) / sizeof(u32));
1709 /* Smart Dump table is ready to use and the first entry is valid */
1710 if (rc || (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE)) {
1711 dev_err(&ioa_cfg->pdev->dev,
1712 "Dump of IOA failed. Dump table not valid: %d, %X.\n",
1713 rc, be32_to_cpu(sdt->hdr.state));
1714 driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
1715 ioa_cfg->sdt_state = DUMP_OBTAINED;
1716 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1720 num_entries = be32_to_cpu(sdt->hdr.num_entries_used);
/* Clamp to the driver's table capacity */
1722 if (num_entries > IPR_NUM_SDT_ENTRIES)
1723 num_entries = IPR_NUM_SDT_ENTRIES;
/* Drop the lock for the (long-running) data copy phase */
1725 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1727 for (i = 0; i < num_entries; i++) {
/* Overall size cap reached: mark the dump as partial and stop */
1728 if (ioa_dump->hdr.len > IPR_MAX_IOA_DUMP_SIZE) {
1729 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
1733 if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
1734 sdt_word = be32_to_cpu(sdt->entry[i].bar_str_offset);
1735 start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
1736 end_off = be32_to_cpu(sdt->entry[i].end_offset);
1738 if (ipr_sdt_is_fmt2(sdt_word) && sdt_word) {
1739 bytes_to_copy = end_off - start_off;
/* Skip entries whose size alone exceeds the dump cap */
1740 if (bytes_to_copy > IPR_MAX_IOA_DUMP_SIZE) {
1741 sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
1745 /* Copy data from adapter to driver buffers */
1746 bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
1749 ioa_dump->hdr.len += bytes_copied;
/* Short copy means we hit the cap or an abort: partial success */
1751 if (bytes_copied != bytes_to_copy) {
1752 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
1759 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");
1761 /* Update dump_header */
1762 driver_dump->hdr.len += ioa_dump->hdr.len;
1764 ioa_cfg->sdt_state = DUMP_OBTAINED;
/* CONFIG_SCSI_IPR_DUMP disabled: dump collection compiles away to a no-op */
1769 #define ipr_get_ioa_dump(ioa_cfg, dump) do { } while(0)
1773 * ipr_release_dump - Free adapter dump memory
1774 * @kref: kref struct
1779 static void ipr_release_dump(struct kref *kref)
/* kref release callback: invoked when the last reference to the dump
 * is dropped via kref_put() */
1781 struct ipr_dump *dump = container_of(kref,struct ipr_dump,kref);
1782 struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
1783 unsigned long lock_flags = 0;
/* Detach the dump from the adapter state under the host lock */
1787 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1788 ioa_cfg->dump = NULL;
1789 ioa_cfg->sdt_state = INACTIVE;
1790 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Free every page collected during the IOA data copy */
1792 for (i = 0; i < dump->ioa_dump.next_page_index; i++)
1793 free_page((unsigned long) dump->ioa_dump.ioa_data[i]);
1800 * ipr_worker_thread - Worker thread
1801 * @data: ioa config struct
1803 * Called at task level from a work thread. This function takes care
1804 * of adding and removing device from the mid-layer as configuration
1805 * changes are detected by the adapter.
1810 static void ipr_worker_thread(void *data)
1812 unsigned long lock_flags;
1813 struct ipr_resource_entry *res;
1814 struct scsi_device *sdev;
1815 struct ipr_dump *dump;
1816 struct ipr_ioa_cfg *ioa_cfg = data;
1817 u8 bus, target, lun;
1821 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
/* Dump requested: take a reference so the dump can't vanish while we
 * collect it with the lock dropped */
1823 if (ioa_cfg->sdt_state == GET_DUMP) {
1824 dump = ioa_cfg->dump;
1826 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1829 kref_get(&dump->kref);
1830 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1831 ipr_get_ioa_dump(ioa_cfg, dump);
1832 kref_put(&dump->kref, ipr_release_dump);
1834 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
/* A successful dump is followed by an adapter reset */
1835 if (ioa_cfg->sdt_state == DUMP_OBTAINED)
1836 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
1837 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Mid-layer add/remove is only safe when the adapter allows it */
1844 if (!ioa_cfg->allow_cmds || !ioa_cfg->allow_ml_add_del) {
1845 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Pass 1: remove devices flagged for deletion. scsi_remove_device can
 * sleep, so the host lock is dropped around it */
1849 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
1850 if (res->del_from_ml && res->sdev) {
1853 if (!scsi_device_get(sdev)) {
1855 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
1856 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1857 scsi_remove_device(sdev);
1858 scsi_device_put(sdev);
1859 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
/* Pass 2: surface newly discovered resources to the SCSI mid-layer;
 * address is captured before dropping the lock */
1866 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
1867 if (res->add_to_ml) {
1868 bus = res->cfgte.res_addr.bus;
1869 target = res->cfgte.res_addr.target;
1870 lun = res->cfgte.res_addr.lun;
1871 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1872 scsi_add_device(ioa_cfg->host, bus, target, lun);
1873 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1878 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1882 #ifdef CONFIG_SCSI_IPR_TRACE
1884 * ipr_read_trace - Dump the adapter trace
1885 * @kobj: kobject struct
1888 * @count: buffer size
1891 * number of bytes printed to buffer
1893 static ssize_t ipr_read_trace(struct kobject *kobj, char *buf,
1894 loff_t off, size_t count)
/* sysfs bin_attribute read: walk kobject back to the Scsi_Host */
1896 struct class_device *cdev = container_of(kobj,struct class_device,kobj);
1897 struct Scsi_Host *shost = class_to_shost(cdev);
1898 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
1899 unsigned long lock_flags = 0;
1900 int size = IPR_TRACE_SIZE;
1901 char *src = (char *)ioa_cfg->trace;
/* Clamp the request to the end of the trace buffer */
1905 if (off + count > size) {
/* Copy under the host lock so the trace is not mutated mid-read */
1910 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1911 memcpy(buf, &src[off], count);
1912 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Binary sysfs attribute exposing the adapter trace buffer (read-only) */
1916 static struct bin_attribute ipr_trace_attr = {
1922 .read = ipr_read_trace,
1927 * ipr_show_fw_version - Show the firmware version
1928 * @class_dev: class device struct
1932 * number of bytes printed to buffer
1934 static ssize_t ipr_show_fw_version(struct class_device *class_dev, char *buf)
1936 struct Scsi_Host *shost = class_to_shost(class_dev);
1937 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
/* Firmware version lives in the cached inquiry page 3 VPD */
1938 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
1939 unsigned long lock_flags = 0;
/* Format as 8 hex digits: major, card type, minor[0], minor[1] */
1942 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1943 len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
1944 ucode_vpd->major_release, ucode_vpd->card_type,
1945 ucode_vpd->minor_release[0],
1946 ucode_vpd->minor_release[1]);
1947 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* sysfs "fw_version" attribute: read-only firmware version string */
1951 static struct class_device_attribute ipr_fw_version_attr = {
1953 .name = "fw_version",
1956 .show = ipr_show_fw_version,
1960 * ipr_show_log_level - Show the adapter's error logging level
1961 * @class_dev: class device struct
1965 * number of bytes printed to buffer
1967 static ssize_t ipr_show_log_level(struct class_device *class_dev, char *buf)
1969 struct Scsi_Host *shost = class_to_shost(class_dev);
1970 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
1971 unsigned long lock_flags = 0;
/* Read the current log level under the host lock for a consistent view */
1974 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1975 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
1976 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1981 * ipr_store_log_level - Change the adapter's error logging level
1982 * @class_dev: class device struct
1986 * number of bytes printed to buffer
1988 static ssize_t ipr_store_log_level(struct class_device *class_dev,
1989 const char *buf, size_t count)
1991 struct Scsi_Host *shost = class_to_shost(class_dev);
1992 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
1993 unsigned long lock_flags = 0;
/* Parse the user's decimal value; no range check — any value accepted */
1995 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1996 ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
1997 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* sysfs "log_level" attribute: world-readable, root-writable */
2001 static struct class_device_attribute ipr_log_level_attr = {
2003 .name = "log_level",
2004 .mode = S_IRUGO | S_IWUSR,
2006 .show = ipr_show_log_level,
2007 .store = ipr_store_log_level
2011 * ipr_store_diagnostics - IOA Diagnostics interface
2012 * @class_dev: class_device struct
2014 * @count: buffer size
2016 * This function will reset the adapter and wait a reasonable
2017 * amount of time for any errors that the adapter might log.
2020 * count on success / other on failure
2022 static ssize_t ipr_store_diagnostics(struct class_device *class_dev,
2023 const char *buf, size_t count)
2025 struct Scsi_Host *shost = class_to_shost(class_dev);
2026 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2027 unsigned long lock_flags = 0;
/* Privileged operation: forces an adapter reset */
2030 if (!capable(CAP_SYS_ADMIN))
/* Let any in-flight reset finish before starting diagnostics */
2033 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2034 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
/* Clear the error counter so only errors from this reset are counted */
2035 ioa_cfg->errors_logged = 0;
2036 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2038 if (ioa_cfg->in_reset_reload) {
2039 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2040 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2042 /* Wait for a second for any errors to be logged */
2045 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Fail if the adapter is resetting again or logged any errors */
2049 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2050 if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
2052 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* sysfs "run_diagnostics" attribute: write-only trigger */
2057 static struct class_device_attribute ipr_diagnostics_attr = {
2059 .name = "run_diagnostics",
2062 .store = ipr_store_diagnostics
2066 * ipr_store_reset_adapter - Reset the adapter
2067 * @class_dev: class_device struct
2069 * @count: buffer size
2071 * This function will reset the adapter.
2074 * count on success / other on failure
2076 static ssize_t ipr_store_reset_adapter(struct class_device *class_dev,
2077 const char *buf, size_t count)
2079 struct Scsi_Host *shost = class_to_shost(class_dev);
2080 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2081 unsigned long lock_flags;
/* Privileged operation: forces an adapter reset */
2084 if (!capable(CAP_SYS_ADMIN))
/* Kick off a reset unless one is already in progress */
2087 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2088 if (!ioa_cfg->in_reset_reload)
2089 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2090 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Block the writer until the reset/reload cycle completes */
2091 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
/* sysfs "reset_host" attribute: write-only trigger */
2096 static struct class_device_attribute ipr_ioa_reset_attr = {
2098 .name = "reset_host",
2101 .store = ipr_store_reset_adapter
2105 * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
2106 * @buf_len: buffer length
2108 * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
2109 * list to use for microcode download
2112 * pointer to sglist / NULL on failure
2114 static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
2116 int sg_size, order, bsize_elem, num_elem, i, j;
2117 struct ipr_sglist *sglist;
2118 struct scatterlist *scatterlist;
2121 /* Get the minimum size per scatter/gather element */
2122 sg_size = buf_len / (IPR_MAX_SGLIST - 1);
2124 /* Get the actual size per element */
2125 order = get_order(sg_size);
2127 /* Determine the actual number of bytes per element */
2128 bsize_elem = PAGE_SIZE * (1 << order);
2130 /* Determine the actual number of sg entries needed */
2131 if (buf_len % bsize_elem)
2132 num_elem = (buf_len / bsize_elem) + 1;
2134 num_elem = buf_len / bsize_elem;
2136 /* Allocate a scatter/gather list for the DMA */
/* Struct has one embedded scatterlist entry, hence (num_elem - 1) extra */
2137 sglist = kmalloc(sizeof(struct ipr_sglist) +
2138 (sizeof(struct scatterlist) * (num_elem - 1)),
2141 if (sglist == NULL) {
2146 memset(sglist, 0, sizeof(struct ipr_sglist) +
2147 (sizeof(struct scatterlist) * (num_elem - 1)));
2149 scatterlist = sglist->scatterlist;
2151 sglist->order = order;
2152 sglist->num_sg = num_elem;
2154 /* Allocate a bunch of sg elements */
2155 for (i = 0; i < num_elem; i++) {
2156 page = alloc_pages(GFP_KERNEL, order);
2160 /* Free up what we already allocated */
/* Unwind in reverse so a partial failure leaks nothing */
2161 for (j = i - 1; j >= 0; j--)
2162 __free_pages(scatterlist[j].page, order);
2167 scatterlist[i].page = page;
2174 * ipr_free_ucode_buffer - Frees a microcode download buffer
2175 * @p_dnld: scatter/gather list pointer
2177 * Free a DMA'able ucode download buffer previously allocated with
2178 * ipr_alloc_ucode_buffer
2183 static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
/* Release every page group at the order recorded at allocation time */
2187 for (i = 0; i < sglist->num_sg; i++)
2188 __free_pages(sglist->scatterlist[i].page, sglist->order);
2194 * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
2195 * @sglist: scatter/gather list pointer
2196 * @buffer: buffer pointer
2197 * @len: buffer length
2199 * Copy a microcode image from a user buffer into a buffer allocated by
2200 * ipr_alloc_ucode_buffer
2203 * 0 on success / other on failure
2205 static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
2206 u8 *buffer, u32 len)
2208 int bsize_elem, i, result = 0;
2209 struct scatterlist *scatterlist;
2212 /* Determine the actual number of bytes per element */
2213 bsize_elem = PAGE_SIZE * (1 << sglist->order);
2215 scatterlist = sglist->scatterlist;
/* Copy full-sized elements first, advancing the source pointer each time;
 * kmap/kunmap because the pages may be highmem */
2217 for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
2218 kaddr = kmap(scatterlist[i].page);
2219 memcpy(kaddr, buffer, bsize_elem);
2220 kunmap(scatterlist[i].page);
2222 scatterlist[i].length = bsize_elem;
/* Copy the final partial element, if the length is not a multiple */
2230 if (len % bsize_elem) {
2231 kaddr = kmap(scatterlist[i].page);
2232 memcpy(kaddr, buffer, len % bsize_elem);
2233 kunmap(scatterlist[i].page);
2235 scatterlist[i].length = len % bsize_elem;
2238 sglist->buffer_len = len;
2243 * ipr_map_ucode_buffer - Map a microcode download buffer
2244 * @ipr_cmd: ipr command struct
2245 * @sglist: scatter/gather list
2246 * @len: total length of download buffer
2248 * Maps a microcode download scatter/gather list for DMA and
2252 * 0 on success / -EIO on failure
2254 static int ipr_map_ucode_buffer(struct ipr_cmnd *ipr_cmd,
2255 struct ipr_sglist *sglist, int len)
2257 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2258 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
2259 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
2260 struct scatterlist *scatterlist = sglist->scatterlist;
/* Map the sg list for host-to-adapter (write) DMA */
2263 ipr_cmd->dma_use_sg = pci_map_sg(ioa_cfg->pdev, scatterlist,
2264 sglist->num_sg, DMA_TO_DEVICE);
2266 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
2267 ioarcb->write_data_transfer_length = cpu_to_be32(len);
2268 ioarcb->write_ioadl_len =
2269 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
/* Build one IOADL descriptor per mapped sg entry (big-endian fields) */
2271 for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
2272 ioadl[i].flags_and_data_len =
2273 cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
2275 cpu_to_be32(sg_dma_address(&scatterlist[i]));
/* Mark the final descriptor; zero mapped entries means pci_map_sg failed */
2278 if (likely(ipr_cmd->dma_use_sg)) {
2279 ioadl[i-1].flags_and_data_len |=
2280 cpu_to_be32(IPR_IOADL_FLAGS_LAST);
2283 dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
2291 * ipr_store_update_fw - Update the firmware on the adapter
2292 * @class_dev: class_device struct
2294 * @count: buffer size
2296 * This function will update the firmware on the adapter.
2299 * count on success / other on failure
2301 static ssize_t ipr_store_update_fw(struct class_device *class_dev,
2302 const char *buf, size_t count)
2304 struct Scsi_Host *shost = class_to_shost(class_dev);
2305 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2306 struct ipr_ucode_image_header *image_hdr;
2307 const struct firmware *fw_entry;
2308 struct ipr_sglist *sglist;
2309 unsigned long lock_flags;
2312 int len, result, dnld_size;
/* Privileged operation: flashes adapter microcode */
2314 if (!capable(CAP_SYS_ADMIN))
/* Copy the filename from the sysfs buffer and strip the trailing newline */
2317 len = snprintf(fname, 99, "%s", buf);
2318 fname[len-1] = '\0';
2320 if(request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
2321 dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
2325 image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;
/* Sanity check the image: header must fit, and the image card type must
 * match this adapter (when the adapter reports one) */
2327 if (be32_to_cpu(image_hdr->header_length) > fw_entry->size ||
2328 (ioa_cfg->vpd_cbs->page3_data.card_type &&
2329 ioa_cfg->vpd_cbs->page3_data.card_type != image_hdr->card_type)) {
2330 dev_err(&ioa_cfg->pdev->dev, "Invalid microcode buffer\n");
2331 release_firmware(fw_entry);
/* Payload starts after the image header */
2335 src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
2336 dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
2337 sglist = ipr_alloc_ucode_buffer(dnld_size);
2340 dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
2341 release_firmware(fw_entry);
2345 result = ipr_copy_ucode_buffer(sglist, src, dnld_size);
2348 dev_err(&ioa_cfg->pdev->dev,
2349 "Microcode buffer copy to DMA buffer failed\n");
2350 ipr_free_ucode_buffer(sglist);
2351 release_firmware(fw_entry);
/* Only one download at a time: ucode_sglist doubles as the busy flag */
2355 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2357 if (ioa_cfg->ucode_sglist) {
2358 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2359 dev_err(&ioa_cfg->pdev->dev,
2360 "Microcode download already in progress\n");
2361 ipr_free_ucode_buffer(sglist);
2362 release_firmware(fw_entry);
/* The reset sequence performs the actual download; wait for it */
2366 ioa_cfg->ucode_sglist = sglist;
2367 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2368 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2369 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2371 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2372 ioa_cfg->ucode_sglist = NULL;
2373 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2375 ipr_free_ucode_buffer(sglist);
2376 release_firmware(fw_entry);
/* sysfs "update_fw" attribute: write-only microcode download trigger */
2381 static struct class_device_attribute ipr_update_fw_attr = {
2383 .name = "update_fw",
2386 .store = ipr_store_update_fw
/* All per-adapter sysfs attributes registered with the SCSI mid-layer */
2389 static struct class_device_attribute *ipr_ioa_attrs[] = {
2390 &ipr_fw_version_attr,
2391 &ipr_log_level_attr,
2392 &ipr_diagnostics_attr,
2393 &ipr_ioa_reset_attr,
2394 &ipr_update_fw_attr,
2398 #ifdef CONFIG_SCSI_IPR_DUMP
2400 * ipr_read_dump - Dump the adapter
2401 * @kobj: kobject struct
2404 * @count: buffer size
2407 * number of bytes printed to buffer
2409 static ssize_t ipr_read_dump(struct kobject *kobj, char *buf,
2410 loff_t off, size_t count)
2412 struct class_device *cdev = container_of(kobj,struct class_device,kobj);
2413 struct Scsi_Host *shost = class_to_shost(cdev);
2414 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2415 struct ipr_dump *dump;
2416 unsigned long lock_flags = 0;
/* Privileged: the dump may contain sensitive adapter state */
2421 if (!capable(CAP_SYS_ADMIN))
2424 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2425 dump = ioa_cfg->dump;
/* Nothing to read until a dump has actually been obtained */
2427 if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
2428 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Hold a reference while copying with the lock dropped */
2431 kref_get(&dump->kref);
2432 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Clamp the read window to the total dump length */
2434 if (off > dump->driver_dump.hdr.len) {
2435 kref_put(&dump->kref, ipr_release_dump);
2439 if (off + count > dump->driver_dump.hdr.len) {
2440 count = dump->driver_dump.hdr.len - off;
/* Region 1: the driver dump header/entries structure */
2444 if (count && off < sizeof(dump->driver_dump)) {
2445 if (off + count > sizeof(dump->driver_dump))
2446 len = sizeof(dump->driver_dump) - off;
2449 src = (u8 *)&dump->driver_dump + off;
2450 memcpy(buf, src, len);
2456 off -= sizeof(dump->driver_dump);
/* Region 2: the IOA dump struct up to the page-array payload */
2458 if (count && off < offsetof(struct ipr_ioa_dump, ioa_data)) {
2459 if (off + count > offsetof(struct ipr_ioa_dump, ioa_data))
2460 len = offsetof(struct ipr_ioa_dump, ioa_data) - off;
2463 src = (u8 *)&dump->ioa_dump + off;
2464 memcpy(buf, src, len);
2470 off -= offsetof(struct ipr_ioa_dump, ioa_data);
/* Region 3: the page-at-a-time IOA data; never cross a page boundary
 * within a single copy */
2473 if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
2474 len = PAGE_ALIGN(off) - off;
2477 src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
2478 src += off & ~PAGE_MASK;
2479 memcpy(buf, src, len);
2485 kref_put(&dump->kref, ipr_release_dump);
/*
 * Allocate and register an adapter dump control structure, then move
 * the SDT state machine to WAIT_FOR_DUMP.  If the adapter is already
 * dead and no dump has been taken yet, kick the work queue so the
 * dump is captured immediately.
 * NOTE(review): kmalloc()+memset(0) here is a kzalloc() candidate;
 * left untouched because surrounding lines are elided.
 */
2490 * ipr_alloc_dump - Prepare for adapter dump
2491 * @ioa_cfg: ioa config struct
2494 * 0 on success / other on failure
2496 static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
2498 struct ipr_dump *dump;
2499 unsigned long lock_flags = 0;
2502 dump = kmalloc(sizeof(struct ipr_dump), GFP_KERNEL);
2505 ipr_err("Dump memory allocation failed\n");
2509 memset(dump, 0, sizeof(struct ipr_dump));
2510 kref_init(&dump->kref);
2511 dump->ioa_cfg = ioa_cfg;
2513 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
/* A dump is already pending/obtained — do not clobber it */
2515 if (INACTIVE != ioa_cfg->sdt_state) {
2516 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2521 ioa_cfg->dump = dump;
2522 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
2523 if (ioa_cfg->ioa_is_dead && !ioa_cfg->dump_taken) {
2524 ioa_cfg->dump_taken = 1;
2525 schedule_work(&ioa_cfg->work_q);
2527 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/*
 * Detach the current dump from the adapter config under the host lock,
 * then drop the reference outside the lock; the final kref_put() frees
 * the dump via ipr_release_dump().
 */
2534 * ipr_free_dump - Free adapter dump memory
2535 * @ioa_cfg: ioa config struct
2538 * 0 on success / other on failure
2540 static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
2542 struct ipr_dump *dump;
2543 unsigned long lock_flags = 0;
2547 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2548 dump = ioa_cfg->dump;
2550 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2554 ioa_cfg->dump = NULL;
2555 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Put the adapter's reference; may free the dump */
2557 kref_put(&dump->kref, ipr_release_dump);
/*
 * sysfs bin_attribute write handler: writing '1' arms/allocates an
 * adapter dump (ipr_alloc_dump), writing '0' frees it (ipr_free_dump).
 * Requires CAP_SYS_ADMIN.
 * NOTE(review): the return paths and any other input handling are
 * elided from this excerpt.
 */
2564 * ipr_write_dump - Setup dump state of adapter
2565 * @kobj: kobject struct
2568 * @count: buffer size
2571 * number of bytes printed to buffer
2573 static ssize_t ipr_write_dump(struct kobject *kobj, char *buf,
2574 loff_t off, size_t count)
2576 struct class_device *cdev = container_of(kobj,struct class_device,kobj);
2577 struct Scsi_Host *shost = class_to_shost(cdev);
2578 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2581 if (!capable(CAP_SYS_ADMIN))
2585 rc = ipr_alloc_dump(ioa_cfg);
2586 else if (buf[0] == '0')
2587 rc = ipr_free_dump(ioa_cfg);
/*
 * Binary sysfs attribute ("dump") exposing the adapter dump; readable
 * and writable by root only (S_IRUSR | S_IWUSR).
 */
2597 static struct bin_attribute ipr_dump_attr = {
2600 .mode = S_IRUSR | S_IWUSR,
2603 .read = ipr_read_dump,
2604 .write = ipr_write_dump
/*
 * Stub used when CONFIG_SCSI_IPR_DUMP is not set: freeing a dump is a
 * no-op that always reports success.
 * NOTE(review): the trailing ';' after the function body is redundant
 * (pedantic-warning material) but harmless.
 */
2607 static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; };
/*
 * sysfs store handler that changes a device's queue depth.  The new
 * depth is recorded in the resource entry under the host lock, then
 * applied via scsi_adjust_queue_depth() after the lock is dropped.
 * Ordered tagging is requested only for generic SCSI devices that
 * currently have TCQ active.
 */
2611 * ipr_store_queue_depth - Change the device's queue depth
2612 * @dev: device struct
2616 * number of bytes printed to buffer
2618 static ssize_t ipr_store_queue_depth(struct device *dev,
2619 const char *buf, size_t count)
2621 struct scsi_device *sdev = to_scsi_device(dev);
2622 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
2623 struct ipr_resource_entry *res;
2624 int qdepth = simple_strtoul(buf, NULL, 10);
2626 unsigned long lock_flags = 0;
2627 ssize_t len = -ENXIO;
2629 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2630 res = (struct ipr_resource_entry *)sdev->hostdata;
2632 res->qdepth = qdepth;
2634 if (ipr_is_gscsi(res) && res->tcq_active)
2635 tagged = MSG_ORDERED_TAG;
2640 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Apply outside the lock — scsi_adjust_queue_depth may not be called atomically */
2641 scsi_adjust_queue_depth(sdev, tagged, qdepth);
/* Per-device "queue_depth" sysfs attribute (root read/write). */
2645 static struct device_attribute ipr_queue_depth_attr = {
2647 .name = "queue_depth",
2648 .mode = S_IRUSR | S_IWUSR,
2650 .store = ipr_store_queue_depth
/*
 * sysfs show handler: prints 1/0 depending on whether tagged command
 * queuing is active for this device (res->tcq_active), read under the
 * host lock.  Returns -ENXIO if the device has no resource entry
 * (len's initializer; the guard itself is elided from this excerpt).
 */
2654 * ipr_show_tcq_enable - Show if the device is enabled for tcqing
2655 * @dev: device struct
2659 * number of bytes printed to buffer
2661 static ssize_t ipr_show_tcq_enable(struct device *dev, char *buf)
2663 struct scsi_device *sdev = to_scsi_device(dev);
2664 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
2665 struct ipr_resource_entry *res;
2666 unsigned long lock_flags = 0;
2667 ssize_t len = -ENXIO;
2669 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2670 res = (struct ipr_resource_entry *)sdev->hostdata;
2672 len = snprintf(buf, PAGE_SIZE, "%d\n", res->tcq_active);
2673 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/*
 * sysfs store handler toggling tagged command queuing for a device.
 * Only generic SCSI devices that report tagged_supported may have TCQ
 * turned on (scsi_activate_tcq); otherwise it is deactivated.  The
 * "else if (tcq_active)" arm rejects enabling TCQ on devices that do
 * not support it (the rejection body is elided from this excerpt).
 */
2678 * ipr_store_tcq_enable - Change the device's TCQing state
2679 * @dev: device struct
2683 * number of bytes printed to buffer
2685 static ssize_t ipr_store_tcq_enable(struct device *dev,
2686 const char *buf, size_t count)
2688 struct scsi_device *sdev = to_scsi_device(dev);
2689 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
2690 struct ipr_resource_entry *res;
2691 unsigned long lock_flags = 0;
2692 int tcq_active = simple_strtoul(buf, NULL, 10);
2693 ssize_t len = -ENXIO;
2695 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2697 res = (struct ipr_resource_entry *)sdev->hostdata;
2700 if (ipr_is_gscsi(res) && sdev->tagged_supported) {
2702 res->tcq_active = 1;
2703 scsi_activate_tcq(sdev, res->qdepth);
2705 res->tcq_active = 0;
2706 scsi_deactivate_tcq(sdev, res->qdepth);
2710 } else if (tcq_active) {
2715 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Per-device "tcq_enable" sysfs attribute (root read/write). */
2719 static struct device_attribute ipr_tcqing_attr = {
2721 .name = "tcq_enable",
2722 .mode = S_IRUSR | S_IWUSR,
2724 .store = ipr_store_tcq_enable,
2725 .show = ipr_show_tcq_enable
/*
 * sysfs show handler: prints the adapter's resource handle for this
 * device as 8 hex digits, read under the host lock.
 * NOTE(review): res_handle is printed raw with "%08X"; if it is stored
 * big-endian (other code compares it with memcmp/be32_to_cpu) the
 * value shown on little-endian hosts is byte-swapped — confirm intent.
 */
2729 * ipr_show_adapter_handle - Show the adapter's resource handle for this device
2730 * @dev: device struct
2734 * number of bytes printed to buffer
2736 static ssize_t ipr_show_adapter_handle(struct device *dev, char *buf)
2738 struct scsi_device *sdev = to_scsi_device(dev);
2739 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
2740 struct ipr_resource_entry *res;
2741 unsigned long lock_flags = 0;
2742 ssize_t len = -ENXIO;
2744 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2745 res = (struct ipr_resource_entry *)sdev->hostdata;
2747 len = snprintf(buf, PAGE_SIZE, "%08X\n", res->cfgte.res_handle);
2748 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Per-device read-only "adapter_handle" sysfs attribute. */
2752 static struct device_attribute ipr_adapter_handle_attr = {
2754 .name = "adapter_handle",
2757 .show = ipr_show_adapter_handle
/*
 * Per-device sysfs attributes registered with the SCSI midlayer.
 * NOTE(review): the tcq_enable entry and the NULL terminator are not
 * visible in this excerpt — intervening lines were elided.
 */
2760 static struct device_attribute *ipr_dev_attrs[] = {
2761 &ipr_queue_depth_attr,
2763 &ipr_adapter_handle_attr,
/*
 * Report a fixed 128-head / 32-sector geometry so partitions land on
 * 4k boundaries; cylinders = capacity / (128 * 32) via sector_div()
 * (64-bit-safe division on sector_t).
 */
2768 * ipr_biosparam - Return the HSC mapping
2769 * @sdev: scsi device struct
2770 * @block_device: block device pointer
2771 * @capacity: capacity of the device
2772 * @parm: Array containing returned HSC values.
2774 * This function generates the HSC parms that fdisk uses.
2775 * We want to make sure we return something that places partitions
2776 * on 4k boundaries for best performance with the IOA.
2781 static int ipr_biosparam(struct scsi_device *sdev,
2782 struct block_device *block_device,
2783 sector_t capacity, int *parm)
2791 cylinders = capacity;
2792 sector_div(cylinders, (128 * 32));
2797 parm[2] = cylinders;
/*
 * SCSI midlayer slave_destroy hook: under the host lock, break the
 * link between the scsi_device and its ipr resource entry so stale
 * pointers are not used after the device goes away.
 */
2803 * ipr_slave_destroy - Unconfigure a SCSI device
2804 * @sdev: scsi device struct
2809 static void ipr_slave_destroy(struct scsi_device *sdev)
2811 struct ipr_resource_entry *res;
2812 struct ipr_ioa_cfg *ioa_cfg;
2813 unsigned long lock_flags = 0;
2815 ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
2817 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2818 res = (struct ipr_resource_entry *) sdev->hostdata;
2820 sdev->hostdata = NULL;
2823 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/*
 * SCSI midlayer slave_configure hook: tailor the scsi_device based on
 * the ipr resource type — AF DASD shows as TYPE_RAID, AF DASD/IOA
 * resources are forced to SCSI level 4, volume sets get a longer R/W
 * timeout, DASD devices are allowed to restart, and the stored queue
 * depth is applied (untagged: tag type 0).
 */
2827 * ipr_slave_configure - Configure a SCSI device
2828 * @sdev: scsi device struct
2830 * This function configures the specified scsi device.
2835 static int ipr_slave_configure(struct scsi_device *sdev)
2837 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
2838 struct ipr_resource_entry *res;
2839 unsigned long lock_flags = 0;
2841 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2842 res = sdev->hostdata;
2844 if (ipr_is_af_dasd_device(res))
2845 sdev->type = TYPE_RAID;
2846 if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res))
2847 sdev->scsi_level = 4;
2848 if (ipr_is_vset_device(res))
2849 sdev->timeout = IPR_VSET_RW_TIMEOUT;
2850 if (IPR_IS_DASD_DEVICE(res->cfgte.std_inq_data))
2851 sdev->allow_restart = 1;
2852 scsi_adjust_queue_depth(sdev, 0, res->qdepth);
2854 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/*
 * SCSI midlayer slave_alloc hook: walk the used-resource list under
 * the host lock and, on a bus/target/lun match, cache the resource
 * entry in sdev->hostdata for fast lookup in ipr_queuecommand.  Also
 * flags the resource as needing a sync-complete on next use.
 */
2859 * ipr_slave_alloc - Prepare for commands to a device.
2860 * @sdev: scsi device struct
2862 * This function saves a pointer to the resource entry
2863 * in the scsi device struct if the device exists. We
2864 * can then use this pointer in ipr_queuecommand when
2865 * handling new commands.
2870 static int ipr_slave_alloc(struct scsi_device *sdev)
2872 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
2873 struct ipr_resource_entry *res;
2874 unsigned long lock_flags;
2876 sdev->hostdata = NULL;
2878 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2880 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
2881 if ((res->cfgte.res_addr.bus == sdev->channel) &&
2882 (res->cfgte.res_addr.target == sdev->id) &&
2883 (res->cfgte.res_addr.lun == sdev->lun)) {
2887 sdev->hostdata = res;
2888 res->needs_sync_complete = 1;
2893 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/*
 * SCSI error-handler host reset: logs the event, arms dump capture if
 * one was being waited for (WAIT_FOR_DUMP -> GET_DUMP), then performs
 * a blocking reset/reload of the adapter with an abbreviated shutdown.
 */
2899 * ipr_eh_host_reset - Reset the host adapter
2900 * @scsi_cmd: scsi command struct
2905 static int ipr_eh_host_reset(struct scsi_cmnd * scsi_cmd)
2907 struct ipr_ioa_cfg *ioa_cfg;
2911 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
2913 dev_err(&ioa_cfg->pdev->dev,
2914 "Adapter being reset as a result of error recovery.\n");
2916 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2917 ioa_cfg->sdt_state = GET_DUMP;
2919 rc = ipr_reset_reload(ioa_cfg, IPR_SHUTDOWN_ABBREV);
/*
 * SCSI error-handler device reset: sends an IPR_RESET_DEVICE IOA
 * command to the target resource and blocks for completion.  Ops
 * already pending against the same res_handle have their done hooks
 * redirected to ipr_scsi_eh_done so the EH thread owns them.  Bails
 * out (FAILED, bodies elided here) while a reset/reload is in
 * progress or the adapter is dead.  Returns SUCCESS iff the IOASC
 * carries no sense key.
 */
2926 * ipr_eh_dev_reset - Reset the device
2927 * @scsi_cmd: scsi command struct
2929 * This function issues a device reset to the affected device.
2930 * A LUN reset will be sent to the device first. If that does
2931 * not work, a target reset will be sent.
2936 static int ipr_eh_dev_reset(struct scsi_cmnd * scsi_cmd)
2938 struct ipr_cmnd *ipr_cmd;
2939 struct ipr_ioa_cfg *ioa_cfg;
2940 struct ipr_resource_entry *res;
2941 struct ipr_cmd_pkt *cmd_pkt;
2945 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
2946 res = scsi_cmd->device->hostdata;
/* Only generic SCSI devices and volume sets can be device-reset */
2948 if (!res || (!ipr_is_gscsi(res) && !ipr_is_vset_device(res)))
2952 * If we are currently going through reset/reload, return failed. This will force the
2953 * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
2956 if (ioa_cfg->in_reset_reload)
2958 if (ioa_cfg->ioa_is_dead)
2961 list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
2962 if (ipr_cmd->ioarcb.res_handle == res->cfgte.res_handle) {
2963 if (ipr_cmd->scsi_cmd)
2964 ipr_cmd->done = ipr_scsi_eh_done;
2968 res->resetting_device = 1;
2970 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
2972 ipr_cmd->ioarcb.res_handle = res->cfgte.res_handle;
2973 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
2974 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
2975 cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
2977 ipr_sdev_err(scsi_cmd->device, "Resetting device\n");
2978 ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
2980 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
2982 res->resetting_device = 0;
2984 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
2987 return (IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS);
/*
 * Completion handler for a bus reset issued from ipr_abort_timeout:
 * reports the bus reset to the midlayer for the matching resource,
 * then either detaches from the sibling abort command (if it has not
 * completed) or invokes the abort's done hook to wake the sleeping EH
 * thread.  Finally returns the command block to the free queue.
 */
2991 * ipr_bus_reset_done - Op done function for bus reset.
2992 * @ipr_cmd: ipr command struct
2994 * This function is the op done function for a bus reset
2999 static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
3001 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
3002 struct ipr_resource_entry *res;
3005 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
/* res_handle is compared raw (memcmp), avoiding endian conversion */
3006 if (!memcmp(&res->cfgte.res_handle, &ipr_cmd->ioarcb.res_handle,
3007 sizeof(res->cfgte.res_handle))) {
3008 scsi_report_bus_reset(ioa_cfg->host, res->cfgte.res_addr.bus);
3014 * If abort has not completed, indicate the reset has, else call the
3015 * abort's done function to wake the sleeping eh thread
3017 if (ipr_cmd->sibling->sibling)
3018 ipr_cmd->sibling->sibling = NULL;
3020 ipr_cmd->sibling->done(ipr_cmd->sibling);
3022 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
/*
 * Timer handler fired when a cancel/abort op does not complete in
 * time: escalates to a bus reset (IPR_RESET_DEVICE with bus-reset
 * type) so the adapter frees the tied-up resources.  The abort and
 * reset commands cross-link via ->sibling so ipr_bus_reset_done can
 * coordinate with the sleeping EH thread.  Does nothing if the abort
 * already completed or a reset/reload is underway.
 */
3027 * ipr_abort_timeout - An abort task has timed out
3028 * @ipr_cmd: ipr command struct
3030 * This function handles when an abort task times out. If this
3031 * happens we issue a bus reset since we have resources tied
3032 * up that must be freed before returning to the midlayer.
3037 static void ipr_abort_timeout(struct ipr_cmnd *ipr_cmd)
3039 struct ipr_cmnd *reset_cmd;
3040 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
3041 struct ipr_cmd_pkt *cmd_pkt;
3042 unsigned long lock_flags = 0;
3045 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3046 if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
3047 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3051 ipr_sdev_err(ipr_cmd->u.sdev, "Abort timed out. Resetting bus\n");
3052 reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
3053 ipr_cmd->sibling = reset_cmd;
3054 reset_cmd->sibling = ipr_cmd;
3055 reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
3056 cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
3057 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
3058 cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
3059 cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;
3061 ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
3062 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/*
 * Abort a single op by sending IPR_CANCEL_ALL_REQUESTS for its
 * resource and blocking for completion (with ipr_abort_timeout as the
 * escalation path).  If the op is found on the pending queue its done
 * hook is first redirected to ipr_scsi_eh_done.  A BUS_WAS_RESET or
 * SYNC_REQUIRED IOASC afterwards means the abort timed out and a bus
 * reset was issued instead.  Returns SUCCESS iff no sense key is set.
 */
3067 * ipr_cancel_op - Cancel specified op
3068 * @scsi_cmd: scsi command struct
3070 * This function cancels specified op.
3075 static int ipr_cancel_op(struct scsi_cmnd * scsi_cmd)
3077 struct ipr_cmnd *ipr_cmd;
3078 struct ipr_ioa_cfg *ioa_cfg;
3079 struct ipr_resource_entry *res;
3080 struct ipr_cmd_pkt *cmd_pkt;
3085 ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
3086 res = scsi_cmd->device->hostdata;
3088 if (!res || (!ipr_is_gscsi(res) && !ipr_is_vset_device(res)))
3091 list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
3092 if (ipr_cmd->scsi_cmd == scsi_cmd) {
3093 ipr_cmd->done = ipr_scsi_eh_done;
3102 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
3103 ipr_cmd->ioarcb.res_handle = res->cfgte.res_handle;
3104 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
3105 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
3106 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
3107 ipr_cmd->u.sdev = scsi_cmd->device;
3109 ipr_sdev_err(scsi_cmd->device, "Aborting command: %02X\n", scsi_cmd->cmnd[0]);
3110 ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
3111 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3114 * If the abort task timed out and we sent a bus reset, we will get
3115 * one the following responses to the abort
3117 if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
3122 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3123 res->needs_sync_complete = 1;
3126 return (IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS);
/*
 * SCSI error-handler abort entry point: thin wrapper around
 * ipr_cancel_op.  Bails out early (return bodies elided in this
 * excerpt) while a reset/reload is in progress, when the adapter is
 * dead, or when the device has no resource entry.
 */
3130 * ipr_eh_abort - Abort a single op
3131 * @scsi_cmd: scsi command struct
3136 static int ipr_eh_abort(struct scsi_cmnd * scsi_cmd)
3138 struct ipr_ioa_cfg *ioa_cfg;
3141 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
3143 /* If we are currently going through reset/reload, return failed. This will force the
3144 mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
3145 reset to complete */
3146 if (ioa_cfg->in_reset_reload)
3148 if (ioa_cfg->ioa_is_dead)
3150 if (!scsi_cmd->device->hostdata)
3154 return ipr_cancel_op(scsi_cmd);
/*
 * Handle non-HRRQ ("other") interrupt causes.  A transition-to-
 * operational interrupt is masked, cleared, and the in-flight reset
 * job is advanced (ipr_reset_ioa_job).  Any other cause is treated as
 * a permanent IOA failure: log it, latch unit-check state, arm dump
 * capture if one was awaited, mask/clear all interrupts, and initiate
 * an adapter reset.
 */
3158 * ipr_handle_other_interrupt - Handle "other" interrupts
3159 * @ioa_cfg: ioa config struct
3160 * @int_reg: interrupt register
3163 * IRQ_NONE / IRQ_HANDLED
3165 static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
3166 volatile u32 int_reg)
3168 irqreturn_t rc = IRQ_HANDLED;
3170 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
3171 /* Mask the interrupt */
3172 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
3174 /* Clear the interrupt */
3175 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.clr_interrupt_reg);
/* Read back to flush the posted writes before proceeding */
3176 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
3178 list_del(&ioa_cfg->reset_cmd->queue);
3179 del_timer(&ioa_cfg->reset_cmd->timer);
3180 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
3182 if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
3183 ioa_cfg->ioa_unit_checked = 1;
3185 dev_err(&ioa_cfg->pdev->dev,
3186 "Permanent IOA failure. 0x%08X\n", int_reg);
3188 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
3189 ioa_cfg->sdt_state = GET_DUMP;
3191 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
3192 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
/*
 * Interrupt service routine.  Under the host lock: ignore if the
 * driver has interrupts disabled or the adapter asserted nothing;
 * otherwise drain the Host Request/Response Queue — each valid entry
 * (toggle bit matches) yields a command index whose ipr_cmnd is
 * dequeued, de-timered, and completed via its done hook.  An
 * out-of-range index is treated as fatal and triggers an adapter
 * reset.  When the queue pointer wraps, the expected toggle bit is
 * flipped.  After processing, the HRRQ-updated PCI interrupt is
 * cleared; anything left unhandled is passed to
 * ipr_handle_other_interrupt.
 */
3199 * ipr_isr - Interrupt service routine
3201 * @devp: pointer to ioa config struct
3202 * @regs: pt_regs struct
3205 * IRQ_NONE / IRQ_HANDLED
3207 static irqreturn_t ipr_isr(int irq, void *devp, struct pt_regs *regs)
3209 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
3210 unsigned long lock_flags = 0;
3211 volatile u32 int_reg, int_mask_reg;
3214 struct ipr_cmnd *ipr_cmd;
3215 irqreturn_t rc = IRQ_NONE;
3217 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3219 /* If interrupts are disabled, ignore the interrupt */
3220 if (!ioa_cfg->allow_interrupts) {
3221 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3225 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
3226 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
3228 /* If an interrupt on the adapter did not occur, ignore it */
3229 if (unlikely((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0)) {
3230 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Drain HRRQ entries whose toggle bit matches our expected phase */
3237 while ((be32_to_cpu(*ioa_cfg->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
3238 ioa_cfg->toggle_bit) {
3240 cmd_index = (be32_to_cpu(*ioa_cfg->hrrq_curr) &
3241 IPR_HRRQ_REQ_RESP_HANDLE_MASK) >> IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
3243 if (unlikely(cmd_index >= IPR_NUM_CMD_BLKS)) {
3244 ioa_cfg->errors_logged++;
3245 dev_err(&ioa_cfg->pdev->dev, "Invalid response handle from IOA\n");
3247 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
3248 ioa_cfg->sdt_state = GET_DUMP;
3250 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3251 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3255 ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
3257 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3259 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
3261 list_del(&ipr_cmd->queue);
3262 del_timer(&ipr_cmd->timer);
3263 ipr_cmd->done(ipr_cmd);
/* Advance the HRRQ cursor, wrapping and flipping the toggle bit */
3267 if (ioa_cfg->hrrq_curr < ioa_cfg->hrrq_end) {
3268 ioa_cfg->hrrq_curr++;
3270 ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
3271 ioa_cfg->toggle_bit ^= 1u;
3275 if (ipr_cmd != NULL) {
3276 /* Clear the PCI interrupt */
3277 writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg);
3278 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
3283 if (unlikely(rc == IRQ_NONE))
3284 rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
3286 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/*
 * Build the IOA data list (IOADL) for a midlayer command and DMA-map
 * its buffer.  Scatter/gather requests map the sg list with
 * pci_map_sg() and emit one IOADL descriptor per mapped segment,
 * setting read/write flags and transfer lengths in the IOARCB from
 * sc_data_direction; the last descriptor is tagged with
 * IPR_IOADL_FLAGS_LAST.  Non-sg requests map a single buffer with
 * pci_map_single() and emit one descriptor.  Mapping failures are
 * logged (error-return lines are elided from this excerpt).
 */
3291 * ipr_build_ioadl - Build a scatter/gather list and map the buffer
3292 * @ioa_cfg: ioa config struct
3293 * @ipr_cmd: ipr command struct
3296 * 0 on success / -1 on failure
3298 static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
3299 struct ipr_cmnd *ipr_cmd)
3302 struct scatterlist *sglist;
3304 u32 ioadl_flags = 0;
3305 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
3306 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3307 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
3309 length = scsi_cmd->request_bufflen;
3314 if (scsi_cmd->use_sg) {
3315 ipr_cmd->dma_use_sg = pci_map_sg(ioa_cfg->pdev,
3316 scsi_cmd->request_buffer,
3318 scsi_cmd->sc_data_direction);
3320 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
3321 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
3322 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3323 ioarcb->write_data_transfer_length = cpu_to_be32(length);
3324 ioarcb->write_ioadl_len =
3325 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
3326 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
3327 ioadl_flags = IPR_IOADL_FLAGS_READ;
3328 ioarcb->read_data_transfer_length = cpu_to_be32(length);
3329 ioarcb->read_ioadl_len =
3330 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
3333 sglist = scsi_cmd->request_buffer;
/* One IOADL descriptor per mapped sg segment */
3335 for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3336 ioadl[i].flags_and_data_len =
3337 cpu_to_be32(ioadl_flags | sg_dma_len(&sglist[i]));
3339 cpu_to_be32(sg_dma_address(&sglist[i]));
/* Tag the final descriptor so the IOA knows the list ends */
3342 if (likely(ipr_cmd->dma_use_sg)) {
3343 ioadl[i-1].flags_and_data_len |=
3344 cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3347 dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
/* Single-buffer (non-sg) path: one descriptor */
3349 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
3350 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
3351 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3352 ioarcb->write_data_transfer_length = cpu_to_be32(length);
3353 ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
3354 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
3355 ioadl_flags = IPR_IOADL_FLAGS_READ;
3356 ioarcb->read_data_transfer_length = cpu_to_be32(length);
3357 ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
3360 ipr_cmd->dma_handle = pci_map_single(ioa_cfg->pdev,
3361 scsi_cmd->request_buffer, length,
3362 scsi_cmd->sc_data_direction);
3364 if (likely(!pci_dma_mapping_error(ipr_cmd->dma_handle))) {
3365 ipr_cmd->dma_use_sg = 1;
3366 ioadl[0].flags_and_data_len =
3367 cpu_to_be32(ioadl_flags | length | IPR_IOADL_FLAGS_LAST);
3368 ioadl[0].address = cpu_to_be32(ipr_cmd->dma_handle);
3371 dev_err(&ioa_cfg->pdev->dev, "pci_map_single failed!\n");
/*
 * Map the midlayer's SPI queue-tag message for a command to the IPR
 * task-attribute flag: simple, head-of-queue, or ordered; untagged
 * when no tag message is populated.
 */
3378 * ipr_get_task_attributes - Translate SPI Q-Tag to task attributes
3379 * @scsi_cmd: scsi command struct
3384 static u8 ipr_get_task_attributes(struct scsi_cmnd *scsi_cmd)
3387 u8 rc = IPR_FLAGS_LO_UNTAGGED_TASK;
3389 if (scsi_populate_tag_msg(scsi_cmd, tag)) {
3391 case MSG_SIMPLE_TAG:
3392 rc = IPR_FLAGS_LO_SIMPLE_TASK;
3395 rc = IPR_FLAGS_LO_HEAD_OF_Q_TASK;
3397 case MSG_ORDERED_TAG:
3398 rc = IPR_FLAGS_LO_ORDERED_TASK;
/*
 * Final step of device ERP: if the request sense itself failed, mark
 * the command DID_ERROR; otherwise copy the gathered sense data into
 * the scsi_cmnd.  Unmaps the sg list, returns the command block to
 * the free queue, and completes the op back to the midlayer.
 */
3407 * ipr_erp_done - Process completion of ERP for a device
3408 * @ipr_cmd: ipr command struct
3410 * This function copies the sense buffer into the scsi_cmd
3411 * struct and pushes the scsi_done function.
3416 static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
3418 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
3419 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
3420 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
3421 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3423 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
3424 scsi_cmd->result |= (DID_ERROR << 16);
3425 ipr_sdev_err(scsi_cmd->device,
3426 "Request Sense failed with IOASC: 0x%08X\n", ioasc);
3428 memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
3429 SCSI_SENSE_BUFFERSIZE);
3433 res->needs_sync_complete = 1;
3436 ipr_unmap_sglist(ioa_cfg, ipr_cmd);
3437 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3438 scsi_cmd->scsi_done(scsi_cmd);
/*
 * Scrub an ipr_cmnd so it can be reused for an ERP request: zero the
 * command packet and all transfer-length / IOADL-length fields in the
 * IOARCB, and clear the residual count in the IOASA.
 */
3442 * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
3443 * @ipr_cmd: ipr command struct
3448 static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
3450 struct ipr_ioarcb *ioarcb;
3451 struct ipr_ioasa *ioasa;
3453 ioarcb = &ipr_cmd->ioarcb;
3454 ioasa = &ipr_cmd->ioasa;
3456 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
3457 ioarcb->write_data_transfer_length = 0;
3458 ioarcb->read_data_transfer_length = 0;
3459 ioarcb->write_ioadl_len = 0;
3460 ioarcb->read_ioadl_len = 0;
3462 ioasa->residual_data_len = 0;
/*
 * Issue a REQUEST SENSE to the device as ERP for a check condition.
 * If the preceding ERP step already failed with a sense key, skip
 * straight to ipr_erp_done.  Otherwise reinit the command block,
 * build a SCSI CDB with sync-override and no-underlength-check flags,
 * point a single read IOADL at the command's DMA sense buffer, and
 * fire it with ipr_erp_done as the completion (double timeout for
 * slack).
 */
3466 * ipr_erp_request_sense - Send request sense to a device
3467 * @ipr_cmd: ipr command struct
3469 * This function sends a request sense to a device as a result
3470 * of a check condition.
3475 static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
3477 struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
3478 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3480 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
3481 ipr_erp_done(ipr_cmd);
3485 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
3487 cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
3488 cmd_pkt->cdb[0] = REQUEST_SENSE;
3489 cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
3490 cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
3491 cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
3492 cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);
3494 ipr_cmd->ioadl[0].flags_and_data_len =
3495 cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | SCSI_SENSE_BUFFERSIZE);
3496 ipr_cmd->ioadl[0].address =
3497 cpu_to_be32(ipr_cmd->sense_buffer_dma);
3499 ipr_cmd->ioarcb.read_ioadl_len =
3500 cpu_to_be32(sizeof(struct ipr_ioadl_desc));
3501 ipr_cmd->ioarcb.read_data_transfer_length =
3502 cpu_to_be32(SCSI_SENSE_BUFFERSIZE);
3504 ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
3505 IPR_REQUEST_SENSE_TIMEOUT * 2);
/*
 * First ERP step for a check condition on a TCQ device: send
 * IPR_CANCEL_ALL_REQUESTS to recover ops dropped by QERR=1, then
 * chain to ipr_erp_request_sense on completion.  Devices without TCQ
 * active skip directly to the request sense.
 */
3509 * ipr_erp_cancel_all - Send cancel all to a device
3510 * @ipr_cmd: ipr command struct
3512 * This function sends a cancel all to a device to clear the
3513 * queue. If we are running TCQ on the device, QERR is set to 1,
3514 * which means all outstanding ops have been dropped on the floor.
3515 * Cancel all will return them to us.
3520 static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
3522 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
3523 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
3524 struct ipr_cmd_pkt *cmd_pkt;
3528 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
3530 if (!res->tcq_active) {
3531 ipr_erp_request_sense(ipr_cmd);
3535 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
3536 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
3537 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
3539 ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
3540 IPR_CANCEL_ALL_TIMEOUT);
/*
 * Log the IOASA for a failed op, subject to the driver log level and
 * the error table: below the default level nothing is logged; below
 * max level, errors the IOA already logged (ilid != 0) or that the
 * table marks log_ioasa==0 are suppressed.  For GPDD errors, also
 * decodes the device end state and bus phase.  Finally hex-dumps the
 * IOASA, clamped to sizeof(struct ipr_ioasa).
 */
3544 * ipr_dump_ioasa - Dump contents of IOASA
3545 * @ioa_cfg: ioa config struct
3546 * @ipr_cmd: ipr command struct
3548 * This function is invoked by the interrupt handler when ops
3549 * fail. It will log the IOASA if appropriate. Only called
3555 static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
3556 struct ipr_cmnd *ipr_cmd)
3561 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
3562 u32 *ioasa_data = (u32 *)ioasa;
3565 ioasc = be32_to_cpu(ioasa->ioasc) & IPR_IOASC_IOASC_MASK;
3570 if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
3573 error_index = ipr_get_error(ioasc);
3575 if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
3576 /* Don't log an error if the IOA already logged one */
3577 if (ioasa->ilid != 0)
3580 if (ipr_error_table[error_index].log_ioasa == 0)
3584 ipr_sdev_err(ipr_cmd->scsi_cmd->device, "%s\n",
3585 ipr_error_table[error_index].error)\u003b
3587 if ((ioasa->u.gpdd.end_state <= ARRAY_SIZE(ipr_gpdd_dev_end_states)) &&
3588 (ioasa->u.gpdd.bus_phase <= ARRAY_SIZE(ipr_gpdd_dev_bus_phases))) {
3589 ipr_sdev_err(ipr_cmd->scsi_cmd->device,
3590 "Device End state: %s Phase: %s\n",
3591 ipr_gpdd_dev_end_states[ioasa->u.gpdd.end_state],
3592 ipr_gpdd_dev_bus_phases[ioasa->u.gpdd.bus_phase]);
/* Clamp dump length to the IOASA structure size */
3595 if (sizeof(struct ipr_ioasa) < be16_to_cpu(ioasa->ret_stat_len))
3596 data_len = sizeof(struct ipr_ioasa);
3598 data_len = be16_to_cpu(ioasa->ret_stat_len);
3600 ipr_err("IOASA Dump:\n");
3602 for (i = 0; i < data_len / 4; i += 4) {
3603 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
3604 be32_to_cpu(ioasa_data[i]),
3605 be32_to_cpu(ioasa_data[i+1]),
3606 be32_to_cpu(ioasa_data[i+2]),
3607 be32_to_cpu(ioasa_data[i+3]));
/*
 * Synthesize SCSI sense data in the command's sense buffer from the
 * IOASA.  Driver-generated IOASCs are skipped (return elided).  A
 * volume set reporting a do-not-reallocate media error with a 64-bit
 * failing LBA gets descriptor-format sense (0x72) carrying the full
 * 8-byte LBA; everything else gets fixed-format sense (0x70) with
 * key/ASC/ASCQ from the IOASC, a field-pointer descriptor for illegal
 * requests (key 0x05), or the failing LBA in the information bytes
 * for media errors.
 */
3612 * ipr_gen_sense - Generate SCSI sense data from an IOASA
3614 * @sense_buf: sense data buffer
3619 static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
3622 u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
3623 struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
3624 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
3625 u32 ioasc = be32_to_cpu(ioasa->ioasc);
3627 memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
3629 if (ioasc >= IPR_FIRST_DRIVER_IOASC)
3632 ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;
/* 64-bit failing LBA on a vset: use descriptor-format sense */
3634 if (ipr_is_vset_device(res) &&
3635 ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
3636 ioasa->u.vset.failing_lba_hi != 0) {
3637 sense_buf[0] = 0x72;
3638 sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
3639 sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
3640 sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);
3644 sense_buf[9] = 0x0A;
3645 sense_buf[10] = 0x80;
3647 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);
3649 sense_buf[12] = (failing_lba & 0xff000000) >> 24;
3650 sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
3651 sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
3652 sense_buf[15] = failing_lba & 0x000000ff;
3654 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
3656 sense_buf[16] = (failing_lba & 0xff000000) >> 24;
3657 sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
3658 sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
3659 sense_buf[19] = failing_lba & 0x000000ff;
/* Fixed-format (0x70) sense for everything else */
3661 sense_buf[0] = 0x70;
3662 sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
3663 sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
3664 sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);
3666 /* Illegal request */
3667 if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
3668 (be32_to_cpu(ioasa->ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
3669 sense_buf[7] = 10; /* additional length */
3671 /* IOARCB was in error */
3672 if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
3673 sense_buf[15] = 0xC0;
3674 else /* Parameter data was invalid */
3675 sense_buf[15] = 0x80;
3678 ((IPR_FIELD_POINTER_MASK &
3679 be32_to_cpu(ioasa->ioasc_specific)) >> 8) & 0xff;
3681 (IPR_FIELD_POINTER_MASK &
3682 be32_to_cpu(ioasa->ioasc_specific)) & 0xff;
3684 if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
3685 if (ipr_is_vset_device(res))
3686 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
3688 failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);
3690 sense_buf[0] |= 0x80; /* Or in the Valid bit */
3691 sense_buf[3] = (failing_lba & 0xff000000) >> 24;
3692 sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
3693 sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
3694 sense_buf[6] = failing_lba & 0x000000ff;
3697 sense_buf[7] = 6; /* additional length */
/*
 * Decide how to handle an op that completed with an error IOASC:
 * dump the IOASA for generic SCSI devices, synthesize sense data,
 * then map the masked IOASC to a midlayer result — retry, no-connect,
 * passthrough, or error — setting needs_sync_complete where the
 * device state warrants it.  A check-condition device status kicks
 * off full ERP (ipr_erp_cancel_all) and returns without completing
 * the command; all other paths unmap, free the block, and call
 * scsi_done.
 */
3703 * ipr_erp_start - Process an error response for a SCSI op
3704 * @ioa_cfg: ioa config struct
3705 * @ipr_cmd: ipr command struct
3707 * This function determines whether or not to initiate ERP
3708 * on the affected device.
3713 static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
3714 struct ipr_cmnd *ipr_cmd)
3716 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
3717 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
3718 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3721 ipr_scsi_eh_done(ipr_cmd);
3725 if (ipr_is_gscsi(res))
3726 ipr_dump_ioasa(ioa_cfg, ipr_cmd);
3728 ipr_gen_sense(ipr_cmd);
3730 switch (ioasc & IPR_IOASC_IOASC_MASK) {
3731 case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
3732 scsi_cmd->result |= (DID_IMM_RETRY << 16);
3734 case IPR_IOASC_IR_RESOURCE_HANDLE:
3735 scsi_cmd->result |= (DID_NO_CONNECT << 16);
3737 case IPR_IOASC_HW_SEL_TIMEOUT:
3738 scsi_cmd->result |= (DID_NO_CONNECT << 16);
3739 res->needs_sync_complete = 1;
3741 case IPR_IOASC_SYNC_REQUIRED:
3743 res->needs_sync_complete = 1;
3744 scsi_cmd->result |= (DID_IMM_RETRY << 16);
3746 case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
3747 scsi_cmd->result |= (DID_PASSTHROUGH << 16);
3749 case IPR_IOASC_BUS_WAS_RESET:
3750 case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
3752 * Report the bus reset and ask for a retry. The device
3753 * will give CC/UA the next command.
3755 if (!res->resetting_device)
3756 scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
3757 scsi_cmd->result |= (DID_ERROR << 16);
3758 res->needs_sync_complete = 1;
3760 case IPR_IOASC_HW_DEV_BUS_STATUS:
3761 scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
/* Check condition: start full ERP; command completes later */
3762 if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
3763 ipr_erp_cancel_all(ipr_cmd);
3766 res->needs_sync_complete = 1;
3768 case IPR_IOASC_NR_INIT_CMD_REQUIRED:
3771 scsi_cmd->result |= (DID_ERROR << 16);
3772 if (!ipr_is_vset_device(res))
3773 res->needs_sync_complete = 1;
3777 ipr_unmap_sglist(ioa_cfg, ipr_cmd);
3778 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3779 scsi_cmd->scsi_done(scsi_cmd);
/*
 * Completion path for midlayer ops: propagate the residual count, and
 * on success (no sense key in the IOASC) unmap, free the command
 * block, and complete; on failure hand off to ipr_erp_start.
 */
3783 * ipr_scsi_done - mid-layer done function
3784 * @ipr_cmd: ipr command struct
3786 * This function is invoked by the interrupt handler for
3787 * ops generated by the SCSI mid-layer
3792 static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
3794 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
3795 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
3796 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3798 scsi_cmd->resid = be32_to_cpu(ipr_cmd->ioasa.residual_data_len);
3800 if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
3801 ipr_unmap_sglist(ioa_cfg, ipr_cmd);
3802 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3803 scsi_cmd->scsi_done(scsi_cmd);
3805 ipr_erp_start(ioa_cfg, ipr_cmd);
3809 * ipr_save_ioafp_mode_select - Save adapters mode select data
3810 * @ioa_cfg: ioa config struct
3811 * @scsi_cmd: scsi command struct
3813 * This function saves mode select data for the adapter to
3814 * use following an adapter reset.
3817 * 0 on success / SCSI_MLQUEUE_HOST_BUSY on failure
3819 static int ipr_save_ioafp_mode_select(struct ipr_ioa_cfg *ioa_cfg,
3820 struct scsi_cmnd *scsi_cmd)
/* Allocate the save buffer lazily, on the first IOAFP mode select. */
3822 if (!ioa_cfg->saved_mode_pages) {
3823 ioa_cfg->saved_mode_pages = kmalloc(sizeof(struct ipr_mode_pages),
3825 if (!ioa_cfg->saved_mode_pages) {
3826 dev_err(&ioa_cfg->pdev->dev,
3827 "IOA mode select buffer allocation failed\n");
3828 return SCSI_MLQUEUE_HOST_BUSY;
/*
 * NOTE(review): cmnd[4] (MODE SELECT allocation length, up to 255) is
 * used as the copy length with no check against
 * sizeof(struct ipr_mode_pages) — confirm the struct is >= 255 bytes
 * or add a bound here.
 */
3832 memcpy(ioa_cfg->saved_mode_pages, scsi_cmd->buffer, scsi_cmd->cmnd[4]);
3833 ioa_cfg->saved_mode_page_len = scsi_cmd->cmnd[4];
3838 * ipr_queuecommand - Queue a mid-layer request
3839 * @scsi_cmd: scsi command struct
3840 * @done: done function
3842 * This function queues a request generated by the mid-layer.
3844 * Called with the host lock held (SCSI mid-layer queuecommand contract).
3846 * SCSI_MLQUEUE_DEVICE_BUSY if device is busy
3847 * SCSI_MLQUEUE_HOST_BUSY if host is busy
3849 static int ipr_queuecommand(struct scsi_cmnd *scsi_cmd,
3850 void (*done) (struct scsi_cmnd *))
3852 struct ipr_ioa_cfg *ioa_cfg;
3853 struct ipr_resource_entry *res;
3854 struct ipr_ioarcb *ioarcb;
3855 struct ipr_cmnd *ipr_cmd;
3858 scsi_cmd->scsi_done = done;
3859 ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
3860 res = scsi_cmd->device->hostdata;
3861 scsi_cmd->result = (DID_OK << 16);
3864 * We are currently blocking all devices due to a host reset
3865 * We have told the host to stop giving us new requests, but
3866 * ERP ops don't count. FIXME
3868 if (unlikely(!ioa_cfg->allow_cmds && !ioa_cfg->ioa_is_dead))
3869 return SCSI_MLQUEUE_HOST_BUSY;
3872 * FIXME - Create scsi_set_host_offline interface
3873 * and the ioa_is_dead check can be removed
3875 if (unlikely(ioa_cfg->ioa_is_dead || !res)) {
/* Dead adapter or unknown device: fail the op immediately. */
3876 memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
3877 scsi_cmd->result = (DID_NO_CONNECT << 16);
3878 scsi_cmd->scsi_done(scsi_cmd);
/* Grab a free command block and build the IOARCB for this op. */
3882 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
3883 ioarcb = &ipr_cmd->ioarcb;
3884 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
3886 memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
3887 ipr_cmd->scsi_cmd = scsi_cmd;
3888 ioarcb->res_handle = res->cfgte.res_handle;
3889 ipr_cmd->done = ipr_scsi_done;
3890 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_PHYS_LOC(res->cfgte.res_addr));
3892 if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
/* No underflow expected: tell the IOA to skip the length check. */
3893 if (scsi_cmd->underflow == 0)
3894 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
3896 if (res->needs_sync_complete) {
3897 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
3898 res->needs_sync_complete = 0;
3901 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
3902 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
3903 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
3904 ioarcb->cmd_pkt.flags_lo |= ipr_get_task_attributes(scsi_cmd);
/* Vendor-unique CDB opcodes (>= 0xC0) to non-GSCSI go as IOA commands. */
3907 if (!ipr_is_gscsi(res) && scsi_cmd->cmnd[0] >= 0xC0)
3908 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
3910 if (ipr_is_ioa_resource(res) && scsi_cmd->cmnd[0] == MODE_SELECT)
3911 rc = ipr_save_ioafp_mode_select(ioa_cfg, scsi_cmd);
3913 if (likely(rc == 0))
3914 rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
3916 if (likely(rc == 0)) {
/* Kick the op to the adapter by writing the IOARCB bus address. */
3918 writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
3919 ioa_cfg->regs.ioarrin_reg);
/* Failure path: return the command block and ask the mid-layer to retry. */
3921 list_move_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3922 return SCSI_MLQUEUE_HOST_BUSY;
3929 * ipr_info - Get information about the card/driver
3930 * @scsi_host: scsi host struct
3933 * pointer to buffer with description string
3935 static const char * ipr_ioa_info(struct Scsi_Host *host)
/* Static buffer: serialized by host_lock below, but shared across hosts. */
3937 static char buffer[512];
3938 struct ipr_ioa_cfg *ioa_cfg;
3939 unsigned long lock_flags = 0;
3941 ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;
3943 spin_lock_irqsave(host->host_lock, lock_flags);
3944 sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
3945 spin_unlock_irqrestore(host->host_lock, lock_flags);
/* SCSI host template: entry points and limits registered with the mid-layer. */
3950 static struct scsi_host_template driver_template = {
3951 .module = THIS_MODULE,
3953 .info = ipr_ioa_info,
3954 .queuecommand = ipr_queuecommand,
3955 .eh_abort_handler = ipr_eh_abort,
3956 .eh_device_reset_handler = ipr_eh_dev_reset,
3957 .eh_host_reset_handler = ipr_eh_host_reset,
3958 .slave_alloc = ipr_slave_alloc,
3959 .slave_configure = ipr_slave_configure,
3960 .slave_destroy = ipr_slave_destroy,
3961 .bios_param = ipr_biosparam,
3962 .can_queue = IPR_MAX_COMMANDS,
3964 .sg_tablesize = IPR_MAX_SGLIST,
3965 .max_sectors = IPR_MAX_SECTORS,
3966 .cmd_per_lun = IPR_MAX_CMD_PER_LUN,
3967 .use_clustering = ENABLE_CLUSTERING,
3968 .shost_attrs = ipr_ioa_attrs,
3969 .sdev_attrs = ipr_dev_attrs,
3970 .proc_name = IPR_NAME
3973 #ifdef CONFIG_PPC_PSERIES
3974 static const u16 ipr_blocked_processors[] = {
3986 * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
3987 * @ioa_cfg: ioa cfg struct
3989 * Adapters that use Gemstone revision < 3.1 do not work reliably on
3990 * certain pSeries hardware. This function determines if the given
3991 * adapter is in one of these confgurations or not.
3994 * 1 if adapter is not supported / 0 if adapter is supported
3996 static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
/* Only 5702-type adapters are affected; check PCI revision first. */
4001 if (ioa_cfg->type == 0x5702) {
4002 if (pci_read_config_byte(ioa_cfg->pdev, PCI_REVISION_ID,
4003 &rev_id) == PCIBIOS_SUCCESSFUL) {
/* Scan the blocked-processor table for the running CPU model. */
4005 for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++){
4006 if (__is_processor(ipr_blocked_processors[i]))
/* Non-pSeries builds: no blocked combinations, always supported. */
4015 #define ipr_invalid_adapter(ioa_cfg) 0
4019 * ipr_ioa_bringdown_done - IOA bring down completion.
4020 * @ipr_cmd: ipr command struct
4022 * This function processes the completion of an adapter bring down.
4023 * It wakes any reset sleepers.
4026 * Called with host_lock held; returns IPR_RC_JOB_RETURN.
4028 static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
4030 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4033 ioa_cfg->in_reset_reload = 0;
4034 ioa_cfg->reset_retries = 0;
4035 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4036 wake_up_all(&ioa_cfg->reset_wait_q);
/* Drop the lock: scsi_unblock_requests may re-enter the driver. */
4038 spin_unlock_irq(ioa_cfg->host->host_lock);
4039 scsi_unblock_requests(ioa_cfg->host);
4040 spin_lock_irq(ioa_cfg->host->host_lock);
4043 return IPR_RC_JOB_RETURN;
4047 * ipr_ioa_reset_done - IOA reset completion.
4048 * @ipr_cmd: ipr command struct
4050 * This function processes the completion of an adapter reset.
4051 * It schedules any necessary mid-layer add/removes and
4052 * wakes any reset sleepers.
4055 * Called with host_lock held; returns IPR_RC_JOB_RETURN.
4057 static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
4059 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4060 struct ipr_resource_entry *res;
4061 struct ipr_hostrcb *hostrcb, *temp;
4065 ioa_cfg->in_reset_reload = 0;
4066 ioa_cfg->allow_cmds = 1;
4067 ioa_cfg->reset_cmd = NULL;
/* Schedule the worker if any resources need mid-layer add/remove. */
4069 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4070 if (ioa_cfg->allow_ml_add_del && (res->add_to_ml || res->del_from_ml)) {
4072 schedule_work(&ioa_cfg->work_q);
/* Re-arm the HCAMs: log-data first, then config-change notifications. */
4077 list_for_each_entry_safe(hostrcb, temp, &ioa_cfg->hostrcb_free_q, queue) {
4078 list_del(&hostrcb->queue);
4079 if (i++ < IPR_NUM_LOG_HCAMS)
4080 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
4082 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
4085 dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
4087 ioa_cfg->reset_retries = 0;
4088 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4089 wake_up_all(&ioa_cfg->reset_wait_q);
/* Drop the lock around scsi_unblock_requests, which may re-enter us. */
4091 spin_unlock_irq(ioa_cfg->host->host_lock);
4092 scsi_unblock_requests(ioa_cfg->host);
4093 spin_lock_irq(ioa_cfg->host->host_lock);
/* A new reset may have started while unlocked; re-block if so. */
4095 if (!ioa_cfg->allow_cmds)
4096 scsi_block_requests(ioa_cfg->host);
4099 return IPR_RC_JOB_RETURN;
4103 * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
4104 * @supported_dev: supported device struct
4105 * @vpids: vendor product id struct
4108 * Fills the buffer with a single-record entry for the given VPIDs.
4110 static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
4111 struct ipr_std_inq_vpids *vpids)
4113 memset(supported_dev, 0, sizeof(struct ipr_supported_device));
4114 memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
4115 supported_dev->num_records = 1;
4116 supported_dev->data_length =
4117 cpu_to_be16(sizeof(struct ipr_supported_device));
4118 supported_dev->reserved = 0;
4122 * ipr_set_supported_devs - Send Set Supported Devices for a device
4123 * @ipr_cmd: ipr command struct
4125 * This function send a Set Supported Devices to the adapter
4127 * Iterates the used resource queue from ipr_cmd->u.res, issuing one
4127 * command per AF DASD device; re-enters itself as the job step until
4127 * the list is exhausted, then advances to ipr_ioa_reset_done.
4128 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
4130 static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
4132 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4133 struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
4134 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
4135 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4136 struct ipr_resource_entry *res = ipr_cmd->u.res;
/* Default next step if no more AF DASD devices remain. */
4138 ipr_cmd->job_step = ipr_ioa_reset_done;
4140 list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
4141 if (!ipr_is_af_dasd_device(res))
/* Remember where we are so the next invocation continues from here. */
4144 ipr_cmd->u.res = res;
4145 ipr_set_sup_dev_dflt(supp_dev, &res->cfgte.std_inq_data.vpids);
4147 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
4148 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
4149 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
4151 ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
4152 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
4153 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;
/* Single-element IOADL describing the supported-device buffer. */
4155 ioadl->flags_and_data_len = cpu_to_be32(IPR_IOADL_FLAGS_WRITE_LAST |
4156 sizeof(struct ipr_supported_device));
4157 ioadl->address = cpu_to_be32(ioa_cfg->vpd_cbs_dma +
4158 offsetof(struct ipr_misc_cbs, supp_dev));
4159 ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
4160 ioarcb->write_data_transfer_length =
4161 cpu_to_be32(sizeof(struct ipr_supported_device));
4163 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
4164 IPR_SET_SUP_DEVICE_TIMEOUT);
/* Come back here for the next device in the list. */
4166 ipr_cmd->job_step = ipr_set_supported_devs;
4167 return IPR_RC_JOB_RETURN;
4170 return IPR_RC_JOB_CONTINUE;
4174 * ipr_get_mode_page - Locate specified mode page
4175 * @mode_pages: mode page buffer
4176 * @page_code: page code to find
4177 * @len: minimum required length for mode page
4180 * pointer to mode page / NULL on failure
4182 static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
4183 u32 page_code, u32 len)
4185 struct ipr_mode_page_hdr *mode_hdr;
4189 if (!mode_pages || (mode_pages->hdr.length == 0))
/* Data past the mode parameter header (hdr.length excludes itself,
 * hence +1) and past the block descriptors. */
4192 length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
4193 mode_hdr = (struct ipr_mode_page_hdr *)
4194 (mode_pages->data + mode_pages->hdr.block_desc_len);
4197 if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
/* Found the page; also require it to satisfy the caller's minimum len. */
4198 if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
/* Skip to the next page: header plus its reported payload length. */
4202 page_length = (sizeof(struct ipr_mode_page_hdr) +
4203 mode_hdr->page_length);
4204 length -= page_length;
4205 mode_hdr = (struct ipr_mode_page_hdr *)
4206 ((unsigned long)mode_hdr + page_length);
4213 * ipr_check_term_power - Check for term power errors
4214 * @ioa_cfg: ioa config struct
4215 * @mode_pages: IOAFP mode pages buffer
4217 * Check the IOAFP's mode page 28 for term power errors
4220 * Logs an error for each bus reporting absent termination power.
4222 static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
4223 struct ipr_mode_pages *mode_pages)
4227 struct ipr_dev_bus_entry *bus;
4228 struct ipr_mode_page28 *mode_page;
4230 mode_page = ipr_get_mode_page(mode_pages, 0x28,
4231 sizeof(struct ipr_mode_page28));
4233 entry_length = mode_page->entry_length;
4235 bus = mode_page->bus;
/* Walk every device bus entry; stride is the reported entry_length. */
4237 for (i = 0; i < mode_page->num_entries; i++) {
4238 if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
4239 dev_err(&ioa_cfg->pdev->dev,
4240 "Term power is absent on scsi bus %d\n",
4244 bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
4249 * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
4250 * @ioa_cfg: ioa config struct
4252 * Looks through the config table checking for SES devices. If
4253 * the SES device is in the SES table indicating a maximum SCSI
4254 * bus speed, the speed is limited for the bus.
4257 * Only ever lowers a bus's max_xfer_rate, never raises it.
4259 static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
4264 for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
4265 max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
4266 ioa_cfg->bus_attr[i].bus_width);
4268 if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
4269 ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
4274 * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
4275 * @ioa_cfg: ioa config struct
4276 * @mode_pages: mode page 28 buffer
4278 * Updates mode page 28 based on driver configuration
4281 * Writes the driver's per-bus width/speed/QAS settings into the page.
4283 static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
4284 struct ipr_mode_pages *mode_pages)
4286 int i, entry_length;
4287 struct ipr_dev_bus_entry *bus;
4288 struct ipr_bus_attributes *bus_attr;
4289 struct ipr_mode_page28 *mode_page;
4291 mode_page = ipr_get_mode_page(mode_pages, 0x28,
4292 sizeof(struct ipr_mode_page28));
4294 entry_length = mode_page->entry_length;
4296 /* Loop for each device bus entry */
4297 for (i = 0, bus = mode_page->bus;
4298 i < mode_page->num_entries;
4299 i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
/* NOTE(review): '>' lets bus == IPR_MAX_NUM_BUSES through while
 * bus_attr[] is indexed below — confirm whether '>=' is intended. */
4300 if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
4301 dev_err(&ioa_cfg->pdev->dev,
4302 "Invalid resource address reported: 0x%08X\n",
4303 IPR_GET_PHYS_LOC(bus->res_addr));
4307 bus_attr = &ioa_cfg->bus_attr[i];
4308 bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
4309 bus->bus_width = bus_attr->bus_width;
4310 bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
4311 bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
4312 if (bus_attr->qas_enabled)
4313 bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
4315 bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
4320 * ipr_build_mode_select - Build a mode select command
4321 * @ipr_cmd: ipr command struct
4322 * @res_handle: resource handle to send command to
4323 * @parm: Byte 2 of Mode Sense command
4324 * @dma_addr: DMA buffer address
4325 * @xfer_len: data transfer length
4328 * Fills in the IOARCB and a single-element write IOADL; does not issue.
4330 static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
4331 u32 res_handle, u8 parm, u32 dma_addr,
4334 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
4335 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4337 ioarcb->res_handle = res_handle;
4338 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
4339 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
4340 ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
4341 ioarcb->cmd_pkt.cdb[1] = parm;
4342 ioarcb->cmd_pkt.cdb[4] = xfer_len;
4344 ioadl->flags_and_data_len =
4345 cpu_to_be32(IPR_IOADL_FLAGS_WRITE_LAST | xfer_len);
4346 ioadl->address = cpu_to_be32(dma_addr);
4347 ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
4348 ioarcb->write_data_transfer_length = cpu_to_be32(xfer_len);
4352 * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
4353 * @ipr_cmd: ipr command struct
4355 * This function sets up the SCSI bus attributes and sends
4356 * a Mode Select for Page 28 to activate them.
4359 * Reset-job step; returns IPR_RC_JOB_RETURN.
4361 static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
4363 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4364 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
/* Prefer mode pages previously saved via ipr_save_ioafp_mode_select. */
4368 if (ioa_cfg->saved_mode_pages) {
4369 memcpy(mode_pages, ioa_cfg->saved_mode_pages,
4370 ioa_cfg->saved_mode_page_len);
4371 length = ioa_cfg->saved_mode_page_len;
/* Otherwise patch the sensed page 28 with driver bus settings. */
4373 ipr_scsi_bus_speed_limit(ioa_cfg);
4374 ipr_check_term_power(ioa_cfg, mode_pages);
4375 ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
4376 length = mode_pages->hdr.length + 1;
4377 mode_pages->hdr.length = 0;
4380 ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
4381 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
/* Next step walks the used resource queue issuing Set Supported Devices. */
4384 ipr_cmd->job_step = ipr_set_supported_devs;
4385 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
4386 struct ipr_resource_entry, queue);
4388 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
4391 return IPR_RC_JOB_RETURN;
4395 * ipr_build_mode_sense - Builds a mode sense command
4396 * @ipr_cmd: ipr command struct
4397 * @res: resource entry struct
4398 * @parm: Byte 2 of mode sense command
4399 * @dma_addr: DMA address of mode sense buffer
4400 * @xfer_len: Size of DMA buffer
4403 * Fills in the IOARCB and a single-element read IOADL; does not issue.
4405 static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
4407 u8 parm, u32 dma_addr, u8 xfer_len)
4409 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
4410 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4412 ioarcb->res_handle = res_handle;
4413 ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
4414 ioarcb->cmd_pkt.cdb[2] = parm;
4415 ioarcb->cmd_pkt.cdb[4] = xfer_len;
4416 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
4418 ioadl->flags_and_data_len =
4419 cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | xfer_len);
4420 ioadl->address = cpu_to_be32(dma_addr);
4421 ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
4422 ioarcb->read_data_transfer_length = cpu_to_be32(xfer_len);
4426 * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
4427 * @ipr_cmd: ipr command struct
4429 * This function send a Page 28 mode sense to the IOA to
4430 * retrieve SCSI bus attributes.
4433 * Reset-job step; returns IPR_RC_JOB_RETURN.
4435 static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
4437 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4440 ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
4441 0x28, ioa_cfg->vpd_cbs_dma +
4442 offsetof(struct ipr_misc_cbs, mode_pages),
4443 sizeof(struct ipr_mode_pages))
/* On completion, apply the sensed attributes via mode select. */
4445 ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
4447 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
4450 return IPR_RC_JOB_RETURN;
4454 * ipr_init_res_table - Initialize the resource table
4455 * @ipr_cmd: ipr command struct
4457 * This function looks through the existing resource table, comparing
4458 * it with the config table. This function will take care of old/new
4459 * devices and schedule adding/removing them from the mid-layer
4463 * IPR_RC_JOB_CONTINUE
4465 static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
4467 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4468 struct ipr_resource_entry *res, *temp;
4469 struct ipr_config_table_entry *cfgte;
4474 if (ioa_cfg->cfg_table->hdr.flags & IPR_UCODE_DOWNLOAD_REQ)
4475 dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
/* Move all current entries to a scratch list; survivors come back. */
4477 list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
4478 list_move_tail(&res->queue, &old_res);
4480 for (i = 0; i < ioa_cfg->cfg_table->hdr.num_entries; i++) {
4481 cfgte = &ioa_cfg->cfg_table->dev[i];
/* Match config table entries to known resources by resource address. */
4484 list_for_each_entry_safe(res, temp, &old_res, queue) {
4485 if (!memcmp(&res->cfgte.res_addr,
4486 &cfgte->res_addr, sizeof(cfgte->res_addr))) {
4487 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
4494 if (list_empty(&ioa_cfg->free_res_q)) {
4495 dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
/* Brand-new device: take a free entry and mark it for ml add. */
4500 res = list_entry(ioa_cfg->free_res_q.next,
4501 struct ipr_resource_entry, queue);
4502 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
4503 ipr_init_res_entry(res);
4508 memcpy(&res->cfgte, cfgte, sizeof(struct ipr_config_table_entry));
/* Entries left in old_res vanished from the config table. */
4511 list_for_each_entry_safe(res, temp, &old_res, queue) {
4513 res->del_from_ml = 1;
4514 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
4516 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
4520 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
4523 return IPR_RC_JOB_CONTINUE;
4527 * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
4528 * @ipr_cmd: ipr command struct
4530 * This function sends a Query IOA Configuration command
4531 * to the adapter to retrieve the IOA configuration table.
4534 * Reset-job step; returns IPR_RC_JOB_RETURN.
4536 static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
4538 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4539 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4540 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
4541 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
/* Firmware version comes from the Page 3 inquiry done just before us. */
4544 dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
4545 ucode_vpd->major_release, ucode_vpd->card_type,
4546 ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
4547 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
4548 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
/* CDB bytes 7-8 carry the allocation length, big-endian. */
4550 ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
4551 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_config_table) >> 8) & 0xff;
4552 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_config_table) & 0xff;
4554 ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
4555 ioarcb->read_data_transfer_length =
4556 cpu_to_be32(sizeof(struct ipr_config_table));
4558 ioadl->address = cpu_to_be32(ioa_cfg->cfg_table_dma);
4559 ioadl->flags_and_data_len =
4560 cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | sizeof(struct ipr_config_table));
/* Config table is processed by ipr_init_res_table on completion. */
4562 ipr_cmd->job_step = ipr_init_res_table;
4564 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
4567 return IPR_RC_JOB_RETURN;
4571 * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
4572 * @ipr_cmd: ipr command struct
4574 * This utility function sends an inquiry to the adapter.
4577 * @flags is CDB byte 1 (EVPD bit), @page selects the VPD page,
4577 * @dma_addr/@xfer_len describe the response buffer.
4579 static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
4580 u32 dma_addr, u8 xfer_len)
4582 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4583 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
4586 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
4587 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
4589 ioarcb->cmd_pkt.cdb[0] = INQUIRY;
4590 ioarcb->cmd_pkt.cdb[1] = flags;
4591 ioarcb->cmd_pkt.cdb[2] = page;
4592 ioarcb->cmd_pkt.cdb[4] = xfer_len;
4594 ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
4595 ioarcb->read_data_transfer_length = cpu_to_be32(xfer_len);
4597 ioadl->address = cpu_to_be32(dma_addr);
4598 ioadl->flags_and_data_len =
4599 cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | xfer_len);
4601 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
4606 * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
4607 * @ipr_cmd: ipr command struct
4609 * This function sends a Page 3 inquiry to the adapter
4610 * to retrieve software VPD information.
4613 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
4615 static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
4617 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4622 /* Grab the type out of the VPD and store it away */
4623 memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
/* Product id holds the adapter type as 4 hex characters. */
4625 ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);
4627 ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
4629 ipr_ioafp_inquiry(ipr_cmd, 1, 3,
4630 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
4631 sizeof(struct ipr_inquiry_page3));
4634 return IPR_RC_JOB_RETURN;
4638 * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
4639 * @ipr_cmd: ipr command struct
4641 * This function sends a standard inquiry to the adapter.
4644 * Reset-job step; returns IPR_RC_JOB_RETURN.
4646 static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
4648 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4651 ipr_cmd->job_step = ipr_ioafp_page3_inquiry;
/* flags=0, page=0: standard (non-EVPD) inquiry into ioa_vpd. */
4653 ipr_ioafp_inquiry(ipr_cmd, 0, 0,
4654 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
4655 sizeof(struct ipr_ioa_vpd));
4658 return IPR_RC_JOB_RETURN;
4662 * ipr_ioafp_indentify_hrrq - Send Identify Host RRQ.
4663 * @ipr_cmd: ipr command struct
4665 * This function send an Identify Host Request Response Queue
4666 * command to establish the HRRQ with the adapter.
4669 * (Name carries a historical typo, "indentify"; kept for callers.)
4671 static int ipr_ioafp_indentify_hrrq(struct ipr_cmnd *ipr_cmd)
4673 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4674 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4677 dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
4679 ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
4680 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
4682 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
/* CDB bytes 2-5: HRRQ DMA address, most significant byte first. */
4683 ioarcb->cmd_pkt.cdb[2] =
4684 ((u32) ioa_cfg->host_rrq_dma >> 24) & 0xff;
4685 ioarcb->cmd_pkt.cdb[3] =
4686 ((u32) ioa_cfg->host_rrq_dma >> 16) & 0xff;
4687 ioarcb->cmd_pkt.cdb[4] =
4688 ((u32) ioa_cfg->host_rrq_dma >> 8) & 0xff;
4689 ioarcb->cmd_pkt.cdb[5] =
4690 ((u32) ioa_cfg->host_rrq_dma) & 0xff;
/* CDB bytes 7-8: queue size in bytes, one u32 per command block. */
4691 ioarcb->cmd_pkt.cdb[7] =
4692 ((sizeof(u32) * IPR_NUM_CMD_BLKS) >> 8) & 0xff;
4693 ioarcb->cmd_pkt.cdb[8] =
4694 (sizeof(u32) * IPR_NUM_CMD_BLKS) & 0xff;
4696 ipr_cmd->job_step = ipr_ioafp_std_inquiry;
4698 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
4701 return IPR_RC_JOB_RETURN;
4705 * ipr_reset_timer_done - Adapter reset timer function
4706 * @ipr_cmd: ipr command struct
4708 * Description: This function is used in adapter reset processing
4709 * for timing events. If the reset_cmd pointer in the IOA
4710 * config struct is not this adapter's we are doing nested
4711 * resets and fail_all_ops will take care of freeing the
4714 * Runs in timer (softirq) context, hence irqsave locking.
4717 static void ipr_reset_timer_done(struct ipr_cmnd *ipr_cmd)
4719 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4720 unsigned long lock_flags = 0;
4722 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
/* Only advance the job if this command still owns the reset. */
4724 if (ioa_cfg->reset_cmd == ipr_cmd) {
4725 list_del(&ipr_cmd->queue);
4726 ipr_cmd->done(ipr_cmd);
4729 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4733 * ipr_reset_start_timer - Start a timer for adapter reset job
4734 * @ipr_cmd: ipr command struct
4735 * @timeout: timeout value
4737 * Description: This function is used in adapter reset processing
4738 * for timing events. If the reset_cmd pointer in the IOA
4739 * config struct is not this adapter's we are doing nested
4740 * resets and fail_all_ops will take care of freeing the
4743 * Queues the command and arms a one-shot timer that resumes the
4744 * reset job via ipr_reset_timer_done after @timeout jiffies.
4746 static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
4747 unsigned long timeout)
4749 list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q);
4750 ipr_cmd->done = ipr_reset_ioa_job;
4752 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
4753 ipr_cmd->timer.expires = jiffies + timeout;
4754 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_reset_timer_done;
4755 add_timer(&ipr_cmd->timer);
4759 * ipr_init_ioa_mem - Initialize ioa_cfg control block
4760 * @ioa_cfg: ioa cfg struct
4763 * Resets the host RRQ ring pointers and clears the config table.
4765 static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
4767 memset(ioa_cfg->host_rrq, 0, sizeof(u32) * IPR_NUM_CMD_BLKS);
4769 /* Initialize Host RRQ pointers */
4770 ioa_cfg->hrrq_start = ioa_cfg->host_rrq;
4771 ioa_cfg->hrrq_end = &ioa_cfg->host_rrq[IPR_NUM_CMD_BLKS - 1];
4772 ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
/* Toggle bit distinguishes new entries from stale ones after wrap. */
4773 ioa_cfg->toggle_bit = 1;
4775 /* Zero out config table */
4776 memset(ioa_cfg->cfg_table, 0, sizeof(struct ipr_config_table));
4780 * ipr_reset_enable_ioa - Enable the IOA following a reset.
4781 * @ipr_cmd: ipr command struct
4783 * This function reinitializes some control blocks and
4784 * enables destructive diagnostics on the adapter.
4787 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
4789 static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
4791 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4792 volatile u32 int_reg;
4795 ipr_cmd->job_step = ipr_ioafp_indentify_hrrq;
4796 ipr_init_ioa_mem(ioa_cfg);
4798 ioa_cfg->allow_interrupts = 1;
4799 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
/* Already operational: just unmask and continue to identify-HRRQ. */
4801 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
4802 writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
4803 ioa_cfg->regs.clr_interrupt_mask_reg);
/* Read back to flush the posted write before proceeding. */
4804 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
4805 return IPR_RC_JOB_CONTINUE;
4808 /* Enable destructive diagnostics on IOA */
4809 writel(IPR_DOORBELL, ioa_cfg->regs.set_uproc_interrupt_reg);
4811 writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg);
4812 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
4814 dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");
/* Arm an overall operational timeout; interrupt will resume the job. */
4816 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
4817 ipr_cmd->timer.expires = jiffies + IPR_OPERATIONAL_TIMEOUT;
4818 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_timeout;
4819 ipr_cmd->done = ipr_reset_ioa_job;
4820 add_timer(&ipr_cmd->timer);
4821 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
4824 return IPR_RC_JOB_RETURN;
4828 * ipr_reset_wait_for_dump - Wait for a dump to timeout.
4829 * @ipr_cmd: ipr command struct
4831 * This function is invoked when an adapter dump has run out
4832 * of processing time.
4835 * IPR_RC_JOB_CONTINUE
4837 static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
4839 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
/* Abort a still-pending dump and push on to the alert step. */
4841 if (ioa_cfg->sdt_state == GET_DUMP)
4842 ioa_cfg->sdt_state = ABORT_DUMP;
4844 ipr_cmd->job_step = ipr_reset_alert;
4846 return IPR_RC_JOB_CONTINUE;
4850 * ipr_unit_check_no_data - Log a unit check/no data error log
4851 * @ioa_cfg: ioa config struct
4853 * Logs an error indicating the adapter unit checked, but for some
4854 * reason, we were unable to fetch the unit check buffer.
4857 * Bumps the error counter and prints; no other side effects.
4859 static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
4861 ioa_cfg->errors_logged++;
4862 dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
4866 * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
4867 * @ioa_cfg: ioa config struct
4869 * Fetches the unit check buffer from the adapter by clocking the data
4870 * through the mailbox register.
4873 * Falls back to ipr_unit_check_no_data() on any fetch/validation failure.
4875 static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
4877 unsigned long mailbox;
4878 struct ipr_hostrcb *hostrcb;
4879 struct ipr_uc_sdt sdt;
4882 mailbox = readl(ioa_cfg->ioa_mailbox);
/* Mailbox must point at a format-2 SDT or there is nothing to fetch. */
4884 if (!ipr_sdt_is_fmt2(mailbox)) {
4885 ipr_unit_check_no_data(ioa_cfg);
4889 memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
4890 rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (u32 *) &sdt,
4891 (sizeof(struct ipr_uc_sdt)) / sizeof(u32));
/* SDT must be ready and its first entry valid before we trust it. */
4893 if (rc || (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE) ||
4894 !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY)) {
4895 ipr_unit_check_no_data(ioa_cfg);
4899 /* Find length of the first sdt entry (UC buffer) */
4900 length = (be32_to_cpu(sdt.entry[0].end_offset) -
4901 be32_to_cpu(sdt.entry[0].bar_str_offset)) & IPR_FMT2_MBX_ADDR_MASK;
/* Borrow a free hostrcb to receive the unit check data. */
4903 hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
4904 struct ipr_hostrcb, queue);
4905 list_del(&hostrcb->queue);
4906 memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));
4908 rc = ipr_get_ldump_data_section(ioa_cfg,
4909 be32_to_cpu(sdt.entry[0].bar_str_offset),
4910 (u32 *)&hostrcb->hcam,
4911 min(length, (int)sizeof(hostrcb->hcam)) / sizeof(u32));
4914 ipr_handle_log_data(ioa_cfg, hostrcb);
4916 ipr_unit_check_no_data(ioa_cfg);
/* Return the hostrcb whether or not the fetch succeeded. */
4918 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
4922 * ipr_reset_restore_cfg_space - Restore PCI config space.
4923 * @ipr_cmd: ipr command struct
4925 * Description: This function restores the saved PCI config space of
4926 * the adapter, fails all outstanding ops back to the callers, and
4927 * fetches the dump/unit check if applicable to this reset.
4930 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
4932 static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
4934 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4938 rc = pci_restore_state(ioa_cfg->pdev);
/* Config-space restore failure: flag PCI error, let the job continue. */
4940 if (rc != PCIBIOS_SUCCESSFUL) {
4941 ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
4942 return IPR_RC_JOB_CONTINUE;
4945 if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
4946 ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
4947 return IPR_RC_JOB_CONTINUE;
/* Everything outstanding at reset time is failed back to its caller. */
4950 ipr_fail_all_ops(ioa_cfg);
4952 if (ioa_cfg->ioa_unit_checked) {
4953 ioa_cfg->ioa_unit_checked = 0;
4954 ipr_get_unit_check_buffer(ioa_cfg);
/* Re-alert the adapter; zero-delay timer resumes the reset job. */
4955 ipr_cmd->job_step = ipr_reset_alert;
4956 ipr_reset_start_timer(ipr_cmd, 0);
4957 return IPR_RC_JOB_RETURN;
4960 if (ioa_cfg->in_ioa_bringdown) {
4961 ipr_cmd->job_step = ipr_ioa_bringdown_done;
4963 ipr_cmd->job_step = ipr_reset_enable_ioa;
4965 if (GET_DUMP == ioa_cfg->sdt_state) {
/* Let the worker collect the dump, bounded by IPR_DUMP_TIMEOUT. */
4966 ipr_reset_start_timer(ipr_cmd, IPR_DUMP_TIMEOUT);
4967 ipr_cmd->job_step = ipr_reset_wait_for_dump;
4968 schedule_work(&ioa_cfg->work_q);
4969 return IPR_RC_JOB_RETURN;
4974 return IPR_RC_JOB_CONTINUE;
4978 * ipr_reset_start_bist - Run BIST on the adapter.
4979 * @ipr_cmd: ipr command struct
4981 * Description: This function runs BIST on the adapter, then delays 2 seconds.
4984 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
4986 static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
4988 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
/* Kick off built-in self test by writing PCI_BIST_START to the standard
 * PCI BIST config register; this resets the adapter. */
4992 rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
4994 if (rc != PCIBIOS_SUCCESSFUL) {
/* Config write failed: report a PCI access error and let the job
 * continue synchronously so the failure is processed. */
4995 ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
4996 rc = IPR_RC_JOB_CONTINUE;
/* BIST started: wait IPR_WAIT_FOR_BIST_TIMEOUT via the reset timer, then
 * restore config space (the BIST clobbers it). */
4998 ipr_cmd->job_step = ipr_reset_restore_cfg_space;
4999 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
5000 rc = IPR_RC_JOB_RETURN;
5008 * ipr_reset_allowed - Query whether or not IOA can be reset
5009 * @ioa_cfg: ioa config struct
5012 * 0 if reset not allowed / non-zero if reset is allowed
5014 static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
5016 volatile u32 temp_reg;
5018 temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5019 return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
5023 * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
5024 * @ipr_cmd: ipr command struct
5026 * Description: This function waits for adapter permission to run BIST,
5027 * then runs BIST. If the adapter does not give permission after a
5028 * reasonable time, we will reset the adapter anyway. The impact of
5029 * resetting the adapter without warning the adapter is the risk of
5030 * losing the persistent error log on the adapter. If the adapter is
5031 * reset while it is writing to the flash on the adapter, the flash
5032 * segment will have bad ECC and be zeroed.
5035 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5037 static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
5039 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5040 int rc = IPR_RC_JOB_RETURN;
/* Still busy and time budget remains: burn one poll interval off
 * u.time_left and re-arm the timer to poll again. */
5042 if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
5043 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
5044 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
/* Either the adapter granted permission or we timed out: proceed to
 * BIST regardless (see risk note in the header above). */
5046 ipr_cmd->job_step = ipr_reset_start_bist;
5047 rc = IPR_RC_JOB_CONTINUE;
5054 * ipr_reset_alert_part2 - Alert the adapter of a pending reset
5055 * @ipr_cmd: ipr command struct
5057 * Description: This function alerts the adapter that it will be reset.
5058 * If memory space is not currently enabled, proceed directly
5059 * to running BIST on the adapter. The timer must always be started
5060 * so we guarantee we do not run BIST from ipr_isr.
5065 static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
5067 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
/* MMIO writes below are only meaningful if PCI memory space is enabled;
 * check the command register first. */
5072 rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);
5074 if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
/* Quiesce interrupts, raise the RESET_ALERT microprocessor interrupt
 * so the adapter can save state, then wait for permission to BIST. */
5075 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
5076 writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg);
5077 ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
/* Memory space disabled (or config read failed): cannot alert via MMIO,
 * go straight to BIST. */
5079 ipr_cmd->job_step = ipr_reset_start_bist;
/* Always return through the timer so the next step never runs from the
 * interrupt handler (see header comment). */
5082 ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
5083 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
5086 return IPR_RC_JOB_RETURN;
5090 * ipr_reset_ucode_download_done - Microcode download completion
5091 * @ipr_cmd: ipr command struct
5093 * Description: This function unmaps the microcode download buffer.
5096 * IPR_RC_JOB_CONTINUE
5098 static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
5100 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5101 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
5103 pci_unmap_sg(ioa_cfg->pdev, sglist->scatterlist,
5104 sglist->num_sg, DMA_TO_DEVICE);
5106 ipr_cmd->job_step = ipr_reset_alert;
5107 return IPR_RC_JOB_CONTINUE;
5111 * ipr_reset_ucode_download - Download microcode to the adapter
5112 * @ipr_cmd: ipr command struct
5114 * Description: This function checks to see if it there is microcode
5115 * to download to the adapter. If there is, a download is performed.
5118 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5120 static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
5122 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5123 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
/* NOTE(review): the guard before this early-out (presumably
 * "if (!sglist)") is elided in this listing — confirm against the
 * unmangled source. No microcode staged: skip straight to reset alert. */
5126 ipr_cmd->job_step = ipr_reset_alert;
5129 return IPR_RC_JOB_CONTINUE;
/* Build a SCSI WRITE BUFFER (download-and-save) CDB addressed to the
 * IOA itself; bytes 6..8 carry the 24-bit big-endian image length. */
5131 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
5132 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
5133 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
5134 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
5135 ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
5136 ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
5137 ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;
/* DMA-map the image; on failure, continue the job so the reset proceeds
 * without a download. */
5139 if (ipr_map_ucode_buffer(ipr_cmd, sglist, sglist->buffer_len)) {
5140 dev_err(&ioa_cfg->pdev->dev,
5141 "Failed to map microcode download buffer\n");
5142 return IPR_RC_JOB_CONTINUE;
/* ipr_reset_ucode_download_done will unmap the buffer on completion. */
5145 ipr_cmd->job_step = ipr_reset_ucode_download_done;
5147 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
5148 IPR_WRITE_BUFFER_TIMEOUT);
5151 return IPR_RC_JOB_RETURN;
5155 * ipr_reset_shutdown_ioa - Shutdown the adapter
5156 * @ipr_cmd: ipr command struct
5158 * Description: This function issues an adapter shutdown of the
5159 * specified type to the specified adapter as part of the
5160 * adapter reset job.
5163 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5165 static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
5167 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5168 enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
5169 unsigned long timeout;
5170 int rc = IPR_RC_JOB_CONTINUE;
/* Only send an actual shutdown command if one was requested and the
 * adapter is still alive; a dead IOA cannot process it. */
5173 if (shutdown_type != IPR_SHUTDOWN_NONE && !ioa_cfg->ioa_is_dead) {
5174 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
5175 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
5176 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
5177 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;
/* Timeout scales with how much work the shutdown type implies. */
5179 if (shutdown_type == IPR_SHUTDOWN_ABBREV)
5180 timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;
5181 else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
5182 timeout = IPR_INTERNAL_TIMEOUT;
5184 timeout = IPR_SHUTDOWN_TIMEOUT;
/* Completion re-enters ipr_reset_ioa_job, which runs the next step. */
5186 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);
5188 rc = IPR_RC_JOB_RETURN;
5189 ipr_cmd->job_step = ipr_reset_ucode_download;
/* No shutdown needed: fall through synchronously to the alert step. */
5191 ipr_cmd->job_step = ipr_reset_alert;
5198 * ipr_reset_ioa_job - Adapter reset job
5199 * @ipr_cmd: ipr command struct
5201 * Description: This function is the job router for the adapter reset job.
/* Each job_step returns IPR_RC_JOB_CONTINUE to run the next step
 * synchronously here, or IPR_RC_JOB_RETURN when it will be resumed later
 * (timer or command completion re-enters this function). */
5206 static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
/* u.scratch is preserved across ipr_reinit_ipr_cmnd() below so steps can
 * carry state through the whole job. */
5209 unsigned long scratch = ipr_cmd->u.scratch;
5210 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5213 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
/* A newer (nested) reset superseded this one: just retire the command. */
5215 if (ioa_cfg->reset_cmd != ipr_cmd) {
5217 * We are doing nested adapter resets and this is
5218 * not the current reset job.
5220 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
/* The previous step's command failed on the adapter: log it and start
 * a fresh reset rather than continuing this job. */
5224 if (IPR_IOASC_SENSE_KEY(ioasc)) {
5225 dev_err(&ioa_cfg->pdev->dev,
5226 "0x%02X failed with IOASC: 0x%08X\n",
5227 ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
5229 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5230 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
/* Recycle the command block for the next step, restoring scratch. */
5234 ipr_reinit_ipr_cmnd(ipr_cmd);
5235 ipr_cmd->u.scratch = scratch;
5236 rc = ipr_cmd->job_step(ipr_cmd);
5237 } while(rc == IPR_RC_JOB_CONTINUE);
5241 * _ipr_initiate_ioa_reset - Initiate an adapter reset
5242 * @ioa_cfg: ioa config struct
5243 * @job_step: first job step of reset job
5244 * @shutdown_type: shutdown type
5246 * Description: This function will initiate the reset of the given adapter
5247 * starting at the selected job step.
5248 * If the caller needs to wait on the completion of the reset,
5249 * the caller must sleep on the reset_wait_q.
5254 static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
5255 int (*job_step) (struct ipr_cmnd *),
5256 enum ipr_shutdown_type shutdown_type)
5258 struct ipr_cmnd *ipr_cmd;
/* Flag the reset and stop new commands (driver gate + midlayer gate)
 * before any reset work begins. */
5260 ioa_cfg->in_reset_reload = 1;
5261 ioa_cfg->allow_cmds = 0;
5262 scsi_block_requests(ioa_cfg->host);
/* Dedicate one command block to drive the job; reset_cmd is how
 * ipr_reset_ioa_job detects nested resets. */
5264 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5265 ioa_cfg->reset_cmd = ipr_cmd;
5266 ipr_cmd->job_step = job_step;
5267 ipr_cmd->u.shutdown_type = shutdown_type;
5269 ipr_reset_ioa_job(ipr_cmd);
5273 * ipr_initiate_ioa_reset - Initiate an adapter reset
5274 * @ioa_cfg: ioa config struct
5275 * @shutdown_type: shutdown type
5277 * Description: This function will initiate the reset of the given adapter.
5278 * If the caller needs to wait on the completion of the reset,
5279 * the caller must sleep on the reset_wait_q.
/* Called with the host lock held (it is dropped/retaken around
 * scsi_unblock_requests below). */
5284 static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
5285 enum ipr_shutdown_type shutdown_type)
/* Dead adapters are not reset again (early return elided in listing). */
5287 if (ioa_cfg->ioa_is_dead)
/* A reset arriving mid-dump aborts the in-progress dump. */
5290 if (ioa_cfg->in_reset_reload && ioa_cfg->sdt_state == GET_DUMP)
5291 ioa_cfg->sdt_state = ABORT_DUMP;
/* Too many back-to-back reset retries: declare the IOA dead. */
5293 if (ioa_cfg->reset_retries++ > IPR_NUM_RESET_RELOAD_RETRIES) {
5294 dev_err(&ioa_cfg->pdev->dev,
5295 "IOA taken offline - error recovery failed\n");
5297 ioa_cfg->reset_retries = 0;
5298 ioa_cfg->ioa_is_dead = 1;
/* If we were already bringing the adapter down, finish the bringdown
 * here: fail everything outstanding and wake any waiters. */
5300 if (ioa_cfg->in_ioa_bringdown) {
5301 ioa_cfg->reset_cmd = NULL;
5302 ioa_cfg->in_reset_reload = 0;
5303 ipr_fail_all_ops(ioa_cfg);
5304 wake_up_all(&ioa_cfg->reset_wait_q);
/* scsi_unblock_requests may recurse into the queuecommand path, so the
 * host lock must be dropped around it. */
5306 spin_unlock_irq(ioa_cfg->host->host_lock);
5307 scsi_unblock_requests(ioa_cfg->host);
5308 spin_lock_irq(ioa_cfg->host->host_lock);
/* Otherwise convert this reset into a bringdown with no shutdown cmd. */
5311 ioa_cfg->in_ioa_bringdown = 1;
5312 shutdown_type = IPR_SHUTDOWN_NONE;
5316 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
5321 * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
5322 * @ioa_cfg: ioa cfg struct
5324 * Description: This is the second phase of adapter intialization
5325 * This function takes care of initilizing the adapter to the point
5326 * where it can accept new commands.
5329 * 0 on sucess / -EIO on failure
5331 static int __devinit ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
5334 unsigned long host_lock_flags = 0;
/* Start a full reset job (enable path, no shutdown) under the host
 * lock, then drop the lock and sleep until the job clears
 * in_reset_reload. */
5337 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
5338 dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
5339 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa, IPR_SHUTDOWN_NONE);
5341 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
5342 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5343 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
/* NOTE(review): the rc assignments (-EIO paths) are elided from this
 * listing; the branches below select the failure cases. */
5345 if (ioa_cfg->ioa_is_dead) {
5347 } else if (ipr_invalid_adapter(ioa_cfg)) {
5351 dev_err(&ioa_cfg->pdev->dev,
5352 "Adapter not supported in this hardware configuration.\n");
5355 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
5362 * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
5363 * @ioa_cfg: ioa config struct
/* Safe to call on a partially-populated list (used from the alloc
 * failure path): each entry is NULL-checked and cleared. */
5368 static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
5372 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
5373 if (ioa_cfg->ipr_cmnd_list[i])
5374 pci_pool_free(ioa_cfg->ipr_cmd_pool,
5375 ioa_cfg->ipr_cmnd_list[i],
5376 ioa_cfg->ipr_cmnd_list_dma[i]);
5378 ioa_cfg->ipr_cmnd_list[i] = NULL;
/* Destroy the pool only after all blocks are returned to it. */
5381 if (ioa_cfg->ipr_cmd_pool)
5382 pci_pool_destroy (ioa_cfg->ipr_cmd_pool);
5384 ioa_cfg->ipr_cmd_pool = NULL;
5388 * ipr_free_mem - Frees memory allocated for an adapter
5389 * @ioa_cfg: ioa cfg struct
/* Releases everything ipr_alloc_mem() allocated, in reverse order:
 * resource entries, VPD control blocks, command blocks, host RRQ,
 * config table, HCAM host-response buffers, dump and trace buffers. */
5394 static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
5398 kfree(ioa_cfg->res_entries);
5399 pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_misc_cbs),
5400 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
5401 ipr_free_cmd_blks(ioa_cfg);
5402 pci_free_consistent(ioa_cfg->pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
5403 ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
5404 pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_config_table),
5406 ioa_cfg->cfg_table_dma);
5408 for (i = 0; i < IPR_NUM_HCAMS; i++) {
5409 pci_free_consistent(ioa_cfg->pdev,
5410 sizeof(struct ipr_hostrcb),
5411 ioa_cfg->hostrcb[i],
5412 ioa_cfg->hostrcb_dma[i]);
5415 ipr_free_dump(ioa_cfg);
5416 kfree(ioa_cfg->saved_mode_pages);
5417 kfree(ioa_cfg->trace);
5421 * ipr_free_all_resources - Free all allocated resources for an adapter.
5422 * @ipr_cmd: ipr command struct
5424 * This function frees all allocated resources for the
5425 * specified adapter.
/* Teardown mirrors probe in reverse: IRQ, MMIO mapping, PCI regions,
 * driver memory, SCSI host reference, then the PCI device itself. */
5430 static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
5432 struct pci_dev *pdev = ioa_cfg->pdev;
5435 free_irq(pdev->irq, ioa_cfg);
5436 iounmap(ioa_cfg->hdw_dma_regs);
5437 pci_release_regions(pdev);
5438 ipr_free_mem(ioa_cfg);
/* scsi_host_put drops the reference that keeps ioa_cfg (hostdata)
 * alive, so no ioa_cfg access is valid after this call. */
5439 scsi_host_put(ioa_cfg->host);
5440 pci_disable_device(pdev);
5445 * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
5446 * @ioa_cfg: ioa config struct
5449 * 0 on success / -ENOMEM on allocation failure
5451 static int __devinit ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
5453 struct ipr_cmnd *ipr_cmd;
5454 struct ipr_ioarcb *ioarcb;
5455 dma_addr_t dma_addr;
/* DMA pool of command blocks, 8-byte aligned as the adapter requires. */
5458 ioa_cfg->ipr_cmd_pool = pci_pool_create (IPR_NAME, ioa_cfg->pdev,
5459 sizeof(struct ipr_cmnd), 8, 0);
5461 if (!ioa_cfg->ipr_cmd_pool)
5464 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
5465 ipr_cmd = pci_pool_alloc (ioa_cfg->ipr_cmd_pool, SLAB_KERNEL, &dma_addr);
/* On failure ipr_free_cmd_blks releases what was allocated so far. */
5468 ipr_free_cmd_blks(ioa_cfg);
5472 memset(ipr_cmd, 0, sizeof(*ipr_cmd));
5473 ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
5474 ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;
/* Pre-compute the DMA addresses the adapter needs inside each IOARCB:
 * the block itself, the IOADL (scatter list) and the IOASA (status),
 * all expressed as offsets from the block's bus address. */
5476 ioarcb = &ipr_cmd->ioarcb;
5477 ioarcb->ioarcb_host_pci_addr = cpu_to_be32(dma_addr);
5478 ioarcb->host_response_handle = cpu_to_be32(i << 2);
5479 ioarcb->write_ioadl_addr =
5480 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioadl));
5481 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
5482 ioarcb->ioasa_host_pci_addr =
5483 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioasa));
5484 ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
5485 ipr_cmd->cmd_index = i;
5486 ipr_cmd->ioa_cfg = ioa_cfg;
5487 ipr_cmd->sense_buffer_dma = dma_addr +
5488 offsetof(struct ipr_cmnd, sense_buffer);
5490 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5497 * ipr_alloc_mem - Allocate memory for an adapter
5498 * @ioa_cfg: ioa config struct
5501 * 0 on success / non-zero for error
/* Classic goto-unwind allocator: each allocation failure jumps to the
 * label that frees everything allocated before it. */
5503 static int __devinit ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
5505 struct pci_dev *pdev = ioa_cfg->pdev;
5506 int i, rc = -ENOMEM;
5509 ioa_cfg->res_entries = kmalloc(sizeof(struct ipr_resource_entry) *
5510 IPR_MAX_PHYSICAL_DEVS, GFP_KERNEL);
5512 if (!ioa_cfg->res_entries)
5515 memset(ioa_cfg->res_entries, 0,
5516 sizeof(struct ipr_resource_entry) * IPR_MAX_PHYSICAL_DEVS);
5518 for (i = 0; i < IPR_MAX_PHYSICAL_DEVS; i++)
5519 list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
/* Coherent DMA buffers shared with the adapter: misc control blocks,
 * the host request/response queue, and the config table. */
5521 ioa_cfg->vpd_cbs = pci_alloc_consistent(ioa_cfg->pdev,
5522 sizeof(struct ipr_misc_cbs),
5523 &ioa_cfg->vpd_cbs_dma);
5525 if (!ioa_cfg->vpd_cbs)
5526 goto out_free_res_entries;
5528 if (ipr_alloc_cmd_blks(ioa_cfg))
5529 goto out_free_vpd_cbs;
5531 ioa_cfg->host_rrq = pci_alloc_consistent(ioa_cfg->pdev,
5532 sizeof(u32) * IPR_NUM_CMD_BLKS,
5533 &ioa_cfg->host_rrq_dma);
5535 if (!ioa_cfg->host_rrq)
5536 goto out_ipr_free_cmd_blocks;
5538 ioa_cfg->cfg_table = pci_alloc_consistent(ioa_cfg->pdev,
5539 sizeof(struct ipr_config_table),
5540 &ioa_cfg->cfg_table_dma);
5542 if (!ioa_cfg->cfg_table)
5543 goto out_free_host_rrq;
/* One DMA buffer per host-controlled async message (HCAM); each
 * records its own bus address for the adapter to write into. */
5545 for (i = 0; i < IPR_NUM_HCAMS; i++) {
5546 ioa_cfg->hostrcb[i] = pci_alloc_consistent(ioa_cfg->pdev,
5547 sizeof(struct ipr_hostrcb),
5548 &ioa_cfg->hostrcb_dma[i]);
5550 if (!ioa_cfg->hostrcb[i])
5551 goto out_free_hostrcb_dma;
5553 ioa_cfg->hostrcb[i]->hostrcb_dma =
5554 ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
5555 list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
5558 ioa_cfg->trace = kmalloc(sizeof(struct ipr_trace_entry) *
5559 IPR_NUM_TRACE_ENTRIES, GFP_KERNEL);
5561 if (!ioa_cfg->trace)
5562 goto out_free_hostrcb_dma;
5564 memset(ioa_cfg->trace, 0,
5565 sizeof(struct ipr_trace_entry) * IPR_NUM_TRACE_ENTRIES);
/* Error unwind: frees cascade from the most recent allocation back.
 * (Some labels are elided in this listing.) */
5572 out_free_hostrcb_dma:
5574 pci_free_consistent(pdev, sizeof(struct ipr_hostrcb),
5575 ioa_cfg->hostrcb[i],
5576 ioa_cfg->hostrcb_dma[i]);
5578 pci_free_consistent(pdev, sizeof(struct ipr_config_table),
5579 ioa_cfg->cfg_table, ioa_cfg->cfg_table_dma);
5581 pci_free_consistent(pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
5582 ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
5583 out_ipr_free_cmd_blocks:
5584 ipr_free_cmd_blks(ioa_cfg);
5586 pci_free_consistent(pdev, sizeof(struct ipr_misc_cbs),
5587 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
5588 out_free_res_entries:
5589 kfree(ioa_cfg->res_entries);
5594 * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
5595 * @ioa_cfg: ioa config struct
5600 static void __devinit ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
5604 for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
5605 ioa_cfg->bus_attr[i].bus = i;
5606 ioa_cfg->bus_attr[i].qas_enabled = 0;
5607 ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
/* Max transfer rate comes from the ipr_max_speed module parameter; a
 * value outside the speed table falls back to U160 (else branch line
 * is elided in this listing). */
5608 if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
5609 ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
5611 ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
5616 * ipr_init_ioa_cfg - Initialize IOA config struct
5617 * @ioa_cfg: ioa config struct
5618 * @host: scsi host struct
5619 * @pdev: PCI dev struct
/* One-time setup of the per-adapter structure: labels for dump/trace
 * identification, list heads, work queue, SCSI host limits, and the
 * computed MMIO register addresses. */
5624 static void __devinit ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
5625 struct Scsi_Host *host, struct pci_dev *pdev)
5627 const struct ipr_interrupt_offsets *p;
5628 struct ipr_interrupts *t;
5631 ioa_cfg->host = host;
5632 ioa_cfg->pdev = pdev;
5633 ioa_cfg->log_level = ipr_log_level;
/* Eye-catcher strings: fixed labels written into the structure so they
 * are recognizable in a memory/adapter dump. */
5634 sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
5635 sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
5636 sprintf(ioa_cfg->ipr_free_label, IPR_FREEQ_LABEL);
5637 sprintf(ioa_cfg->ipr_pending_label, IPR_PENDQ_LABEL);
5638 sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
5639 sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
5640 sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
5641 sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);
5643 INIT_LIST_HEAD(&ioa_cfg->free_q);
5644 INIT_LIST_HEAD(&ioa_cfg->pending_q);
5645 INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
5646 INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
5647 INIT_LIST_HEAD(&ioa_cfg->free_res_q);
5648 INIT_LIST_HEAD(&ioa_cfg->used_res_q);
5649 INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread, ioa_cfg);
5650 init_waitqueue_head(&ioa_cfg->reset_wait_q);
5651 ioa_cfg->sdt_state = INACTIVE;
5653 ipr_initialize_bus_attr(ioa_cfg);
5655 host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
5656 host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
5657 host->max_channel = IPR_MAX_BUS_TO_SCAN;
5658 host->unique_id = host->host_no;
5659 host->max_cmd_len = IPR_MAX_CDB_LEN;
5660 pci_set_drvdata(pdev, ioa_cfg);
/* Translate the chip's per-register offsets into absolute ioremapped
 * addresses for fast access in the hot paths. */
5662 p = &ioa_cfg->chip_cfg->regs;
5664 base = ioa_cfg->hdw_dma_regs;
5666 t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
5667 t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
5668 t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
5669 t->clr_interrupt_reg = base + p->clr_interrupt_reg;
5670 t->sense_interrupt_reg = base + p->sense_interrupt_reg;
5671 t->ioarrin_reg = base + p->ioarrin_reg;
5672 t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
5673 t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
5674 t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
5678 * ipr_probe_ioa - Allocates memory and does first stage of initialization
5679 * @pdev: PCI device struct
5680 * @dev_id: PCI device id struct
5683 * 0 on success / non-zero on failure
/* First-stage probe: enable the PCI device, allocate the SCSI host and
 * driver memory, map registers, set DMA masks, save config space, and
 * register the interrupt handler. The adapter is brought operational
 * later by ipr_probe_ioa_part2(). Failure paths unwind via gotos (some
 * labels are elided in this listing). */
5685 static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
5686 const struct pci_device_id *dev_id)
5688 struct ipr_ioa_cfg *ioa_cfg;
5689 struct Scsi_Host *host;
5690 unsigned long ipr_regs_pci;
5691 void __iomem *ipr_regs;
5692 u32 rc = PCIBIOS_SUCCESSFUL;
5696 if ((rc = pci_enable_device(pdev))) {
5697 dev_err(&pdev->dev, "Cannot enable adapter\n");
5701 dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);
/* ioa_cfg lives in the Scsi_Host's hostdata; its lifetime is tied to
 * the host's reference count. */
5703 host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));
5706 dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
5711 ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
5712 memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
/* Per-chip register layout is carried in the PCI id table's
 * driver_data. */
5714 ioa_cfg->chip_cfg = (const struct ipr_chip_cfg_t *)dev_id->driver_data;
5716 ipr_regs_pci = pci_resource_start(pdev, 0);
5718 rc = pci_request_regions(pdev, IPR_NAME);
5721 "Couldn't register memory range of registers\n");
5722 goto out_scsi_host_put;
5725 ipr_regs = ioremap(ipr_regs_pci, pci_resource_len(pdev, 0));
5729 "Couldn't map memory range of registers\n");
5731 goto out_release_regions;
5734 ioa_cfg->hdw_dma_regs = ipr_regs;
5735 ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
5736 ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;
5738 ipr_init_ioa_cfg(ioa_cfg, host, pdev);
5740 pci_set_master(pdev);
5742 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
5744 dev_err(&pdev->dev, "Failed to set PCI DMA mask\n");
5748 rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
5749 ioa_cfg->chip_cfg->cache_line_size);
5751 if (rc != PCIBIOS_SUCCESSFUL) {
5752 dev_err(&pdev->dev, "Write of cache line size failed\n");
/* Config space snapshot is what ipr_reset_restore_cfg_space() restores
 * after BIST. */
5757 /* Save away PCI config space for use following IOA reset */
5758 rc = pci_save_state(pdev);
5760 if (rc != PCIBIOS_SUCCESSFUL) {
5761 dev_err(&pdev->dev, "Failed to save PCI config space\n");
5766 if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
5769 if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
5772 rc = ipr_alloc_mem(ioa_cfg);
5775 "Couldn't allocate enough memory for device driver!\n");
/* Leave only the transition-to-operational interrupt unmasked until
 * part2 enables the adapter; IRQ is shared. */
5779 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
5780 rc = request_irq(pdev->irq, ipr_isr, SA_SHIRQ, IPR_NAME, ioa_cfg);
5783 dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
/* Publish this adapter on the global list under the driver lock. */
5788 spin_lock(&ipr_driver_lock);
5789 list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
5790 spin_unlock(&ipr_driver_lock);
5797 ipr_free_mem(ioa_cfg);
5800 out_release_regions:
5801 pci_release_regions(pdev);
5803 scsi_host_put(host);
5805 pci_disable_device(pdev);
5810 * ipr_scan_vsets - Scans for VSET devices
5811 * @ioa_cfg: ioa config struct
5813 * Description: Since the VSET resources do not follow SAM in that we can have
5814 * sparse LUNs with no LUN 0, we have to scan for these ourselves.
5819 static void ipr_scan_vsets(struct ipr_ioa_cfg *ioa_cfg)
5823 for (target = 0; target < IPR_MAX_NUM_TARGETS_PER_BUS; target++)
5824 for (lun = 0; lun < IPR_MAX_NUM_VSET_LUNS_PER_TARGET; lun++ )
5825 scsi_add_device(ioa_cfg->host, IPR_VSET_BUS, target, lun);
5829 * ipr_initiate_ioa_bringdown - Bring down an adapter
5830 * @ioa_cfg: ioa config struct
5831 * @shutdown_type: shutdown type
5833 * Description: This function will initiate bringing down the adapter.
5834 * This consists of issuing an IOA shutdown to the adapter
5835 * to flush the cache, and running BIST.
5836 * If the caller needs to wait on the completion of the reset,
5837 * the caller must sleep on the reset_wait_q.
5842 static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
5843 enum ipr_shutdown_type shutdown_type)
/* A pending dump is abandoned during bringdown. */
5846 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
5847 ioa_cfg->sdt_state = ABORT_DUMP;
/* in_ioa_bringdown routes the reset job to ipr_ioa_bringdown_done
 * instead of re-enabling the adapter. */
5848 ioa_cfg->reset_retries = 0;
5849 ioa_cfg->in_ioa_bringdown = 1;
5850 ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
5855 * __ipr_remove - Remove a single adapter
5856 * @pdev: pci device struct
5858 * Adapter hot plug remove entry point.
5863 static void __ipr_remove(struct pci_dev *pdev)
5865 unsigned long host_lock_flags = 0;
5866 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
/* Bring the adapter down (normal shutdown flushes the write cache),
 * then sleep until the reset/bringdown job completes. */
5869 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
5870 ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
5872 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
5873 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5874 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
/* Unpublish from the global adapter list. */
5876 spin_lock(&ipr_driver_lock);
5877 list_del(&ioa_cfg->queue);
5878 spin_unlock(&ipr_driver_lock);
5880 if (ioa_cfg->sdt_state == ABORT_DUMP)
5881 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
5882 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
5884 ipr_free_all_resources(ioa_cfg);
5890 * ipr_remove - IOA hot plug remove entry point
5891 * @pdev: pci device struct
5893 * Adapter hot plug remove entry point.
5898 static void ipr_remove(struct pci_dev *pdev)
5900 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
/* Stop accepting new commands and drain the worker before tearing down
 * sysfs entries and the SCSI host. (The trailing call into
 * __ipr_remove() is elided from this listing.) */
5904 ioa_cfg->allow_cmds = 0;
5905 flush_scheduled_work();
5906 ipr_remove_trace_file(&ioa_cfg->host->shost_classdev.kobj,
5908 ipr_remove_dump_file(&ioa_cfg->host->shost_classdev.kobj,
5910 scsi_remove_host(ioa_cfg->host);
5918 * ipr_probe - Adapter hot plug add entry point
5921 * 0 on success / non-zero on failure
/* Full probe sequence: stage-1 probe, stage-2 bring-up, SCSI host
 * registration, sysfs trace/dump attribute files, then device scan.
 * Each sysfs failure path undoes the prior registrations. */
5923 static int __devinit ipr_probe(struct pci_dev *pdev,
5924 const struct pci_device_id *dev_id)
5926 struct ipr_ioa_cfg *ioa_cfg;
5929 rc = ipr_probe_ioa(pdev, dev_id);
5934 ioa_cfg = pci_get_drvdata(pdev);
5935 rc = ipr_probe_ioa_part2(ioa_cfg);
5942 rc = scsi_add_host(ioa_cfg->host, &pdev->dev);
5949 rc = ipr_create_trace_file(&ioa_cfg->host->shost_classdev.kobj,
5953 scsi_remove_host(ioa_cfg->host);
5958 rc = ipr_create_dump_file(&ioa_cfg->host->shost_classdev.kobj,
5962 ipr_remove_trace_file(&ioa_cfg->host->shost_classdev.kobj,
5964 scsi_remove_host(ioa_cfg->host);
/* Midlayer scan plus the driver's own sparse-LUN VSET scan and the
 * IOA's own pseudo-device; then let the worker finish device setup. */
5969 scsi_scan_host(ioa_cfg->host);
5970 ipr_scan_vsets(ioa_cfg);
5971 scsi_add_device(ioa_cfg->host, IPR_IOA_BUS, IPR_IOA_TARGET, IPR_IOA_LUN);
5972 ioa_cfg->allow_ml_add_del = 1;
5973 schedule_work(&ioa_cfg->work_q);
5978 * ipr_shutdown - Shutdown handler.
5979 * @dev: device struct
5981 * This function is invoked upon system shutdown/reboot. It will issue
5982 * an adapter shutdown to the adapter to flush the write cache.
5987 static void ipr_shutdown(struct device *dev)
5989 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(to_pci_dev(dev));
5990 unsigned long lock_flags = 0;
/* Normal shutdown flushes the write cache; block until the bringdown
 * job has fully completed so reboot cannot race the flush. */
5992 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5993 ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
5994 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5995 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
/* PCI ids this driver binds to. Matching is on vendor/device plus IBM
 * subsystem id; driver_data selects the per-chip register layout
 * (ipr_chip_cfg[0] = Gemstone/Citrine, [1] = Snipe). The terminating
 * empty entry is elided from this listing. */
5998 static struct pci_device_id ipr_pci_table[] __devinitdata = {
5999 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
6000 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702,
6001 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6002 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
6003 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E,
6004 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6005 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
6006 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703,
6007 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6008 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
6009 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D,
6010 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6011 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
6012 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B,
6013 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6014 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
6015 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780,
6016 0, 0, (kernel_ulong_t)&ipr_chip_cfg[1] },
6017 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
6018 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_570F,
6019 0, 0, (kernel_ulong_t)&ipr_chip_cfg[1] },
6022 MODULE_DEVICE_TABLE(pci, ipr_pci_table);
/* PCI driver registration: hotplug add/remove and reboot shutdown hook.
 * (The .name and .probe member lines are elided from this listing.) */
6024 static struct pci_driver ipr_driver = {
6026 .id_table = ipr_pci_table,
6028 .remove = ipr_remove,
6030 .shutdown = ipr_shutdown,
6035 * ipr_init - Module entry point
6038 * 0 on success / negative value on failure
6040 static int __init ipr_init(void)
/* Announce the driver version, then register with the PCI core; probe
 * runs per matching device from there. */
6042 ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
6043 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);
6045 return pci_module_init(&ipr_driver);
6049 * ipr_exit - Module unload
6051 * Module unload entry point.
6056 static void __exit ipr_exit(void)
/* Unregistering triggers ipr_remove() for every bound adapter. */
6058 pci_unregister_driver(&ipr_driver);
6061 module_init(ipr_init);
6062 module_exit(ipr_exit);