2 * ipr.c -- driver for IBM Power Linux RAID adapters
4 * Written By: Brian King, IBM Corporation
6 * Copyright (C) 2003, 2004 IBM Corporation
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
27 * This driver is used to control the following SCSI adapters:
29 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
31 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
32 * PCI-X Dual Channel Ultra 320 SCSI Adapter
33 * PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
34 * Embedded SCSI adapter on p615 and p655 systems
36 * Supported Hardware Features:
37 * - Ultra 320 SCSI controller
38 * - PCI-X host interface
39 * - Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
40 * - Non-Volatile Write Cache
41 * - Supports attachment of non-RAID disks, tape, and optical devices
42 * - RAID Levels 0, 5, 10
44 * - Background Parity Checking
45 * - Background Data Scrubbing
46 * - Ability to increase the capacity of an existing RAID 5 disk array
50 * - Tagged command queuing
51 * - Adapter microcode download
53 * - SCSI device hot plug
57 #include <linux/config.h>
59 #include <linux/init.h>
60 #include <linux/types.h>
61 #include <linux/errno.h>
62 #include <linux/kernel.h>
63 #include <linux/ioport.h>
64 #include <linux/delay.h>
65 #include <linux/pci.h>
66 #include <linux/wait.h>
67 #include <linux/spinlock.h>
68 #include <linux/sched.h>
69 #include <linux/interrupt.h>
70 #include <linux/blkdev.h>
71 #include <linux/firmware.h>
72 #include <linux/module.h>
73 #include <linux/moduleparam.h>
76 #include <asm/processor.h>
77 #include <scsi/scsi.h>
78 #include <scsi/scsi_host.h>
79 #include <scsi/scsi_tcq.h>
80 #include <scsi/scsi_eh.h>
81 #include <scsi/scsi_cmnd.h>
82 #include <scsi/scsi_request.h>
/*
 * Global driver state and module-parameter-controlled defaults.
 */
static struct list_head ipr_ioa_head = LIST_HEAD_INIT(ipr_ioa_head);	/* all adapters bound to this driver */
static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;	/* error-log verbosity (module param) */
static unsigned int ipr_max_speed = 1;	/* index into ipr_max_bus_speeds; 1 == U160 */
static int ipr_testmode = 0;	/* non-zero allows unsupported configurations (module param) */
static spinlock_t ipr_driver_lock = SPIN_LOCK_UNLOCKED;	/* presumably serializes ipr_ioa_head updates -- TODO confirm */
94 /* This table describes the differences between DMA controller chips */
95 static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
98 .cache_line_size = 0x20,
100 .set_interrupt_mask_reg = 0x0022C,
101 .clr_interrupt_mask_reg = 0x00230,
102 .sense_interrupt_mask_reg = 0x0022C,
103 .clr_interrupt_reg = 0x00228,
104 .sense_interrupt_reg = 0x00224,
105 .ioarrin_reg = 0x00404,
106 .sense_uproc_interrupt_reg = 0x00214,
107 .set_uproc_interrupt_reg = 0x00214,
108 .clr_uproc_interrupt_reg = 0x00218
113 .cache_line_size = 0x20,
115 .set_interrupt_mask_reg = 0x00288,
116 .clr_interrupt_mask_reg = 0x0028C,
117 .sense_interrupt_mask_reg = 0x00288,
118 .clr_interrupt_reg = 0x00284,
119 .sense_interrupt_reg = 0x00280,
120 .ioarrin_reg = 0x00504,
121 .sense_uproc_interrupt_reg = 0x00290,
122 .set_uproc_interrupt_reg = 0x00290,
123 .clr_uproc_interrupt_reg = 0x00294
128 static int ipr_max_bus_speeds [] = {
129 IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
/* Module metadata and parameters. */
MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
module_param_named(max_speed, ipr_max_speed, uint, 0);
MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
module_param_named(log_level, ipr_log_level, uint, 0);
MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
module_param_named(testmode, ipr_testmode, int, 0);
MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
MODULE_LICENSE("GPL");
MODULE_VERSION(IPR_DRIVER_VERSION);
/*
 * Textual descriptions of device end states, indexed by the end-state code
 * reported by the adapter.
 * NOTE(review): some array entries and the closing braces appear to be
 * missing from this chunk of the file -- restore from the full source.
 */
static const char *ipr_gpdd_dev_end_states[] = {
	"Terminated by host",
	"Terminated by device reset",
	"Terminated by bus reset",
	"Command not started"

/* Textual descriptions of SCSI bus phases, indexed by reported phase code. */
static const char *ipr_gpdd_dev_bus_phases[] = {
/* A constant array of IOASCs/URCs/Error Messages */
/*
 * Maps adapter IOASC codes to their log-HCAM policy and a human-readable
 * message; index 0 is the catch-all used for unknown IOASCs (see
 * ipr_get_error()).
 * NOTE(review): each entry's IOASC value and log-flag initializers appear
 * truncated in this chunk -- only the message strings survive. Restore the
 * numeric fields from the full source before building.
 */
struct ipr_error_table_t ipr_error_table[] = {
	"8155: An unknown error was received"},
	"Soft underlength error"},
	"Command to be cancelled not found"},
	"Qualified success"},
	"FFFE: Soft device bus error recovered by the IOA"},
	"FFF9: Device sector reassign successful"},
	"FFF7: Media error recovered by device rewrite procedures"},
	"7001: IOA sector reassignment successful"},
	"FFF9: Soft media error. Sector reassignment recommended"},
	"FFF7: Media error recovered by IOA rewrite procedures"},
	"FF3D: Soft PCI bus error recovered by the IOA"},
	"FFF6: Device hardware error recovered by the IOA"},
	"FFF6: Device hardware error recovered by the device"},
	"FF3D: Soft IOA error recovered by the IOA"},
	"FFFA: Undefined device response recovered by the IOA"},
	"FFF6: Device bus error, message or command phase"},
	"FFF6: Failure prediction threshold exceeded"},
	"8009: Impending cache battery pack failure"},
	"34FF: Disk device format in progress"},
	"Synchronization required"},
	"No ready, IOA shutdown"},
	"3020: Storage subsystem configuration error"},
	"FFF5: Medium error, data unreadable, recommend reassign"},
	"7000: Medium error, data unreadable, do not reassign"},
	"FFF3: Disk media format bad"},
	"3002: Addressed device failed to respond to selection"},
	"3100: Device bus error"},
	"3109: IOA timed out a device command"},
	"3120: SCSI bus is not operational"},
	"9000: IOA reserved area data check"},
	"9001: IOA reserved area invalid data pattern"},
	"9002: IOA reserved area LRC error"},
	"102E: Out of alternate sectors for disk storage"},
	"FFF4: Data transfer underlength error"},
	"FFF4: Data transfer overlength error"},
	"3400: Logical unit failure"},
	"FFF4: Device microcode is corrupt"},
	"8150: PCI bus error"},
	"Unsupported device bus message received"},
	"FFF4: Disk device problem"},
	"8150: Permanent IOA failure"},
	"3010: Disk device returned wrong response to IOA"},
	"8151: IOA microcode error"},
	"Device bus status error"},
	"8157: IOA error requiring IOA reset to recover"},
	"Message reject received from the device"},
	"8008: A permanent cache battery pack failure occurred"},
	"9090: Disk unit has been modified after the last known status"},
	"9081: IOA detected device error"},
	"9082: IOA detected device error"},
	"3110: Device bus error, message or command phase"},
	"9091: Incorrect hardware configuration change has been detected"},
	"FFF4: Command to logical unit failed"},
	"Illegal request, invalid request type or request packet"},
	"Illegal request, invalid resource handle"},
	"Illegal request, invalid field in parameter list"},
	"Illegal request, parameter not supported"},
	"Illegal request, parameter value invalid"},
	"Illegal request, command sequence error"},
	"9031: Array protection temporarily suspended, protection resuming"},
	"9040: Array protection temporarily suspended, protection resuming"},
	"FFFB: SCSI bus was reset"},
	"FFFE: SCSI bus transition to single ended"},
	"FFFE: SCSI bus transition to LVD"},
	"FFFB: SCSI bus was reset by another initiator"},
	"3029: A device replacement has occurred"},
	"9051: IOA cache data exists for a missing or failed device"},
	"9025: Disk unit is not supported at its physical location"},
	"3020: IOA detected a SCSI bus configuration error"},
	"3150: SCSI bus configuration error"},
	"9041: Array protection temporarily suspended"},
	"9030: Array no longer protected due to missing or failed disk unit"},
	"Failure due to other device"},
	"9008: IOA does not support functions expected by devices"},
	"9010: Cache data associated with attached devices cannot be found"},
	"9011: Cache data belongs to devices other than those attached"},
	"9020: Array missing 2 or more devices with only 1 device present"},
	"9021: Array missing 2 or more devices with 2 or more devices present"},
	"9022: Exposed array is missing a required device"},
	"9023: Array member(s) not at required physical locations"},
	"9024: Array not functional due to present hardware configuration"},
	"9026: Array not functional due to present hardware configuration"},
	"9027: Array is missing a device and parity is out of sync"},
	"9028: Maximum number of arrays already exist"},
	"9050: Required cache data cannot be located for a disk unit"},
	"9052: Cache data exists for a device that has been modified"},
	"9054: IOA resources not available due to previous problems"},
	"9092: Disk unit requires initialization before use"},
	"9029: Incorrect hardware configuration change has been detected"},
	"9060: One or more disk pairs are missing from an array"},
	"9061: One or more disks are missing from an array"},
	"9062: One or more disks are missing from an array"},
	"9063: Maximum number of functional arrays has been exceeded"},
	"Aborted command, invalid descriptor"},
	"Command terminated by host"}
361 static const struct ipr_ses_table_entry ipr_ses_table[] = {
362 { "2104-DL1 ", "XXXXXXXXXXXXXXXX", 80 },
363 { "2104-TL1 ", "XXXXXXXXXXXXXXXX", 80 },
364 { "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
365 { "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
366 { "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
367 { "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
368 { "2104-DU3 ", "XXXXXXXXXXXXXXXX", 160 },
369 { "2104-TU3 ", "XXXXXXXXXXXXXXXX", 160 },
370 { "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
371 { "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
372 { "St V1S2 ", "XXXXXXXXXXXXXXXX", 160 },
373 { "HSBPD4M PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
374 { "VSBPD1H U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
/*
 *  Function Prototypes
 */
static int ipr_reset_alert(struct ipr_cmnd *);
static void ipr_process_ccn(struct ipr_cmnd *);
static void ipr_process_error(struct ipr_cmnd *);
static void ipr_reset_ioa_job(struct ipr_cmnd *);
static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
				   enum ipr_shutdown_type);
#ifdef CONFIG_SCSI_IPR_TRACE
/**
 * ipr_trc_hook - Add a trace entry to the driver trace
 * @ipr_cmd:	ipr command struct
 * @type:	trace type
 * @add_data:	additional data
 *
 * Return value:
 * 	none
 **/
static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
			 u8 type, u32 add_data)
{
	struct ipr_trace_entry *trace_entry;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	/* trace_index wraps naturally; assumes the trace array is sized so
	 * the post-increment stays in bounds -- see ipr.h (TODO confirm) */
	trace_entry = &ioa_cfg->trace[ioa_cfg->trace_index++];
	trace_entry->time = jiffies;
	trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
	trace_entry->type = type;
	trace_entry->cmd_index = ipr_cmd->cmd_index;
	trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
	trace_entry->u.add_data = add_data;
}
#else
#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while(0)
#endif
416 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
417 * @ipr_cmd: ipr command struct
422 static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
424 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
425 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
427 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
428 ioarcb->write_data_transfer_length = 0;
429 ioarcb->read_data_transfer_length = 0;
430 ioarcb->write_ioadl_len = 0;
431 ioarcb->read_ioadl_len = 0;
433 ioasa->residual_data_len = 0;
435 ipr_cmd->scsi_cmd = NULL;
436 ipr_cmd->sense_buffer[0] = 0;
437 ipr_cmd->dma_use_sg = 0;
441 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
442 * @ipr_cmd: ipr command struct
447 static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
449 ipr_reinit_ipr_cmnd(ipr_cmd);
450 ipr_cmd->u.scratch = 0;
451 ipr_cmd->sibling = NULL;
452 init_timer(&ipr_cmd->timer);
456 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
457 * @ioa_cfg: ioa config struct
460 * pointer to ipr command struct
463 struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
465 struct ipr_cmnd *ipr_cmd;
467 ipr_cmd = list_entry(ioa_cfg->free_q.next, struct ipr_cmnd, queue);
468 list_del(&ipr_cmd->queue);
469 ipr_init_ipr_cmnd(ipr_cmd);
475 * ipr_unmap_sglist - Unmap scatterlist if mapped
476 * @ioa_cfg: ioa config struct
477 * @ipr_cmd: ipr command struct
482 static void ipr_unmap_sglist(struct ipr_ioa_cfg *ioa_cfg,
483 struct ipr_cmnd *ipr_cmd)
485 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
487 if (ipr_cmd->dma_use_sg) {
488 if (scsi_cmd->use_sg > 0) {
489 pci_unmap_sg(ioa_cfg->pdev, scsi_cmd->request_buffer,
491 scsi_cmd->sc_data_direction);
493 pci_unmap_single(ioa_cfg->pdev, ipr_cmd->dma_handle,
494 scsi_cmd->request_bufflen,
495 scsi_cmd->sc_data_direction);
501 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
502 * @ioa_cfg: ioa config struct
503 * @clr_ints: interrupts to clear
505 * This function masks all interrupts on the adapter, then clears the
506 * interrupts specified in the mask
511 static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
514 volatile u32 int_reg;
516 /* Stop new interrupts */
517 ioa_cfg->allow_interrupts = 0;
519 /* Set interrupt mask to stop all new interrupts */
520 writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);
522 /* Clear any pending interrupts */
523 writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg);
524 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
528 * ipr_save_pcix_cmd_reg - Save PCI-X command register
529 * @ioa_cfg: ioa config struct
532 * 0 on success / -EIO on failure
534 static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
536 int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
538 if (pcix_cmd_reg == 0) {
539 dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
543 if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg,
544 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
545 dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
549 ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
554 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
555 * @ioa_cfg: ioa config struct
558 * 0 on success / -EIO on failure
560 static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
562 int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
565 if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg,
566 ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
567 dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
571 dev_err(&ioa_cfg->pdev->dev,
572 "Failed to setup PCI-X command register\n");
580 * ipr_scsi_eh_done - mid-layer done function for aborted ops
581 * @ipr_cmd: ipr command struct
583 * This function is invoked by the interrupt handler for
584 * ops generated by the SCSI mid-layer which are being aborted.
589 static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
591 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
592 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
594 scsi_cmd->result |= (DID_ERROR << 16);
596 ipr_unmap_sglist(ioa_cfg, ipr_cmd);
597 scsi_cmd->scsi_done(scsi_cmd);
598 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
602 * ipr_fail_all_ops - Fails all outstanding ops.
603 * @ioa_cfg: ioa config struct
605 * This function fails all outstanding ops.
610 static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
612 struct ipr_cmnd *ipr_cmd, *temp;
615 list_for_each_entry_safe(ipr_cmd, temp, &ioa_cfg->pending_q, queue) {
616 list_del(&ipr_cmd->queue);
618 ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
619 ipr_cmd->ioasa.ilid = cpu_to_be32(IPR_DRIVER_ILID);
621 if (ipr_cmd->scsi_cmd)
622 ipr_cmd->done = ipr_scsi_eh_done;
624 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, IPR_IOASC_IOA_WAS_RESET);
625 del_timer(&ipr_cmd->timer);
626 ipr_cmd->done(ipr_cmd);
633 * ipr_do_req - Send driver initiated requests.
634 * @ipr_cmd: ipr command struct
635 * @done: done function
636 * @timeout_func: timeout function
637 * @timeout: timeout value
639 * This function sends the specified command to the adapter with the
640 * timeout given. The done function is invoked on command completion.
645 static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
646 void (*done) (struct ipr_cmnd *),
647 void (*timeout_func) (struct ipr_cmnd *), u32 timeout)
649 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
651 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
653 ipr_cmd->done = done;
655 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
656 ipr_cmd->timer.expires = jiffies + timeout;
657 ipr_cmd->timer.function = (void (*)(unsigned long))timeout_func;
659 add_timer(&ipr_cmd->timer);
661 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);
664 writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
665 ioa_cfg->regs.ioarrin_reg);
669 * ipr_internal_cmd_done - Op done function for an internally generated op.
670 * @ipr_cmd: ipr command struct
672 * This function is the op done function for an internally generated,
673 * blocking op. It simply wakes the sleeping thread.
678 static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
680 if (ipr_cmd->sibling)
681 ipr_cmd->sibling = NULL;
683 complete(&ipr_cmd->completion);
687 * ipr_send_blocking_cmd - Send command and sleep on its completion.
688 * @ipr_cmd: ipr command struct
689 * @timeout_func: function to invoke if command times out
695 static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
696 void (*timeout_func) (struct ipr_cmnd *ipr_cmd),
699 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
701 init_completion(&ipr_cmd->completion);
702 ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);
704 spin_unlock_irq(ioa_cfg->host->host_lock);
705 wait_for_completion(&ipr_cmd->completion);
706 spin_lock_irq(ioa_cfg->host->host_lock);
710 * ipr_send_hcam - Send an HCAM to the adapter.
711 * @ioa_cfg: ioa config struct
713 * @hostrcb: hostrcb struct
715 * This function will send a Host Controlled Async command to the adapter.
716 * If HCAMs are currently not allowed to be issued to the adapter, it will
717 * place the hostrcb on the free queue.
722 static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
723 struct ipr_hostrcb *hostrcb)
725 struct ipr_cmnd *ipr_cmd;
726 struct ipr_ioarcb *ioarcb;
728 if (ioa_cfg->allow_cmds) {
729 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
730 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
731 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);
733 ipr_cmd->u.hostrcb = hostrcb;
734 ioarcb = &ipr_cmd->ioarcb;
736 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
737 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
738 ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
739 ioarcb->cmd_pkt.cdb[1] = type;
740 ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
741 ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;
743 ioarcb->read_data_transfer_length = cpu_to_be32(sizeof(hostrcb->hcam));
744 ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
745 ipr_cmd->ioadl[0].flags_and_data_len =
746 cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | sizeof(hostrcb->hcam));
747 ipr_cmd->ioadl[0].address = cpu_to_be32(hostrcb->hostrcb_dma);
749 if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
750 ipr_cmd->done = ipr_process_ccn;
752 ipr_cmd->done = ipr_process_error;
754 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);
757 writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
758 ioa_cfg->regs.ioarrin_reg);
760 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
765 * ipr_init_res_entry - Initialize a resource entry struct.
766 * @res: resource entry struct
771 static void ipr_init_res_entry(struct ipr_resource_entry *res)
773 res->needs_sync_complete = 1;
776 res->del_from_ml = 0;
777 res->resetting_device = 0;
779 res->qdepth = IPR_MAX_CMD_PER_LUN;
784 * ipr_handle_config_change - Handle a config change from the adapter
785 * @ioa_cfg: ioa config struct
791 static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
792 struct ipr_hostrcb *hostrcb)
794 struct ipr_resource_entry *res = NULL;
795 struct ipr_config_table_entry *cfgte;
798 cfgte = &hostrcb->hcam.u.ccn.cfgte;
800 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
801 if (!memcmp(&res->cfgte.res_addr, &cfgte->res_addr,
802 sizeof(cfgte->res_addr))) {
809 if (list_empty(&ioa_cfg->free_res_q)) {
810 ipr_send_hcam(ioa_cfg,
811 IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
816 res = list_entry(ioa_cfg->free_res_q.next,
817 struct ipr_resource_entry, queue);
819 list_del(&res->queue);
820 ipr_init_res_entry(res);
821 list_add_tail(&res->queue, &ioa_cfg->used_res_q);
824 memcpy(&res->cfgte, cfgte, sizeof(struct ipr_config_table_entry));
826 if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
828 res->sdev->hostdata = NULL;
829 res->del_from_ml = 1;
830 if (ioa_cfg->allow_ml_add_del)
831 schedule_work(&ioa_cfg->work_q);
833 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
834 } else if (!res->sdev) {
836 if (ioa_cfg->allow_ml_add_del)
837 schedule_work(&ioa_cfg->work_q);
840 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
844 * ipr_process_ccn - Op done function for a CCN.
845 * @ipr_cmd: ipr command struct
847 * This function is the op done function for a configuration
848 * change notification host controlled async from the adapter.
853 static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
855 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
856 struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
857 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
859 list_del(&hostrcb->queue);
860 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
863 if (ioasc != IPR_IOASC_IOA_WAS_RESET)
864 dev_err(&ioa_cfg->pdev->dev,
865 "Host RCB failed with IOASC: 0x%08X\n", ioasc);
867 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
869 ipr_handle_config_change(ioa_cfg, hostrcb);
874 * ipr_log_vpd - Log the passed VPD to the error log.
875 * @vpids: vendor/product id struct
876 * @serial_num: serial number string
881 static void ipr_log_vpd(struct ipr_std_inq_vpids *vpids, u8 *serial_num)
883 char buffer[max_t(int, sizeof(struct ipr_std_inq_vpids),
884 IPR_SERIAL_NUM_LEN) + 1];
886 memcpy(buffer, vpids, sizeof(struct ipr_std_inq_vpids));
887 buffer[sizeof(struct ipr_std_inq_vpids)] = '\0';
888 ipr_err("Vendor/Product ID: %s\n", buffer);
890 memcpy(buffer, serial_num, IPR_SERIAL_NUM_LEN);
891 buffer[IPR_SERIAL_NUM_LEN] = '\0';
892 ipr_err(" Serial Number: %s\n", buffer);
896 * ipr_log_cache_error - Log a cache error.
897 * @ioa_cfg: ioa config struct
898 * @hostrcb: hostrcb struct
903 static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
904 struct ipr_hostrcb *hostrcb)
906 struct ipr_hostrcb_type_02_error *error =
907 &hostrcb->hcam.u.error.u.type_02_error;
909 ipr_err("-----Current Configuration-----\n");
910 ipr_err("Cache Directory Card Information:\n");
911 ipr_log_vpd(&error->ioa_vpids, error->ioa_sn);
912 ipr_err("Adapter Card Information:\n");
913 ipr_log_vpd(&error->cfc_vpids, error->cfc_sn);
915 ipr_err("-----Expected Configuration-----\n");
916 ipr_err("Cache Directory Card Information:\n");
917 ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpids,
918 error->ioa_last_attached_to_cfc_sn);
919 ipr_err("Adapter Card Information:\n");
920 ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpids,
921 error->cfc_last_attached_to_ioa_sn);
923 ipr_err("Additional IOA Data: %08X %08X %08X\n",
924 be32_to_cpu(error->ioa_data[0]),
925 be32_to_cpu(error->ioa_data[1]),
926 be32_to_cpu(error->ioa_data[2]));
930 * ipr_log_config_error - Log a configuration error.
931 * @ioa_cfg: ioa config struct
932 * @hostrcb: hostrcb struct
937 static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
938 struct ipr_hostrcb *hostrcb)
940 int errors_logged, i;
941 struct ipr_hostrcb_device_data_entry *dev_entry;
942 struct ipr_hostrcb_type_03_error *error;
944 error = &hostrcb->hcam.u.error.u.type_03_error;
945 errors_logged = be32_to_cpu(error->errors_logged);
947 ipr_err("Device Errors Detected/Logged: %d/%d\n",
948 be32_to_cpu(error->errors_detected), errors_logged);
950 dev_entry = error->dev_entry;
952 for (i = 0; i < errors_logged; i++, dev_entry++) {
955 if (dev_entry->dev_res_addr.bus >= IPR_MAX_NUM_BUSES) {
956 ipr_err("Device %d: missing\n", i + 1);
958 ipr_err("Device %d: %d:%d:%d:%d\n", i + 1,
959 ioa_cfg->host->host_no, dev_entry->dev_res_addr.bus,
960 dev_entry->dev_res_addr.target, dev_entry->dev_res_addr.lun);
962 ipr_log_vpd(&dev_entry->dev_vpids, dev_entry->dev_sn);
964 ipr_err("-----New Device Information-----\n");
965 ipr_log_vpd(&dev_entry->new_dev_vpids, dev_entry->new_dev_sn);
967 ipr_err("Cache Directory Card Information:\n");
968 ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpids,
969 dev_entry->ioa_last_with_dev_sn);
971 ipr_err("Adapter Card Information:\n");
972 ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpids,
973 dev_entry->cfc_last_with_dev_sn);
975 ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
976 be32_to_cpu(dev_entry->ioa_data[0]),
977 be32_to_cpu(dev_entry->ioa_data[1]),
978 be32_to_cpu(dev_entry->ioa_data[2]),
979 be32_to_cpu(dev_entry->ioa_data[3]),
980 be32_to_cpu(dev_entry->ioa_data[4]));
985 * ipr_log_array_error - Log an array configuration error.
986 * @ioa_cfg: ioa config struct
987 * @hostrcb: hostrcb struct
992 static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
993 struct ipr_hostrcb *hostrcb)
996 struct ipr_hostrcb_type_04_error *error;
997 struct ipr_hostrcb_array_data_entry *array_entry;
998 u8 zero_sn[IPR_SERIAL_NUM_LEN];
1000 memset(zero_sn, '0', IPR_SERIAL_NUM_LEN);
1002 error = &hostrcb->hcam.u.error.u.type_04_error;
1006 ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1007 error->protection_level,
1008 ioa_cfg->host->host_no,
1009 error->last_func_vset_res_addr.bus,
1010 error->last_func_vset_res_addr.target,
1011 error->last_func_vset_res_addr.lun);
1015 array_entry = error->array_member;
1017 for (i = 0; i < 18; i++) {
1018 if (!memcmp(array_entry->serial_num, zero_sn, IPR_SERIAL_NUM_LEN))
1021 if (error->exposed_mode_adn == i) {
1022 ipr_err("Exposed Array Member %d:\n", i);
1024 ipr_err("Array Member %d:\n", i);
1027 ipr_log_vpd(&array_entry->vpids, array_entry->serial_num);
1029 if (array_entry->dev_res_addr.bus >= IPR_MAX_NUM_BUSES) {
1030 ipr_err("Current Location: unknown\n");
1032 ipr_err("Current Location: %d:%d:%d:%d\n",
1033 ioa_cfg->host->host_no,
1034 array_entry->dev_res_addr.bus,
1035 array_entry->dev_res_addr.target,
1036 array_entry->dev_res_addr.lun);
1039 if (array_entry->dev_res_addr.bus >= IPR_MAX_NUM_BUSES) {
1040 ipr_err("Expected Location: unknown\n");
1042 ipr_err("Expected Location: %d:%d:%d:%d\n",
1043 ioa_cfg->host->host_no,
1044 array_entry->expected_dev_res_addr.bus,
1045 array_entry->expected_dev_res_addr.target,
1046 array_entry->expected_dev_res_addr.lun);
1052 array_entry = error->array_member2;
1059 * ipr_log_generic_error - Log an adapter error.
1060 * @ioa_cfg: ioa config struct
1061 * @hostrcb: hostrcb struct
1066 static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
1067 struct ipr_hostrcb *hostrcb)
1070 int ioa_data_len = be32_to_cpu(hostrcb->hcam.length);
1072 if (ioa_data_len == 0)
1075 ipr_err("IOA Error Data:\n");
1076 ipr_err("Offset 0 1 2 3 4 5 6 7 8 9 A B C D E F\n");
1078 for (i = 0; i < ioa_data_len / 4; i += 4) {
1079 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
1080 be32_to_cpu(hostrcb->hcam.u.raw.data[i]),
1081 be32_to_cpu(hostrcb->hcam.u.raw.data[i+1]),
1082 be32_to_cpu(hostrcb->hcam.u.raw.data[i+2]),
1083 be32_to_cpu(hostrcb->hcam.u.raw.data[i+3]));
1088 * ipr_get_error - Find the specfied IOASC in the ipr_error_table.
1091 * This function will return the index of into the ipr_error_table
1092 * for the specified IOASC. If the IOASC is not in the table,
1093 * 0 will be returned, which points to the entry used for unknown errors.
1096 * index into the ipr_error_table
1098 static u32 ipr_get_error(u32 ioasc)
1102 for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
1103 if (ipr_error_table[i].ioasc == ioasc)
1110 * ipr_handle_log_data - Log an adapter error.
1111 * @ioa_cfg: ioa config struct
1112 * @hostrcb: hostrcb struct
1114 * This function logs an adapter error to the system.
1119 static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
1120 struct ipr_hostrcb *hostrcb)
1125 if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
1128 if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
1129 dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");
1131 ioasc = be32_to_cpu(hostrcb->hcam.u.error.failing_dev_ioasc);
1133 if (ioasc == IPR_IOASC_BUS_WAS_RESET ||
1134 ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER) {
1135 /* Tell the midlayer we had a bus reset so it will handle the UA properly */
1136 scsi_report_bus_reset(ioa_cfg->host,
1137 hostrcb->hcam.u.error.failing_dev_res_addr.bus);
1140 error_index = ipr_get_error(ioasc);
1142 if (!ipr_error_table[error_index].log_hcam)
1145 if (ipr_is_device(&hostrcb->hcam.u.error.failing_dev_res_addr)) {
1146 ipr_res_err(ioa_cfg, hostrcb->hcam.u.error.failing_dev_res_addr,
1147 "%s\n", ipr_error_table[error_index].error);
1149 dev_err(&ioa_cfg->pdev->dev, "%s\n",
1150 ipr_error_table[error_index].error);
1153 /* Set indication we have logged an error */
1154 ioa_cfg->errors_logged++;
1156 if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
1159 switch (hostrcb->hcam.overlay_id) {
1160 case IPR_HOST_RCB_OVERLAY_ID_1:
1161 ipr_log_generic_error(ioa_cfg, hostrcb);
1163 case IPR_HOST_RCB_OVERLAY_ID_2:
1164 ipr_log_cache_error(ioa_cfg, hostrcb);
1166 case IPR_HOST_RCB_OVERLAY_ID_3:
1167 ipr_log_config_error(ioa_cfg, hostrcb);
1169 case IPR_HOST_RCB_OVERLAY_ID_4:
1170 case IPR_HOST_RCB_OVERLAY_ID_6:
1171 ipr_log_array_error(ioa_cfg, hostrcb);
1173 case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
1174 ipr_log_generic_error(ioa_cfg, hostrcb);
1177 dev_err(&ioa_cfg->pdev->dev,
1178 "Unknown error received. Overlay ID: %d\n",
1179 hostrcb->hcam.overlay_id);
1185 * ipr_process_error - Op done function for an adapter error log.
1186 * @ipr_cmd: ipr command struct
1188 * This function is the op done function for an error log host
1189 * controlled async from the adapter. It will log the error and
1190 * send the HCAM back to the adapter.
1195 static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
1197 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1198 struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
1199 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
1201 list_del(&hostrcb->queue);
1202 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
1205 ipr_handle_log_data(ioa_cfg, hostrcb);
1206 } else if (ioasc != IPR_IOASC_IOA_WAS_RESET) {
1207 dev_err(&ioa_cfg->pdev->dev,
1208 "Host RCB failed with IOASC: 0x%08X\n", ioasc);
1211 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
1215 * ipr_timeout - An internally generated op has timed out.
1216 * @ipr_cmd: ipr command struct
1218 * This function blocks host requests and initiates an
1224 static void ipr_timeout(struct ipr_cmnd *ipr_cmd)
1226 unsigned long lock_flags = 0;
1227 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1230 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1232 ioa_cfg->errors_logged++;
1233 dev_err(&ioa_cfg->pdev->dev,
1234 "Adapter being reset due to command timeout.\n");
/* A dump was requested earlier; escalate so it actually gets collected */
1236 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
1237 ioa_cfg->sdt_state = GET_DUMP;
/* Only start a reset if none is in flight, or the in-flight reset is ours */
1239 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
1240 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
1242 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1247 * ipr_reset_reload - Reset/Reload the IOA
1248 * @ioa_cfg: ioa config struct
1249 * @shutdown_type: shutdown type
1251 * This function resets the adapter and re-initializes it.
1252 * This function assumes that all new host commands have been stopped.
1256 static int ipr_reset_reload(struct ipr_ioa_cfg *ioa_cfg,
1257 enum ipr_shutdown_type shutdown_type)
/* Kick off a reset only if one is not already in progress */
1259 if (!ioa_cfg->in_reset_reload)
1260 ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
/* Sleep without the host lock until the reset/reload completes */
1262 spin_unlock_irq(ioa_cfg->host->host_lock);
1263 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
1264 spin_lock_irq(ioa_cfg->host->host_lock);
1266 /* If we got hit with a host reset while we were already resetting
1267 the adapter for some reason, and the reset failed. */
1268 if (ioa_cfg->ioa_is_dead) {
1277 * ipr_find_ses_entry - Find matching SES in SES table
1278 * @res: resource entry struct of SES
1281 * pointer to SES table entry / NULL on failure
1283 static const struct ipr_ses_table_entry *
1284 ipr_find_ses_entry(struct ipr_resource_entry *res)
1287 const struct ipr_ses_table_entry *ste = ipr_ses_table;
/* Scan the static SES table, comparing the device's INQUIRY product ID */
1289 for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
1290 for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
/* 'X' in the compare mask: this byte must equal the table's product ID byte.
 * NOTE(review): handling of non-'X' positions is elided here — confirm */
1291 if (ste->compare_product_id_byte[j] == 'X') {
1292 if (res->cfgte.std_inq_data.vpids.product_id[j] == ste->product_id[j])
/* Every byte of the product ID matched this table entry */
1300 if (matches == IPR_PROD_ID_LEN)
1308 * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
1309 * @ioa_cfg: ioa config struct
1311 * @bus_width: bus width
1314 * SCSI bus speed in units of 100KHz, 1600 is 160 MHz
1315 * For a 2-byte wide SCSI bus, the maximum transfer speed is
1316 * twice the maximum transfer rate (e.g. for a wide enabled bus,
1317 * max 160MHz = max 320MB/sec).
1319 static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
1321 struct ipr_resource_entry *res;
1322 const struct ipr_ses_table_entry *ste;
1323 u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);
1325 /* Loop through each config table entry in the config table buffer */
1326 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
/* Only SES (enclosure) devices constrain the bus speed */
1327 if (!(IPR_IS_SES_DEVICE(res->cfgte.std_inq_data)))
/* Skip enclosures on other buses */
1330 if (bus != res->cfgte.res_addr.bus)
/* Unknown enclosure: no table entry, no limit to apply */
1333 if (!(ste = ipr_find_ses_entry(res)))
/* Apply the table's speed limit, scaled by the bus width in bytes */
1336 max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
1339 return max_xfer_rate;
1343 * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
1344 * @ioa_cfg: ioa config struct
1345 * @max_delay: max delay in micro-seconds to wait
1347 * Waits for an IODEBUG ACK from the IOA, doing busy looping.
1350 * 0 on success / other on failure
1352 static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
1354 volatile u32 pcii_reg;
1357 /* Read interrupt reg until IOA signals IO Debug Acknowledge */
1358 while (delay < max_delay) {
1359 pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
/* Ack bit set — the adapter has acknowledged */
1361 if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
1364 /* udelay cannot be used if delay is more than a few milliseconds */
1365 if ((delay / 1000) > MAX_UDELAY_MS)
1366 mdelay(delay / 1000);
1376 * ipr_get_ldump_data_section - Dump IOA memory
1377 * @ioa_cfg: ioa config struct
1378 * @start_addr: adapter address to dump
1379 * @dest: destination kernel buffer
1380 * @length_in_words: length to dump in 4 byte words
1383 * 0 on success / -EIO on failure
1385 static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
1387 u32 *dest, u32 length_in_words)
1389 volatile u32 temp_pcii_reg;
1392 /* Write IOA interrupt reg starting LDUMP state */
1393 writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
1394 ioa_cfg->regs.set_uproc_interrupt_reg);
1396 /* Wait for IO debug acknowledge */
1397 if (ipr_wait_iodbg_ack(ioa_cfg,
1398 IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
1399 dev_err(&ioa_cfg->pdev->dev,
1400 "IOA dump long data transfer timeout\n");
1404 /* Signal LDUMP interlocked - clear IO debug ack */
1405 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
1406 ioa_cfg->regs.clr_interrupt_reg);
1408 /* Write Mailbox with starting address */
1409 writel(start_addr, ioa_cfg->ioa_mailbox);
1411 /* Signal address valid - clear IOA Reset alert */
1412 writel(IPR_UPROCI_RESET_ALERT,
1413 ioa_cfg->regs.clr_uproc_interrupt_reg);
/* Transfer one 32-bit word per IO-debug-ack handshake */
1415 for (i = 0; i < length_in_words; i++) {
1416 /* Wait for IO debug acknowledge */
1417 if (ipr_wait_iodbg_ack(ioa_cfg,
1418 IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
1419 dev_err(&ioa_cfg->pdev->dev,
1420 "IOA dump short data transfer timeout\n");
1424 /* Read data from mailbox and increment destination pointer */
1425 *dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
1428 /* For all but the last word of data, signal data received */
1429 if (i < (length_in_words - 1)) {
1430 /* Signal dump data received - Clear IO debug Ack */
1431 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
1432 ioa_cfg->regs.clr_interrupt_reg);
1436 /* Signal end of block transfer. Set reset alert then clear IO debug ack */
1437 writel(IPR_UPROCI_RESET_ALERT,
1438 ioa_cfg->regs.set_uproc_interrupt_reg);
1440 writel(IPR_UPROCI_IO_DEBUG_ALERT,
1441 ioa_cfg->regs.clr_uproc_interrupt_reg);
1443 /* Signal dump data received - Clear IO debug Ack */
1444 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
1445 ioa_cfg->regs.clr_interrupt_reg);
1447 /* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
1448 while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
1450 readl(ioa_cfg->regs.sense_uproc_interrupt_reg);
1452 if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
1462 #ifdef CONFIG_SCSI_IPR_DUMP
1464 * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
1465 * @ioa_cfg: ioa config struct
1466 * @pci_address: adapter address
1467 * @length: length of data to copy
1469 * Copy data from PCI adapter to kernel buffer.
1470 * Note: length MUST be a 4 byte multiple
1472 * 0 on success / other on failure
1474 static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
1475 unsigned long pci_address, u32 length)
1477 int bytes_copied = 0;
1478 int cur_len, rc, rem_len, rem_page_len;
1480 unsigned long lock_flags = 0;
1481 struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;
/* Fill page-sized kernel buffers until the length or the dump cap is reached */
1483 while (bytes_copied < length &&
1484 (ioa_dump->hdr.len + bytes_copied) < IPR_MAX_IOA_DUMP_SIZE) {
/* Current page exhausted (or none yet): grab a fresh page */
1485 if (ioa_dump->page_offset >= PAGE_SIZE ||
1486 ioa_dump->page_offset == 0) {
1487 page = (u32 *)__get_free_page(GFP_ATOMIC);
/* Allocation failed: return what was copied so far */
1491 return bytes_copied;
1494 ioa_dump->page_offset = 0;
1495 ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
1496 ioa_dump->next_page_index++;
1498 page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];
/* Copy up to the end of the request or the end of the page, whichever is less */
1500 rem_len = length - bytes_copied;
1501 rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
1502 cur_len = min(rem_len, rem_page_len);
/* Take the host lock per chunk so an abort request can interleave */
1504 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1505 if (ioa_cfg->sdt_state == ABORT_DUMP) {
1508 rc = ipr_get_ldump_data_section(ioa_cfg,
1509 pci_address + bytes_copied,
1510 &page[ioa_dump->page_offset / 4],
1511 (cur_len / sizeof(u32)));
1513 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1516 ioa_dump->page_offset += cur_len;
1517 bytes_copied += cur_len;
1525 return bytes_copied;
1529 * ipr_init_dump_entry_hdr - Initialize a dump entry header.
1530 * @hdr: dump entry header struct
1535 static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
/* Stamp the eye catcher, record where entry data begins, and mark it good */
1537 hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
1539 hdr->offset = sizeof(*hdr);
1540 hdr->status = IPR_DUMP_STATUS_SUCCESS;
1544 * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
1545 * @ioa_cfg: ioa config struct
1546 * @driver_dump: driver dump struct
1551 static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
1552 struct ipr_driver_dump *driver_dump)
1554 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
1556 ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
1557 driver_dump->ioa_type_entry.hdr.len =
1558 sizeof(struct ipr_dump_ioa_type_entry) -
1559 sizeof(struct ipr_dump_entry_header);
1560 driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
1561 driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
1562 driver_dump->ioa_type_entry.type = ioa_cfg->type;
/* Pack major release, card type, and the two minor bytes into one 32-bit word */
1563 driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
1564 (ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
1565 ucode_vpd->minor_release[1];
1566 driver_dump->hdr.num_entries++;
1570 * ipr_dump_version_data - Fill in the driver version in the dump.
1571 * @ioa_cfg: ioa config struct
1572 * @driver_dump: driver dump struct
1577 static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
1578 struct ipr_driver_dump *driver_dump)
/* ASCII dump entry carrying the driver version string */
1580 ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
1581 driver_dump->version_entry.hdr.len =
1582 sizeof(struct ipr_dump_version_entry) -
1583 sizeof(struct ipr_dump_entry_header);
1584 driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
1585 driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
1586 strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
1587 driver_dump->hdr.num_entries++;
1591 * ipr_dump_trace_data - Fill in the IOA trace in the dump.
1592 * @ioa_cfg: ioa config struct
1593 * @driver_dump: driver dump struct
1598 static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
1599 struct ipr_driver_dump *driver_dump)
1601 ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
1602 driver_dump->trace_entry.hdr.len =
1603 sizeof(struct ipr_dump_trace_entry) -
1604 sizeof(struct ipr_dump_entry_header);
1605 driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
1606 driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
/* Snapshot the driver's trace buffer into the dump entry */
1607 memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
1608 driver_dump->hdr.num_entries++;
1612 * ipr_dump_location_data - Fill in the IOA location in the dump.
1613 * @ioa_cfg: ioa config struct
1614 * @driver_dump: driver dump struct
1619 static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
1620 struct ipr_driver_dump *driver_dump)
1622 ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
1623 driver_dump->location_entry.hdr.len =
1624 sizeof(struct ipr_dump_location_entry) -
1625 sizeof(struct ipr_dump_entry_header);
1626 driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
1627 driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
/* The PCI device's bus_id identifies which adapter this dump came from */
1628 strcpy(driver_dump->location_entry.location, ioa_cfg->pdev->dev.bus_id);
1629 driver_dump->hdr.num_entries++;
1633 * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
1634 * @ioa_cfg: ioa config struct
1635 * @dump: dump struct
1640 static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
1642 unsigned long start_addr, sdt_word;
1643 unsigned long lock_flags = 0;
1644 struct ipr_driver_dump *driver_dump = &dump->driver_dump;
1645 struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
1646 u32 num_entries, start_off, end_off;
1647 u32 bytes_to_copy, bytes_copied, rc;
1648 struct ipr_sdt *sdt;
1653 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
/* Bail if a dump is no longer wanted (state changed under us) */
1655 if (ioa_cfg->sdt_state != GET_DUMP) {
1656 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* The mailbox holds the Smart Dump Table address once the IOA is ready */
1660 start_addr = readl(ioa_cfg->ioa_mailbox);
1662 if (!ipr_sdt_is_fmt2(start_addr)) {
1663 dev_err(&ioa_cfg->pdev->dev,
1664 "Invalid dump table format: %lx\n", start_addr);
1665 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1669 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");
1671 driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;
1673 /* Initialize the overall dump header */
1674 driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
1675 driver_dump->hdr.num_entries = 1;
1676 driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
1677 driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
1678 driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
1679 driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;
/* Driver-side entries: version, location, adapter type, trace buffer */
1681 ipr_dump_version_data(ioa_cfg, driver_dump);
1682 ipr_dump_location_data(ioa_cfg, driver_dump);
1683 ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
1684 ipr_dump_trace_data(ioa_cfg, driver_dump);
1686 /* Update dump_header */
1687 driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);
1689 /* IOA Dump entry */
1690 ipr_init_dump_entry_hdr(&ioa_dump->hdr);
1691 ioa_dump->format = IPR_SDT_FMT2;
1692 ioa_dump->hdr.len = 0;
1693 ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
1694 ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;
1696 /* First entries in sdt are actually a list of dump addresses and
1697 lengths to gather the real dump data. sdt represents the pointer
1698 to the ioa generated dump table. Dump data will be extracted based
1699 on entries in this table */
1700 sdt = &ioa_dump->sdt;
1702 rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (u32 *)sdt,
1703 sizeof(struct ipr_sdt) / sizeof(u32));
1705 /* Smart Dump table is ready to use and the first entry is valid */
1706 if (rc || (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE)) {
1707 dev_err(&ioa_cfg->pdev->dev,
1708 "Dump of IOA failed. Dump table not valid: %d, %X.\n",
1709 rc, be32_to_cpu(sdt->hdr.state));
1710 driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
1711 ioa_cfg->sdt_state = DUMP_OBTAINED;
1712 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1716 num_entries = be32_to_cpu(sdt->hdr.num_entries_used);
/* Clamp to what the driver's table can hold */
1718 if (num_entries > IPR_NUM_SDT_ENTRIES)
1719 num_entries = IPR_NUM_SDT_ENTRIES;
1721 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Walk each SDT entry and pull its address range out of the IOA */
1723 for (i = 0; i < num_entries; i++) {
1724 if (ioa_dump->hdr.len > IPR_MAX_IOA_DUMP_SIZE) {
1725 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
1729 if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
1730 sdt_word = be32_to_cpu(sdt->entry[i].bar_str_offset);
1731 start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
1732 end_off = be32_to_cpu(sdt->entry[i].end_offset);
1734 if (ipr_sdt_is_fmt2(sdt_word) && sdt_word) {
1735 bytes_to_copy = end_off - start_off;
/* Oversized section: mark invalid and skip rather than overflow the dump */
1736 if (bytes_to_copy > IPR_MAX_IOA_DUMP_SIZE) {
1737 sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
1741 /* Copy data from adapter to driver buffers */
1742 bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
1745 ioa_dump->hdr.len += bytes_copied;
/* Short copy means the dump aborted or ran out of memory */
1747 if (bytes_copied != bytes_to_copy) {
1748 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
1755 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");
1757 /* Update dump_header */
1758 driver_dump->hdr.len += ioa_dump->hdr.len;
1760 ioa_cfg->sdt_state = DUMP_OBTAINED;
/* Dump support compiled out: stub the dump entry point to a no-op */
1765 #define ipr_get_ioa_dump(ioa_cfg, dump) do { } while(0)
1769 * ipr_worker_thread - Worker thread
1770 * @data: ioa config struct
1772 * Called at task level from a work thread. This function takes care
1773 * of adding and removing device from the mid-layer as configuration
1774 * changes are detected by the adapter.
1779 static void ipr_worker_thread(void *data)
1781 unsigned long lock_flags;
1782 struct ipr_resource_entry *res;
1783 struct scsi_device *sdev;
1784 struct ipr_dump *dump;
1785 struct ipr_ioa_cfg *ioa_cfg = data;
1786 u8 bus, target, lun;
1790 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
/* Dump requested: pin the dump object, collect it outside the lock,
 * then reset the IOA once the dump has been obtained */
1792 if (ioa_cfg->sdt_state == GET_DUMP) {
1793 dump = ioa_cfg->dump;
1794 if (!dump || !kobject_get(&dump->kobj)) {
1795 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1798 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1799 ipr_get_ioa_dump(ioa_cfg, dump);
1800 kobject_put(&dump->kobj);
1802 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1803 if (ioa_cfg->sdt_state == DUMP_OBTAINED)
1804 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
1805 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Midlayer add/remove is not allowed right now; try again later */
1812 if (!ioa_cfg->allow_cmds || !ioa_cfg->allow_ml_add_del) {
1813 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* First pass: remove devices flagged for deletion (lock dropped
 * around scsi_remove_device, so the list walk restarts) */
1817 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
1818 if (res->del_from_ml && res->sdev) {
1821 if (!scsi_device_get(sdev)) {
1823 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
1824 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1825 scsi_remove_device(sdev);
1826 scsi_device_put(sdev);
1827 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
/* Second pass: surface newly detected devices to the SCSI midlayer */
1834 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
1835 if (res->add_to_ml) {
1836 bus = res->cfgte.res_addr.bus;
1837 target = res->cfgte.res_addr.target;
1838 lun = res->cfgte.res_addr.lun;
1839 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1840 scsi_add_device(ioa_cfg->host, bus, target, lun);
1841 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1846 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1850 #ifdef CONFIG_SCSI_IPR_TRACE
1852 * ipr_read_trace - Dump the adapter trace
1853 * @kobj: kobject struct
1856 * @count: buffer size
1859 * number of bytes printed to buffer
1861 static ssize_t ipr_read_trace(struct kobject *kobj, char *buf,
1862 loff_t off, size_t count)
1864 struct class_device *cdev = container_of(kobj,struct class_device,kobj);
1865 struct Scsi_Host *shost = class_to_shost(cdev);
1866 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
1867 unsigned long lock_flags = 0;
1868 int size = IPR_TRACE_SIZE;
1869 char *src = (char *)ioa_cfg->trace;
/* Clamp the read so it never runs past the end of the trace buffer */
1873 if (off + count > size) {
/* Copy under the host lock so the trace is not mutated mid-read */
1878 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1879 memcpy(buf, &src[off], count);
1880 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* sysfs binary attribute exposing the adapter trace buffer (read-only) */
1884 static struct bin_attribute ipr_trace_attr = {
1890 .read = ipr_read_trace,
1895 * ipr_show_fw_version - Show the firmware version
1896 * @class_dev: class device struct
1900 * number of bytes printed to buffer
1902 static ssize_t ipr_show_fw_version(struct class_device *class_dev, char *buf)
1904 struct Scsi_Host *shost = class_to_shost(class_dev);
1905 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
1906 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
1907 unsigned long lock_flags = 0;
1910 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
/* Eight hex digits: major release, card type, then the two minor bytes */
1911 len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
1912 ucode_vpd->major_release, ucode_vpd->card_type,
1913 ucode_vpd->minor_release[0],
1914 ucode_vpd->minor_release[1]);
1915 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* sysfs "fw_version" attribute (read-only) */
1919 static struct class_device_attribute ipr_fw_version_attr = {
1921 .name = "fw_version",
1924 .show = ipr_show_fw_version,
1928 * ipr_show_log_level - Show the adapter's error logging level
1929 * @class_dev: class device struct
1933 * number of bytes printed to buffer
1935 static ssize_t ipr_show_log_level(struct class_device *class_dev, char *buf)
1937 struct Scsi_Host *shost = class_to_shost(class_dev);
1938 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
1939 unsigned long lock_flags = 0;
/* Read log_level under the host lock for a consistent snapshot */
1942 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1943 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
1944 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1949 * ipr_store_log_level - Change the adapter's error logging level
1950 * @class_dev: class device struct
1954 * number of bytes printed to buffer
1956 static ssize_t ipr_store_log_level(struct class_device *class_dev,
1957 const char *buf, size_t count)
1959 struct Scsi_Host *shost = class_to_shost(class_dev);
1960 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
1961 unsigned long lock_flags = 0;
/* Parse the decimal level from the sysfs write and store it atomically
 * with respect to readers holding the host lock */
1963 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1964 ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
1965 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* sysfs "log_level" attribute: readable by all, writable by root */
1969 static struct class_device_attribute ipr_log_level_attr = {
1971 .name = "log_level",
1972 .mode = S_IRUGO | S_IWUSR,
1974 .show = ipr_show_log_level,
1975 .store = ipr_store_log_level
1979 * ipr_store_diagnostics - IOA Diagnostics interface
1980 * @class_dev: class_device struct
1982 * @count: buffer size
1984 * This function will reset the adapter and wait a reasonable
1985 * amount of time for any errors that the adapter might log.
1988 * count on success / other on failure
1990 static ssize_t ipr_store_diagnostics(struct class_device *class_dev,
1991 const char *buf, size_t count)
1993 struct Scsi_Host *shost = class_to_shost(class_dev);
1994 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
1995 unsigned long lock_flags = 0;
/* Diagnostics can reset the adapter, so require admin privilege */
1998 if (!capable(CAP_SYS_ADMIN))
/* Wait for any in-flight reset before starting a fresh one */
2001 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2002 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2003 ioa_cfg->errors_logged = 0;
2004 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2006 if (ioa_cfg->in_reset_reload) {
2007 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2008 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2010 /* Wait for a second for any errors to be logged */
2011 schedule_timeout(HZ);
2013 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2017 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
/* Diagnostics fail if new errors were logged or another reset started */
2018 if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
2020 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* sysfs "run_diagnostics" attribute (write-only trigger) */
2025 static struct class_device_attribute ipr_diagnostics_attr = {
2027 .name = "run_diagnostics",
2030 .store = ipr_store_diagnostics
2034 * ipr_store_reset_adapter - Reset the adapter
2035 * @class_dev: class_device struct
2037 * @count: buffer size
2039 * This function will reset the adapter.
2042 * count on success / other on failure
2044 static ssize_t ipr_store_reset_adapter(struct class_device *class_dev,
2045 const char *buf, size_t count)
2047 struct Scsi_Host *shost = class_to_shost(class_dev);
2048 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2049 unsigned long lock_flags;
/* Resetting the adapter is privileged */
2052 if (!capable(CAP_SYS_ADMIN))
/* Only kick off a reset if none is already in progress, then
 * sleep (lock released) until it completes */
2055 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2056 if (!ioa_cfg->in_reset_reload)
2057 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2058 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2059 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
/* sysfs "reset_host" attribute (write-only trigger) */
2064 static struct class_device_attribute ipr_ioa_reset_attr = {
2066 .name = "reset_host",
2069 .store = ipr_store_reset_adapter
2073 * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
2074 * @buf_len: buffer length
2076 * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
2077 * list to use for microcode download
2080 * pointer to sglist / NULL on failure
2082 static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
2084 int sg_size, order, bsize_elem, num_elem, i, j;
2085 struct ipr_sglist *sglist;
2086 struct scatterlist *scatterlist;
2089 /* Get the minimum size per scatter/gather element */
2090 sg_size = buf_len / (IPR_MAX_SGLIST - 1);
2092 /* Get the actual size per element */
2093 order = get_order(sg_size);
2095 /* Determine the actual number of bytes per element */
2096 bsize_elem = PAGE_SIZE * (1 << order);
2098 /* Determine the actual number of sg entries needed */
2099 if (buf_len % bsize_elem)
2100 num_elem = (buf_len / bsize_elem) + 1;
2102 num_elem = buf_len / bsize_elem;
2104 /* Allocate a scatter/gather list for the DMA */
/* (num_elem - 1): struct ipr_sglist already embeds one scatterlist entry */
2105 sglist = kmalloc(sizeof(struct ipr_sglist) +
2106 (sizeof(struct scatterlist) * (num_elem - 1)),
2109 if (sglist == NULL) {
2114 memset(sglist, 0, sizeof(struct ipr_sglist) +
2115 (sizeof(struct scatterlist) * (num_elem - 1)));
2117 scatterlist = sglist->scatterlist;
2119 sglist->order = order;
2120 sglist->num_sg = num_elem;
2122 /* Allocate a bunch of sg elements */
2123 for (i = 0; i < num_elem; i++) {
2124 page = alloc_pages(GFP_KERNEL, order);
2128 /* Free up what we already allocated */
2129 for (j = i - 1; j >= 0; j--)
2130 __free_pages(scatterlist[j].page, order);
2135 scatterlist[i].page = page;
2142 * ipr_free_ucode_buffer - Frees a microcode download buffer
2143 * @p_dnld: scatter/gather list pointer
2145 * Free a DMA'able ucode download buffer previously allocated with
2146 * ipr_alloc_ucode_buffer
2151 static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
/* Release each page run; NOTE(review): the kfree of sglist itself is
 * elided from this view — confirm it follows the loop */
2155 for (i = 0; i < sglist->num_sg; i++)
2156 __free_pages(sglist->scatterlist[i].page, sglist->order);
2162 * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
2163 * @sglist: scatter/gather list pointer
2164 * @buffer: buffer pointer
2165 * @len: buffer length
2167 * Copy a microcode image from a user buffer into a buffer allocated by
2168 * ipr_alloc_ucode_buffer
2171 * 0 on success / other on failure
2173 static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
2174 u8 *buffer, u32 len)
2176 int bsize_elem, i, result = 0;
2177 struct scatterlist *scatterlist;
2180 /* Determine the actual number of bytes per element */
2181 bsize_elem = PAGE_SIZE * (1 << sglist->order);
2183 scatterlist = sglist->scatterlist;
/* Copy full elements, mapping each page run into kernel address space */
2185 for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
2186 kaddr = kmap(scatterlist[i].page);
2187 memcpy(kaddr, buffer, bsize_elem);
2188 kunmap(scatterlist[i].page);
2190 scatterlist[i].length = bsize_elem;
/* Copy the final partial element, if any */
2198 if (len % bsize_elem) {
2199 kaddr = kmap(scatterlist[i].page);
2200 memcpy(kaddr, buffer, len % bsize_elem);
2201 kunmap(scatterlist[i].page);
2203 scatterlist[i].length = len % bsize_elem;
2206 sglist->buffer_len = len;
2211 * ipr_map_ucode_buffer - Map a microcode download buffer
2212 * @ipr_cmd: ipr command struct
2213 * @sglist: scatter/gather list
2214 * @len: total length of download buffer
2216 * Maps a microcode download scatter/gather list for DMA and
2220 * 0 on success / -EIO on failure
2222 static int ipr_map_ucode_buffer(struct ipr_cmnd *ipr_cmd,
2223 struct ipr_sglist *sglist, int len)
2225 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2226 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
2227 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
2228 struct scatterlist *scatterlist = sglist->scatterlist;
/* Map the whole list for device-bound DMA */
2231 ipr_cmd->dma_use_sg = pci_map_sg(ioa_cfg->pdev, scatterlist,
2232 sglist->num_sg, DMA_TO_DEVICE);
2234 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
2235 ioarcb->write_data_transfer_length = cpu_to_be32(len);
2236 ioarcb->write_ioadl_len =
2237 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
/* Build one IOADL descriptor per mapped scatter/gather entry */
2239 for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
2240 ioadl[i].flags_and_data_len =
2241 cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
2243 cpu_to_be32(sg_dma_address(&scatterlist[i]));
/* Mark the final descriptor; dma_use_sg == 0 means the mapping failed */
2246 if (likely(ipr_cmd->dma_use_sg)) {
2247 ioadl[i-1].flags_and_data_len |=
2248 cpu_to_be32(IPR_IOADL_FLAGS_LAST);
2251 dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
2259 * ipr_store_update_fw - Update the firmware on the adapter
2260 * @class_dev: class_device struct
2262 * @count: buffer size
2264 * This function will update the firmware on the adapter.
2267 * count on success / other on failure
2269 static ssize_t ipr_store_update_fw(struct class_device *class_dev,
2270 const char *buf, size_t count)
2272 struct Scsi_Host *shost = class_to_shost(class_dev);
2273 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2274 struct ipr_ucode_image_header *image_hdr;
2275 const struct firmware *fw_entry;
2276 struct ipr_sglist *sglist;
2277 unsigned long lock_flags;
2280 int len, result, dnld_size;
/* Firmware update is privileged */
2282 if (!capable(CAP_SYS_ADMIN))
/* Copy the filename and strip the last char (presumably the trailing
 * '\n' from a sysfs echo — confirm).
 * NOTE(review): snprintf returns the untruncated length; if buf is
 * longer than the fname buffer, fname[len-1] indexes out of bounds —
 * confirm buf is bounded or clamp len to the buffer size. */
2285 len = snprintf(fname, 99, "%s", buf);
2286 fname[len-1] = '\0';
2288 if(request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
2289 dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
2293 image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;
/* Sanity: header must fit in the image, and the card type must match
 * this adapter (when the adapter reports one) */
2295 if (be32_to_cpu(image_hdr->header_length) > fw_entry->size ||
2296 (ioa_cfg->vpd_cbs->page3_data.card_type &&
2297 ioa_cfg->vpd_cbs->page3_data.card_type != image_hdr->card_type)) {
2298 dev_err(&ioa_cfg->pdev->dev, "Invalid microcode buffer\n");
2299 release_firmware(fw_entry);
/* Payload follows the image header */
2303 src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
2304 dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
2305 sglist = ipr_alloc_ucode_buffer(dnld_size);
2308 dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
2309 release_firmware(fw_entry);
2313 result = ipr_copy_ucode_buffer(sglist, src, dnld_size);
2316 dev_err(&ioa_cfg->pdev->dev,
2317 "Microcode buffer copy to DMA buffer failed\n");
2318 ipr_free_ucode_buffer(sglist);
2319 release_firmware(fw_entry);
2323 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
/* Only one download may be staged at a time */
2325 if (ioa_cfg->ucode_sglist) {
2326 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2327 dev_err(&ioa_cfg->pdev->dev,
2328 "Microcode download already in progress\n");
2329 ipr_free_ucode_buffer(sglist);
2330 release_firmware(fw_entry);
/* Stage the buffer, reset the adapter (which performs the download),
 * and wait for the reset/reload to finish */
2334 ioa_cfg->ucode_sglist = sglist;
2335 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2336 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2337 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2339 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2340 ioa_cfg->ucode_sglist = NULL;
2341 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2343 ipr_free_ucode_buffer(sglist);
2344 release_firmware(fw_entry);
/* sysfs "update_fw" attribute (write-only trigger) */
2349 static struct class_device_attribute ipr_update_fw_attr = {
2351 .name = "update_fw",
2354 .store = ipr_store_update_fw
/* All per-adapter sysfs attributes registered with the SCSI host */
2357 static struct class_device_attribute *ipr_ioa_attrs[] = {
2358 &ipr_fw_version_attr,
2359 &ipr_log_level_attr,
2360 &ipr_diagnostics_attr,
2361 &ipr_ioa_reset_attr,
2362 &ipr_update_fw_attr,
2366 #ifdef CONFIG_SCSI_IPR_DUMP
2368 * ipr_read_dump - Dump the adapter
2369 * @kobj: kobject struct
2372 * @count: buffer size
2375 * number of bytes printed to buffer
2377 static ssize_t ipr_read_dump(struct kobject *kobj, char *buf,
2378 loff_t off, size_t count)
2380 struct class_device *cdev = container_of(kobj,struct class_device,kobj);
2381 struct Scsi_Host *shost = class_to_shost(cdev);
2382 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2383 struct ipr_dump *dump;
2384 unsigned long lock_flags = 0;
/* Dump data may contain sensitive adapter state; require admin */
2389 if (!capable(CAP_SYS_ADMIN))
/* Pin the dump object under the lock so it cannot be freed mid-read */
2392 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2393 dump = ioa_cfg->dump;
2395 if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump || !kobject_get(&dump->kobj)) {
2396 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2400 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Reads past the end return nothing */
2402 if (off > dump->driver_dump.hdr.len) {
2403 kobject_put(&dump->kobj);
2407 if (off + count > dump->driver_dump.hdr.len) {
2408 count = dump->driver_dump.hdr.len - off;
/* Stage 1: copy from the driver dump header/entries */
2412 if (count && off < sizeof(dump->driver_dump)) {
2413 if (off + count > sizeof(dump->driver_dump))
2414 len = sizeof(dump->driver_dump) - off;
2417 src = (u8 *)&dump->driver_dump + off;
2418 memcpy(buf, src, len);
2424 off -= sizeof(dump->driver_dump);
/* Stage 2: copy from the IOA dump header (everything before ioa_data) */
2426 if (count && off < offsetof(struct ipr_ioa_dump, ioa_data)) {
2427 if (off + count > offsetof(struct ipr_ioa_dump, ioa_data))
2428 len = offsetof(struct ipr_ioa_dump, ioa_data) - off;
2431 src = (u8 *)&dump->ioa_dump + off;
2432 memcpy(buf, src, len);
2438 off -= offsetof(struct ipr_ioa_dump, ioa_data);
/* Stage 3: copy from the page array, at most to the next page boundary
 * per iteration since the pages are not contiguous */
2441 if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
2442 len = PAGE_ALIGN(off) - off;
2445 src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
2446 src += off & ~PAGE_MASK;
2447 memcpy(buf, src, len);
/* Drop our reference; release callback frees the dump when last ref goes */
2453 kobject_put(&dump->kobj);
2458 * ipr_release_dump - Free adapter dump memory
2459 * @kobj: kobject struct
2464 static void ipr_release_dump(struct kobject *kobj)
2466 struct ipr_dump *dump = container_of(kobj,struct ipr_dump,kobj);
2467 struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
2468 unsigned long lock_flags = 0;
/* Detach the dump from the adapter config before freeing its memory */
2472 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2473 ioa_cfg->dump = NULL;
2474 ioa_cfg->sdt_state = INACTIVE;
2475 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Free every page the dump collected */
2477 for (i = 0; i < dump->ioa_dump.next_page_index; i++)
2478 free_page((unsigned long) dump->ioa_dump.ioa_data[i]);
/* kobject type for dump objects; release runs when the last ref drops */
2484 static struct kobj_type ipr_dump_kobj_type = {
2485 .release = ipr_release_dump,
2489 * ipr_alloc_dump - Prepare for adapter dump
2490 * @ioa_cfg: ioa config struct
2493 * 0 on success / other on failure
2495 static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
2497 struct ipr_dump *dump;
2498 unsigned long lock_flags = 0;
2501 dump = kmalloc(sizeof(struct ipr_dump), GFP_KERNEL);
2504 ipr_err("Dump memory allocation failed\n");
/* Refcounted via kobject so readers can pin it; freed by ipr_release_dump */
2508 memset(dump, 0, sizeof(struct ipr_dump));
2509 kobject_init(&dump->kobj);
2510 dump->kobj.ktype = &ipr_dump_kobj_type;
2511 dump->ioa_cfg = ioa_cfg;
2513 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
/* A dump is already active or pending; do not replace it */
2515 if (INACTIVE != ioa_cfg->sdt_state) {
2516 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2521 ioa_cfg->dump = dump;
2522 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
/* Dead adapter: collect the dump right away via the worker thread */
2523 if (ioa_cfg->ioa_is_dead && !ioa_cfg->dump_taken) {
2524 ioa_cfg->dump_taken = 1;
2525 schedule_work(&ioa_cfg->work_q);
2527 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2534 * ipr_free_dump - Free adapter dump memory
2535 * @ioa_cfg: ioa config struct
2538 * 0 on success / other on failure
/*
 * Detaches the current dump from the adapter under the host lock and
 * drops the reference; ipr_release_dump frees the memory on final put.
 */
2540 static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
2542 struct ipr_dump *dump;
2543 unsigned long lock_flags = 0;
2547 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2548 dump = ioa_cfg->dump;
2550 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2554 ioa_cfg->dump = NULL;
2555 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* May free the dump here if no reader holds a reference. */
2557 kobject_put(&dump->kobj);
2564 * ipr_write_dump - Setup dump state of adapter
2565 * @kobj: kobject struct
2568 * @count: buffer size
2571 * number of bytes printed to buffer
/*
 * sysfs write handler for the "dump" binary attribute: writing '1'
 * arms a dump (ipr_alloc_dump), '0' releases it (ipr_free_dump).
 * Requires CAP_SYS_ADMIN.
 */
2573 static ssize_t ipr_write_dump(struct kobject *kobj, char *buf,
2574 loff_t off, size_t count)
2576 struct class_device *cdev = container_of(kobj,struct class_device,kobj);
2577 struct Scsi_Host *shost = class_to_shost(cdev);
2578 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2581 if (!capable(CAP_SYS_ADMIN))
2585 rc = ipr_alloc_dump(ioa_cfg);
2586 else if (buf[0] == '0')
2587 rc = ipr_free_dump(ioa_cfg);
/* Binary sysfs attribute exposing the adapter dump (read) and dump
 * control (write) to user space, owner read/write only. */
2597 static struct bin_attribute ipr_dump_attr = {
2600 .mode = S_IRUSR | S_IWUSR,
2603 .read = ipr_read_dump,
2604 .write = ipr_write_dump
/* No-op stub used when dump support is compiled out.
 * NOTE(review): the trailing ';' after the function body is redundant. */
2607 static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; };
2611 * ipr_store_queue_depth - Change the device's queue depth
2612 * @dev: device struct
2616 * number of bytes printed to buffer
/*
 * sysfs store: parses a decimal queue depth, records it in the resource
 * entry under the host lock, then applies it to the scsi_device.
 * Ordered tagging is used when the device is generic SCSI with TCQ on.
 */
2618 static ssize_t ipr_store_queue_depth(struct device *dev,
2619 const char *buf, size_t count)
2621 struct scsi_device *sdev = to_scsi_device(dev);
2622 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
2623 struct ipr_resource_entry *res;
2624 int qdepth = simple_strtoul(buf, NULL, 10);
2626 unsigned long lock_flags = 0;
2627 ssize_t len = -ENXIO;
2629 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2630 res = (struct ipr_resource_entry *)sdev->hostdata;
2632 res->qdepth = qdepth;
2634 if (ipr_is_gscsi(res) && res->tcq_active)
2635 tagged = MSG_ORDERED_TAG;
/* Apply outside the lock; scsi_adjust_queue_depth may not be called
 * with the host lock held. */
2640 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2641 scsi_adjust_queue_depth(sdev, tagged, qdepth);
/* Per-device "queue_depth" sysfs attribute, owner read/write. */
2645 static struct device_attribute ipr_queue_depth_attr = {
2647 .name = "queue_depth",
2648 .mode = S_IRUSR | S_IWUSR,
2650 .store = ipr_store_queue_depth
2654 * ipr_show_tcq_enable - Show if the device is enabled for tcqing
2655 * @dev: device struct
2659 * number of bytes printed to buffer
/*
 * sysfs show: prints res->tcq_active (0/1) under the host lock;
 * returns -ENXIO if the device has no resource entry.
 */
2661 static ssize_t ipr_show_tcq_enable(struct device *dev, char *buf)
2663 struct scsi_device *sdev = to_scsi_device(dev);
2664 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
2665 struct ipr_resource_entry *res;
2666 unsigned long lock_flags = 0;
2667 ssize_t len = -ENXIO;
2669 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2670 res = (struct ipr_resource_entry *)sdev->hostdata;
2672 len = snprintf(buf, PAGE_SIZE, "%d\n", res->tcq_active);
2673 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2678 * ipr_store_tcq_enable - Change the device's TCQing state
2679 * @dev: device struct
2683 * number of bytes printed to buffer
/*
 * sysfs store: enables or disables tagged command queuing for the
 * device. Enabling requires a generic SCSI resource whose device
 * reports tagged_supported; otherwise the request is rejected.
 */
2685 static ssize_t ipr_store_tcq_enable(struct device *dev,
2686 const char *buf, size_t count)
2688 struct scsi_device *sdev = to_scsi_device(dev);
2689 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
2690 struct ipr_resource_entry *res;
2691 unsigned long lock_flags = 0;
2692 int tcq_active = simple_strtoul(buf, NULL, 10);
2693 int qdepth = IPR_MAX_CMD_PER_LUN;
2695 ssize_t len = -ENXIO;
2697 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2699 res = (struct ipr_resource_entry *)sdev->hostdata;
/* Disable path: revert to the resource's stored queue depth. */
2702 res->tcq_active = 0;
2703 qdepth = res->qdepth;
2705 if (ipr_is_gscsi(res) && sdev->tagged_supported) {
2707 tagged = MSG_ORDERED_TAG;
2708 res->tcq_active = 1;
2712 } else if (tcq_active) {
/* Apply outside the host lock. */
2717 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2718 scsi_adjust_queue_depth(sdev, tagged, qdepth);
/* Per-device "tcq_enable" sysfs attribute, owner read/write. */
2722 static struct device_attribute ipr_tcqing_attr = {
2724 .name = "tcq_enable",
2725 .mode = S_IRUSR | S_IWUSR,
2727 .store = ipr_store_tcq_enable,
2728 .show = ipr_show_tcq_enable
2732 * ipr_show_adapter_handle - Show the adapter's resource handle for this device
2733 * @dev: device struct
2737 * number of bytes printed to buffer
/*
 * sysfs show: prints the raw resource handle from the config table
 * entry. NOTE(review): res_handle is printed without be32_to_cpu while
 * other users of it byte-swap — confirm the intended byte order here.
 */
2739 static ssize_t ipr_show_adapter_handle(struct device *dev, char *buf)
2741 struct scsi_device *sdev = to_scsi_device(dev);
2742 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
2743 struct ipr_resource_entry *res;
2744 unsigned long lock_flags = 0;
2745 ssize_t len = -ENXIO;
2747 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2748 res = (struct ipr_resource_entry *)sdev->hostdata;
2750 len = snprintf(buf, PAGE_SIZE, "%08X\n", res->cfgte.res_handle);
2751 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Read-only per-device "adapter_handle" sysfs attribute. */
2755 static struct device_attribute ipr_adapter_handle_attr = {
2757 .name = "adapter_handle",
2760 .show = ipr_show_adapter_handle
/* Device attributes registered for each ipr scsi_device. */
2763 static struct device_attribute *ipr_dev_attrs[] = {
2764 &ipr_queue_depth_attr,
2766 &ipr_adapter_handle_attr,
2771 * ipr_biosparam - Return the HSC mapping
2772 * @sdev: scsi device struct
2773 * @block_device: block device pointer
2774 * @capacity: capacity of the device
2775 * @parm: Array containing returned HSC values.
2777 * This function generates the HSC parms that fdisk uses.
2778 * We want to make sure we return something that places partitions
2779 * on 4k boundaries for best performance with the IOA.
/*
 * Reports a fixed 128-head / 32-sector geometry (128*32*512 = 2 MB per
 * cylinder, keeping partitions 4k-aligned) and derives cylinders from
 * the capacity with sector_div (64-bit safe division).
 */
2784 static int ipr_biosparam(struct scsi_device *sdev,
2785 struct block_device *block_device,
2786 sector_t capacity, int *parm)
2788 int heads, sectors, cylinders;
2793 cylinders = capacity;
2794 sector_div(cylinders, (128 * 32));
2799 parm[2] = cylinders;
2805 * ipr_slave_destroy - Unconfigure a SCSI device
2806 * @sdev: scsi device struct
/*
 * Mid-layer slave_destroy hook: severs the resource-entry <-> sdev link
 * under the host lock so stale pointers are never dereferenced.
 */
2811 static void ipr_slave_destroy(struct scsi_device *sdev)
2813 struct ipr_resource_entry *res;
2814 struct ipr_ioa_cfg *ioa_cfg;
2815 unsigned long lock_flags = 0;
2817 ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
2819 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2820 res = (struct ipr_resource_entry *) sdev->hostdata;
2822 sdev->hostdata = NULL;
2825 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2829 * ipr_slave_configure - Configure a SCSI device
2830 * @sdev: scsi device struct
2832 * This function configures the specified scsi device.
/*
 * Mid-layer slave_configure hook: tunes the sdev based on the ipr
 * resource type (RAID member, IOA resource, volume set) and applies
 * the resource's queue depth.
 */
2837 static int ipr_slave_configure(struct scsi_device *sdev)
2839 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
2840 struct ipr_resource_entry *res;
2841 unsigned long lock_flags = 0;
2843 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2844 res = sdev->hostdata;
/* Advanced-function DASD is presented as a RAID member device. */
2846 if (ipr_is_af_dasd_device(res))
2847 sdev->type = TYPE_RAID;
2848 if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res))
2849 sdev->scsi_level = 4;
/* Volume sets need a longer read/write timeout. */
2850 if (ipr_is_vset_device(res))
2851 sdev->timeout = IPR_VSET_RW_TIMEOUT;
2853 sdev->allow_restart = 1;
2854 scsi_adjust_queue_depth(sdev, 0, res->qdepth);
2856 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2861 * ipr_slave_alloc - Prepare for commands to a device.
2862 * @sdev: scsi device struct
2864 * This function saves a pointer to the resource entry
2865 * in the scsi device struct if the device exists. We
2866 * can then use this pointer in ipr_queuecommand when
2867 * handling new commands.
/*
 * Walks the used-resource list for a bus/target/lun match and, if
 * found, links the resource entry into sdev->hostdata and flags it for
 * a sync-complete before normal I/O resumes.
 */
2872 static int ipr_slave_alloc(struct scsi_device *sdev)
2874 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
2875 struct ipr_resource_entry *res;
2876 unsigned long lock_flags;
2878 sdev->hostdata = NULL;
2880 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2882 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
2883 if ((res->cfgte.res_addr.bus == sdev->channel) &&
2884 (res->cfgte.res_addr.target == sdev->id) &&
2885 (res->cfgte.res_addr.lun == sdev->lun)) {
2889 sdev->hostdata = res;
2890 res->needs_sync_complete = 1;
2895 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2901 * ipr_eh_host_reset - Reset the host adapter
2902 * @scsi_cmd: scsi command struct
/*
 * SCSI error-handler host reset: escalates a pending dump request from
 * WAIT_FOR_DUMP to GET_DUMP, then performs an abbreviated-shutdown
 * reset/reload of the adapter.
 */
2907 static int ipr_eh_host_reset(struct scsi_cmnd * scsi_cmd)
2909 struct ipr_ioa_cfg *ioa_cfg;
2913 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
2915 dev_err(&ioa_cfg->pdev->dev,
2916 "Adapter being reset as a result of error recovery.\n");
2918 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2919 ioa_cfg->sdt_state = GET_DUMP;
2921 rc = ipr_reset_reload(ioa_cfg, IPR_SHUTDOWN_ABBREV);
2928 * ipr_eh_dev_reset - Reset the device
2929 * @scsi_cmd: scsi command struct
2931 * This function issues a device reset to the affected device.
2932 * A LUN reset will be sent to the device first. If that does
2933 * not work, a target reset will be sent.
/*
 * Error-handler device reset: redirects done() for every pending op on
 * this resource to the eh completion, then sends a blocking
 * IPR_RESET_DEVICE IOA command and maps its IOASC to SUCCESS/FAILED.
 */
2938 static int ipr_eh_dev_reset(struct scsi_cmnd * scsi_cmd)
2940 struct ipr_cmnd *ipr_cmd;
2941 struct ipr_ioa_cfg *ioa_cfg;
2942 struct ipr_resource_entry *res;
2943 struct ipr_cmd_pkt *cmd_pkt;
2947 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
2948 res = scsi_cmd->device->hostdata;
/* Only generic SCSI devices and volume sets can be reset here. */
2950 if (!res || (!ipr_is_gscsi(res) && !ipr_is_vset_device(res)))
2954 * If we are currently going through reset/reload, return failed. This will force the
2955 * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
2958 if (ioa_cfg->in_reset_reload)
2960 if (ioa_cfg->ioa_is_dead)
/* Point pending ops at the eh done routine so they complete the
 * error-handler path rather than the normal I/O path. */
2963 list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
2964 if (ipr_cmd->ioarcb.res_handle == res->cfgte.res_handle) {
2965 if (ipr_cmd->scsi_cmd)
2966 ipr_cmd->done = ipr_scsi_eh_done;
2970 res->resetting_device = 1;
2972 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
2974 ipr_cmd->ioarcb.res_handle = res->cfgte.res_handle;
2975 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
2976 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
2977 cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
2979 ipr_sdev_err(scsi_cmd->device, "Resetting device\n");
2980 ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
2982 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
2984 res->resetting_device = 0;
2986 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
/* Any sense key in the IOASC means the reset failed. */
2989 return (IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS);
2993 * ipr_bus_reset_done - Op done function for bus reset.
2994 * @ipr_cmd: ipr command struct
2996 * This function is the op done function for a bus reset
/*
 * Completion for the bus reset issued from ipr_abort_timeout: reports
 * the bus reset to the mid-layer for the matching resource, then either
 * clears the sibling link (abort still outstanding) or completes the
 * sleeping abort via its done function.
 */
3001 static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
3003 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
3004 struct ipr_resource_entry *res;
3007 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3008 if (!memcmp(&res->cfgte.res_handle, &ipr_cmd->ioarcb.res_handle,
3009 sizeof(res->cfgte.res_handle))) {
3010 scsi_report_bus_reset(ioa_cfg->host, res->cfgte.res_addr.bus);
3016 * If abort has not completed, indicate the reset has, else call the
3017 * abort's done function to wake the sleeping eh thread
3019 if (ipr_cmd->sibling->sibling)
3020 ipr_cmd->sibling->sibling = NULL;
3022 ipr_cmd->sibling->done(ipr_cmd->sibling);
3024 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3029 * ipr_abort_timeout - An abort task has timed out
3030 * @ipr_cmd: ipr command struct
3032 * This function handles when an abort task times out. If this
3033 * happens we issue a bus reset since we have resources tied
3034 * up that must be freed before returning to the midlayer.
/*
 * Timer callback for a timed-out abort: escalates to a bus reset,
 * cross-linking the abort and reset commands via ->sibling so
 * ipr_bus_reset_done can later wake the sleeping eh thread.
 * Bails out if the abort already completed or a reset/reload started.
 */
3039 static void ipr_abort_timeout(struct ipr_cmnd *ipr_cmd)
3041 struct ipr_cmnd *reset_cmd;
3042 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
3043 struct ipr_cmd_pkt *cmd_pkt;
3044 unsigned long lock_flags = 0;
3047 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3048 if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
3049 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3053 ipr_sdev_err(ipr_cmd->u.sdev, "Abort timed out. Resetting bus\n");
3054 reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
3055 ipr_cmd->sibling = reset_cmd;
3056 reset_cmd->sibling = ipr_cmd;
3057 reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
3058 cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
3059 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
3060 cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
3061 cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;
3063 ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
3064 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3069 * ipr_cancel_op - Cancel specified op
3070 * @scsi_cmd: scsi command struct
3072 * This function cancels specified op.
/*
 * Sends a blocking IPR_ABORT_TASK for the pending op matching scsi_cmd,
 * encoding the target IOARCB's PCI address into cdb[2..5] (big-endian
 * byte order). A BUS_WAS_RESET / SYNC_REQUIRED IOASC means an escalated
 * bus reset completed the abort.
 */
3077 static int ipr_cancel_op(struct scsi_cmnd * scsi_cmd)
3079 struct ipr_cmnd *ipr_cmd;
3080 struct ipr_ioa_cfg *ioa_cfg;
3081 struct ipr_resource_entry *res;
3082 struct ipr_cmd_pkt *cmd_pkt;
3083 u32 ioasc, ioarcb_addr;
3087 ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
3088 res = scsi_cmd->device->hostdata;
3090 if (!res || (!ipr_is_gscsi(res) && !ipr_is_vset_device(res)))
/* Find the pending op and reroute its completion to the eh path. */
3093 list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
3094 if (ipr_cmd->scsi_cmd == scsi_cmd) {
3095 ipr_cmd->done = ipr_scsi_eh_done;
3104 ioarcb_addr = be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr);
3106 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
3107 ipr_cmd->ioarcb.res_handle = res->cfgte.res_handle;
3108 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
3109 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
3110 cmd_pkt->cdb[0] = IPR_ABORT_TASK;
3111 cmd_pkt->cdb[2] = (ioarcb_addr >> 24) & 0xff;
3112 cmd_pkt->cdb[3] = (ioarcb_addr >> 16) & 0xff;
3113 cmd_pkt->cdb[4] = (ioarcb_addr >> 8) & 0xff;
3114 cmd_pkt->cdb[5] = ioarcb_addr & 0xff;
3115 ipr_cmd->u.sdev = scsi_cmd->device;
3117 ipr_sdev_err(scsi_cmd->device, "Aborting command: %02X\n", scsi_cmd->cmnd[0]);
3118 ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_ABORT_TASK_TIMEOUT);
3119 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3122 * If the abort task timed out and we sent a bus reset, we will get
3123 * one the following responses to the abort
3125 if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
3130 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3131 res->needs_sync_complete = 1;
3134 return (IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS);
3138 * ipr_eh_abort - Abort a single op
3139 * @scsi_cmd: scsi command struct
/*
 * Error-handler abort entry point: refuses the abort while the adapter
 * is mid reset/reload or dead (forcing escalation to host reset) and
 * when the device has no resource entry, otherwise delegates to
 * ipr_cancel_op.
 */
3144 static int ipr_eh_abort(struct scsi_cmnd * scsi_cmd)
3146 struct ipr_ioa_cfg *ioa_cfg;
3149 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
3151 /* If we are currently going through reset/reload, return failed. This will force the
3152 mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
3153 reset to complete */
3154 if (ioa_cfg->in_reset_reload)
3156 if (ioa_cfg->ioa_is_dead)
3158 if (!scsi_cmd->device->hostdata)
3162 return ipr_cancel_op(scsi_cmd);
3166 * ipr_handle_other_interrupt - Handle "other" interrupts
3167 * @ioa_cfg: ioa config struct
3168 * @int_reg: interrupt register
3171 * IRQ_NONE / IRQ_HANDLED
/*
 * Handles non-HRRQ interrupt causes: an IOA transition to operational
 * (mask+clear the bit and advance the in-flight reset job), or a fatal
 * condition (unit check / permanent failure) which escalates a pending
 * dump and initiates an adapter reset.
 */
3173 static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
3174 volatile u32 int_reg)
3176 irqreturn_t rc = IRQ_HANDLED;
3178 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
3179 /* Mask the interrupt */
3180 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
3182 /* Clear the interrupt */
3183 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.clr_interrupt_reg);
3184 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
/* Run the next stage of the reset job directly. */
3186 list_del(&ioa_cfg->reset_cmd->queue);
3187 del_timer(&ioa_cfg->reset_cmd->timer);
3188 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
3190 if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
3191 ioa_cfg->ioa_unit_checked = 1;
3193 dev_err(&ioa_cfg->pdev->dev,
3194 "Permanent IOA failure. 0x%08X\n", int_reg);
3196 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
3197 ioa_cfg->sdt_state = GET_DUMP;
3199 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
3200 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3207 * ipr_isr - Interrupt service routine
3209 * @devp: pointer to ioa config struct
3210 * @regs: pt_regs struct
3213 * IRQ_NONE / IRQ_HANDLED
/*
 * Main interrupt handler: drains the host request/response queue
 * (HRRQ), completing each finished command, under the host lock.
 * Non-HRRQ causes are deferred to ipr_handle_other_interrupt. An
 * out-of-range response handle is treated as fatal and triggers an
 * adapter reset.
 */
3215 static irqreturn_t ipr_isr(int irq, void *devp, struct pt_regs *regs)
3217 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
3218 unsigned long lock_flags = 0;
3219 volatile u32 int_reg, int_mask_reg;
3222 struct ipr_cmnd *ipr_cmd;
3223 irqreturn_t rc = IRQ_NONE;
3225 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3227 /* If interrupts are disabled, ignore the interrupt */
3228 if (!ioa_cfg->allow_interrupts) {
3229 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3233 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
3234 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
3236 /* If an interrupt on the adapter did not occur, ignore it */
3237 if (unlikely((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0)) {
3238 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Consume HRRQ entries while the toggle bit matches ours. */
3245 while ((be32_to_cpu(*ioa_cfg->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
3246 ioa_cfg->toggle_bit) {
3248 cmd_index = (be32_to_cpu(*ioa_cfg->hrrq_curr) &
3249 IPR_HRRQ_REQ_RESP_HANDLE_MASK) >> IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
3251 if (unlikely(cmd_index >= IPR_NUM_CMD_BLKS)) {
3252 ioa_cfg->errors_logged++;
3253 dev_err(&ioa_cfg->pdev->dev, "Invalid response handle from IOA\n");
3255 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
3256 ioa_cfg->sdt_state = GET_DUMP;
3258 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3259 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3263 ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
3265 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3267 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
3269 list_del(&ipr_cmd->queue);
3270 del_timer(&ipr_cmd->timer);
3271 ipr_cmd->done(ipr_cmd);
/* Advance the HRRQ cursor, wrapping and flipping the toggle bit. */
3275 if (ioa_cfg->hrrq_curr < ioa_cfg->hrrq_end) {
3276 ioa_cfg->hrrq_curr++;
3278 ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
3279 ioa_cfg->toggle_bit ^= 1u;
3283 if (ipr_cmd != NULL) {
3284 /* Clear the PCI interrupt */
3285 writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg);
3286 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
3291 if (unlikely(rc == IRQ_NONE))
3292 rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
3294 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3299 * ipr_build_ioadl - Build a scatter/gather list and map the buffer
3300 * @ioa_cfg: ioa config struct
3301 * @ipr_cmd: ipr command struct
3304 * 0 on success / -1 on failure
/*
 * DMA-maps the command's data buffer and fills in the IOADL
 * (scatter/gather descriptor list) plus the IOARCB transfer-length
 * fields. Two paths: pci_map_sg for scatter/gather requests, and
 * pci_map_single for a flat buffer; both mark the final descriptor
 * with IPR_IOADL_FLAGS_LAST.
 */
3306 static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
3307 struct ipr_cmnd *ipr_cmd)
3310 struct scatterlist *sglist;
3312 u32 ioadl_flags = 0;
3313 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
3314 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3315 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
3317 length = scsi_cmd->request_bufflen;
/* Scatter/gather path. */
3322 if (scsi_cmd->use_sg) {
3323 ipr_cmd->dma_use_sg = pci_map_sg(ioa_cfg->pdev,
3324 scsi_cmd->request_buffer,
3326 scsi_cmd->sc_data_direction);
3328 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
3329 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
3330 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3331 ioarcb->write_data_transfer_length = cpu_to_be32(length);
3332 ioarcb->write_ioadl_len =
3333 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
3334 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
3335 ioadl_flags = IPR_IOADL_FLAGS_READ;
3336 ioarcb->read_data_transfer_length = cpu_to_be32(length);
3337 ioarcb->read_ioadl_len =
3338 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
3341 sglist = scsi_cmd->request_buffer;
/* One IOADL descriptor per mapped sg element. */
3343 for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3344 ioadl[i].flags_and_data_len =
3345 cpu_to_be32(ioadl_flags | sg_dma_len(&sglist[i]));
3347 cpu_to_be32(sg_dma_address(&sglist[i]));
3350 if (likely(ipr_cmd->dma_use_sg)) {
3351 ioadl[i-1].flags_and_data_len |=
3352 cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3355 dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
/* Flat-buffer path: single descriptor. */
3357 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
3358 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
3359 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3360 ioarcb->write_data_transfer_length = cpu_to_be32(length);
3361 ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
3362 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
3363 ioadl_flags = IPR_IOADL_FLAGS_READ;
3364 ioarcb->read_data_transfer_length = cpu_to_be32(length);
3365 ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
3368 ipr_cmd->dma_handle = pci_map_single(ioa_cfg->pdev,
3369 scsi_cmd->request_buffer, length,
3370 scsi_cmd->sc_data_direction);
3372 if (likely(!pci_dma_mapping_error(ipr_cmd->dma_handle))) {
3373 ipr_cmd->dma_use_sg = 1;
3374 ioadl[0].flags_and_data_len =
3375 cpu_to_be32(ioadl_flags | length | IPR_IOADL_FLAGS_LAST);
3376 ioadl[0].address = cpu_to_be32(ipr_cmd->dma_handle);
3379 dev_err(&ioa_cfg->pdev->dev, "pci_map_single failed!\n");
3386 * ipr_get_task_attributes - Translate SPI Q-Tag to task attributes
3387 * @scsi_cmd: scsi command struct
/*
 * Maps the mid-layer's tag message for this command onto the IPR task
 * attribute flags; untagged if no tag message is populated.
 */
3392 static u8 ipr_get_task_attributes(struct scsi_cmnd *scsi_cmd)
3395 u8 rc = IPR_FLAGS_LO_UNTAGGED_TASK;
3397 if (scsi_populate_tag_msg(scsi_cmd, tag)) {
3399 case MSG_SIMPLE_TAG:
3400 rc = IPR_FLAGS_LO_SIMPLE_TASK;
3403 rc = IPR_FLAGS_LO_HEAD_OF_Q_TASK;
3405 case MSG_ORDERED_TAG:
3406 rc = IPR_FLAGS_LO_ORDERED_TASK;
3415 * ipr_erp_done - Process completion of ERP for a device
3416 * @ipr_cmd: ipr command struct
3418 * This function copies the sense buffer into the scsi_cmd
3419 * struct and pushes the scsi_done function.
/*
 * Final stage of error recovery for one op: on a failed request sense,
 * flags DID_ERROR; otherwise copies the autosense data into the
 * mid-layer's sense buffer. Then unmaps DMA, recycles the command
 * block, and completes the scsi command.
 */
3424 static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
3426 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
3427 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
3428 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
3429 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3431 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
3432 scsi_cmd->result |= (DID_ERROR << 16);
3433 ipr_sdev_err(scsi_cmd->device,
3434 "Request Sense failed with IOASC: 0x%08X\n", ioasc);
3436 memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
3437 SCSI_SENSE_BUFFERSIZE);
3441 res->needs_sync_complete = 1;
3444 ipr_unmap_sglist(ioa_cfg, ipr_cmd);
3445 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3446 scsi_cmd->scsi_done(scsi_cmd);
3450 * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
3451 * @ipr_cmd: ipr command struct
/*
 * Clears the command packet and zeroes the IOARCB transfer-length /
 * IOADL-length fields and the IOASA residual so the same command block
 * can be reissued for an ERP request (e.g. cancel-all, request sense).
 */
3456 static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
3458 struct ipr_ioarcb *ioarcb;
3459 struct ipr_ioasa *ioasa;
3461 ioarcb = &ipr_cmd->ioarcb;
3462 ioasa = &ipr_cmd->ioasa;
3464 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
3465 ioarcb->write_data_transfer_length = 0;
3466 ioarcb->read_data_transfer_length = 0;
3467 ioarcb->write_ioadl_len = 0;
3468 ioarcb->read_ioadl_len = 0;
3470 ioasa->residual_data_len = 0;
3474 * ipr_erp_request_sense - Send request sense to a device
3475 * @ipr_cmd: ipr command struct
3477 * This function sends a request sense to a device as a result
3478 * of a check condition.
/*
 * If the prior ERP step already failed (sense key set), completes the
 * op via ipr_erp_done; otherwise reuses the command block to issue a
 * REQUEST SENSE into the per-command DMA sense buffer, with a doubled
 * timeout and ipr_erp_done as the completion.
 */
3483 static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
3485 struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
3486 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3488 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
3489 ipr_erp_done(ipr_cmd);
3493 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
3495 cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
3496 cmd_pkt->cdb[0] = REQUEST_SENSE;
3497 cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
3498 cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
3499 cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
3500 cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);
/* Single read IOADL descriptor pointing at the DMA sense buffer. */
3502 ipr_cmd->ioadl[0].flags_and_data_len =
3503 cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | SCSI_SENSE_BUFFERSIZE);
3504 ipr_cmd->ioadl[0].address =
3505 cpu_to_be32(ipr_cmd->sense_buffer_dma);
3507 ipr_cmd->ioarcb.read_ioadl_len =
3508 cpu_to_be32(sizeof(struct ipr_ioadl_desc));
3509 ipr_cmd->ioarcb.read_data_transfer_length =
3510 cpu_to_be32(SCSI_SENSE_BUFFERSIZE);
3512 ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
3513 IPR_REQUEST_SENSE_TIMEOUT * 2);
3517 * ipr_erp_cancel_all - Send cancel all to a device
3518 * @ipr_cmd: ipr command struct
3520 * This function sends a cancel all to a device to clear the
3521 * queue. If we are running TCQ on the device, QERR is set to 1,
3522 * which means all outstanding ops have been dropped on the floor.
3523 * Cancel all will return them to us.
/*
 * First ERP stage after a check condition: with TCQ active, issues
 * IPR_CANCEL_ALL_REQUESTS (chained to request sense on completion);
 * without TCQ, skips straight to the request sense.
 */
3528 static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
3530 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
3531 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
3532 struct ipr_cmd_pkt *cmd_pkt;
3536 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
3538 if (!res->tcq_active) {
3539 ipr_erp_request_sense(ipr_cmd);
3543 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
3544 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
3545 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
3547 ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
3548 IPR_CANCEL_ALL_TIMEOUT);
3552 * ipr_dump_ioasa - Dump contents of IOASA
3553 * @ioa_cfg: ioa config struct
3554 * @ipr_cmd: ipr command struct
3556 * This function is invoked by the interrupt handler when ops
3557 * fail. It will log the IOASA if appropriate. Only called
/*
 * Logs a failed op's IOASA subject to the adapter's log level:
 * translates the IOASC to an error-table message, prints GPDD device
 * end-state/phase when in range, then hex-dumps the IOASA words.
 */
3563 static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
3564 struct ipr_cmnd *ipr_cmd)
3569 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
3570 u32 *ioasa_data = (u32 *)ioasa;
3573 ioasc = be32_to_cpu(ioasa->ioasc) & IPR_IOASC_IOASC_MASK;
3578 if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
3581 error_index = ipr_get_error(ioasc);
3583 if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
3584 /* Don't log an error if the IOA already logged one */
3585 if (ioasa->ilid != 0)
3588 if (ipr_error_table[error_index].log_ioasa == 0)
3592 ipr_sdev_err(ipr_cmd->scsi_cmd->device, "%s\n",
3593 ipr_error_table[error_index].error);
/* NOTE(review): '<=' against ARRAY_SIZE permits index == array size,
 * which would read one past the end — confirm whether '<' was meant. */
3595 if ((ioasa->u.gpdd.end_state <= ARRAY_SIZE(ipr_gpdd_dev_end_states)) &&
3596 (ioasa->u.gpdd.bus_phase <= ARRAY_SIZE(ipr_gpdd_dev_bus_phases))) {
3597 ipr_sdev_err(ipr_cmd->scsi_cmd->device,
3598 "Device End state: %s Phase: %s\n",
3599 ipr_gpdd_dev_end_states[ioasa->u.gpdd.end_state],
3600 ipr_gpdd_dev_bus_phases[ioasa->u.gpdd.bus_phase]);
/* Clamp the dump length to the size of the IOASA structure. */
3603 if (sizeof(struct ipr_ioasa) < be16_to_cpu(ioasa->ret_stat_len))
3604 data_len = sizeof(struct ipr_ioasa);
3606 data_len = be16_to_cpu(ioasa->ret_stat_len);
3608 ipr_err("IOASA Dump:\n");
/* Four 32-bit words per output line; i indexes words, i*4 is the
 * byte offset printed at the start of each line. */
3610 for (i = 0; i < data_len / 4; i += 4) {
3611 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
3612 be32_to_cpu(ioasa_data[i]),
3613 be32_to_cpu(ioasa_data[i+1]),
3614 be32_to_cpu(ioasa_data[i+2]),
3615 be32_to_cpu(ioasa_data[i+3]));
3620 * ipr_gen_sense - Generate SCSI sense data from an IOASA
3622 * @sense_buf: sense data buffer
/*
 * Synthesizes SCSI sense data in the scsi_cmd's sense buffer from the
 * IOASA. Volume sets with a 64-bit failing LBA get descriptor-format
 * sense (0x72) carrying the full 8-byte LBA; everything else gets
 * fixed-format sense (0x70), optionally with a field pointer (illegal
 * request) or a 32-bit failing LBA in the information bytes.
 */
3627 static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
3630 u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
3631 struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
3632 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
3633 u32 ioasc = be32_to_cpu(ioasa->ioasc);
3635 memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
/* Driver-generated IOASCs carry no device sense data. */
3637 if (ioasc >= IPR_FIRST_DRIVER_IOASC)
3640 ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;
/* Descriptor-format path: 64-bit LBA only fits in 0x72 format. */
3642 if (ipr_is_vset_device(res) &&
3643 ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
3644 ioasa->u.vset.failing_lba_hi != 0) {
3645 sense_buf[0] = 0x72;
3646 sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
3647 sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
3648 sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);
3652 sense_buf[9] = 0x0A;
3653 sense_buf[10] = 0x80;
3655 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);
3657 sense_buf[12] = (failing_lba & 0xff000000) >> 24;
3658 sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
3659 sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
3660 sense_buf[15] = failing_lba & 0x000000ff;
3662 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
3664 sense_buf[16] = (failing_lba & 0xff000000) >> 24;
3665 sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
3666 sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
3667 sense_buf[19] = failing_lba & 0x000000ff;
/* Fixed-format path (0x70). */
3669 sense_buf[0] = 0x70;
3670 sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
3671 sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
3672 sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);
3674 /* Illegal request */
3675 if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
3676 (be32_to_cpu(ioasa->ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
3677 sense_buf[7] = 10; /* additional length */
3679 /* IOARCB was in error */
3680 if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
3681 sense_buf[15] = 0xC0;
3682 else /* Parameter data was invalid */
3683 sense_buf[15] = 0x80;
3686 ((IPR_FIELD_POINTER_MASK &
3687 be32_to_cpu(ioasa->ioasc_specific)) >> 8) & 0xff;
3689 (IPR_FIELD_POINTER_MASK &
3690 be32_to_cpu(ioasa->ioasc_specific)) & 0xff;
3692 if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
3693 if (ipr_is_vset_device(res))
3694 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
3696 failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);
3698 sense_buf[0] |= 0x80; /* Or in the Valid bit */
3699 sense_buf[3] = (failing_lba & 0xff000000) >> 24;
3700 sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
3701 sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
3702 sense_buf[6] = failing_lba & 0x000000ff;
3705 sense_buf[7] = 6; /* additional length */
3711 * ipr_erp_start - Process an error response for a SCSI op
3712 * @ioa_cfg: ioa config struct
3713 * @ipr_cmd: ipr command struct
3715 * This function determines whether or not to initiate ERP
3716 * on the affected device.
/*
 * Failure dispatcher: logs the IOASA for generic SCSI devices,
 * synthesizes sense data, then maps the IOASC onto a mid-layer result
 * (DID_ERROR / DID_NO_CONNECT / retry / passthrough). A device-reported
 * CHECK CONDITION branches into the cancel-all -> request-sense ERP
 * chain instead of completing here.
 */
3721 static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
3722 struct ipr_cmnd *ipr_cmd)
3724 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
3725 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
3726 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3729 ipr_scsi_eh_done(ipr_cmd);
3733 if (ipr_is_gscsi(res))
3734 ipr_dump_ioasa(ioa_cfg, ipr_cmd);
3736 ipr_gen_sense(ipr_cmd);
3738 switch (ioasc & IPR_IOASC_IOASC_MASK) {
3739 case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
3740 scsi_cmd->result |= (DID_ERROR << 16);
3742 case IPR_IOASC_IR_RESOURCE_HANDLE:
3743 scsi_cmd->result |= (DID_NO_CONNECT << 16);
3745 case IPR_IOASC_HW_SEL_TIMEOUT:
3746 scsi_cmd->result |= (DID_NO_CONNECT << 16);
3747 res->needs_sync_complete = 1;
3749 case IPR_IOASC_SYNC_REQUIRED:
3751 res->needs_sync_complete = 1;
3752 scsi_cmd->result |= (DID_IMM_RETRY << 16);
3754 case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
3755 scsi_cmd->result |= (DID_PASSTHROUGH << 16);
3757 case IPR_IOASC_BUS_WAS_RESET:
3758 case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
3760 * Report the bus reset and ask for a retry. The device
3761 * will give CC/UA the next command.
3763 if (!res->resetting_device)
3764 scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
3765 scsi_cmd->result |= (DID_ERROR << 16);
3766 res->needs_sync_complete = 1;
3768 case IPR_IOASC_HW_DEV_BUS_STATUS:
3769 scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
/* CHECK CONDITION: enter the ERP chain; completion happens there. */
3770 if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
3771 ipr_erp_cancel_all(ipr_cmd);
3774 res->needs_sync_complete = 1;
3776 case IPR_IOASC_NR_INIT_CMD_REQUIRED:
3779 scsi_cmd->result |= (DID_ERROR << 16);
3780 if (!ipr_is_vset_device(res))
3781 res->needs_sync_complete = 1;
3785 ipr_unmap_sglist(ioa_cfg, ipr_cmd);
3786 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3787 scsi_cmd->scsi_done(scsi_cmd);
3791 * ipr_scsi_done - mid-layer done function
3792 * @ipr_cmd: ipr command struct
3794 * This function is invoked by the interrupt handler for
3795 * ops generated by the SCSI mid-layer
3800 static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
3802 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
3803 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
3804 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
/* Propagate the residual byte count reported by the adapter */
3806 scsi_cmd->resid = be32_to_cpu(ipr_cmd->ioasa.residual_data_len);
/* Fast path: no sense key means success -- free resources and complete */
3808 if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
3809 ipr_unmap_sglist(ioa_cfg, ipr_cmd);
3810 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3811 scsi_cmd->scsi_done(scsi_cmd);
/* Error path: hand the op to error recovery processing */
3813 ipr_erp_start(ioa_cfg, ipr_cmd);
3817 * ipr_save_ioafp_mode_select - Save adapters mode select data
3818 * @ioa_cfg: ioa config struct
3819 * @scsi_cmd: scsi command struct
3821 * This function saves mode select data for the adapter to
3822 * use following an adapter reset.
3825 * 0 on success / SCSI_MLQUEUE_HOST_BUSY on failure
3827 static int ipr_save_ioafp_mode_select(struct ipr_ioa_cfg *ioa_cfg,
3828 struct scsi_cmnd *scsi_cmd)
/* Lazily allocate the save buffer; it persists across adapter resets */
3830 if (!ioa_cfg->saved_mode_pages) {
3831 ioa_cfg->saved_mode_pages = kmalloc(sizeof(struct ipr_mode_pages),
3833 if (!ioa_cfg->saved_mode_pages) {
3834 dev_err(&ioa_cfg->pdev->dev,
3835 "IOA mode select buffer allocation failed\n");
3836 return SCSI_MLQUEUE_HOST_BUSY;
/* CDB byte 4 is the MODE SELECT(6) parameter list length.
 * NOTE(review): no bound check against sizeof(struct ipr_mode_pages) is
 * visible here before the memcpy -- verify the caller constrains it. */
3840 memcpy(ioa_cfg->saved_mode_pages, scsi_cmd->buffer, scsi_cmd->cmnd[4]);
3841 ioa_cfg->saved_mode_page_len = scsi_cmd->cmnd[4];
3846 * ipr_queuecommand - Queue a mid-layer request
3847 * @scsi_cmd: scsi command struct
3848 * @done: done function
3850 * This function queues a request generated by the mid-layer.
3854 * SCSI_MLQUEUE_DEVICE_BUSY if device is busy
3855 * SCSI_MLQUEUE_HOST_BUSY if host is busy
3857 static int ipr_queuecommand(struct scsi_cmnd *scsi_cmd,
3858 void (*done) (struct scsi_cmnd *))
3860 struct ipr_ioa_cfg *ioa_cfg;
3861 struct ipr_resource_entry *res;
3862 struct ipr_ioarcb *ioarcb;
3863 struct ipr_cmnd *ipr_cmd;
3866 scsi_cmd->scsi_done = done;
3867 ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
3868 res = scsi_cmd->device->hostdata;
3869 scsi_cmd->result = (DID_OK << 16);
3872 * We are currently blocking all devices due to a host reset
3873 * We have told the host to stop giving us new requests, but
3874 * ERP ops don't count. FIXME
3876 if (unlikely(!ioa_cfg->allow_cmds))
3877 return SCSI_MLQUEUE_HOST_BUSY;
3880 * FIXME - Create scsi_set_host_offline interface
3881 * and the ioa_is_dead check can be removed
3883 if (unlikely(ioa_cfg->ioa_is_dead || !res)) {
/* Dead adapter or unknown device: fail the command immediately */
3884 memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
3885 scsi_cmd->result = (DID_NO_CONNECT << 16);
3886 scsi_cmd->scsi_done(scsi_cmd);
/* Build the IOA request control block for this command */
3890 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
3891 ioarcb = &ipr_cmd->ioarcb;
3892 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
3894 memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
3895 ipr_cmd->scsi_cmd = scsi_cmd;
3896 ioarcb->res_handle = res->cfgte.res_handle;
3897 ipr_cmd->done = ipr_scsi_done;
3898 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_PHYS_LOC(res->cfgte.res_addr));
3900 if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
/* No underflow specified: tell the IOA not to enforce a transfer length */
3901 if (scsi_cmd->underflow == 0)
3902 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
/* Deliver the SYNC COMPLETE that an earlier error path requested */
3904 if (res->needs_sync_complete) {
3905 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
3906 res->needs_sync_complete = 0;
3909 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
3910 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
3911 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
3912 ioarcb->cmd_pkt.flags_lo |= ipr_get_task_attributes(scsi_cmd);
/* Vendor-specific opcodes (>= 0xC0) to non-GSCSI devices go as IOA cmds */
3915 if (!ipr_is_gscsi(res) && scsi_cmd->cmnd[0] >= 0xC0)
3916 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
/* MODE SELECT to the IOA itself: stash the data for replay after reset */
3918 if (ipr_is_ioa_resource(res) && scsi_cmd->cmnd[0] == MODE_SELECT)
3919 rc = ipr_save_ioafp_mode_select(ioa_cfg, scsi_cmd);
3921 if (likely(rc == 0))
3922 rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
3924 if (likely(rc == 0)) {
/* Ring the doorbell: write the IOARCB address to kick off the op */
3926 writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
3927 ioa_cfg->regs.ioarrin_reg);
/* Failure: return the command block to the free list and report busy */
3929 list_move_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3930 return SCSI_MLQUEUE_HOST_BUSY;
3937 * ipr_ioa_info - Get information about the card/driver
3938 * @scsi_host: scsi host struct
3941 * pointer to buffer with description string
3943 static const char * ipr_ioa_info(struct Scsi_Host *host)
/* Static buffer: serialized by host_lock below, but still shared across
 * all adapters -- the last caller's text wins. */
3945 static char buffer[512];
3946 struct ipr_ioa_cfg *ioa_cfg;
3947 unsigned long lock_flags = 0;
3949 ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;
3951 spin_lock_irqsave(host->host_lock, lock_flags);
3952 sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
3953 spin_unlock_irqrestore(host->host_lock, lock_flags);
/* SCSI mid-layer host template: wires this driver's entry points and
 * queueing limits into the SCSI core. */
3958 static struct scsi_host_template driver_template = {
3959 .module = THIS_MODULE,
3961 .info = ipr_ioa_info,
3962 .queuecommand = ipr_queuecommand,
3963 .eh_abort_handler = ipr_eh_abort,
3964 .eh_device_reset_handler = ipr_eh_dev_reset,
3965 .eh_host_reset_handler = ipr_eh_host_reset,
3966 .slave_alloc = ipr_slave_alloc,
3967 .slave_configure = ipr_slave_configure,
3968 .slave_destroy = ipr_slave_destroy,
3969 .bios_param = ipr_biosparam,
3970 .can_queue = IPR_MAX_COMMANDS,
3972 .sg_tablesize = IPR_MAX_SGLIST,
3973 .max_sectors = IPR_MAX_SECTORS,
3974 .cmd_per_lun = IPR_MAX_CMD_PER_LUN,
3975 .use_clustering = ENABLE_CLUSTERING,
3976 .shost_attrs = ipr_ioa_attrs,
3977 .sdev_attrs = ipr_dev_attrs,
3978 .proc_name = IPR_NAME
3981 #ifdef CONFIG_PPC_PSERIES
/* PVR values of processors on which early Gemstone adapters misbehave */
3982 static const u16 ipr_blocked_processors[] = {
3994 * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
3995 * @ioa_cfg: ioa cfg struct
3997 * Adapters that use Gemstone revision < 3.1 do not work reliably on
3998 * certain pSeries hardware. This function determines if the given
3999 * adapter is in one of these configurations or not.
4002 * 1 if adapter is not supported / 0 if adapter is supported
4004 static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
4009 if (ioa_cfg->type == 0x5702) {
/* NOTE(review): rev_id is read here; the revision comparison itself is
 * not visible in this excerpt -- confirm against full source. */
4010 if (pci_read_config_byte(ioa_cfg->pdev, PCI_REVISION_ID,
4011 &rev_id) == PCIBIOS_SUCCESSFUL) {
4013 for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++){
4014 if (__is_processor(ipr_blocked_processors[i]))
/* Non-pSeries builds: every adapter is considered supported */
4023 #define ipr_invalid_adapter(ioa_cfg) 0
4027 * ipr_ioa_bringdown_done - IOA bring down completion.
4028 * @ipr_cmd: ipr command struct
4030 * This function processes the completion of an adapter bring down.
4031 * It wakes any reset sleepers.
4036 static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
4038 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4041 ioa_cfg->in_reset_reload = 0;
4042 ioa_cfg->reset_retries = 0;
4043 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4044 wake_up_all(&ioa_cfg->reset_wait_q);
/* Drop the host lock while unblocking: scsi_unblock_requests may
 * re-enter the driver's queuecommand path. */
4046 spin_unlock_irq(ioa_cfg->host->host_lock);
4047 scsi_unblock_requests(ioa_cfg->host);
4048 spin_lock_irq(ioa_cfg->host->host_lock);
4051 return IPR_RC_JOB_RETURN;
4055 * ipr_ioa_reset_done - IOA reset completion.
4056 * @ipr_cmd: ipr command struct
4058 * This function processes the completion of an adapter reset.
4059 * It schedules any necessary mid-layer add/removes and
4060 * wakes any reset sleepers.
4065 static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
4067 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4068 struct ipr_resource_entry *res;
4069 struct ipr_hostrcb *hostrcb, *temp;
4073 ioa_cfg->in_reset_reload = 0;
4074 ioa_cfg->allow_cmds = 1;
4075 ioa_cfg->reset_cmd = NULL;
/* Kick the worker thread if any device needs adding to / removal from
 * the mid-layer after the reset re-discovered the configuration. */
4077 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4078 if (ioa_cfg->allow_ml_add_del && (res->add_to_ml || res->del_from_ml)) {
4080 schedule_work(&ioa_cfg->work_q);
/* Re-post the host-controlled async messages (HCAMs): the first
 * IPR_NUM_LOG_HCAMS buffers for error log data, the rest for
 * configuration-change notifications. */
4085 list_for_each_entry_safe(hostrcb, temp, &ioa_cfg->hostrcb_free_q, queue) {
4086 list_del(&hostrcb->queue);
4087 if (i++ < IPR_NUM_LOG_HCAMS)
4088 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
4090 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
4093 dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
4095 ioa_cfg->reset_retries = 0;
4096 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4097 wake_up_all(&ioa_cfg->reset_wait_q);
/* Unblock with the host lock dropped, then re-check: another reset may
 * have cleared allow_cmds while the lock was released. */
4099 spin_unlock_irq(ioa_cfg->host->host_lock);
4100 scsi_unblock_requests(ioa_cfg->host);
4101 spin_lock_irq(ioa_cfg->host->host_lock);
4103 if (!ioa_cfg->allow_cmds)
4104 scsi_block_requests(ioa_cfg->host);
4107 return IPR_RC_JOB_RETURN;
4111 * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
4112 * @supported_dev: supported device struct
4113 * @vpids: vendor product id struct
4118 static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
4119 struct ipr_std_inq_vpids *vpids)
4121 memset(supported_dev, 0, sizeof(struct ipr_supported_device));
4122 memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
/* NOTE(review): num_records is assigned without endian conversion while
 * data_length gets cpu_to_be16 -- presumably a single-byte field; verify. */
4123 supported_dev->num_records = 1;
4124 supported_dev->data_length =
4125 cpu_to_be16(sizeof(struct ipr_supported_device));
4126 supported_dev->reserved = 0;
4130 * ipr_set_supported_devs - Send Set Supported Devices for a device
4131 * @ipr_cmd: ipr command struct
4133 * This function sends a Set Supported Devices to the adapter
4136 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
4138 static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
4140 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4141 struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
4142 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
4143 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4144 struct ipr_resource_entry *res = ipr_cmd->u.res;
/* Default next step; overwritten below if another device still needs
 * a Set Supported Devices sent. */
4146 ipr_cmd->job_step = ipr_ioa_reset_done;
/* Resume the walk of used resources from where the previous invocation
 * of this job step left off (ipr_cmd->u.res acts as the cursor). */
4148 list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
4149 if (!ipr_is_af_dasd_device(res))
4152 ipr_cmd->u.res = res;
4153 ipr_set_sup_dev_dflt(supp_dev, &res->cfgte.std_inq_data.vpids);
4155 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
4156 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
4157 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
/* CDB bytes 7/8 carry the big-endian transfer length */
4159 ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
4160 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
4161 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;
4163 ioadl->flags_and_data_len = cpu_to_be32(IPR_IOADL_FLAGS_WRITE_LAST |
4164 sizeof(struct ipr_supported_device));
4165 ioadl->address = cpu_to_be32(ioa_cfg->vpd_cbs_dma +
4166 offsetof(struct ipr_misc_cbs, supp_dev));
4167 ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
4168 ioarcb->write_data_transfer_length =
4169 cpu_to_be32(sizeof(struct ipr_supported_device));
4171 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
4172 IPR_SET_SUP_DEVICE_TIMEOUT);
/* Come back to this step to handle the next AF DASD device */
4174 ipr_cmd->job_step = ipr_set_supported_devs;
4175 return IPR_RC_JOB_RETURN;
4178 return IPR_RC_JOB_CONTINUE;
4182 * ipr_get_mode_page - Locate specified mode page
4183 * @mode_pages: mode page buffer
4184 * @page_code: page code to find
4185 * @len: minimum required length for mode page
4188 * pointer to mode page / NULL on failure
4190 static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
4191 u32 page_code, u32 len)
4193 struct ipr_mode_page_hdr *mode_hdr;
4197 if (!mode_pages || (mode_pages->hdr.length == 0))
/* Bytes remaining after the 4-byte mode parameter header ("length"
 * itself excludes its own byte, hence the +1) and block descriptors. */
4200 length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
4201 mode_hdr = (struct ipr_mode_page_hdr *)
4202 (mode_pages->data + mode_pages->hdr.block_desc_len);
/* Walk page headers; match only if the page is at least @len long */
4205 if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
4206 if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
/* Advance past this page to the next page header */
4210 page_length = (sizeof(struct ipr_mode_page_hdr) +
4211 mode_hdr->page_length);
4212 length -= page_length;
4213 mode_hdr = (struct ipr_mode_page_hdr *)
4214 ((unsigned long)mode_hdr + page_length);
4221 * ipr_check_term_power - Check for term power errors
4222 * @ioa_cfg: ioa config struct
4223 * @mode_pages: IOAFP mode pages buffer
4225 * Check the IOAFP's mode page 28 for term power errors
4230 static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
4231 struct ipr_mode_pages *mode_pages)
4235 struct ipr_dev_bus_entry *bus;
4236 struct ipr_mode_page28 *mode_page;
/* NOTE(review): ipr_get_mode_page is documented to return NULL on
 * failure; no NULL check is visible before the dereference below --
 * verify against the full source. */
4238 mode_page = ipr_get_mode_page(mode_pages, 0x28,
4239 sizeof(struct ipr_mode_page28));
4241 entry_length = mode_page->entry_length;
4243 bus = mode_page->bus;
/* Entries are variable-length: step by the adapter-reported entry_length */
4245 for (i = 0; i < mode_page->num_entries; i++) {
4246 if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
4247 dev_err(&ioa_cfg->pdev->dev,
4248 "Term power is absent on scsi bus %d\n",
4252 bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
4257 * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
4258 * @ioa_cfg: ioa config struct
4260 * Looks through the config table checking for SES devices. If
4261 * the SES device is in the SES table indicating a maximum SCSI
4262 * bus speed, the speed is limited for the bus.
4267 static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
4272 for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
4273 max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
4274 ioa_cfg->bus_attr[i].bus_width);
/* Only ever clamp downward; never raise a bus above its current limit */
4276 if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
4277 ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
4282 * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
4283 * @ioa_cfg: ioa config struct
4284 * @mode_pages: mode page 28 buffer
4286 * Updates mode page 28 based on driver configuration
4291 static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
4292 struct ipr_mode_pages *mode_pages)
4294 int i, entry_length;
4295 struct ipr_dev_bus_entry *bus;
4296 struct ipr_bus_attributes *bus_attr;
4297 struct ipr_mode_page28 *mode_page;
/* NOTE(review): no visible NULL check on the ipr_get_mode_page result
 * before dereference -- verify against the full source. */
4299 mode_page = ipr_get_mode_page(mode_pages, 0x28,
4300 sizeof(struct ipr_mode_page28));
4302 entry_length = mode_page->entry_length;
4304 /* Loop for each device bus entry */
4305 for (i = 0, bus = mode_page->bus;
4306 i < mode_page->num_entries;
4307 i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
/* NOTE(review): if valid bus numbers are 0..IPR_MAX_NUM_BUSES-1 this
 * check looks off by one (">" vs ">="); also bus_attr below is indexed
 * by the loop counter i rather than by bus->res_addr.bus -- confirm
 * both against the full source. */
4308 if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
4309 dev_err(&ioa_cfg->pdev->dev,
4310 "Invalid resource address reported: 0x%08X\n",
4311 IPR_GET_PHYS_LOC(bus->res_addr));
4315 bus_attr = &ioa_cfg->bus_attr[i];
4316 bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
4317 bus->bus_width = bus_attr->bus_width;
4318 bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
4319 bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
4320 if (bus_attr->qas_enabled)
4321 bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
4323 bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
4328 * ipr_build_mode_select - Build a mode select command
4329 * @ipr_cmd: ipr command struct
4330 * @res_handle: resource handle to send command to
4331 * @parm: Byte 2 of Mode Sense command
4332 * @dma_addr: DMA buffer address
4333 * @xfer_len: data transfer length
4338 static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
4339 u32 res_handle, u8 parm, u32 dma_addr,
4342 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
4343 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
/* Build a MODE SELECT(6) CDB: byte 1 = PF/SP flags, byte 4 = length */
4345 ioarcb->res_handle = res_handle;
4346 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
4347 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
4348 ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
4349 ioarcb->cmd_pkt.cdb[1] = parm;
4350 ioarcb->cmd_pkt.cdb[4] = xfer_len;
/* Single write IOADL entry describing the parameter-list buffer */
4352 ioadl->flags_and_data_len =
4353 cpu_to_be32(IPR_IOADL_FLAGS_WRITE_LAST | xfer_len);
4354 ioadl->address = cpu_to_be32(dma_addr);
4355 ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
4356 ioarcb->write_data_transfer_length = cpu_to_be32(xfer_len);
4360 * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
4361 * @ipr_cmd: ipr command struct
4363 * This function sets up the SCSI bus attributes and sends
4364 * a Mode Select for Page 28 to activate them.
4369 static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
4371 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4372 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
/* Replay previously saved user mode-select data if present; otherwise
 * derive the page-28 contents from current driver bus attributes. */
4376 if (ioa_cfg->saved_mode_pages) {
4377 memcpy(mode_pages, ioa_cfg->saved_mode_pages,
4378 ioa_cfg->saved_mode_page_len);
4379 length = ioa_cfg->saved_mode_page_len;
4381 ipr_scsi_bus_speed_limit(ioa_cfg);
4382 ipr_check_term_power(ioa_cfg, mode_pages);
4383 ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
4384 length = mode_pages->hdr.length + 1;
/* Mode data length is reserved (zero) in MODE SELECT parameter data */
4385 mode_pages->hdr.length = 0;
4388 ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
4389 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
4392 ipr_cmd->job_step = ipr_set_supported_devs;
/* Seed the resource cursor for the Set Supported Devices walk */
4393 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
4394 struct ipr_resource_entry, queue);
4396 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
4399 return IPR_RC_JOB_RETURN;
4403 * ipr_build_mode_sense - Builds a mode sense command
4404 * @ipr_cmd: ipr command struct
4405 * @res: resource entry struct
4406 * @parm: Byte 2 of mode sense command
4407 * @dma_addr: DMA address of mode sense buffer
4408 * @xfer_len: Size of DMA buffer
4413 static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
4415 u8 parm, u32 dma_addr, u8 xfer_len)
4417 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
4418 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
/* Build a MODE SENSE(6) CDB: byte 2 = page code, byte 4 = alloc length */
4420 ioarcb->res_handle = res_handle;
4421 ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
4422 ioarcb->cmd_pkt.cdb[2] = parm;
4423 ioarcb->cmd_pkt.cdb[4] = xfer_len;
4424 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
/* Single read IOADL entry describing the receive buffer */
4426 ioadl->flags_and_data_len =
4427 cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | xfer_len);
4428 ioadl->address = cpu_to_be32(dma_addr);
4429 ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
4430 ioarcb->read_data_transfer_length = cpu_to_be32(xfer_len);
4434 * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
4435 * @ipr_cmd: ipr command struct
4437 * This function sends a Page 28 mode sense to the IOA to
4438 * retrieve SCSI bus attributes.
4443 static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
4445 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
/* Read page 0x28 into the shared VPD control-block DMA area */
4448 ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
4449 0x28, ioa_cfg->vpd_cbs_dma +
4450 offsetof(struct ipr_misc_cbs, mode_pages),
4451 sizeof(struct ipr_mode_pages));
/* On completion, modify and send the page back via mode select */
4453 ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
4455 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
4458 return IPR_RC_JOB_RETURN;
4462 * ipr_init_res_table - Initialize the resource table
4463 * @ipr_cmd: ipr command struct
4465 * This function looks through the existing resource table, comparing
4466 * it with the config table. This function will take care of old/new
4467 * devices and schedule adding/removing them from the mid-layer
4471 * IPR_RC_JOB_CONTINUE
4473 static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
4475 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4476 struct ipr_resource_entry *res, *temp;
4477 struct ipr_config_table_entry *cfgte;
4482 if (ioa_cfg->cfg_table->hdr.flags & IPR_UCODE_DOWNLOAD_REQ)
4483 dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
/* Move every known resource to a local list; entries still present in
 * the new config table are moved back below, leaving only stale ones. */
4485 list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
4486 list_move_tail(&res->queue, &old_res);
4488 for (i = 0; i < ioa_cfg->cfg_table->hdr.num_entries; i++) {
4489 cfgte = &ioa_cfg->cfg_table->dev[i];
/* Match existing entries by resource address */
4492 list_for_each_entry_safe(res, temp, &old_res, queue) {
4493 if (!memcmp(&res->cfgte.res_addr,
4494 &cfgte->res_addr, sizeof(cfgte->res_addr))) {
4495 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
/* New device: claim a free resource entry for it */
4502 if (list_empty(&ioa_cfg->free_res_q)) {
4503 dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
4508 res = list_entry(ioa_cfg->free_res_q.next,
4509 struct ipr_resource_entry, queue);
4510 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
4511 ipr_init_res_entry(res);
/* Refresh the cached config table entry for the matched/new resource */
4516 memcpy(&res->cfgte, cfgte, sizeof(struct ipr_config_table_entry));
/* Anything left on old_res vanished from the config table: schedule its
 * removal from the mid-layer (or recycle it straight to the free list). */
4519 list_for_each_entry_safe(res, temp, &old_res, queue) {
4521 res->del_from_ml = 1;
4522 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
4524 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
4528 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
4531 return IPR_RC_JOB_CONTINUE;
4535 * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
4536 * @ipr_cmd: ipr command struct
4538 * This function sends a Query IOA Configuration command
4539 * to the adapter to retrieve the IOA configuration table.
4544 static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
4546 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4547 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4548 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
4549 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
/* Log the firmware level captured by the preceding page-3 inquiry */
4552 dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
4553 ucode_vpd->major_release, ucode_vpd->card_type,
4554 ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
4555 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
4556 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
/* CDB bytes 7/8 carry the big-endian transfer length */
4558 ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
4559 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_config_table) >> 8) & 0xff;
4560 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_config_table) & 0xff;
4562 ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
4563 ioarcb->read_data_transfer_length =
4564 cpu_to_be32(sizeof(struct ipr_config_table));
4566 ioadl->address = cpu_to_be32(ioa_cfg->cfg_table_dma);
4567 ioadl->flags_and_data_len =
4568 cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | sizeof(struct ipr_config_table));
/* Next step: reconcile the retrieved table with our resource entries */
4570 ipr_cmd->job_step = ipr_init_res_table;
4572 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
4575 return IPR_RC_JOB_RETURN;
4579 * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
4580 * @ipr_cmd: ipr command struct
 * @flags: INQUIRY CDB byte 1 (EVPD bit etc.)
 * @page: VPD page code (CDB byte 2)
 * @dma_addr: DMA address of the receive buffer
 * @xfer_len: allocation length (CDB byte 4)
4582 * This utility function sends an inquiry to the adapter.
4587 static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
4588 u32 dma_addr, u8 xfer_len)
4590 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4591 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
4594 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
4595 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
4597 ioarcb->cmd_pkt.cdb[0] = INQUIRY;
4598 ioarcb->cmd_pkt.cdb[1] = flags;
4599 ioarcb->cmd_pkt.cdb[2] = page;
4600 ioarcb->cmd_pkt.cdb[4] = xfer_len;
4602 ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
4603 ioarcb->read_data_transfer_length = cpu_to_be32(xfer_len);
4605 ioadl->address = cpu_to_be32(dma_addr);
4606 ioadl->flags_and_data_len =
4607 cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | xfer_len);
4609 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
4614 * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
4615 * @ipr_cmd: ipr command struct
4617 * This function sends a Page 3 inquiry to the adapter
4618 * to retrieve software VPD information.
4621 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
4623 static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
4625 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4630 /* Grab the type out of the VPD and store it away */
4631 memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
/* The 4-character product ID prefix is a hex adapter type (e.g. 5702) */
4633 ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);
4635 ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
/* EVPD inquiry (flags=1) for VPD page 3 into the shared DMA area */
4637 ipr_ioafp_inquiry(ipr_cmd, 1, 3,
4638 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
4639 sizeof(struct ipr_inquiry_page3));
4642 return IPR_RC_JOB_RETURN;
4646 * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
4647 * @ipr_cmd: ipr command struct
4649 * This function sends a standard inquiry to the adapter.
4654 static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
4656 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4659 ipr_cmd->job_step = ipr_ioafp_page3_inquiry;
/* Standard inquiry (flags=0, page=0) into the shared VPD DMA area */
4661 ipr_ioafp_inquiry(ipr_cmd, 0, 0,
4662 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
4663 sizeof(struct ipr_ioa_vpd));
4666 return IPR_RC_JOB_RETURN;
4670 * ipr_ioafp_indentify_hrrq - Send Identify Host RRQ.
4671 * @ipr_cmd: ipr command struct
4673 * This function send an Identify Host Request Response Queue
4674 * command to establish the HRRQ with the adapter.
 * (Note: "indentify" is a long-standing typo in the symbol name, kept
 * for consistency with existing references.)
4679 static int ipr_ioafp_indentify_hrrq(struct ipr_cmnd *ipr_cmd)
4681 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4682 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4685 dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
4687 ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
4688 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
/* CDB bytes 2-5: big-endian DMA address of the host RRQ;
 * bytes 7-8: big-endian length of the queue in bytes. */
4690 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
4691 ioarcb->cmd_pkt.cdb[2] =
4692 ((u32) ioa_cfg->host_rrq_dma >> 24) & 0xff;
4693 ioarcb->cmd_pkt.cdb[3] =
4694 ((u32) ioa_cfg->host_rrq_dma >> 16) & 0xff;
4695 ioarcb->cmd_pkt.cdb[4] =
4696 ((u32) ioa_cfg->host_rrq_dma >> 8) & 0xff;
4697 ioarcb->cmd_pkt.cdb[5] =
4698 ((u32) ioa_cfg->host_rrq_dma) & 0xff;
4699 ioarcb->cmd_pkt.cdb[7] =
4700 ((sizeof(u32) * IPR_NUM_CMD_BLKS) >> 8) & 0xff;
4701 ioarcb->cmd_pkt.cdb[8] =
4702 (sizeof(u32) * IPR_NUM_CMD_BLKS) & 0xff;
4704 ipr_cmd->job_step = ipr_ioafp_std_inquiry;
4706 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
4709 return IPR_RC_JOB_RETURN;
4713 * ipr_reset_timer_done - Adapter reset timer function
4714 * @ipr_cmd: ipr command struct
4716 * Description: This function is used in adapter reset processing
4717 * for timing events. If the reset_cmd pointer in the IOA
4718 * config struct is not this adapter's we are doing nested
4719 * resets and fail_all_ops will take care of freeing the
4725 static void ipr_reset_timer_done(struct ipr_cmnd *ipr_cmd)
4727 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4728 unsigned long lock_flags = 0;
/* Timer context: take the host lock before touching reset state */
4730 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
/* Only advance the job if this command still owns the reset */
4732 if (ioa_cfg->reset_cmd == ipr_cmd) {
4733 list_del(&ipr_cmd->queue);
4734 ipr_cmd->done(ipr_cmd);
4737 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4741 * ipr_reset_start_timer - Start a timer for adapter reset job
4742 * @ipr_cmd: ipr command struct
4743 * @timeout: timeout value
4745 * Description: This function is used in adapter reset processing
4746 * for timing events. If the reset_cmd pointer in the IOA
4747 * config struct is not this adapter's we are doing nested
4748 * resets and fail_all_ops will take care of freeing the
4754 static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
4755 unsigned long timeout)
/* Park the command on the pending queue; the timer resumes the job */
4757 list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q);
4758 ipr_cmd->done = ipr_reset_ioa_job;
4760 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
4761 ipr_cmd->timer.expires = jiffies + timeout;
/* Cast adapts ipr_reset_timer_done to the timer callback signature */
4762 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_reset_timer_done;
4763 add_timer(&ipr_cmd->timer);
4767 * ipr_init_ioa_mem - Initialize ioa_cfg control block
4768 * @ioa_cfg: ioa cfg struct
4773 static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
4775 memset(ioa_cfg->host_rrq, 0, sizeof(u32) * IPR_NUM_CMD_BLKS);
4777 /* Initialize Host RRQ pointers */
4778 ioa_cfg->hrrq_start = ioa_cfg->host_rrq;
4779 ioa_cfg->hrrq_end = &ioa_cfg->host_rrq[IPR_NUM_CMD_BLKS - 1];
4780 ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
/* Toggle bit distinguishes new HRRQ entries from stale ones on wrap */
4781 ioa_cfg->toggle_bit = 1;
4783 /* Zero out config table */
4784 memset(ioa_cfg->cfg_table, 0, sizeof(struct ipr_config_table));
4788 * ipr_reset_enable_ioa - Enable the IOA following a reset.
4789 * @ipr_cmd: ipr command struct
4791 * This function reinitializes some control blocks and
4792 * enables destructive diagnostics on the adapter.
4797 static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
4799 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4800 volatile u32 int_reg;
4803 ipr_cmd->job_step = ipr_ioafp_indentify_hrrq;
4804 ipr_init_ioa_mem(ioa_cfg);
4806 ioa_cfg->allow_interrupts = 1;
4807 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
/* Adapter already transitioned to operational: just unmask interrupts
 * and continue the job synchronously. */
4809 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
4810 writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
4811 ioa_cfg->regs.clr_interrupt_mask_reg)
/* Read back to flush the posted MMIO write */
4812 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
4813 return IPR_RC_JOB_CONTINUE;
4816 /* Enable destructive diagnostics on IOA */
4817 writel(IPR_DOORBELL, ioa_cfg->regs.set_uproc_interrupt_reg);
4819 writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg);
4820 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
4822 dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");
/* Arm a watchdog: if the IOA does not become operational before the
 * timeout, ipr_timeout fires; otherwise the interrupt path resumes
 * the reset job. */
4824 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
4825 ipr_cmd->timer.expires = jiffies + IPR_OPERATIONAL_TIMEOUT;
4826 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_timeout;
4827 ipr_cmd->done = ipr_reset_ioa_job;
4828 add_timer(&ipr_cmd->timer);
4829 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
4832 return IPR_RC_JOB_RETURN;
4836 * ipr_reset_wait_for_dump - Wait for a dump to timeout.
4837 * @ipr_cmd: ipr command struct
4839 * This function is invoked when an adapter dump has run out
4840 * of processing time.
4843 * IPR_RC_JOB_CONTINUE
4845 static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
4847 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
/* Dump took too long: abandon it and proceed with the reset */
4849 if (ioa_cfg->sdt_state == GET_DUMP)
4850 ioa_cfg->sdt_state = ABORT_DUMP;
4852 ipr_cmd->job_step = ipr_reset_alert;
4854 return IPR_RC_JOB_CONTINUE;
4858 * ipr_unit_check_no_data - Log a unit check/no data error log
4859 * @ioa_cfg: ioa config struct
4861 * Logs an error indicating the adapter unit checked, but for some
4862 * reason, we were unable to fetch the unit check buffer.
4867 static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
4869 ioa_cfg->errors_logged++;
4870 dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
4874 * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
4875 * @ioa_cfg: ioa config struct
4877 * Fetches the unit check buffer from the adapter by clocking the data
4878 * through the mailbox register.
4883 static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
4885 unsigned long mailbox;
4886 struct ipr_hostrcb *hostrcb;
4887 struct ipr_uc_sdt sdt;
/* The mailbox register holds the address of the SDT (dump table) */
4890 mailbox = readl(ioa_cfg->ioa_mailbox);
4892 if (!ipr_sdt_is_fmt2(mailbox)) {
4893 ipr_unit_check_no_data(ioa_cfg);
/* Pull the SDT header and first entry out of adapter memory */
4897 memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
4898 rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (u32 *) &sdt,
4899 (sizeof(struct ipr_uc_sdt)) / sizeof(u32));
4901 if (rc || (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE) ||
4902 !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY)) {
4903 ipr_unit_check_no_data(ioa_cfg);
4907 /* Find length of the first sdt entry (UC buffer) */
4908 length = (be32_to_cpu(sdt.entry[0].end_offset) -
4909 be32_to_cpu(sdt.entry[0].bar_str_offset)) & IPR_FMT2_MBX_ADDR_MASK;
/* Borrow a free HCAM buffer to hold the fetched unit check data */
4911 hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
4912 struct ipr_hostrcb, queue);
4913 list_del(&hostrcb->queue);
4914 memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));
4916 rc = ipr_get_ldump_data_section(ioa_cfg,
4917 be32_to_cpu(sdt.entry[0].bar_str_offset),
4918 (u32 *)&hostrcb->hcam,
4919 min(length, (int)sizeof(hostrcb->hcam)) / sizeof(u32));
4922 ipr_handle_log_data(ioa_cfg, hostrcb);
4924 ipr_unit_check_no_data(ioa_cfg);
/* Return the borrowed buffer to the free queue in all cases */
4926 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
4930 * ipr_reset_restore_cfg_space - Restore PCI config space.
4931 * @ipr_cmd: ipr command struct
4933 * Description: This function restores the saved PCI config space of
4934 * the adapter, fails all outstanding ops back to the callers, and
4935 * fetches the dump/unit check if applicable to this reset.
4938 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
4940 static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
4942 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4946 rc = pci_restore_state(ioa_cfg->pdev, ioa_cfg->pci_cfg_buf);
/* PCI access failures are recorded in the IOASA so the job engine can
 * retry/fail the reset appropriately. */
4948 if (rc != PCIBIOS_SUCCESSFUL) {
4949 ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
4950 return IPR_RC_JOB_CONTINUE;
4953 if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
4954 ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
4955 return IPR_RC_JOB_CONTINUE;
/* Everything in flight died with the reset: fail it back to callers */
4958 ipr_fail_all_ops(ioa_cfg);
/* Unit check pending: fetch its buffer, then re-alert the adapter */
4960 if (ioa_cfg->ioa_unit_checked) {
4961 ioa_cfg->ioa_unit_checked = 0;
4962 ipr_get_unit_check_buffer(ioa_cfg);
4963 ipr_cmd->job_step = ipr_reset_alert;
4964 ipr_reset_start_timer(ipr_cmd, 0);
4965 return IPR_RC_JOB_RETURN;
/* Choose the next step: finish bringdown, or re-enable the IOA;
 * a pending dump request defers the enable behind the dump worker. */
4968 if (ioa_cfg->in_ioa_bringdown) {
4969 ipr_cmd->job_step = ipr_ioa_bringdown_done;
4971 ipr_cmd->job_step = ipr_reset_enable_ioa;
4973 if (GET_DUMP == ioa_cfg->sdt_state) {
4974 ipr_reset_start_timer(ipr_cmd, IPR_DUMP_TIMEOUT);
4975 ipr_cmd->job_step = ipr_reset_wait_for_dump;
4976 schedule_work(&ioa_cfg->work_q);
4977 return IPR_RC_JOB_RETURN;
4982 return IPR_RC_JOB_CONTINUE;
4986 * ipr_reset_start_bist - Run BIST on the adapter.
4987 * @ipr_cmd: ipr command struct
4989 * Description: This function runs BIST on the adapter, then delays 2 seconds.
4992 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
4994 static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
4996 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
/* Kick off the PCI built-in self test through config space. */
5000 rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START)<b></b>;
5002 if (rc != PCIBIOS_SUCCESSFUL) {
/* Could not even start BIST: flag a PCI access error IOASC and let the
 * reset job continue so the failure is processed. */
5003 ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
5004 rc = IPR_RC_JOB_CONTINUE;
/* NOTE(review): the else keyword/brace appears elided in this view.
 * On success, wait for BIST to finish, then restore config space. */
5006 ipr_cmd->job_step = ipr_reset_restore_cfg_space;
5007 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
5008 rc = IPR_RC_JOB_RETURN;
5016 * ipr_reset_allowed - Query whether or not IOA can be reset
5017 * @ioa_cfg: ioa config struct
5020 * 0 if reset not allowed / non-zero if reset is allowed
5022 static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
5024 volatile u32 temp_reg;
5026 temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5027 return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
5031 * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
5032 * @ipr_cmd: ipr command struct
5034 * Description: This function waits for adapter permission to run BIST,
5035 * then runs BIST. If the adapter does not give permission after a
5036 * reasonable time, we will reset the adapter anyway. The impact of
5037 * resetting the adapter without warning the adapter is the risk of
5038 * losing the persistent error log on the adapter. If the adapter is
5039 * reset while it is writing to the flash on the adapter, the flash
5040 * segment will have bad ECC and be zeroed.
5043 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5045 static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
5047 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5048 int rc = IPR_RC_JOB_RETURN;
5050 if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
5051 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
5052 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
5054 ipr_cmd->job_step = ipr_reset_start_bist;
5055 rc = IPR_RC_JOB_CONTINUE;
5062 * ipr_reset_alert - Alert the adapter of a pending reset
5063 * @ipr_cmd: ipr command struct
5065 * Description: This function alerts the adapter that it will be reset.
5066 * If memory space is not currently enabled, proceed directly
5067 * to running BIST on the adapter. The timer must always be started
5068 * so we guarantee we do not run BIST from ipr_isr.
5073 static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
5075 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
/* Check whether PCI memory space is enabled -- MMIO register writes
 * only take effect if it is. */
5080 rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);
5082 if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
/* Quiesce interrupts and raise the reset-alert uproc interrupt so the
 * adapter can preserve its persistent error log before BIST. */
5083 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
5084 writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg);
5085 ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
/* NOTE(review): else branch appears elided in this view; without
 * memory space we go straight to BIST. */
5087 ipr_cmd->job_step = ipr_reset_start_bist;
/* Always start the timer so BIST never runs from ipr_isr context. */
5090 ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
5091 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
5094 return IPR_RC_JOB_RETURN;
5098 * ipr_reset_ucode_download_done - Microcode download completion
5099 * @ipr_cmd: ipr command struct
5101 * Description: This function unmaps the microcode download buffer.
5104 * IPR_RC_JOB_CONTINUE
5106 static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
5108 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5109 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
5111 pci_unmap_sg(ioa_cfg->pdev, sglist->scatterlist,
5112 sglist->num_sg, DMA_TO_DEVICE);
5114 ipr_cmd->job_step = ipr_reset_alert;
5115 return IPR_RC_JOB_CONTINUE;
5119 * ipr_reset_ucode_download - Download microcode to the adapter
5120 * @ipr_cmd: ipr command struct
5122 * Description: This function checks to see if it there is microcode
5123 * to download to the adapter. If there is, a download is performed.
5126 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5128 static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
5130 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5131 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
/* No microcode staged by sysfs: skip straight to the alert step. */
5134 ipr_cmd->job_step = ipr_reset_alert;
5137 return IPR_RC_JOB_CONTINUE;
/* Build a SCSI WRITE BUFFER (download microcode and save) addressed
 * to the IOA resource; CDB bytes 6-8 carry the 24-bit buffer length. */
5139 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
5140 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
5141 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
5142 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
5143 ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
5144 ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
5145 ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;
/* Map the scatter/gather list for DMA; bail out (continuing the reset
 * job) if mapping fails. */
5147 if (ipr_map_ucode_buffer(ipr_cmd, sglist, sglist->buffer_len)) {
5148 dev_err(&ioa_cfg->pdev->dev,
5149 "Failed to map microcode download buffer\n");
5150 return IPR_RC_JOB_CONTINUE;
/* The done step will unmap the buffer when WRITE BUFFER completes. */
5153 ipr_cmd->job_step = ipr_reset_ucode_download_done;
5155 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
5156 IPR_WRITE_BUFFER_TIMEOUT);
5159 return IPR_RC_JOB_RETURN;
5163 * ipr_reset_shutdown_ioa - Shutdown the adapter
5164 * @ipr_cmd: ipr command struct
5166 * Description: This function issues an adapter shutdown of the
5167 * specified type to the specified adapter as part of the
5168 * adapter reset job.
5171 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5173 static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
5175 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5176 enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
5177 unsigned long timeout;
5178 int rc = IPR_RC_JOB_CONTINUE;
/* Only issue a shutdown if one was requested and the IOA is alive. */
5181 if (shutdown_type != IPR_SHUTDOWN_NONE && !ioa_cfg->ioa_is_dead) {
5182 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
5183 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
5184 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
5185 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;
/* Pick a timeout appropriate to the shutdown flavor. */
5187 if (shutdown_type == IPR_SHUTDOWN_ABBREV)
5188 timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;
5189 else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
5190 timeout = IPR_INTERNAL_TIMEOUT;
/* NOTE(review): an else line appears elided in this view. */
5192 timeout = IPR_SHUTDOWN_TIMEOUT;
5194 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);
/* The shutdown goes async; after it completes, try a ucode download. */
5196 rc = IPR_RC_JOB_RETURN;
5197 ipr_cmd->job_step = ipr_reset_ucode_download;
/* NOTE(review): else branch appears elided; with no shutdown needed we
 * go straight to the reset alert step. */
5199 ipr_cmd->job_step = ipr_reset_alert;
5206 * ipr_reset_ioa_job - Adapter reset job
5207 * @ipr_cmd: ipr command struct
5209 * Description: This function is the job router for the adapter reset job.
5214 static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
/* scratch survives ipr_reinit_ipr_cmnd() below; it carries per-reset
 * state between job steps. */
5217 unsigned long scratch = ipr_cmd->u.scratch;
5218 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5221 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
5223 if (ioa_cfg->reset_cmd != ipr_cmd) {
5225 * We are doing nested adapter resets and this is
5226 * not the current reset job.
5228 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5232 if (IPR_IOASC_SENSE_KEY(ioasc)) {
/* The previous job step failed: log it and restart the whole reset. */
5233 dev_err(&ioa_cfg->pdev->dev,
5234 "0x%02X failed with IOASC: 0x%08X\n",
5235 ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
5237 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5238 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
/* Run job steps inline until one goes asynchronous (returns
 * IPR_RC_JOB_RETURN via a timer or an issued adapter command). */
5242 ipr_reinit_ipr_cmnd(ipr_cmd);
5243 ipr_cmd->u.scratch = scratch;
5244 rc = ipr_cmd->job_step(ipr_cmd);
5245 } while(rc == IPR_RC_JOB_CONTINUE);
5249 * _ipr_initiate_ioa_reset - Initiate an adapter reset
5250 * @ioa_cfg: ioa config struct
5251 * @job_step: first job step of reset job
5252 * @shutdown_type: shutdown type
5254 * Description: This function will initiate the reset of the given adapter
5255 * starting at the selected job step.
5256 * If the caller needs to wait on the completion of the reset,
5257 * the caller must sleep on the reset_wait_q.
5262 static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
5263 int (*job_step) (struct ipr_cmnd *),
5264 enum ipr_shutdown_type shutdown_type)
5266 struct ipr_cmnd *ipr_cmd;
5268 ioa_cfg->in_reset_reload = 1;
5269 ioa_cfg->allow_cmds = 0;
5270 scsi_block_requests(ioa_cfg->host);
5272 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5273 ioa_cfg->reset_cmd = ipr_cmd;
5274 ipr_cmd->job_step = job_step;
5275 ipr_cmd->u.shutdown_type = shutdown_type;
5277 ipr_reset_ioa_job(ipr_cmd);
5281 * ipr_initiate_ioa_reset - Initiate an adapter reset
5282 * @ioa_cfg: ioa config struct
5283 * @shutdown_type: shutdown type
5285 * Description: This function will initiate the reset of the given adapter.
5286 * If the caller needs to wait on the completion of the reset,
5287 * the caller must sleep on the reset_wait_q.
5292 static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
5293 enum ipr_shutdown_type shutdown_type)
/* A dead IOA cannot be reset. */
5295 if (ioa_cfg->ioa_is_dead)
/* An in-flight dump cannot survive a nested reset -- abort it. */
5298 if (ioa_cfg->in_reset_reload && ioa_cfg->sdt_state == GET_DUMP)
5299 ioa_cfg->sdt_state = ABORT_DUMP;
/* Too many consecutive reset/reload attempts: give up on the IOA. */
5301 if (ioa_cfg->reset_retries++ > IPR_NUM_RESET_RELOAD_RETRIES) {
5302 dev_err(&ioa_cfg->pdev->dev,
5303 "IOA taken offline - error recovery failed\n");
5305 ioa_cfg->reset_retries = 0;
5306 ioa_cfg->ioa_is_dead = 1;
5308 if (ioa_cfg->in_ioa_bringdown) {
/* Bringdown is already in progress: fail everything back and wake
 * any waiters rather than starting another reset. */
5309 ioa_cfg->reset_cmd = NULL;
5310 ioa_cfg->in_reset_reload = 0;
5311 ipr_fail_all_ops(ioa_cfg);
5312 wake_up_all(&ioa_cfg->reset_wait_q);
/* Drop the host lock around scsi_unblock_requests() since it can
 * re-enter the queuecommand path. */
5314 spin_unlock_irq(ioa_cfg->host->host_lock);
5315 scsi_unblock_requests(ioa_cfg->host);
5316 spin_lock_irq(ioa_cfg->host->host_lock);
/* NOTE(review): return/else lines appear elided in this view. The IOA
 * is dead, so convert this reset into a bringdown with no shutdown. */
5319 ioa_cfg->in_ioa_bringdown = 1;
5320 shutdown_type = IPR_SHUTDOWN_NONE;
5324 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
5329 * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
5330 * @ioa_cfg: ioa cfg struct
5332 * Description: This is the second phase of adapter initialization
5333 * This function takes care of initializing the adapter to the point
5334 * where it can accept new commands.
5337 * 0 on success / -EIO on failure
5339 static int __devinit ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
5342 unsigned long host_lock_flags = 0;
5345 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
5346 dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
/* Start the reset job at the enable step, then wait (with the host
 * lock dropped) for the reset/reload to finish. */
5347 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa, IPR_SHUTDOWN_NONE);
5349 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
5350 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5351 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
/* Evaluate the outcome: dead IOA or unsupported adapter is -EIO.
 * NOTE(review): rc assignments/bringdown lines appear elided here. */
5353 if (ioa_cfg->ioa_is_dead) {
5355 } else if (ipr_invalid_adapter(ioa_cfg)) {
5359 dev_err(&ioa_cfg->pdev->dev,
5360 "Adapter not supported in this hardware configuration.\n");
5363 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
5370 * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
5371 * @ioa_cfg: ioa config struct
5376 static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
5380 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
5381 if (ioa_cfg->ipr_cmnd_list[i])
5382 pci_pool_free(ioa_cfg->ipr_cmd_pool,
5383 ioa_cfg->ipr_cmnd_list[i],
5384 ioa_cfg->ipr_cmnd_list_dma[i]);
5386 ioa_cfg->ipr_cmnd_list[i] = NULL;
5389 if (ioa_cfg->ipr_cmd_pool)
5390 pci_pool_destroy (ioa_cfg->ipr_cmd_pool);
5392 ioa_cfg->ipr_cmd_pool = NULL;
5396 * ipr_free_mem - Frees memory allocated for an adapter
5397 * @ioa_cfg: ioa cfg struct
5402 static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
/* Release every kmalloc'd and DMA-coherent buffer owned by ioa_cfg. */
5406 kfree(ioa_cfg->res_entries);
5407 pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_misc_cbs),
5408 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
5409 ipr_free_cmd_blks(ioa_cfg);
5410 pci_free_consistent(ioa_cfg->pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
5411 ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
/* NOTE(review): the cfg_table pointer argument line appears elided. */
5412 pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_config_table),
5414 ioa_cfg->cfg_table_dma);
/* Free each HCAM (host controlled async message) buffer. */
5416 for (i = 0; i < IPR_NUM_HCAMS; i++) {
5417 pci_free_consistent(ioa_cfg->pdev,
5418 sizeof(struct ipr_hostrcb),
5419 ioa_cfg->hostrcb[i],
5420 ioa_cfg->hostrcb_dma[i]);
5423 ipr_free_dump(ioa_cfg);
5424 kfree(ioa_cfg->saved_mode_pages);
5425 kfree(ioa_cfg->trace);
5429 * ipr_free_all_resources - Free all allocated resources for an adapter.
5430 * @ipr_cmd: ipr command struct
5432 * This function frees all allocated resources for the
5433 * specified adapter.
5438 static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
5441 free_irq(ioa_cfg->pdev->irq, ioa_cfg);
5442 iounmap((void *) ioa_cfg->hdw_dma_regs);
5443 release_mem_region(ioa_cfg->hdw_dma_regs_pci,
5444 pci_resource_len(ioa_cfg->pdev, 0));
5445 ipr_free_mem(ioa_cfg);
5446 scsi_host_put(ioa_cfg->host);
5451 * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
5452 * @ioa_cfg: ioa config struct
5455 * 0 on success / -ENOMEM on allocation failure
5457 static int __devinit ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
5459 struct ipr_cmnd *ipr_cmd;
5460 struct ipr_ioarcb *ioarcb;
/* Create a DMA pool with 8-byte alignment for the command blocks. */
5464 ioa_cfg->ipr_cmd_pool = pci_pool_create (IPR_NAME, ioa_cfg->pdev,
5465 sizeof(struct ipr_cmnd), 8, 0);
5467 if (!ioa_cfg->ipr_cmd_pool)
5470 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
/* Allocate one command block; on failure free everything allocated
 * so far. NOTE(review): -ENOMEM return lines appear elided. */
5471 ipr_cmd = pci_pool_alloc (ioa_cfg->ipr_cmd_pool, SLAB_KERNEL, &dma_addr);
5474 ipr_free_cmd_blks(ioa_cfg);
5478 memset(ipr_cmd, 0, sizeof(*ipr_cmd));
5479 ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
5480 ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;
/* Pre-compute the bus addresses the adapter needs inside the IOARCB:
 * its own address, the IOADL (scatter list) and the IOASA (status). */
5482 ioarcb = &ipr_cmd->ioarcb;
5483 ioarcb->ioarcb_host_pci_addr = cpu_to_be32(dma_addr);
5484 ioarcb->host_response_handle = cpu_to_be32(i << 2);
5485 ioarcb->write_ioadl_addr =
5486 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioadl));
5487 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
5488 ioarcb->ioasa_host_pci_addr =
5489 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioasa));
5490 ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
5491 ipr_cmd->cmd_index = i;
5492 ipr_cmd->ioa_cfg = ioa_cfg;
5493 ipr_cmd->sense_buffer_dma = dma_addr +
5494 offsetof(struct ipr_cmnd, sense_buffer);
/* All blocks start life on the free queue. */
5496 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5503 * ipr_alloc_mem - Allocate memory for an adapter
5504 * @ioa_cfg: ioa config struct
5507 * 0 on success / non-zero for error
5509 static int __devinit ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
/* Allocate the resource entry table and seed the free queue.
 * NOTE(review): the goto cleanup labels of this function appear
 * elided in this view. */
5514 ioa_cfg->res_entries = kmalloc(sizeof(struct ipr_resource_entry) *
5515 IPR_MAX_PHYSICAL_DEVS, GFP_KERNEL);
5517 if (!ioa_cfg->res_entries)
5520 memset(ioa_cfg->res_entries, 0,
5521 sizeof(struct ipr_resource_entry) * IPR_MAX_PHYSICAL_DEVS);
5523 for (i = 0; i < IPR_MAX_PHYSICAL_DEVS; i++)
5524 list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
/* DMA-coherent miscellaneous control blocks (VPD etc.). */
5526 ioa_cfg->vpd_cbs = pci_alloc_consistent(ioa_cfg->pdev,
5527 sizeof(struct ipr_misc_cbs),
5528 &ioa_cfg->vpd_cbs_dma);
5530 if (!ioa_cfg->vpd_cbs)
5533 if (ipr_alloc_cmd_blks(ioa_cfg))
/* Host request/response queue -- one u32 slot per command block. */
5536 ioa_cfg->host_rrq = pci_alloc_consistent(ioa_cfg->pdev,
5537 sizeof(u32) * IPR_NUM_CMD_BLKS,
5538 &ioa_cfg->host_rrq_dma);
5540 if (!ioa_cfg->host_rrq)
5543 ioa_cfg->cfg_table = pci_alloc_consistent(ioa_cfg->pdev,
5544 sizeof(struct ipr_config_table),
5545 &ioa_cfg->cfg_table_dma);
5547 if (!ioa_cfg->cfg_table)
/* One DMA buffer per HCAM; record the DMA address of the hcam payload
 * and put each on the free queue. */
5550 for (i = 0; i < IPR_NUM_HCAMS; i++) {
5551 ioa_cfg->hostrcb[i] = pci_alloc_consistent(ioa_cfg->pdev,
5552 sizeof(struct ipr_hostrcb),
5553 &ioa_cfg->hostrcb_dma[i]);
5555 if (!ioa_cfg->hostrcb[i])
5558 memset(ioa_cfg->hostrcb[i], 0, sizeof(struct ipr_hostrcb));
5559 ioa_cfg->hostrcb[i]->hostrcb_dma =
5560 ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
5561 list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
/* Driver trace buffer (plain kernel memory, not DMA). */
5564 ioa_cfg->trace = kmalloc(sizeof(struct ipr_trace_entry) *
5565 IPR_NUM_TRACE_ENTRIES, GFP_KERNEL);
5567 if (!ioa_cfg->trace)
5570 memset(ioa_cfg->trace, 0,
5571 sizeof(struct ipr_trace_entry) * IPR_NUM_TRACE_ENTRIES);
/* Error path: undo all allocations done above. */
5577 ipr_free_mem(ioa_cfg);
5584 * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
5585 * @ioa_cfg: ioa config struct
5590 static void __devinit ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
5594 for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
5595 ioa_cfg->bus_attr[i].bus = i;
5596 ioa_cfg->bus_attr[i].qas_enabled = 0;
5597 ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
5598 if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
5599 ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
5601 ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
5606 * ipr_init_ioa_cfg - Initialize IOA config struct
5607 * @ioa_cfg: ioa config struct
5608 * @host: scsi host struct
5609 * @pdev: PCI dev struct
5614 static void __devinit ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
5615 struct Scsi_Host *host, struct pci_dev *pdev)
5617 ioa_cfg->host = host;
5618 ioa_cfg->pdev = pdev;
5619 ioa_cfg->log_level = ipr_log_level;
/* Eye-catcher labels that make the main structures easy to find in a
 * system dump. */
5620 sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
5621 sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
5622 sprintf(ioa_cfg->ipr_free_label, IPR_FREEQ_LABEL);
5623 sprintf(ioa_cfg->ipr_pending_label, IPR_PENDQ_LABEL);
5624 sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
5625 sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
5626 sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
5627 sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);
/* Initialize all the list heads, worker and wait queue. */
5629 INIT_LIST_HEAD(&ioa_cfg->free_q);
5630 INIT_LIST_HEAD(&ioa_cfg->pending_q);
5631 INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
5632 INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
5633 INIT_LIST_HEAD(&ioa_cfg->free_res_q);
5634 INIT_LIST_HEAD(&ioa_cfg->used_res_q);
5635 INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread, ioa_cfg);
5636 init_waitqueue_head(&ioa_cfg->reset_wait_q);
5637 ioa_cfg->sdt_state = INACTIVE;
5639 ipr_initialize_bus_attr(ioa_cfg);
/* Publish the adapter's addressing limits to the SCSI midlayer. */
5641 host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
5642 host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
5643 host->max_channel = IPR_MAX_BUS_TO_SCAN;
5644 host->unique_id = host->host_no;
5645 host->max_cmd_len = IPR_MAX_CDB_LEN;
5646 pci_set_drvdata(pdev, ioa_cfg);
/* Take the chip-specific register offset table and relocate each entry
 * by the mapped MMIO base address. */
5648 memcpy(&ioa_cfg->regs, &ioa_cfg->chip_cfg->regs, sizeof(ioa_cfg->regs));
5650 ioa_cfg->regs.set_interrupt_mask_reg += ioa_cfg->hdw_dma_regs;
5651 ioa_cfg->regs.clr_interrupt_mask_reg += ioa_cfg->hdw_dma_regs;
5652 ioa_cfg->regs.sense_interrupt_mask_reg += ioa_cfg->hdw_dma_regs;
5653 ioa_cfg->regs.clr_interrupt_reg += ioa_cfg->hdw_dma_regs;
5654 ioa_cfg->regs.sense_interrupt_reg += ioa_cfg->hdw_dma_regs;
5655 ioa_cfg->regs.ioarrin_reg += ioa_cfg->hdw_dma_regs;
5656 ioa_cfg->regs.sense_uproc_interrupt_reg += ioa_cfg->hdw_dma_regs;
5657 ioa_cfg->regs.set_uproc_interrupt_reg += ioa_cfg->hdw_dma_regs;
5658 ioa_cfg->regs.clr_uproc_interrupt_reg += ioa_cfg->hdw_dma_regs;
5662 * ipr_probe_ioa - Allocates memory and does first stage of initialization
5663 * @pdev: PCI device struct
5664 * @dev_id: PCI device id struct
5667 * 0 on success / non-zero on failure
5669 static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
5670 const struct pci_device_id *dev_id)
5672 struct ipr_ioa_cfg *ioa_cfg;
5673 struct Scsi_Host *host;
5674 unsigned long ipr_regs, ipr_regs_pci;
5675 u32 rc = PCIBIOS_SUCCESSFUL;
/* NOTE(review): the goto error labels of this function appear elided
 * in this view. */
5679 if ((rc = pci_enable_device(pdev))) {
5680 dev_err(&pdev->dev, "Cannot enable adapter\n");
5684 dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);
/* The ioa_cfg lives in the Scsi_Host's hostdata area. */
5686 host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));
5689 dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
5693 ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
5694 memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
/* Chip-specific config (register offsets, mailbox, cache line size)
 * comes from the PCI id table's driver_data. */
5696 ioa_cfg->chip_cfg = (const struct ipr_chip_cfg_t *)dev_id->driver_data;
/* Claim and map BAR 0 (the adapter's MMIO register window). */
5698 ipr_regs_pci = pci_resource_start(pdev, 0);
5700 if (!request_mem_region(ipr_regs_pci,
5701 pci_resource_len(pdev, 0), IPR_NAME)) {
5703 "Couldn't register memory range of registers\n");
5704 scsi_host_put(host);
5708 ipr_regs = (unsigned long)ioremap(ipr_regs_pci,
5709 pci_resource_len(pdev, 0));
5713 "Couldn't map memory range of registers\n");
5714 release_mem_region(ipr_regs_pci, pci_resource_len(pdev, 0));
5715 scsi_host_put(host);
5719 ioa_cfg->hdw_dma_regs = ipr_regs;
5720 ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
5721 ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;
5723 ipr_init_ioa_cfg(ioa_cfg, host, pdev);
/* Enable bus mastering and restrict DMA to 32-bit addresses. */
5725 pci_set_master(pdev);
5726 rc = pci_set_dma_mask(pdev, 0xffffffff);
5728 if (rc != PCIBIOS_SUCCESSFUL) {
5729 dev_err(&pdev->dev, "Failed to set PCI DMA mask\n");
5734 rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
5735 ioa_cfg->chip_cfg->cache_line_size);
5737 if (rc != PCIBIOS_SUCCESSFUL) {
5738 dev_err(&pdev->dev, "Write of cache line size failed\n");
5743 /* Save away PCI config space for use following IOA reset */
5744 rc = pci_save_state(pdev, ioa_cfg->pci_cfg_buf);
5746 if (rc != PCIBIOS_SUCCESSFUL) {
5747 dev_err(&pdev->dev, "Failed to save PCI config space\n");
5752 if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
5755 if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
5758 if ((rc = ipr_alloc_mem(ioa_cfg)))
/* Mask everything but the transition-to-operational interrupt before
 * wiring up the shared IRQ handler. */
5761 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
5762 rc = request_irq(pdev->irq, ipr_isr, SA_SHIRQ, IPR_NAME, ioa_cfg);
5765 dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
/* Register this adapter on the driver's global list. */
5770 spin_lock(&ipr_driver_lock);
5771 list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
5772 spin_unlock(&ipr_driver_lock);
/* Error unwind path (labels elided in this view). */
5778 dev_err(&pdev->dev, "Couldn't allocate enough memory for device driver!\n");
5780 ipr_free_mem(ioa_cfg);
5782 iounmap((void *) ipr_regs);
5783 release_mem_region(ipr_regs_pci, pci_resource_len(pdev, 0));
5784 scsi_host_put(host);
5790 * ipr_scan_vsets - Scans for VSET devices
5791 * @ioa_cfg: ioa config struct
5793 * Description: Since the VSET resources do not follow SAM in that we can have
5794 * sparse LUNs with no LUN 0, we have to scan for these ourselves.
5799 static void ipr_scan_vsets(struct ipr_ioa_cfg *ioa_cfg)
5803 for (target = 0; target < IPR_MAX_NUM_TARGETS_PER_BUS; target++)
5804 for (lun = 0; lun < IPR_MAX_NUM_VSET_LUNS_PER_TARGET; lun++ )
5805 scsi_add_device(ioa_cfg->host, IPR_VSET_BUS, target, lun);
5809 * ipr_initiate_ioa_bringdown - Bring down an adapter
5810 * @ioa_cfg: ioa config struct
5811 * @shutdown_type: shutdown type
5813 * Description: This function will initiate bringing down the adapter.
5814 * This consists of issuing an IOA shutdown to the adapter
5815 * to flush the cache, and running BIST.
5816 * If the caller needs to wait on the completion of the reset,
5817 * the caller must sleep on the reset_wait_q.
5822 static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
5823 enum ipr_shutdown_type shutdown_type)
5826 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
5827 ioa_cfg->sdt_state = ABORT_DUMP;
5828 ioa_cfg->reset_retries = 0;
5829 ioa_cfg->in_ioa_bringdown = 1;
5830 ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
5835 * __ipr_remove - Remove a single adapter
5836 * @pdev: pci device struct
5838 * Adapter hot plug remove entry point.
5843 static void __ipr_remove(struct pci_dev *pdev)
5845 unsigned long host_lock_flags = 0;
5846 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
/* Bring the adapter down (flush cache) and wait, with the host lock
 * dropped, for the reset/reload to complete. */
5849 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
5850 ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
5852 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
5853 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5854 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
/* Unhook this adapter from the driver's global list. */
5856 spin_lock(&ipr_driver_lock);
5857 list_del(&ioa_cfg->queue);
5858 spin_unlock(&ipr_driver_lock);
/* An aborted dump is treated as never-started for teardown purposes. */
5860 if (ioa_cfg->sdt_state == ABORT_DUMP)
5861 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
5862 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
5864 ipr_free_all_resources(ioa_cfg);
5870 * ipr_remove - IOA hot plug remove entry point
5871 * @pdev: pci device struct
5873 * Adapter hot plug remove entry point.
5878 static void ipr_remove(struct pci_dev *pdev)
5880 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
/* Stop accepting new commands and flush queued work before tearing
 * down the sysfs files and the SCSI host.
 * NOTE(review): the attribute arguments and the trailing call to
 * __ipr_remove() appear elided in this view. */
5884 ioa_cfg->allow_cmds = 0;
5885 flush_scheduled_work();
5886 ipr_remove_trace_file(&ioa_cfg->host->shost_classdev.kobj,
5888 ipr_remove_dump_file(&ioa_cfg->host->shost_classdev.kobj,
5890 scsi_remove_host(ioa_cfg->host);
5898 * ipr_probe - Adapter hot plug add entry point
5901 * 0 on success / non-zero on failure
5903 static int __devinit ipr_probe(struct pci_dev *pdev,
5904 const struct pci_device_id *dev_id)
5906 struct ipr_ioa_cfg *ioa_cfg;
/* Phase 1: allocate resources and map the adapter. */
5909 rc = ipr_probe_ioa(pdev, dev_id);
5914 ioa_cfg = pci_get_drvdata(pdev);
/* Phase 2: reset/enable the adapter so it can accept commands. */
5915 rc = ipr_probe_ioa_part2(ioa_cfg);
5922 rc = scsi_add_host(ioa_cfg->host, &pdev->dev);
/* Create the sysfs trace and dump attribute files; on failure undo
 * whatever was registered so far. NOTE(review): the error-unwind
 * calls to __ipr_remove() appear elided in this view. */
5929 rc = ipr_create_trace_file(&ioa_cfg->host->shost_classdev.kobj,
5933 scsi_remove_host(ioa_cfg->host);
5938 rc = ipr_create_dump_file(&ioa_cfg->host->shost_classdev.kobj,
5942 ipr_remove_trace_file(&ioa_cfg->host->shost_classdev.kobj,
5944 scsi_remove_host(ioa_cfg->host);
/* Scan the buses, add the IOA's own device entry and allow midlayer
 * add/remove from here on. */
5949 scsi_scan_host(ioa_cfg->host);
5950 ipr_scan_vsets(ioa_cfg);
5951 scsi_add_device(ioa_cfg->host, IPR_IOA_BUS, IPR_IOA_TARGET, IPR_IOA_LUN);
5952 ioa_cfg->allow_ml_add_del = 1;
5953 schedule_work(&ioa_cfg->work_q);
5958 * ipr_shutdown - Shutdown handler.
5959 * @dev: device struct
5961 * This function is invoked upon system shutdown/reboot. It will issue
5962 * an adapter shutdown to the adapter to flush the write cache.
5967 static void ipr_shutdown(struct device *dev)
5969 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(to_pci_dev(dev));
5970 unsigned long lock_flags = 0;
5972 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5973 ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
5974 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5975 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
/* PCI IDs of the supported adapters; driver_data carries a pointer to
 * the matching chip config (register offsets, mailbox, cache line). */
5978 static struct pci_device_id ipr_pci_table[] __devinitdata = {
5979 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
5980 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702,
5981 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
5982 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
5983 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E,
5984 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
5985 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
5986 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703,
5987 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
5988 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
5989 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D,
5990 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
5991 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
5992 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780,
5993 0, 0, (kernel_ulong_t)&ipr_chip_cfg[1] },
/* NOTE(review): the table's terminating empty entry appears elided. */
5996 MODULE_DEVICE_TABLE(pci, ipr_pci_table);
/* PCI driver registration; NOTE(review): .name and .probe member lines
 * appear elided in this view. */
5998 static struct pci_driver ipr_driver = {
6000 .id_table = ipr_pci_table,
6002 .remove = ipr_remove,
6004 .shutdown = ipr_shutdown,
6009 * ipr_init - Module entry point
6012 * 0 on success / non-zero on failure
6014 static int __init ipr_init(void)
6016 ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
6017 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);
6019 pci_register_driver(&ipr_driver);
6025 * ipr_exit - Module unload
6027 * Module unload entry point.
6032 static void __exit ipr_exit(void)
6034 pci_unregister_driver(&ipr_driver);
/* Register the module's entry and exit points with the kernel. */
6037 module_init(ipr_init);
6038 module_exit(ipr_exit);