X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=drivers%2Fscsi%2Fipr.c;h=b318500785e58cdc9556064ee25f115feec070c6;hb=97bf2856c6014879bd04983a3e9dfcdac1e7fe85;hp=bf3942769ca7460a14658c58116ee96d4664637b;hpb=9bf4aaab3e101692164d49b7ca357651eb691cb6;p=linux-2.6.git diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c index bf3942769..b31850078 100644 --- a/drivers/scsi/ipr.c +++ b/drivers/scsi/ipr.c @@ -1,7 +1,7 @@ /* * ipr.c -- driver for IBM Power Linux RAID adapters * - * Written By: Brian King, IBM Corporation + * Written By: Brian King , IBM Corporation * * Copyright (C) 2003, 2004 IBM Corporation * @@ -54,7 +54,6 @@ * */ -#include #include #include #include @@ -71,6 +70,7 @@ #include #include #include +#include #include #include #include @@ -79,7 +79,6 @@ #include #include #include -#include #include "ipr.h" /* @@ -89,11 +88,16 @@ static struct list_head ipr_ioa_head = LIST_HEAD_INIT(ipr_ioa_head); static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL; static unsigned int ipr_max_speed = 1; static int ipr_testmode = 0; -static spinlock_t ipr_driver_lock = SPIN_LOCK_UNLOCKED; +static unsigned int ipr_fastfail = 0; +static unsigned int ipr_transop_timeout = IPR_OPERATIONAL_TIMEOUT; +static unsigned int ipr_enable_cache = 1; +static unsigned int ipr_debug = 0; +static int ipr_auto_create = 1; +static DEFINE_SPINLOCK(ipr_driver_lock); /* This table describes the differences between DMA controller chips */ static const struct ipr_chip_cfg_t ipr_chip_cfg[] = { - { /* Gemstone */ + { /* Gemstone, Citrine, Obsidian, and Obsidian-E */ .mailbox = 0x0042C, .cache_line_size = 0x20, { @@ -108,7 +112,7 @@ static const struct ipr_chip_cfg_t ipr_chip_cfg[] = { .clr_uproc_interrupt_reg = 0x00218 } }, - { /* Snipe */ + { /* Snipe and Scamp */ .mailbox = 0x0052C, .cache_line_size = 0x20, { @@ -125,6 +129,16 @@ static const struct ipr_chip_cfg_t ipr_chip_cfg[] = { }, }; +static const struct ipr_chip_t ipr_chip[] = { + { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, &ipr_chip_cfg[0] }, + { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, &ipr_chip_cfg[0] }, + { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, &ipr_chip_cfg[0] }, + { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, &ipr_chip_cfg[0] }, + { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, &ipr_chip_cfg[0] }, + { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, &ipr_chip_cfg[1] }, + { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, &ipr_chip_cfg[1] } +}; + static int ipr_max_bus_speeds [] = { IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE }; @@ -137,32 +151,19 @@ module_param_named(log_level, ipr_log_level, uint, 0); MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver"); module_param_named(testmode, ipr_testmode, int, 0); MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations"); +module_param_named(fastfail, ipr_fastfail, int, 0); +MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries"); +module_param_named(transop_timeout, ipr_transop_timeout, int, 0); +MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)"); +module_param_named(enable_cache, ipr_enable_cache, int, 0); +MODULE_PARM_DESC(enable_cache, "Enable adapter's non-volatile write cache (default: 1)"); +module_param_named(debug, ipr_debug, int, 0); +MODULE_PARM_DESC(debug, "Enable device driver debugging logging. Set to 1 to enable. 
(default: 0)"); +module_param_named(auto_create, ipr_auto_create, int, 0); +MODULE_PARM_DESC(auto_create, "Auto-create single device RAID 0 arrays when initialized (default: 1)"); MODULE_LICENSE("GPL"); MODULE_VERSION(IPR_DRIVER_VERSION); -static const char *ipr_gpdd_dev_end_states[] = { - "Command complete", - "Terminated by host", - "Terminated by device reset", - "Terminated by bus reset", - "Unknown", - "Command not started" -}; - -static const char *ipr_gpdd_dev_bus_phases[] = { - "Bus free", - "Arbitration", - "Selection", - "Message out", - "Command", - "Message in", - "Data out", - "Data in", - "Status", - "Reselection", - "Unknown" -}; - /* A constant array of IOASCs/URCs/Error Messages */ static const struct ipr_error_table_t ipr_error_table[] = { @@ -176,6 +177,8 @@ struct ipr_error_table_t ipr_error_table[] = { "Qualified success"}, {0x01080000, 1, 1, "FFFE: Soft device bus error recovered by the IOA"}, + {0x01088100, 0, 1, + "4101: Soft device bus fabric error"}, {0x01170600, 0, 1, "FFF9: Device sector reassign successful"}, {0x01170900, 0, 1, @@ -198,6 +201,8 @@ struct ipr_error_table_t ipr_error_table[] = { "FFFA: Undefined device response recovered by the IOA"}, {0x014A0000, 1, 1, "FFF6: Device bus error, message or command phase"}, + {0x014A8000, 0, 1, + "FFFE: Task Management Function failed"}, {0x015D0000, 0, 1, "FFF6: Failure prediction threshold exceeded"}, {0x015D9200, 0, 1, @@ -208,6 +213,8 @@ struct ipr_error_table_t ipr_error_table[] = { "Synchronization required"}, {0x024E0000, 0, 0, "No ready, IOA shutdown"}, + {0x025A0000, 0, 0, + "Not ready, IOA has been shutdown"}, {0x02670100, 0, 1, "3020: Storage subsystem configuration error"}, {0x03110B00, 0, 0, @@ -224,6 +231,8 @@ struct ipr_error_table_t ipr_error_table[] = { "3109: IOA timed out a device command"}, {0x04088000, 0, 0, "3120: SCSI bus is not operational"}, + {0x04088100, 0, 1, + "4100: Hard device bus fabric error"}, {0x04118000, 0, 1, "9000: IOA reserved area data check"}, {0x04118100, 0, 1, @@ -256,6 +265,8 @@ struct ipr_error_table_t ipr_error_table[] = { "Device bus status error"}, {0x04448600, 0, 1, "8157: IOA error requiring IOA reset to recover"}, + {0x04448700, 0, 0, + "ATA device status error"}, {0x04490000, 0, 0, "Message reject received from the device"}, {0x04449200, 0, 1, @@ -268,14 +279,30 @@ struct ipr_error_table_t ipr_error_table[] = { "9082: IOA detected device error"}, {0x044A0000, 1, 1, "3110: Device bus error, message or command phase"}, + {0x044A8000, 1, 1, + "3110: SAS Command / Task Management Function failed"}, {0x04670400, 0, 1, "9091: Incorrect hardware configuration change has been detected"}, + {0x04678000, 0, 1, + "9073: Invalid multi-adapter configuration"}, + {0x04678100, 0, 1, + "4010: Incorrect connection between cascaded expanders"}, + {0x04678200, 0, 1, + "4020: Connections exceed IOA design limits"}, + {0x04678300, 0, 1, + "4030: Incorrect multipath connection"}, + {0x04679000, 0, 1, + "4110: Unsupported enclosure function"}, {0x046E0000, 0, 1, "FFF4: Command to logical unit failed"}, {0x05240000, 1, 0, "Illegal request, invalid request type or request packet"}, {0x05250000, 0, 0, "Illegal request, invalid resource handle"}, + {0x05258000, 0, 0, + "Illegal request, commands not allowed to this device"}, + {0x05258100, 0, 0, + "Illegal request, command not allowed to a secondary adapter"}, {0x05260000, 0, 0, "Illegal request, invalid field in parameter list"}, {0x05260100, 0, 0, @@ -284,10 +311,14 @@ struct ipr_error_table_t ipr_error_table[] = { "Illegal request, 
parameter value invalid"}, {0x052C0000, 0, 0, "Illegal request, command sequence error"}, + {0x052C8000, 1, 0, + "Illegal request, dual adapter support not enabled"}, {0x06040500, 0, 1, "9031: Array protection temporarily suspended, protection resuming"}, {0x06040600, 0, 1, "9040: Array protection temporarily suspended, protection resuming"}, + {0x06288000, 0, 1, + "3140: Device bus not ready to ready transition"}, {0x06290000, 0, 1, "FFFB: SCSI bus was reset"}, {0x06290500, 0, 0, @@ -300,16 +331,42 @@ struct ipr_error_table_t ipr_error_table[] = { "3029: A device replacement has occurred"}, {0x064C8000, 0, 1, "9051: IOA cache data exists for a missing or failed device"}, + {0x064C8100, 0, 1, + "9055: Auxiliary cache IOA contains cache data needed by the primary IOA"}, {0x06670100, 0, 1, "9025: Disk unit is not supported at its physical location"}, {0x06670600, 0, 1, "3020: IOA detected a SCSI bus configuration error"}, {0x06678000, 0, 1, "3150: SCSI bus configuration error"}, + {0x06678100, 0, 1, + "9074: Asymmetric advanced function disk configuration"}, + {0x06678300, 0, 1, + "4040: Incomplete multipath connection between IOA and enclosure"}, + {0x06678400, 0, 1, + "4041: Incomplete multipath connection between enclosure and device"}, + {0x06678500, 0, 1, + "9075: Incomplete multipath connection between IOA and remote IOA"}, + {0x06678600, 0, 1, + "9076: Configuration error, missing remote IOA"}, + {0x06679100, 0, 1, + "4050: Enclosure does not support a required multipath function"}, {0x06690200, 0, 1, "9041: Array protection temporarily suspended"}, + {0x06698200, 0, 1, + "9042: Corrupt array parity detected on specified device"}, {0x066B0200, 0, 1, "9030: Array no longer protected due to missing or failed disk unit"}, + {0x066B8000, 0, 1, + "9071: Link operational transition"}, + {0x066B8100, 0, 1, + "9072: Link not operational transition"}, + {0x066B8200, 0, 1, + "9032: Array exposed but still protected"}, + {0x066B9100, 0, 1, + "4061: Multipath redundancy level got better"}, + {0x066B9200, 0, 1, + "4060: Multipath redundancy level got worse"}, {0x07270000, 0, 0, "Failure due to other device"}, {0x07278000, 0, 1, @@ -404,7 +461,8 @@ static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd, trace_entry->time = jiffies; trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0]; trace_entry->type = type; - trace_entry->cmd_index = ipr_cmd->cmd_index; + trace_entry->ata_op_code = ipr_cmd->ioarcb.add_data.u.regs.command; + trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff; trace_entry->res_handle = ipr_cmd->ioarcb.res_handle; trace_entry->u.add_data = add_data; } @@ -431,8 +489,10 @@ static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd) ioarcb->read_ioadl_len = 0; ioasa->ioasc = 0; ioasa->residual_data_len = 0; + ioasa->u.gata.status = 0; ipr_cmd->scsi_cmd = NULL; + ipr_cmd->qc = NULL; ipr_cmd->sense_buffer[0] = 0; ipr_cmd->dma_use_sg = 0; } @@ -540,7 +600,7 @@ static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg) return -EIO; } - if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg, + if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD, &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) { dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n"); return -EIO; @@ -562,7 +622,7 @@ static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg) int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX); if (pcix_cmd_reg) { - if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg, + if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD, 
ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) { dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n"); return -EIO; @@ -576,6 +636,28 @@ static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg) return 0; } +/** + * ipr_sata_eh_done - done function for aborted SATA commands + * @ipr_cmd: ipr command struct + * + * This function is invoked for ops generated to SATA + * devices which are being aborted. + * + * Return value: + * none + **/ +static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd) +{ + struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; + struct ata_queued_cmd *qc = ipr_cmd->qc; + struct ipr_sata_port *sata_port = qc->ap->private_data; + + qc->err_mask |= AC_ERR_OTHER; + sata_port->ioasa.status |= ATA_BUSY; + list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q); + ata_qc_complete(qc); +} + /** * ipr_scsi_eh_done - mid-layer done function for aborted ops * @ipr_cmd: ipr command struct @@ -620,6 +702,8 @@ static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg) if (ipr_cmd->scsi_cmd) ipr_cmd->done = ipr_scsi_eh_done; + else if (ipr_cmd->qc) + ipr_cmd->done = ipr_sata_eh_done; ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, IPR_IOASC_IOA_WAS_RESET); del_timer(&ipr_cmd->timer); @@ -770,14 +854,13 @@ static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type, **/ static void ipr_init_res_entry(struct ipr_resource_entry *res) { - res->needs_sync_complete = 1; + res->needs_sync_complete = 0; res->in_erp = 0; res->add_to_ml = 0; res->del_from_ml = 0; res->resetting_device = 0; - res->tcq_active = 0; - res->qdepth = IPR_MAX_CMD_PER_LUN; res->sdev = NULL; + res->sata_port = NULL; } /** @@ -825,8 +908,8 @@ static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg, if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) { if (res->sdev) { - res->sdev->hostdata = NULL; res->del_from_ml = 1; + res->cfgte.res_handle = IPR_INVALID_RES_HANDLE; if (ioa_cfg->allow_ml_add_del) schedule_work(&ioa_cfg->work_q); } else @@ -872,26 +955,73 @@ static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd) /** * ipr_log_vpd - Log the passed VPD to the error log. - * @vpids: vendor/product id struct - * @serial_num: serial number string + * @vpd: vendor/product id/sn struct * * Return value: * none **/ -static void ipr_log_vpd(struct ipr_std_inq_vpids *vpids, u8 *serial_num) +static void ipr_log_vpd(struct ipr_vpd *vpd) { - char buffer[max_t(int, sizeof(struct ipr_std_inq_vpids), - IPR_SERIAL_NUM_LEN) + 1]; + char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN + + IPR_SERIAL_NUM_LEN]; - memcpy(buffer, vpids, sizeof(struct ipr_std_inq_vpids)); - buffer[sizeof(struct ipr_std_inq_vpids)] = '\0'; + memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN); + memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id, + IPR_PROD_ID_LEN); + buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0'; ipr_err("Vendor/Product ID: %s\n", buffer); - memcpy(buffer, serial_num, IPR_SERIAL_NUM_LEN); + memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN); buffer[IPR_SERIAL_NUM_LEN] = '\0'; ipr_err(" Serial Number: %s\n", buffer); } +/** + * ipr_log_ext_vpd - Log the passed extended VPD to the error log. + * @vpd: vendor/product id/sn/wwn struct + * + * Return value: + * none + **/ +static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd) +{ + ipr_log_vpd(&vpd->vpd); + ipr_err(" WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]), + be32_to_cpu(vpd->wwid[1])); +} + +/** + * ipr_log_enhanced_cache_error - Log a cache error. 
+ * @ioa_cfg: ioa config struct + * @hostrcb: hostrcb struct + * + * Return value: + * none + **/ +static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg, + struct ipr_hostrcb *hostrcb) +{ + struct ipr_hostrcb_type_12_error *error = + &hostrcb->hcam.u.error.u.type_12_error; + + ipr_err("-----Current Configuration-----\n"); + ipr_err("Cache Directory Card Information:\n"); + ipr_log_ext_vpd(&error->ioa_vpd); + ipr_err("Adapter Card Information:\n"); + ipr_log_ext_vpd(&error->cfc_vpd); + + ipr_err("-----Expected Configuration-----\n"); + ipr_err("Cache Directory Card Information:\n"); + ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd); + ipr_err("Adapter Card Information:\n"); + ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd); + + ipr_err("Additional IOA Data: %08X %08X %08X\n", + be32_to_cpu(error->ioa_data[0]), + be32_to_cpu(error->ioa_data[1]), + be32_to_cpu(error->ioa_data[2])); +} + /** * ipr_log_cache_error - Log a cache error. * @ioa_cfg: ioa config struct @@ -908,17 +1038,15 @@ static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg, ipr_err("-----Current Configuration-----\n"); ipr_err("Cache Directory Card Information:\n"); - ipr_log_vpd(&error->ioa_vpids, error->ioa_sn); + ipr_log_vpd(&error->ioa_vpd); ipr_err("Adapter Card Information:\n"); - ipr_log_vpd(&error->cfc_vpids, error->cfc_sn); + ipr_log_vpd(&error->cfc_vpd); ipr_err("-----Expected Configuration-----\n"); ipr_err("Cache Directory Card Information:\n"); - ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpids, - error->ioa_last_attached_to_cfc_sn); + ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd); ipr_err("Adapter Card Information:\n"); - ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpids, - error->cfc_last_attached_to_ioa_sn); + ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd); ipr_err("Additional IOA Data: %08X %08X %08X\n", be32_to_cpu(error->ioa_data[0]), @@ -926,6 +1054,46 @@ static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg, be32_to_cpu(error->ioa_data[2])); } +/** + * ipr_log_enhanced_config_error - Log a configuration error. + * @ioa_cfg: ioa config struct + * @hostrcb: hostrcb struct + * + * Return value: + * none + **/ +static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg, + struct ipr_hostrcb *hostrcb) +{ + int errors_logged, i; + struct ipr_hostrcb_device_data_entry_enhanced *dev_entry; + struct ipr_hostrcb_type_13_error *error; + + error = &hostrcb->hcam.u.error.u.type_13_error; + errors_logged = be32_to_cpu(error->errors_logged); + + ipr_err("Device Errors Detected/Logged: %d/%d\n", + be32_to_cpu(error->errors_detected), errors_logged); + + dev_entry = error->dev; + + for (i = 0; i < errors_logged; i++, dev_entry++) { + ipr_err_separator; + + ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1); + ipr_log_ext_vpd(&dev_entry->vpd); + + ipr_err("-----New Device Information-----\n"); + ipr_log_ext_vpd(&dev_entry->new_vpd); + + ipr_err("Cache Directory Card Information:\n"); + ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd); + + ipr_err("Adapter Card Information:\n"); + ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd); + } +} + /** * ipr_log_config_error - Log a configuration error. 
* @ioa_cfg: ioa config struct @@ -947,30 +1115,22 @@ static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg, ipr_err("Device Errors Detected/Logged: %d/%d\n", be32_to_cpu(error->errors_detected), errors_logged); - dev_entry = error->dev_entry; + dev_entry = error->dev; for (i = 0; i < errors_logged; i++, dev_entry++) { ipr_err_separator; - if (dev_entry->dev_res_addr.bus >= IPR_MAX_NUM_BUSES) { - ipr_err("Device %d: missing\n", i + 1); - } else { - ipr_err("Device %d: %d:%d:%d:%d\n", i + 1, - ioa_cfg->host->host_no, dev_entry->dev_res_addr.bus, - dev_entry->dev_res_addr.target, dev_entry->dev_res_addr.lun); - } - ipr_log_vpd(&dev_entry->dev_vpids, dev_entry->dev_sn); + ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1); + ipr_log_vpd(&dev_entry->vpd); ipr_err("-----New Device Information-----\n"); - ipr_log_vpd(&dev_entry->new_dev_vpids, dev_entry->new_dev_sn); + ipr_log_vpd(&dev_entry->new_vpd); ipr_err("Cache Directory Card Information:\n"); - ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpids, - dev_entry->ioa_last_with_dev_sn); + ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd); ipr_err("Adapter Card Information:\n"); - ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpids, - dev_entry->cfc_last_with_dev_sn); + ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd); ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n", be32_to_cpu(dev_entry->ioa_data[0]), @@ -981,6 +1141,57 @@ static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg, } } +/** + * ipr_log_enhanced_array_error - Log an array configuration error. + * @ioa_cfg: ioa config struct + * @hostrcb: hostrcb struct + * + * Return value: + * none + **/ +static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg, + struct ipr_hostrcb *hostrcb) +{ + int i, num_entries; + struct ipr_hostrcb_type_14_error *error; + struct ipr_hostrcb_array_data_entry_enhanced *array_entry; + const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' }; + + error = &hostrcb->hcam.u.error.u.type_14_error; + + ipr_err_separator; + + ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n", + error->protection_level, + ioa_cfg->host->host_no, + error->last_func_vset_res_addr.bus, + error->last_func_vset_res_addr.target, + error->last_func_vset_res_addr.lun); + + ipr_err_separator; + + array_entry = error->array_member; + num_entries = min_t(u32, be32_to_cpu(error->num_entries), + sizeof(error->array_member)); + + for (i = 0; i < num_entries; i++, array_entry++) { + if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN)) + continue; + + if (be32_to_cpu(error->exposed_mode_adn) == i) + ipr_err("Exposed Array Member %d:\n", i); + else + ipr_err("Array Member %d:\n", i); + + ipr_log_ext_vpd(&array_entry->vpd); + ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location"); + ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr, + "Expected Location"); + + ipr_err_separator; + } +} + /** * ipr_log_array_error - Log an array configuration error. * @ioa_cfg: ioa config struct @@ -995,9 +1206,7 @@ static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg, int i; struct ipr_hostrcb_type_04_error *error; struct ipr_hostrcb_array_data_entry *array_entry; - u8 zero_sn[IPR_SERIAL_NUM_LEN]; - - memset(zero_sn, '0', IPR_SERIAL_NUM_LEN); + const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... 
IPR_SERIAL_NUM_LEN-1] = '0' }; error = &hostrcb->hcam.u.error.u.type_04_error; @@ -1015,36 +1224,19 @@ static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg, array_entry = error->array_member; for (i = 0; i < 18; i++) { - if (!memcmp(array_entry->serial_num, zero_sn, IPR_SERIAL_NUM_LEN)) + if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN)) continue; - if (error->exposed_mode_adn == i) { + if (be32_to_cpu(error->exposed_mode_adn) == i) ipr_err("Exposed Array Member %d:\n", i); - } else { + else ipr_err("Array Member %d:\n", i); - } - ipr_log_vpd(&array_entry->vpids, array_entry->serial_num); - - if (array_entry->dev_res_addr.bus >= IPR_MAX_NUM_BUSES) { - ipr_err("Current Location: unknown\n"); - } else { - ipr_err("Current Location: %d:%d:%d:%d\n", - ioa_cfg->host->host_no, - array_entry->dev_res_addr.bus, - array_entry->dev_res_addr.target, - array_entry->dev_res_addr.lun); - } + ipr_log_vpd(&array_entry->vpd); - if (array_entry->dev_res_addr.bus >= IPR_MAX_NUM_BUSES) { - ipr_err("Expected Location: unknown\n"); - } else { - ipr_err("Expected Location: %d:%d:%d:%d\n", - ioa_cfg->host->host_no, - array_entry->expected_dev_res_addr.bus, - array_entry->expected_dev_res_addr.target, - array_entry->expected_dev_res_addr.lun); - } + ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location"); + ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr, + "Expected Location"); ipr_err_separator; @@ -1056,32 +1248,309 @@ static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg, } /** - * ipr_log_generic_error - Log an adapter error. + * ipr_log_hex_data - Log additional hex IOA error data. * @ioa_cfg: ioa config struct - * @hostrcb: hostrcb struct + * @data: IOA error data + * @len: data length * * Return value: * none **/ -static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg, - struct ipr_hostrcb *hostrcb) +static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, u32 *data, int len) { int i; - int ioa_data_len = be32_to_cpu(hostrcb->hcam.length); - if (ioa_data_len == 0) + if (len == 0) return; - ipr_err("IOA Error Data:\n"); - ipr_err("Offset 0 1 2 3 4 5 6 7 8 9 A B C D E F\n"); + if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL) + len = min_t(int, len, IPR_DEFAULT_MAX_ERROR_DUMP); - for (i = 0; i < ioa_data_len / 4; i += 4) { + for (i = 0; i < len / 4; i += 4) { ipr_err("%08X: %08X %08X %08X %08X\n", i*4, - be32_to_cpu(hostrcb->hcam.u.raw.data[i]), - be32_to_cpu(hostrcb->hcam.u.raw.data[i+1]), - be32_to_cpu(hostrcb->hcam.u.raw.data[i+2]), - be32_to_cpu(hostrcb->hcam.u.raw.data[i+3])); + be32_to_cpu(data[i]), + be32_to_cpu(data[i+1]), + be32_to_cpu(data[i+2]), + be32_to_cpu(data[i+3])); + } +} + +/** + * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error. + * @ioa_cfg: ioa config struct + * @hostrcb: hostrcb struct + * + * Return value: + * none + **/ +static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg, + struct ipr_hostrcb *hostrcb) +{ + struct ipr_hostrcb_type_17_error *error; + + error = &hostrcb->hcam.u.error.u.type_17_error; + error->failure_reason[sizeof(error->failure_reason) - 1] = '\0'; + + ipr_err("%s\n", error->failure_reason); + ipr_err("Remote Adapter VPD:\n"); + ipr_log_ext_vpd(&error->vpd); + ipr_log_hex_data(ioa_cfg, error->data, + be32_to_cpu(hostrcb->hcam.length) - + (offsetof(struct ipr_hostrcb_error, u) + + offsetof(struct ipr_hostrcb_type_17_error, data))); +} + +/** + * ipr_log_dual_ioa_error - Log a dual adapter error. 
+ * @ioa_cfg: ioa config struct + * @hostrcb: hostrcb struct + * + * Return value: + * none + **/ +static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg, + struct ipr_hostrcb *hostrcb) +{ + struct ipr_hostrcb_type_07_error *error; + + error = &hostrcb->hcam.u.error.u.type_07_error; + error->failure_reason[sizeof(error->failure_reason) - 1] = '\0'; + + ipr_err("%s\n", error->failure_reason); + ipr_err("Remote Adapter VPD:\n"); + ipr_log_vpd(&error->vpd); + ipr_log_hex_data(ioa_cfg, error->data, + be32_to_cpu(hostrcb->hcam.length) - + (offsetof(struct ipr_hostrcb_error, u) + + offsetof(struct ipr_hostrcb_type_07_error, data))); +} + +static const struct { + u8 active; + char *desc; +} path_active_desc[] = { + { IPR_PATH_NO_INFO, "Path" }, + { IPR_PATH_ACTIVE, "Active path" }, + { IPR_PATH_NOT_ACTIVE, "Inactive path" } +}; + +static const struct { + u8 state; + char *desc; +} path_state_desc[] = { + { IPR_PATH_STATE_NO_INFO, "has no path state information available" }, + { IPR_PATH_HEALTHY, "is healthy" }, + { IPR_PATH_DEGRADED, "is degraded" }, + { IPR_PATH_FAILED, "is failed" } +}; + +/** + * ipr_log_fabric_path - Log a fabric path error + * @hostrcb: hostrcb struct + * @fabric: fabric descriptor + * + * Return value: + * none + **/ +static void ipr_log_fabric_path(struct ipr_hostrcb *hostrcb, + struct ipr_hostrcb_fabric_desc *fabric) +{ + int i, j; + u8 path_state = fabric->path_state; + u8 active = path_state & IPR_PATH_ACTIVE_MASK; + u8 state = path_state & IPR_PATH_STATE_MASK; + + for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) { + if (path_active_desc[i].active != active) + continue; + + for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) { + if (path_state_desc[j].state != state) + continue; + + if (fabric->cascaded_expander == 0xff && fabric->phy == 0xff) { + ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d\n", + path_active_desc[i].desc, path_state_desc[j].desc, + fabric->ioa_port); + } else if (fabric->cascaded_expander == 0xff) { + ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Phy=%d\n", + path_active_desc[i].desc, path_state_desc[j].desc, + fabric->ioa_port, fabric->phy); + } else if (fabric->phy == 0xff) { + ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d\n", + path_active_desc[i].desc, path_state_desc[j].desc, + fabric->ioa_port, fabric->cascaded_expander); + } else { + ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d, Phy=%d\n", + path_active_desc[i].desc, path_state_desc[j].desc, + fabric->ioa_port, fabric->cascaded_expander, fabric->phy); + } + return; + } + } + + ipr_err("Path state=%02X IOA Port=%d Cascade=%d Phy=%d\n", path_state, + fabric->ioa_port, fabric->cascaded_expander, fabric->phy); +} + +static const struct { + u8 type; + char *desc; +} path_type_desc[] = { + { IPR_PATH_CFG_IOA_PORT, "IOA port" }, + { IPR_PATH_CFG_EXP_PORT, "Expander port" }, + { IPR_PATH_CFG_DEVICE_PORT, "Device port" }, + { IPR_PATH_CFG_DEVICE_LUN, "Device LUN" } +}; + +static const struct { + u8 status; + char *desc; +} path_status_desc[] = { + { IPR_PATH_CFG_NO_PROB, "Functional" }, + { IPR_PATH_CFG_DEGRADED, "Degraded" }, + { IPR_PATH_CFG_FAILED, "Failed" }, + { IPR_PATH_CFG_SUSPECT, "Suspect" }, + { IPR_PATH_NOT_DETECTED, "Missing" }, + { IPR_PATH_INCORRECT_CONN, "Incorrectly connected" } +}; + +static const char *link_rate[] = { + "unknown", + "disabled", + "phy reset problem", + "spinup hold", + "port selector", + "unknown", + "unknown", + "unknown", + "1.5Gbps", + "3.0Gbps", + "unknown", + "unknown", + "unknown", + "unknown", + "unknown", + "unknown" +}; + +/** + * 
ipr_log_path_elem - Log a fabric path element. + * @hostrcb: hostrcb struct + * @cfg: fabric path element struct + * + * Return value: + * none + **/ +static void ipr_log_path_elem(struct ipr_hostrcb *hostrcb, + struct ipr_hostrcb_config_element *cfg) +{ + int i, j; + u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK; + u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK; + + if (type == IPR_PATH_CFG_NOT_EXIST) + return; + + for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) { + if (path_type_desc[i].type != type) + continue; + + for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) { + if (path_status_desc[j].status != status) + continue; + + if (type == IPR_PATH_CFG_IOA_PORT) { + ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, WWN=%08X%08X\n", + path_status_desc[j].desc, path_type_desc[i].desc, + cfg->phy, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK], + be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1])); + } else { + if (cfg->cascaded_expander == 0xff && cfg->phy == 0xff) { + ipr_hcam_err(hostrcb, "%s %s: Link rate=%s, WWN=%08X%08X\n", + path_status_desc[j].desc, path_type_desc[i].desc, + link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK], + be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1])); + } else if (cfg->cascaded_expander == 0xff) { + ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, " + "WWN=%08X%08X\n", path_status_desc[j].desc, + path_type_desc[i].desc, cfg->phy, + link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK], + be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1])); + } else if (cfg->phy == 0xff) { + ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Link rate=%s, " + "WWN=%08X%08X\n", path_status_desc[j].desc, + path_type_desc[i].desc, cfg->cascaded_expander, + link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK], + be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1])); + } else { + ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Phy=%d, Link rate=%s " + "WWN=%08X%08X\n", path_status_desc[j].desc, + path_type_desc[i].desc, cfg->cascaded_expander, cfg->phy, + link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK], + be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1])); + } + } + return; + } + } + + ipr_hcam_err(hostrcb, "Path element=%02X: Cascade=%d Phy=%d Link rate=%s " + "WWN=%08X%08X\n", cfg->type_status, cfg->cascaded_expander, cfg->phy, + link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK], + be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1])); +} + +/** + * ipr_log_fabric_error - Log a fabric error. 
+ * @ioa_cfg: ioa config struct + * @hostrcb: hostrcb struct + * + * Return value: + * none + **/ +static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg, + struct ipr_hostrcb *hostrcb) +{ + struct ipr_hostrcb_type_20_error *error; + struct ipr_hostrcb_fabric_desc *fabric; + struct ipr_hostrcb_config_element *cfg; + int i, add_len; + + error = &hostrcb->hcam.u.error.u.type_20_error; + error->failure_reason[sizeof(error->failure_reason) - 1] = '\0'; + ipr_hcam_err(hostrcb, "%s\n", error->failure_reason); + + add_len = be32_to_cpu(hostrcb->hcam.length) - + (offsetof(struct ipr_hostrcb_error, u) + + offsetof(struct ipr_hostrcb_type_20_error, desc)); + + for (i = 0, fabric = error->desc; i < error->num_entries; i++) { + ipr_log_fabric_path(hostrcb, fabric); + for_each_fabric_cfg(fabric, cfg) + ipr_log_path_elem(hostrcb, cfg); + + add_len -= be16_to_cpu(fabric->length); + fabric = (struct ipr_hostrcb_fabric_desc *) + ((unsigned long)fabric + be16_to_cpu(fabric->length)); } + + ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len); +} + +/** + * ipr_log_generic_error - Log an adapter error. + * @ioa_cfg: ioa config struct + * @hostrcb: hostrcb struct + * + * Return value: + * none + **/ +static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg, + struct ipr_hostrcb *hostrcb) +{ + ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data, + be32_to_cpu(hostrcb->hcam.length)); } /** @@ -1100,7 +1569,7 @@ static u32 ipr_get_error(u32 ioasc) int i; for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++) - if (ipr_error_table[i].ioasc == ioasc) + if (ipr_error_table[i].ioasc == (ioasc & IPR_IOASC_IOASC_MASK)) return i; return 0; @@ -1142,24 +1611,17 @@ static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg, if (!ipr_error_table[error_index].log_hcam) return; - if (ipr_is_device(&hostrcb->hcam.u.error.failing_dev_res_addr)) { - ipr_res_err(ioa_cfg, hostrcb->hcam.u.error.failing_dev_res_addr, - "%s\n", ipr_error_table[error_index].error); - } else { - dev_err(&ioa_cfg->pdev->dev, "%s\n", - ipr_error_table[error_index].error); - } + ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error); /* Set indication we have logged an error */ ioa_cfg->errors_logged++; if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL) return; + if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw)) + hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw)); switch (hostrcb->hcam.overlay_id) { - case IPR_HOST_RCB_OVERLAY_ID_1: - ipr_log_generic_error(ioa_cfg, hostrcb); - break; case IPR_HOST_RCB_OVERLAY_ID_2: ipr_log_cache_error(ioa_cfg, hostrcb); break; @@ -1170,13 +1632,29 @@ static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg, case IPR_HOST_RCB_OVERLAY_ID_6: ipr_log_array_error(ioa_cfg, hostrcb); break; - case IPR_HOST_RCB_OVERLAY_ID_DEFAULT: - ipr_log_generic_error(ioa_cfg, hostrcb); + case IPR_HOST_RCB_OVERLAY_ID_7: + ipr_log_dual_ioa_error(ioa_cfg, hostrcb); + break; + case IPR_HOST_RCB_OVERLAY_ID_12: + ipr_log_enhanced_cache_error(ioa_cfg, hostrcb); + break; + case IPR_HOST_RCB_OVERLAY_ID_13: + ipr_log_enhanced_config_error(ioa_cfg, hostrcb); break; + case IPR_HOST_RCB_OVERLAY_ID_14: + case IPR_HOST_RCB_OVERLAY_ID_16: + ipr_log_enhanced_array_error(ioa_cfg, hostrcb); + break; + case IPR_HOST_RCB_OVERLAY_ID_17: + ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb); + break; + case IPR_HOST_RCB_OVERLAY_ID_20: + ipr_log_fabric_error(ioa_cfg, hostrcb); + break; + case IPR_HOST_RCB_OVERLAY_ID_1: + case IPR_HOST_RCB_OVERLAY_ID_DEFAULT: default: - dev_err(&ioa_cfg->pdev->dev, - "Unknown error 
received. Overlay ID: %d\n", - hostrcb->hcam.overlay_id); + ipr_log_generic_error(ioa_cfg, hostrcb); break; } } @@ -1244,12 +1722,47 @@ static void ipr_timeout(struct ipr_cmnd *ipr_cmd) } /** - * ipr_reset_reload - Reset/Reload the IOA - * @ioa_cfg: ioa config struct - * @shutdown_type: shutdown type + * ipr_oper_timeout - Adapter timed out transitioning to operational + * @ipr_cmd: ipr command struct + * + * This function blocks host requests and initiates an + * adapter reset. * - * This function resets the adapter and re-initializes it. - * This function assumes that all new host commands have been stopped. + * Return value: + * none + **/ +static void ipr_oper_timeout(struct ipr_cmnd *ipr_cmd) +{ + unsigned long lock_flags = 0; + struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; + + ENTER; + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); + + ioa_cfg->errors_logged++; + dev_err(&ioa_cfg->pdev->dev, + "Adapter timed out transitioning to operational.\n"); + + if (WAIT_FOR_DUMP == ioa_cfg->sdt_state) + ioa_cfg->sdt_state = GET_DUMP; + + if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) { + if (ipr_fastfail) + ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES; + ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); + } + + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + LEAVE; +} + +/** + * ipr_reset_reload - Reset/Reload the IOA + * @ioa_cfg: ioa config struct + * @shutdown_type: shutdown type + * + * This function resets the adapter and re-initializes it. + * This function assumes that all new host commands have been stopped. * Return value: * SUCCESS / FAILED **/ @@ -1384,7 +1897,7 @@ static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay) **/ static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg, u32 start_addr, - u32 *dest, u32 length_in_words) + __be32 *dest, u32 length_in_words) { volatile u32 temp_pcii_reg; int i, delay = 0; @@ -1476,7 +1989,7 @@ static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg, { int bytes_copied = 0; int cur_len, rc, rem_len, rem_page_len; - u32 *page; + __be32 *page; unsigned long lock_flags = 0; struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump; @@ -1484,7 +1997,7 @@ static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg, (ioa_dump->hdr.len + bytes_copied) < IPR_MAX_IOA_DUMP_SIZE) { if (ioa_dump->page_offset >= PAGE_SIZE || ioa_dump->page_offset == 0) { - page = (u32 *)__get_free_page(GFP_ATOMIC); + page = (__be32 *)__get_free_page(GFP_ATOMIC); if (!page) { ipr_trace; @@ -1699,8 +2212,8 @@ static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump) on entries in this table */ sdt = &ioa_dump->sdt; - rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (u32 *)sdt, - sizeof(struct ipr_sdt) / sizeof(u32)); + rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt, + sizeof(struct ipr_sdt) / sizeof(__be32)); /* Smart Dump table is ready to use and the first entry is valid */ if (rc || (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE)) { @@ -1765,9 +2278,36 @@ static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump) #define ipr_get_ioa_dump(ioa_cfg, dump) do { } while(0) #endif +/** + * ipr_release_dump - Free adapter dump memory + * @kref: kref struct + * + * Return value: + * nothing + **/ +static void ipr_release_dump(struct kref *kref) +{ + struct ipr_dump *dump = container_of(kref,struct ipr_dump,kref); + struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg; + unsigned long lock_flags = 0; + int i; + + ENTER; + 
spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); + ioa_cfg->dump = NULL; + ioa_cfg->sdt_state = INACTIVE; + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + + for (i = 0; i < dump->ioa_dump.next_page_index; i++) + free_page((unsigned long) dump->ioa_dump.ioa_data[i]); + + kfree(dump); + LEAVE; +} + /** * ipr_worker_thread - Worker thread - * @data: ioa config struct + * @work: ioa config struct * * Called at task level from a work thread. This function takes care * of adding and removing device from the mid-layer as configuration @@ -1776,13 +2316,14 @@ static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump) * Return value: * nothing **/ -static void ipr_worker_thread(void *data) +static void ipr_worker_thread(struct work_struct *work) { unsigned long lock_flags; struct ipr_resource_entry *res; struct scsi_device *sdev; struct ipr_dump *dump; - struct ipr_ioa_cfg *ioa_cfg = data; + struct ipr_ioa_cfg *ioa_cfg = + container_of(work, struct ipr_ioa_cfg, work_q); u8 bus, target, lun; int did_work; @@ -1791,13 +2332,14 @@ static void ipr_worker_thread(void *data) if (ioa_cfg->sdt_state == GET_DUMP) { dump = ioa_cfg->dump; - if (!dump || !kobject_get(&dump->kobj)) { + if (!dump) { spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); return; } + kref_get(&dump->kref); spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); ipr_get_ioa_dump(ioa_cfg, dump); - kobject_put(&dump->kobj); + kref_put(&dump->kref, ipr_release_dump); spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); if (ioa_cfg->sdt_state == DUMP_OBTAINED) @@ -1819,7 +2361,6 @@ restart: did_work = 1; sdev = res->sdev; if (!scsi_device_get(sdev)) { - res->sdev = NULL; list_move_tail(&res->queue, &ioa_cfg->free_res_q); spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); scsi_remove_device(sdev); @@ -1836,6 +2377,7 @@ restart: bus = res->cfgte.res_addr.bus; target = res->cfgte.res_addr.target; lun = res->cfgte.res_addr.lun; + res->add_to_ml = 0; spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); scsi_add_device(ioa_cfg->host, bus, target, lun); spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); @@ -1844,6 +2386,7 @@ restart: } spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + kobject_uevent(&ioa_cfg->host->shost_classdev.kobj, KOBJ_CHANGE); LEAVE; } @@ -1891,6 +2434,103 @@ static struct bin_attribute ipr_trace_attr = { }; #endif +static const struct { + enum ipr_cache_state state; + char *name; +} cache_state [] = { + { CACHE_NONE, "none" }, + { CACHE_DISABLED, "disabled" }, + { CACHE_ENABLED, "enabled" } +}; + +/** + * ipr_show_write_caching - Show the write caching attribute + * @class_dev: class device struct + * @buf: buffer + * + * Return value: + * number of bytes printed to buffer + **/ +static ssize_t ipr_show_write_caching(struct class_device *class_dev, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(class_dev); + struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; + unsigned long lock_flags = 0; + int i, len = 0; + + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); + for (i = 0; i < ARRAY_SIZE(cache_state); i++) { + if (cache_state[i].state == ioa_cfg->cache_state) { + len = snprintf(buf, PAGE_SIZE, "%s\n", cache_state[i].name); + break; + } + } + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + return len; +} + + +/** + * ipr_store_write_caching - Enable/disable adapter write cache + * @class_dev: class_device struct + * @buf: buffer + * @count: buffer size + * + * This 
function will enable/disable adapter write cache. + * + * Return value: + * count on success / other on failure + **/ +static ssize_t ipr_store_write_caching(struct class_device *class_dev, + const char *buf, size_t count) +{ + struct Scsi_Host *shost = class_to_shost(class_dev); + struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; + unsigned long lock_flags = 0; + enum ipr_cache_state new_state = CACHE_INVALID; + int i; + + if (!capable(CAP_SYS_ADMIN)) + return -EACCES; + if (ioa_cfg->cache_state == CACHE_NONE) + return -EINVAL; + + for (i = 0; i < ARRAY_SIZE(cache_state); i++) { + if (!strncmp(cache_state[i].name, buf, strlen(cache_state[i].name))) { + new_state = cache_state[i].state; + break; + } + } + + if (new_state != CACHE_DISABLED && new_state != CACHE_ENABLED) + return -EINVAL; + + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); + if (ioa_cfg->cache_state == new_state) { + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + return count; + } + + ioa_cfg->cache_state = new_state; + dev_info(&ioa_cfg->pdev->dev, "%s adapter write cache.\n", + new_state == CACHE_ENABLED ? "Enabling" : "Disabling"); + if (!ioa_cfg->in_reset_reload) + ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL); + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); + + return count; +} + +static struct class_device_attribute ipr_ioa_cache_attr = { + .attr = { + .name = "write_cache", + .mode = S_IRUGO | S_IWUSR, + }, + .show = ipr_show_write_caching, + .store = ipr_store_write_caching +}; + /** * ipr_show_fw_version - Show the firmware version * @class_dev: class device struct @@ -2008,7 +2648,7 @@ static ssize_t ipr_store_diagnostics(struct class_device *class_dev, wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); /* Wait for a second for any errors to be logged */ - schedule_timeout(HZ); + msleep(1000); } else { spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); return -EIO; @@ -2030,6 +2670,74 @@ static struct class_device_attribute ipr_diagnostics_attr = { .store = ipr_store_diagnostics }; +/** + * ipr_show_adapter_state - Show the adapter's state + * @class_dev: class device struct + * @buf: buffer + * + * Return value: + * number of bytes printed to buffer + **/ +static ssize_t ipr_show_adapter_state(struct class_device *class_dev, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(class_dev); + struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; + unsigned long lock_flags = 0; + int len; + + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); + if (ioa_cfg->ioa_is_dead) + len = snprintf(buf, PAGE_SIZE, "offline\n"); + else + len = snprintf(buf, PAGE_SIZE, "online\n"); + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + return len; +} + +/** + * ipr_store_adapter_state - Change adapter state + * @class_dev: class_device struct + * @buf: buffer + * @count: buffer size + * + * This function will change the adapter's state. 
+ * + * Return value: + * count on success / other on failure + **/ +static ssize_t ipr_store_adapter_state(struct class_device *class_dev, + const char *buf, size_t count) +{ + struct Scsi_Host *shost = class_to_shost(class_dev); + struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; + unsigned long lock_flags; + int result = count; + + if (!capable(CAP_SYS_ADMIN)) + return -EACCES; + + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); + if (ioa_cfg->ioa_is_dead && !strncmp(buf, "online", 6)) { + ioa_cfg->ioa_is_dead = 0; + ioa_cfg->reset_retries = 0; + ioa_cfg->in_ioa_bringdown = 0; + ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); + } + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); + + return result; +} + +static struct class_device_attribute ipr_ioa_state_attr = { + .attr = { + .name = "state", + .mode = S_IRUGO | S_IWUSR, + }, + .show = ipr_show_adapter_state, + .store = ipr_store_adapter_state +}; + /** * ipr_store_reset_adapter - Reset the adapter * @class_dev: class_device struct @@ -2102,7 +2810,7 @@ static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len) num_elem = buf_len / bsize_elem; /* Allocate a scatter/gather list for the DMA */ - sglist = kmalloc(sizeof(struct ipr_sglist) + + sglist = kzalloc(sizeof(struct ipr_sglist) + (sizeof(struct scatterlist) * (num_elem - 1)), GFP_KERNEL); @@ -2111,9 +2819,6 @@ static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len) return NULL; } - memset(sglist, 0, sizeof(struct ipr_sglist) + - (sizeof(struct scatterlist) * (num_elem - 1))); - scatterlist = sglist->scatterlist; sglist->order = order; @@ -2208,31 +2913,24 @@ static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist, } /** - * ipr_map_ucode_buffer - Map a microcode download buffer + * ipr_build_ucode_ioadl - Build a microcode download IOADL * @ipr_cmd: ipr command struct * @sglist: scatter/gather list - * @len: total length of download buffer * - * Maps a microcode download scatter/gather list for DMA and - * builds the IOADL. + * Builds a microcode download IOA data list (IOADL). 
* - * Return value: - * 0 on success / -EIO on failure **/ -static int ipr_map_ucode_buffer(struct ipr_cmnd *ipr_cmd, - struct ipr_sglist *sglist, int len) +static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd, + struct ipr_sglist *sglist) { - struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl; struct scatterlist *scatterlist = sglist->scatterlist; int i; - ipr_cmd->dma_use_sg = pci_map_sg(ioa_cfg->pdev, scatterlist, - sglist->num_sg, DMA_TO_DEVICE); - + ipr_cmd->dma_use_sg = sglist->num_dma_sg; ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ; - ioarcb->write_data_transfer_length = cpu_to_be32(len); + ioarcb->write_data_transfer_length = cpu_to_be32(sglist->buffer_len); ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg); @@ -2243,15 +2941,52 @@ static int ipr_map_ucode_buffer(struct ipr_cmnd *ipr_cmd, cpu_to_be32(sg_dma_address(&scatterlist[i])); } - if (likely(ipr_cmd->dma_use_sg)) { - ioadl[i-1].flags_and_data_len |= - cpu_to_be32(IPR_IOADL_FLAGS_LAST); + ioadl[i-1].flags_and_data_len |= + cpu_to_be32(IPR_IOADL_FLAGS_LAST); +} + +/** + * ipr_update_ioa_ucode - Update IOA's microcode + * @ioa_cfg: ioa config struct + * @sglist: scatter/gather list + * + * Initiate an adapter reset to update the IOA's microcode + * + * Return value: + * 0 on success / -EIO on failure + **/ +static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg, + struct ipr_sglist *sglist) +{ + unsigned long lock_flags; + + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); + + if (ioa_cfg->ucode_sglist) { + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + dev_err(&ioa_cfg->pdev->dev, + "Microcode download already in progress\n"); + return -EIO; } - else { - dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n"); + + sglist->num_dma_sg = pci_map_sg(ioa_cfg->pdev, sglist->scatterlist, + sglist->num_sg, DMA_TO_DEVICE); + + if (!sglist->num_dma_sg) { + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + dev_err(&ioa_cfg->pdev->dev, + "Failed to map microcode download buffer!\n"); return -EIO; } + ioa_cfg->ucode_sglist = sglist; + ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL); + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); + + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); + ioa_cfg->ucode_sglist = NULL; + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); return 0; } @@ -2274,7 +3009,6 @@ static ssize_t ipr_store_update_fw(struct class_device *class_dev, struct ipr_ucode_image_header *image_hdr; const struct firmware *fw_entry; struct ipr_sglist *sglist; - unsigned long lock_flags; char fname[100]; char *src; int len, result, dnld_size; @@ -2315,35 +3049,17 @@ static ssize_t ipr_store_update_fw(struct class_device *class_dev, if (result) { dev_err(&ioa_cfg->pdev->dev, "Microcode buffer copy to DMA buffer failed\n"); - ipr_free_ucode_buffer(sglist); - release_firmware(fw_entry); - return result; - } - - spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); - - if (ioa_cfg->ucode_sglist) { - spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); - dev_err(&ioa_cfg->pdev->dev, - "Microcode download already in progress\n"); - ipr_free_ucode_buffer(sglist); - release_firmware(fw_entry); - return -EIO; + goto out; } - ioa_cfg->ucode_sglist = sglist; - ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL); - 
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); - wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); - - spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); - ioa_cfg->ucode_sglist = NULL; - spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + result = ipr_update_ioa_ucode(ioa_cfg, sglist); + if (!result) + result = count; +out: ipr_free_ucode_buffer(sglist); release_firmware(fw_entry); - - return count; + return result; } static struct class_device_attribute ipr_update_fw_attr = { @@ -2358,8 +3074,10 @@ static struct class_device_attribute *ipr_ioa_attrs[] = { &ipr_fw_version_attr, &ipr_log_level_attr, &ipr_diagnostics_attr, + &ipr_ioa_state_attr, &ipr_ioa_reset_attr, &ipr_update_fw_attr, + &ipr_ioa_cache_attr, NULL, }; @@ -2392,15 +3110,15 @@ static ssize_t ipr_read_dump(struct kobject *kobj, char *buf, spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); dump = ioa_cfg->dump; - if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump || !kobject_get(&dump->kobj)) { + if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) { spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); return 0; } - + kref_get(&dump->kref); spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); if (off > dump->driver_dump.hdr.len) { - kobject_put(&dump->kobj); + kref_put(&dump->kref, ipr_release_dump); return 0; } @@ -2450,64 +3168,30 @@ static ssize_t ipr_read_dump(struct kobject *kobj, char *buf, count -= len; } - kobject_put(&dump->kobj); + kref_put(&dump->kref, ipr_release_dump); return rc; } /** - * ipr_release_dump - Free adapter dump memory - * @kobj: kobject struct + * ipr_alloc_dump - Prepare for adapter dump + * @ioa_cfg: ioa config struct * * Return value: - * nothing + * 0 on success / other on failure **/ -static void ipr_release_dump(struct kobject *kobj) +static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg) { - struct ipr_dump *dump = container_of(kobj,struct ipr_dump,kobj); - struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg; + struct ipr_dump *dump; unsigned long lock_flags = 0; - int i; - ENTER; - spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); - ioa_cfg->dump = NULL; - ioa_cfg->sdt_state = INACTIVE; - spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL); - for (i = 0; i < dump->ioa_dump.next_page_index; i++) - free_page((unsigned long) dump->ioa_dump.ioa_data[i]); + if (!dump) { + ipr_err("Dump memory allocation failed\n"); + return -ENOMEM; + } - kfree(dump); - LEAVE; -} - -static struct kobj_type ipr_dump_kobj_type = { - .release = ipr_release_dump, -}; - -/** - * ipr_alloc_dump - Prepare for adapter dump - * @ioa_cfg: ioa config struct - * - * Return value: - * 0 on success / other on failure - **/ -static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg) -{ - struct ipr_dump *dump; - unsigned long lock_flags = 0; - - ENTER; - dump = kmalloc(sizeof(struct ipr_dump), GFP_KERNEL); - - if (!dump) { - ipr_err("Dump memory allocation failed\n"); - return -ENOMEM; - } - - memset(dump, 0, sizeof(struct ipr_dump)); - kobject_init(&dump->kobj); - dump->kobj.ktype = &ipr_dump_kobj_type; + kref_init(&dump->kref); dump->ioa_cfg = ioa_cfg; spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); @@ -2526,7 +3210,6 @@ static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg) } spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); - LEAVE; return 0; } @@ -2554,7 +3237,7 @@ static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) ioa_cfg->dump = NULL; 
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); - kobject_put(&dump->kobj); + kref_put(&dump->kref, ipr_release_dump); LEAVE; return 0; @@ -2608,126 +3291,68 @@ static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; }; #endif /** - * ipr_store_queue_depth - Change the device's queue depth - * @dev: device struct - * @buf: buffer + * ipr_change_queue_depth - Change the device's queue depth + * @sdev: scsi device struct + * @qdepth: depth to set * * Return value: - * number of bytes printed to buffer + * actual depth set **/ -static ssize_t ipr_store_queue_depth(struct device *dev, - const char *buf, size_t count) +static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth) { - struct scsi_device *sdev = to_scsi_device(dev); struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata; struct ipr_resource_entry *res; - int qdepth = simple_strtoul(buf, NULL, 10); - int tagged = 0; unsigned long lock_flags = 0; - ssize_t len = -ENXIO; spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); res = (struct ipr_resource_entry *)sdev->hostdata; - if (res) { - res->qdepth = qdepth; - - if (ipr_is_gscsi(res) && res->tcq_active) - tagged = MSG_ORDERED_TAG; - - len = strlen(buf); - } + if (res && ipr_is_gata(res) && qdepth > IPR_MAX_CMD_PER_ATA_LUN) + qdepth = IPR_MAX_CMD_PER_ATA_LUN; spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); - scsi_adjust_queue_depth(sdev, tagged, qdepth); - return len; -} - -static struct device_attribute ipr_queue_depth_attr = { - .attr = { - .name = "queue_depth", - .mode = S_IRUSR | S_IWUSR, - }, - .store = ipr_store_queue_depth -}; - -/** - * ipr_show_tcq_enable - Show if the device is enabled for tcqing - * @dev: device struct - * @buf: buffer - * - * Return value: - * number of bytes printed to buffer - **/ -static ssize_t ipr_show_tcq_enable(struct device *dev, char *buf) -{ - struct scsi_device *sdev = to_scsi_device(dev); - struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata; - struct ipr_resource_entry *res; - unsigned long lock_flags = 0; - ssize_t len = -ENXIO; - spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); - res = (struct ipr_resource_entry *)sdev->hostdata; - if (res) - len = snprintf(buf, PAGE_SIZE, "%d\n", res->tcq_active); - spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); - return len; + scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth); + return sdev->queue_depth; } /** - * ipr_store_tcq_enable - Change the device's TCQing state - * @dev: device struct - * @buf: buffer + * ipr_change_queue_type - Change the device's queue type + * @dsev: scsi device struct + * @tag_type: type of tags to use * * Return value: - * number of bytes printed to buffer + * actual queue type set **/ -static ssize_t ipr_store_tcq_enable(struct device *dev, - const char *buf, size_t count) +static int ipr_change_queue_type(struct scsi_device *sdev, int tag_type) { - struct scsi_device *sdev = to_scsi_device(dev); struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata; struct ipr_resource_entry *res; unsigned long lock_flags = 0; - int tcq_active = simple_strtoul(buf, NULL, 10); - int qdepth = IPR_MAX_CMD_PER_LUN; - int tagged = 0; - ssize_t len = -ENXIO; spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); - res = (struct ipr_resource_entry *)sdev->hostdata; if (res) { - res->tcq_active = 0; - qdepth = res->qdepth; - if (ipr_is_gscsi(res) && sdev->tagged_supported) { - if (tcq_active) { - tagged = MSG_ORDERED_TAG; - res->tcq_active = 1; - } + 
/* + * We don't bother quiescing the device here since the + * adapter firmware does it for us. + */ + scsi_set_tag_type(sdev, tag_type); - len = strlen(buf); - } else if (tcq_active) { - len = -EINVAL; - } - } + if (tag_type) + scsi_activate_tcq(sdev, sdev->queue_depth); + else + scsi_deactivate_tcq(sdev, sdev->queue_depth); + } else + tag_type = 0; + } else + tag_type = 0; spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); - scsi_adjust_queue_depth(sdev, tagged, qdepth); - return len; + return tag_type; } -static struct device_attribute ipr_tcqing_attr = { - .attr = { - .name = "tcq_enable", - .mode = S_IRUSR | S_IWUSR, - }, - .store = ipr_store_tcq_enable, - .show = ipr_show_tcq_enable -}; - /** * ipr_show_adapter_handle - Show the adapter's resource handle for this device * @dev: device struct @@ -2736,7 +3361,7 @@ static struct device_attribute ipr_tcqing_attr = { * Return value: * number of bytes printed to buffer **/ -static ssize_t ipr_show_adapter_handle(struct device *dev, char *buf) +static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf) { struct scsi_device *sdev = to_scsi_device(dev); struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata; @@ -2761,8 +3386,6 @@ static struct device_attribute ipr_adapter_handle_attr = { }; static struct device_attribute *ipr_dev_attrs[] = { - &ipr_queue_depth_attr, - &ipr_tcqing_attr, &ipr_adapter_handle_attr, NULL, }; @@ -2785,7 +3408,8 @@ static int ipr_biosparam(struct scsi_device *sdev, struct block_device *block_device, sector_t capacity, int *parm) { - int heads, sectors, cylinders; + int heads, sectors; + sector_t cylinders; heads = 128; sectors = 32; @@ -2801,6 +3425,122 @@ static int ipr_biosparam(struct scsi_device *sdev, return 0; } +/** + * ipr_find_starget - Find target based on bus/target. + * @starget: scsi target struct + * + * Return value: + * resource entry pointer if found / NULL if not found + **/ +static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget) +{ + struct Scsi_Host *shost = dev_to_shost(&starget->dev); + struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata; + struct ipr_resource_entry *res; + + list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { + if ((res->cfgte.res_addr.bus == starget->channel) && + (res->cfgte.res_addr.target == starget->id) && + (res->cfgte.res_addr.lun == 0)) { + return res; + } + } + + return NULL; +} + +static struct ata_port_info sata_port_info; + +/** + * ipr_target_alloc - Prepare for commands to a SCSI target + * @starget: scsi target struct + * + * If the device is a SATA device, this function allocates an + * ATA port with libata, else it does nothing. 
+ * + * Return value: + * 0 on success / non-0 on failure + **/ +static int ipr_target_alloc(struct scsi_target *starget) +{ + struct Scsi_Host *shost = dev_to_shost(&starget->dev); + struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata; + struct ipr_sata_port *sata_port; + struct ata_port *ap; + struct ipr_resource_entry *res; + unsigned long lock_flags; + + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); + res = ipr_find_starget(starget); + starget->hostdata = NULL; + + if (res && ipr_is_gata(res)) { + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + sata_port = kzalloc(sizeof(*sata_port), GFP_KERNEL); + if (!sata_port) + return -ENOMEM; + + ap = ata_sas_port_alloc(&ioa_cfg->ata_host, &sata_port_info, shost); + if (ap) { + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); + sata_port->ioa_cfg = ioa_cfg; + sata_port->ap = ap; + sata_port->res = res; + + res->sata_port = sata_port; + ap->private_data = sata_port; + starget->hostdata = sata_port; + } else { + kfree(sata_port); + return -ENOMEM; + } + } + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + + return 0; +} + +/** + * ipr_target_destroy - Destroy a SCSI target + * @starget: scsi target struct + * + * If the device was a SATA device, this function frees the libata + * ATA port, else it does nothing. + * + **/ +static void ipr_target_destroy(struct scsi_target *starget) +{ + struct ipr_sata_port *sata_port = starget->hostdata; + + if (sata_port) { + starget->hostdata = NULL; + ata_sas_port_destroy(sata_port->ap); + kfree(sata_port); + } +} + +/** + * ipr_find_sdev - Find device based on bus/target/lun. + * @sdev: scsi device struct + * + * Return value: + * resource entry pointer if found / NULL if not found + **/ +static struct ipr_resource_entry *ipr_find_sdev(struct scsi_device *sdev) +{ + struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata; + struct ipr_resource_entry *res; + + list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { + if ((res->cfgte.res_addr.bus == sdev->channel) && + (res->cfgte.res_addr.target == sdev->id) && + (res->cfgte.res_addr.lun == sdev->lun)) + return res; + } + + return NULL; +} + /** * ipr_slave_destroy - Unconfigure a SCSI device * @sdev: scsi device struct @@ -2819,8 +3559,11 @@ static void ipr_slave_destroy(struct scsi_device *sdev) spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); res = (struct ipr_resource_entry *) sdev->hostdata; if (res) { + if (res->sata_port) + ata_port_disable(res->sata_port->ap); sdev->hostdata = NULL; res->sdev = NULL; + res->sata_port = NULL; } spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); } @@ -2845,18 +3588,54 @@ static int ipr_slave_configure(struct scsi_device *sdev) if (res) { if (ipr_is_af_dasd_device(res)) sdev->type = TYPE_RAID; - if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) + if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) { sdev->scsi_level = 4; - if (ipr_is_vset_device(res)) + sdev->no_uld_attach = 1; + } + if (ipr_is_vset_device(res)) { sdev->timeout = IPR_VSET_RW_TIMEOUT; - - sdev->allow_restart = 1; - scsi_adjust_queue_depth(sdev, 0, res->qdepth); + blk_queue_max_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS); + } + if (ipr_is_vset_device(res) || ipr_is_scsi_disk(res)) + sdev->allow_restart = 1; + if (ipr_is_gata(res) && res->sata_port) { + scsi_adjust_queue_depth(sdev, 0, IPR_MAX_CMD_PER_ATA_LUN); + ata_sas_slave_configure(sdev, res->sata_port->ap); + } else { + scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun); 
+ } } spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); return 0; } +/** + * ipr_ata_slave_alloc - Prepare for commands to a SATA device + * @sdev: scsi device struct + * + * This function initializes an ATA port so that future commands + * sent through queuecommand will work. + * + * Return value: + * 0 on success + **/ +static int ipr_ata_slave_alloc(struct scsi_device *sdev) +{ + struct ipr_sata_port *sata_port = NULL; + int rc = -ENXIO; + + ENTER; + if (sdev->sdev_target) + sata_port = sdev->sdev_target->hostdata; + if (sata_port) + rc = ata_sas_port_init(sata_port->ap); + if (rc) + ipr_slave_destroy(sdev); + + LEAVE; + return rc; +} + /** * ipr_slave_alloc - Prepare for commands to a device. * @sdev: scsi device struct @@ -2867,34 +3646,37 @@ static int ipr_slave_configure(struct scsi_device *sdev) * handling new commands. * * Return value: - * 0 on success + * 0 on success / -ENXIO if device does not exist **/ static int ipr_slave_alloc(struct scsi_device *sdev) { struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata; struct ipr_resource_entry *res; unsigned long lock_flags; + int rc = -ENXIO; sdev->hostdata = NULL; spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); - list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { - if ((res->cfgte.res_addr.bus == sdev->channel) && - (res->cfgte.res_addr.target == sdev->id) && - (res->cfgte.res_addr.lun == sdev->lun)) { - res->sdev = sdev; - res->add_to_ml = 0; - res->in_erp = 0; - sdev->hostdata = res; + res = ipr_find_sdev(sdev); + if (res) { + res->sdev = sdev; + res->add_to_ml = 0; + res->in_erp = 0; + sdev->hostdata = res; + if (!ipr_is_naca_model(res)) res->needs_sync_complete = 1; - break; + rc = 0; + if (ipr_is_gata(res)) { + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + return ipr_ata_slave_alloc(sdev); } } spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); - return 0; + return rc; } /** @@ -2904,7 +3686,7 @@ static int ipr_slave_alloc(struct scsi_device *sdev) * Return value: * SUCCESS / FAILED **/ -static int ipr_eh_host_reset(struct scsi_cmnd * scsi_cmd) +static int __ipr_eh_host_reset(struct scsi_cmnd * scsi_cmd) { struct ipr_ioa_cfg *ioa_cfg; int rc; @@ -2924,6 +3706,115 @@ static int ipr_eh_host_reset(struct scsi_cmnd * scsi_cmd) return rc; } +static int ipr_eh_host_reset(struct scsi_cmnd * cmd) +{ + int rc; + + spin_lock_irq(cmd->device->host->host_lock); + rc = __ipr_eh_host_reset(cmd); + spin_unlock_irq(cmd->device->host->host_lock); + + return rc; +} + +/** + * ipr_device_reset - Reset the device + * @ioa_cfg: ioa config struct + * @res: resource entry struct + * + * This function issues a device reset to the affected device. + * If the device is a SCSI device, a LUN reset will be sent + * to the device first. If that does not work, a target reset + * will be sent. If the device is a SATA device, a PHY reset will + * be sent. 
+ * + * Return value: + * 0 on success / non-zero on failure + **/ +static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg, + struct ipr_resource_entry *res) +{ + struct ipr_cmnd *ipr_cmd; + struct ipr_ioarcb *ioarcb; + struct ipr_cmd_pkt *cmd_pkt; + struct ipr_ioarcb_ata_regs *regs; + u32 ioasc; + + ENTER; + ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg); + ioarcb = &ipr_cmd->ioarcb; + cmd_pkt = &ioarcb->cmd_pkt; + regs = &ioarcb->add_data.u.regs; + + ioarcb->res_handle = res->cfgte.res_handle; + cmd_pkt->request_type = IPR_RQTYPE_IOACMD; + cmd_pkt->cdb[0] = IPR_RESET_DEVICE; + if (ipr_is_gata(res)) { + cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET; + ioarcb->add_cmd_parms_len = cpu_to_be32(sizeof(regs->flags)); + regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION; + } + + ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT); + ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc); + list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q); + if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET) + memcpy(&res->sata_port->ioasa, &ipr_cmd->ioasa.u.gata, + sizeof(struct ipr_ioasa_gata)); + + LEAVE; + return (IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0); +} + +/** + * ipr_sata_reset - Reset the SATA port + * @ap: SATA port to reset + * @classes: class of the attached device + * + * This function issues a SATA phy reset to the affected ATA port. + * + * Return value: + * 0 on success / non-zero on failure + **/ +static int ipr_sata_reset(struct ata_port *ap, unsigned int *classes) +{ + struct ipr_sata_port *sata_port = ap->private_data; + struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg; + struct ipr_resource_entry *res; + unsigned long lock_flags = 0; + int rc = -ENXIO; + + ENTER; + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); + while(ioa_cfg->in_reset_reload) { + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); + } + + res = sata_port->res; + if (res) { + rc = ipr_device_reset(ioa_cfg, res); + switch(res->cfgte.proto) { + case IPR_PROTO_SATA: + case IPR_PROTO_SAS_STP: + *classes = ATA_DEV_ATA; + break; + case IPR_PROTO_SATA_ATAPI: + case IPR_PROTO_SAS_STP_ATAPI: + *classes = ATA_DEV_ATAPI; + break; + default: + *classes = ATA_DEV_UNKNOWN; + break; + }; + } + + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + LEAVE; + return rc; +} + /** * ipr_eh_dev_reset - Reset the device * @scsi_cmd: scsi command struct @@ -2935,19 +3826,19 @@ static int ipr_eh_host_reset(struct scsi_cmnd * scsi_cmd) * Return value: * SUCCESS / FAILED **/ -static int ipr_eh_dev_reset(struct scsi_cmnd * scsi_cmd) +static int __ipr_eh_dev_reset(struct scsi_cmnd * scsi_cmd) { struct ipr_cmnd *ipr_cmd; struct ipr_ioa_cfg *ioa_cfg; struct ipr_resource_entry *res; - struct ipr_cmd_pkt *cmd_pkt; - u32 ioasc; + struct ata_port *ap; + int rc = 0; ENTER; ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata; res = scsi_cmd->device->hostdata; - if (!res || (!ipr_is_gscsi(res) && !ipr_is_vset_device(res))) + if (!res) return FAILED; /* @@ -2964,29 +3855,38 @@ static int ipr_eh_dev_reset(struct scsi_cmnd * scsi_cmd) if (ipr_cmd->ioarcb.res_handle == res->cfgte.res_handle) { if (ipr_cmd->scsi_cmd) ipr_cmd->done = ipr_scsi_eh_done; + if (ipr_cmd->qc && !(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) { + ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT; + ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED; + } } } res->resetting_device = 1; + scmd_printk(KERN_ERR, scsi_cmd, "Resetting 
device\n"); - ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg); - - ipr_cmd->ioarcb.res_handle = res->cfgte.res_handle; - cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt; - cmd_pkt->request_type = IPR_RQTYPE_IOACMD; - cmd_pkt->cdb[0] = IPR_RESET_DEVICE; - - ipr_sdev_err(scsi_cmd->device, "Resetting device\n"); - ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT); + if (ipr_is_gata(res) && res->sata_port) { + ap = res->sata_port->ap; + spin_unlock_irq(scsi_cmd->device->host->host_lock); + ata_do_eh(ap, NULL, NULL, ipr_sata_reset, NULL); + spin_lock_irq(scsi_cmd->device->host->host_lock); + } else + rc = ipr_device_reset(ioa_cfg, res); + res->resetting_device = 0; - ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc); + LEAVE; + return (rc ? FAILED : SUCCESS); +} - res->resetting_device = 0; +static int ipr_eh_dev_reset(struct scsi_cmnd * cmd) +{ + int rc; - list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q); + spin_lock_irq(cmd->device->host->host_lock); + rc = __ipr_eh_dev_reset(cmd); + spin_unlock_irq(cmd->device->host->host_lock); - LEAVE; - return (IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS); + return rc; } /** @@ -3050,7 +3950,7 @@ static void ipr_abort_timeout(struct ipr_cmnd *ipr_cmd) return; } - ipr_sdev_err(ipr_cmd->u.sdev, "Abort timed out. Resetting bus\n"); + sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n"); reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg); ipr_cmd->sibling = reset_cmd; reset_cmd->sibling = ipr_cmd; @@ -3080,14 +3980,20 @@ static int ipr_cancel_op(struct scsi_cmnd * scsi_cmd) struct ipr_ioa_cfg *ioa_cfg; struct ipr_resource_entry *res; struct ipr_cmd_pkt *cmd_pkt; - u32 ioasc, ioarcb_addr; + u32 ioasc; int op_found = 0; ENTER; ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata; res = scsi_cmd->device->hostdata; - if (!res || (!ipr_is_gscsi(res) && !ipr_is_vset_device(res))) + /* If we are currently going through reset/reload, return failed. + * This will force the mid-layer to call ipr_eh_host_reset, + * which will then go to sleep and wait for the reset to complete + */ + if (ioa_cfg->in_reset_reload || ioa_cfg->ioa_is_dead) + return FAILED; + if (!res || !ipr_is_gscsi(res)) return FAILED; list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) { @@ -3101,21 +4007,16 @@ static int ipr_cancel_op(struct scsi_cmnd * scsi_cmd) if (!op_found) return SUCCESS; - ioarcb_addr = be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr); - ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg); ipr_cmd->ioarcb.res_handle = res->cfgte.res_handle; cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt; cmd_pkt->request_type = IPR_RQTYPE_IOACMD; - cmd_pkt->cdb[0] = IPR_ABORT_TASK; - cmd_pkt->cdb[2] = (ioarcb_addr >> 24) & 0xff; - cmd_pkt->cdb[3] = (ioarcb_addr >> 16) & 0xff; - cmd_pkt->cdb[4] = (ioarcb_addr >> 8) & 0xff; - cmd_pkt->cdb[5] = ioarcb_addr & 0xff; + cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS; ipr_cmd->u.sdev = scsi_cmd->device; - ipr_sdev_err(scsi_cmd->device, "Aborting command: %02X\n", scsi_cmd->cmnd[0]); - ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_ABORT_TASK_TIMEOUT); + scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n", + scsi_cmd->cmnd[0]); + ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT); ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc); /* @@ -3128,7 +4029,8 @@ static int ipr_cancel_op(struct scsi_cmnd * scsi_cmd) } list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q); - res->needs_sync_complete = 1; + if (!ipr_is_naca_model(res)) + res->needs_sync_complete = 1; LEAVE; return (IPR_IOASC_SENSE_KEY(ioasc) ? 
FAILED : SUCCESS); @@ -3143,23 +4045,17 @@ static int ipr_cancel_op(struct scsi_cmnd * scsi_cmd) **/ static int ipr_eh_abort(struct scsi_cmnd * scsi_cmd) { - struct ipr_ioa_cfg *ioa_cfg; + unsigned long flags; + int rc; ENTER; - ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata; - /* If we are currently going through reset/reload, return failed. This will force the - mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the - reset to complete */ - if (ioa_cfg->in_reset_reload) - return FAILED; - if (ioa_cfg->ioa_is_dead) - return FAILED; - if (!scsi_cmd->device->hostdata) - return FAILED; + spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags); + rc = ipr_cancel_op(scsi_cmd); + spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags); LEAVE; - return ipr_cancel_op(scsi_cmd); + return rc; } /** @@ -3207,12 +4103,11 @@ static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg, * ipr_isr - Interrupt service routine * @irq: irq number * @devp: pointer to ioa config struct - * @regs: pt_regs struct * * Return value: * IRQ_NONE / IRQ_HANDLED **/ -static irqreturn_t ipr_isr(int irq, void *devp, struct pt_regs *regs) +static irqreturn_t ipr_isr(int irq, void *devp) { struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp; unsigned long lock_flags = 0; @@ -3430,15 +4325,16 @@ static void ipr_erp_done(struct ipr_cmnd *ipr_cmd) if (IPR_IOASC_SENSE_KEY(ioasc) > 0) { scsi_cmd->result |= (DID_ERROR << 16); - ipr_sdev_err(scsi_cmd->device, - "Request Sense failed with IOASC: 0x%08X\n", ioasc); + scmd_printk(KERN_ERR, scsi_cmd, + "Request Sense failed with IOASC: 0x%08X\n", ioasc); } else { memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE); } if (res) { - res->needs_sync_complete = 1; + if (!ipr_is_naca_model(res)) + res->needs_sync_complete = 1; res->in_erp = 0; } ipr_unmap_sglist(ioa_cfg, ipr_cmd); @@ -3535,7 +4431,7 @@ static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd) ipr_reinit_ipr_cmnd_for_erp(ipr_cmd); - if (!res->tcq_active) { + if (!scsi_get_tag_type(scsi_cmd->device)) { ipr_erp_request_sense(ipr_cmd); return; } @@ -3552,6 +4448,7 @@ static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd) * ipr_dump_ioasa - Dump contents of IOASA * @ioa_cfg: ioa config struct * @ipr_cmd: ipr command struct + * @res: resource entry struct * * This function is invoked by the interrupt handler when ops * fail. It will log the IOASA if appropriate. 
Only called @@ -3561,13 +4458,13 @@ static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd) * none **/ static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg, - struct ipr_cmnd *ipr_cmd) + struct ipr_cmnd *ipr_cmd, struct ipr_resource_entry *res) { int i; u16 data_len; u32 ioasc; struct ipr_ioasa *ioasa = &ipr_cmd->ioasa; - u32 *ioasa_data = (u32 *)ioasa; + __be32 *ioasa_data = (__be32 *)ioasa; int error_index; ioasc = be32_to_cpu(ioasa->ioasc) & IPR_IOASC_IOASC_MASK; @@ -3589,16 +4486,7 @@ static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg, return; } - ipr_sdev_err(ipr_cmd->scsi_cmd->device, "%s\n", - ipr_error_table[error_index].error); - - if ((ioasa->u.gpdd.end_state <= ARRAY_SIZE(ipr_gpdd_dev_end_states)) && - (ioasa->u.gpdd.bus_phase <= ARRAY_SIZE(ipr_gpdd_dev_bus_phases))) { - ipr_sdev_err(ipr_cmd->scsi_cmd->device, - "Device End state: %s Phase: %s\n", - ipr_gpdd_dev_end_states[ioasa->u.gpdd.end_state], - ipr_gpdd_dev_bus_phases[ioasa->u.gpdd.bus_phase]); - } + ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error); if (sizeof(struct ipr_ioasa) < be16_to_cpu(ioasa->ret_stat_len)) data_len = sizeof(struct ipr_ioasa); @@ -3707,6 +4595,29 @@ static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd) } } +/** + * ipr_get_autosense - Copy autosense data to sense buffer + * @ipr_cmd: ipr command struct + * + * This function copies the autosense buffer to the buffer + * in the scsi_cmd, if there is autosense available. + * + * Return value: + * 1 if autosense was available / 0 if not + **/ +static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd) +{ + struct ipr_ioasa *ioasa = &ipr_cmd->ioasa; + + if ((be32_to_cpu(ioasa->ioasc_specific) & IPR_AUTOSENSE_VALID) == 0) + return 0; + + memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data, + min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len), + SCSI_SENSE_BUFFERSIZE)); + return 1; +} + /** * ipr_erp_start - Process an error response for a SCSI op * @ioa_cfg: ioa config struct @@ -3731,20 +4642,25 @@ static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg, } if (ipr_is_gscsi(res)) - ipr_dump_ioasa(ioa_cfg, ipr_cmd); + ipr_dump_ioasa(ioa_cfg, ipr_cmd, res); else ipr_gen_sense(ipr_cmd); switch (ioasc & IPR_IOASC_IOASC_MASK) { case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST: - scsi_cmd->result |= (DID_ERROR << 16); + if (ipr_is_naca_model(res)) + scsi_cmd->result |= (DID_ABORT << 16); + else + scsi_cmd->result |= (DID_IMM_RETRY << 16); break; case IPR_IOASC_IR_RESOURCE_HANDLE: + case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA: scsi_cmd->result |= (DID_NO_CONNECT << 16); break; case IPR_IOASC_HW_SEL_TIMEOUT: scsi_cmd->result |= (DID_NO_CONNECT << 16); - res->needs_sync_complete = 1; + if (!ipr_is_naca_model(res)) + res->needs_sync_complete = 1; break; case IPR_IOASC_SYNC_REQUIRED: if (!res->in_erp) @@ -3752,6 +4668,7 @@ static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg, scsi_cmd->result |= (DID_IMM_RETRY << 16); break; case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */ + case IPR_IOASA_IR_DUAL_IOA_DISABLED: scsi_cmd->result |= (DID_PASSTHROUGH << 16); break; case IPR_IOASC_BUS_WAS_RESET: @@ -3763,21 +4680,28 @@ static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg, if (!res->resetting_device) scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel); scsi_cmd->result |= (DID_ERROR << 16); - res->needs_sync_complete = 1; + if (!ipr_is_naca_model(res)) + res->needs_sync_complete = 1; break; case IPR_IOASC_HW_DEV_BUS_STATUS: scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc); if (IPR_IOASC_SENSE_STATUS(ioasc) == 
SAM_STAT_CHECK_CONDITION) { - ipr_erp_cancel_all(ipr_cmd); - return; + if (!ipr_get_autosense(ipr_cmd)) { + if (!ipr_is_naca_model(res)) { + ipr_erp_cancel_all(ipr_cmd); + return; + } + } } - res->needs_sync_complete = 1; + if (!ipr_is_naca_model(res)) + res->needs_sync_complete = 1; break; case IPR_IOASC_NR_INIT_CMD_REQUIRED: break; default: - scsi_cmd->result |= (DID_ERROR << 16); - if (!ipr_is_vset_device(res)) + if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR) + scsi_cmd->result |= (DID_ERROR << 16); + if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res)) res->needs_sync_complete = 1; break; } @@ -3813,35 +4737,6 @@ static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd) ipr_erp_start(ioa_cfg, ipr_cmd); } -/** - * ipr_save_ioafp_mode_select - Save adapters mode select data - * @ioa_cfg: ioa config struct - * @scsi_cmd: scsi command struct - * - * This function saves mode select data for the adapter to - * use following an adapter reset. - * - * Return value: - * 0 on success / SCSI_MLQUEUE_HOST_BUSY on failure - **/ -static int ipr_save_ioafp_mode_select(struct ipr_ioa_cfg *ioa_cfg, - struct scsi_cmnd *scsi_cmd) -{ - if (!ioa_cfg->saved_mode_pages) { - ioa_cfg->saved_mode_pages = kmalloc(sizeof(struct ipr_mode_pages), - GFP_ATOMIC); - if (!ioa_cfg->saved_mode_pages) { - dev_err(&ioa_cfg->pdev->dev, - "IOA mode select buffer allocation failed\n"); - return SCSI_MLQUEUE_HOST_BUSY; - } - } - - memcpy(ioa_cfg->saved_mode_pages, scsi_cmd->buffer, scsi_cmd->cmnd[4]); - ioa_cfg->saved_mode_page_len = scsi_cmd->cmnd[4]; - return 0; -} - /** * ipr_queuecommand - Queue a mid-layer request * @scsi_cmd: scsi command struct @@ -3873,7 +4768,7 @@ static int ipr_queuecommand(struct scsi_cmnd *scsi_cmd, * We have told the host to stop giving us new requests, but * ERP ops don't count. 
FIXME */ - if (unlikely(!ioa_cfg->allow_cmds)) + if (unlikely(!ioa_cfg->allow_cmds && !ioa_cfg->ioa_is_dead)) return SCSI_MLQUEUE_HOST_BUSY; /* @@ -3887,6 +4782,9 @@ static int ipr_queuecommand(struct scsi_cmnd *scsi_cmd, return 0; } + if (ipr_is_gata(res) && res->sata_port) + return ata_sas_queuecmd(scsi_cmd, done, res->sata_port->ap); + ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg); ioarcb = &ipr_cmd->ioarcb; list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q); @@ -3912,12 +4810,10 @@ static int ipr_queuecommand(struct scsi_cmnd *scsi_cmd, ioarcb->cmd_pkt.flags_lo |= ipr_get_task_attributes(scsi_cmd); } - if (!ipr_is_gscsi(res) && scsi_cmd->cmnd[0] >= 0xC0) + if (scsi_cmd->cmnd[0] >= 0xC0 && + (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE)) ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD; - if (ipr_is_ioa_resource(res) && scsi_cmd->cmnd[0] == MODE_SELECT) - rc = ipr_save_ioafp_mode_select(ioa_cfg, scsi_cmd); - if (likely(rc == 0)) rc = ipr_build_ioadl(ioa_cfg, ipr_cmd); @@ -3933,6 +4829,26 @@ static int ipr_queuecommand(struct scsi_cmnd *scsi_cmd, return 0; } +/** + * ipr_ioctl - IOCTL handler + * @sdev: scsi device struct + * @cmd: IOCTL cmd + * @arg: IOCTL arg + * + * Return value: + * 0 on success / other on failure + **/ +static int ipr_ioctl(struct scsi_device *sdev, int cmd, void __user *arg) +{ + struct ipr_resource_entry *res; + + res = (struct ipr_resource_entry *)sdev->hostdata; + if (res && ipr_is_gata(res)) + return ata_scsi_ioctl(sdev, cmd, arg); + + return -EINVAL; +} + /** * ipr_info - Get information about the card/driver * @scsi_host: scsi host struct @@ -3959,6 +4875,7 @@ static struct scsi_host_template driver_template = { .module = THIS_MODULE, .name = "IPR", .info = ipr_ioa_info, + .ioctl = ipr_ioctl, .queuecommand = ipr_queuecommand, .eh_abort_handler = ipr_eh_abort, .eh_device_reset_handler = ipr_eh_dev_reset, @@ -3966,11 +4883,15 @@ static struct scsi_host_template driver_template = { .slave_alloc = ipr_slave_alloc, .slave_configure = ipr_slave_configure, .slave_destroy = ipr_slave_destroy, + .target_alloc = ipr_target_alloc, + .target_destroy = ipr_target_destroy, + .change_queue_depth = ipr_change_queue_depth, + .change_queue_type = ipr_change_queue_type, .bios_param = ipr_biosparam, .can_queue = IPR_MAX_COMMANDS, .this_id = -1, .sg_tablesize = IPR_MAX_SGLIST, - .max_sectors = IPR_MAX_SECTORS, + .max_sectors = IPR_IOA_MAX_SECTORS, .cmd_per_lun = IPR_MAX_CMD_PER_LUN, .use_clustering = ENABLE_CLUSTERING, .shost_attrs = ipr_ioa_attrs, @@ -3978,6 +4899,336 @@ static struct scsi_host_template driver_template = { .proc_name = IPR_NAME }; +/** + * ipr_ata_phy_reset - libata phy_reset handler + * @ap: ata port to reset + * + **/ +static void ipr_ata_phy_reset(struct ata_port *ap) +{ + unsigned long flags; + struct ipr_sata_port *sata_port = ap->private_data; + struct ipr_resource_entry *res = sata_port->res; + struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg; + int rc; + + ENTER; + spin_lock_irqsave(ioa_cfg->host->host_lock, flags); + while(ioa_cfg->in_reset_reload) { + spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); + wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); + spin_lock_irqsave(ioa_cfg->host->host_lock, flags); + } + + if (!ioa_cfg->allow_cmds) + goto out_unlock; + + rc = ipr_device_reset(ioa_cfg, res); + + if (rc) { + ap->ops->port_disable(ap); + goto out_unlock; + } + + switch(res->cfgte.proto) { + case IPR_PROTO_SATA: + case IPR_PROTO_SAS_STP: + ap->device[0].class = ATA_DEV_ATA; + break; + case IPR_PROTO_SATA_ATAPI: + 
case IPR_PROTO_SAS_STP_ATAPI: + ap->device[0].class = ATA_DEV_ATAPI; + break; + default: + ap->device[0].class = ATA_DEV_UNKNOWN; + ap->ops->port_disable(ap); + break; + }; + +out_unlock: + spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); + LEAVE; +} + +/** + * ipr_ata_post_internal - Cleanup after an internal command + * @qc: ATA queued command + * + * Return value: + * none + **/ +static void ipr_ata_post_internal(struct ata_queued_cmd *qc) +{ + struct ipr_sata_port *sata_port = qc->ap->private_data; + struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg; + struct ipr_cmnd *ipr_cmd; + unsigned long flags; + + spin_lock_irqsave(ioa_cfg->host->host_lock, flags); + while(ioa_cfg->in_reset_reload) { + spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); + wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); + spin_lock_irqsave(ioa_cfg->host->host_lock, flags); + } + + list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) { + if (ipr_cmd->qc == qc) { + ipr_device_reset(ioa_cfg, sata_port->res); + break; + } + } + spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); +} + +/** + * ipr_tf_read - Read the current ATA taskfile for the ATA port + * @ap: ATA port + * @tf: destination ATA taskfile + * + * Return value: + * none + **/ +static void ipr_tf_read(struct ata_port *ap, struct ata_taskfile *tf) +{ + struct ipr_sata_port *sata_port = ap->private_data; + struct ipr_ioasa_gata *g = &sata_port->ioasa; + + tf->feature = g->error; + tf->nsect = g->nsect; + tf->lbal = g->lbal; + tf->lbam = g->lbam; + tf->lbah = g->lbah; + tf->device = g->device; + tf->command = g->status; + tf->hob_nsect = g->hob_nsect; + tf->hob_lbal = g->hob_lbal; + tf->hob_lbam = g->hob_lbam; + tf->hob_lbah = g->hob_lbah; + tf->ctl = g->alt_status; +} + +/** + * ipr_copy_sata_tf - Copy a SATA taskfile to an IOA data structure + * @regs: destination + * @tf: source ATA taskfile + * + * Return value: + * none + **/ +static void ipr_copy_sata_tf(struct ipr_ioarcb_ata_regs *regs, + struct ata_taskfile *tf) +{ + regs->feature = tf->feature; + regs->nsect = tf->nsect; + regs->lbal = tf->lbal; + regs->lbam = tf->lbam; + regs->lbah = tf->lbah; + regs->device = tf->device; + regs->command = tf->command; + regs->hob_feature = tf->hob_feature; + regs->hob_nsect = tf->hob_nsect; + regs->hob_lbal = tf->hob_lbal; + regs->hob_lbam = tf->hob_lbam; + regs->hob_lbah = tf->hob_lbah; + regs->ctl = tf->ctl; +} + +/** + * ipr_sata_done - done function for SATA commands + * @ipr_cmd: ipr command struct + * + * This function is invoked by the interrupt handler for + * ops generated by the SCSI mid-layer to SATA devices + * + * Return value: + * none + **/ +static void ipr_sata_done(struct ipr_cmnd *ipr_cmd) +{ + struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; + struct ata_queued_cmd *qc = ipr_cmd->qc; + struct ipr_sata_port *sata_port = qc->ap->private_data; + struct ipr_resource_entry *res = sata_port->res; + u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc); + + memcpy(&sata_port->ioasa, &ipr_cmd->ioasa.u.gata, + sizeof(struct ipr_ioasa_gata)); + ipr_dump_ioasa(ioa_cfg, ipr_cmd, res); + + if (be32_to_cpu(ipr_cmd->ioasa.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET) + scsi_report_device_reset(ioa_cfg->host, res->cfgte.res_addr.bus, + res->cfgte.res_addr.target); + + if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR) + qc->err_mask |= __ac_err_mask(ipr_cmd->ioasa.u.gata.status); + else + qc->err_mask |= ac_err_mask(ipr_cmd->ioasa.u.gata.status); + list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q); + ata_qc_complete(qc); +} + +/** + * 
ipr_build_ata_ioadl - Build an ATA scatter/gather list + * @ipr_cmd: ipr command struct + * @qc: ATA queued command + * + **/ +static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd, + struct ata_queued_cmd *qc) +{ + u32 ioadl_flags = 0; + struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; + struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl; + int len = qc->nbytes + qc->pad_len; + struct scatterlist *sg; + + if (len == 0) + return; + + if (qc->dma_dir == DMA_TO_DEVICE) { + ioadl_flags = IPR_IOADL_FLAGS_WRITE; + ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ; + ioarcb->write_data_transfer_length = cpu_to_be32(len); + ioarcb->write_ioadl_len = + cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg); + } else if (qc->dma_dir == DMA_FROM_DEVICE) { + ioadl_flags = IPR_IOADL_FLAGS_READ; + ioarcb->read_data_transfer_length = cpu_to_be32(len); + ioarcb->read_ioadl_len = + cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg); + } + + ata_for_each_sg(sg, qc) { + ioadl->flags_and_data_len = cpu_to_be32(ioadl_flags | sg_dma_len(sg)); + ioadl->address = cpu_to_be32(sg_dma_address(sg)); + if (ata_sg_is_last(sg, qc)) + ioadl->flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST); + else + ioadl++; + } +} + +/** + * ipr_qc_issue - Issue a SATA qc to a device + * @qc: queued command + * + * Return value: + * 0 if success + **/ +static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc) +{ + struct ata_port *ap = qc->ap; + struct ipr_sata_port *sata_port = ap->private_data; + struct ipr_resource_entry *res = sata_port->res; + struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg; + struct ipr_cmnd *ipr_cmd; + struct ipr_ioarcb *ioarcb; + struct ipr_ioarcb_ata_regs *regs; + + if (unlikely(!ioa_cfg->allow_cmds || ioa_cfg->ioa_is_dead)) + return -EIO; + + ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg); + ioarcb = &ipr_cmd->ioarcb; + regs = &ioarcb->add_data.u.regs; + + memset(&ioarcb->add_data, 0, sizeof(ioarcb->add_data)); + ioarcb->add_cmd_parms_len = cpu_to_be32(sizeof(ioarcb->add_data.u.regs)); + + list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q); + ipr_cmd->qc = qc; + ipr_cmd->done = ipr_sata_done; + ipr_cmd->ioarcb.res_handle = res->cfgte.res_handle; + ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU; + ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC; + ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK; + ipr_cmd->dma_use_sg = qc->pad_len ? 
qc->n_elem + 1 : qc->n_elem; + + ipr_build_ata_ioadl(ipr_cmd, qc); + regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION; + ipr_copy_sata_tf(regs, &qc->tf); + memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN); + ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_PHYS_LOC(res->cfgte.res_addr)); + + switch (qc->tf.protocol) { + case ATA_PROT_NODATA: + case ATA_PROT_PIO: + break; + + case ATA_PROT_DMA: + regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA; + break; + + case ATA_PROT_ATAPI: + case ATA_PROT_ATAPI_NODATA: + regs->flags |= IPR_ATA_FLAG_PACKET_CMD; + break; + + case ATA_PROT_ATAPI_DMA: + regs->flags |= IPR_ATA_FLAG_PACKET_CMD; + regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA; + break; + + default: + WARN_ON(1); + return -1; + } + + mb(); + writel(be32_to_cpu(ioarcb->ioarcb_host_pci_addr), + ioa_cfg->regs.ioarrin_reg); + return 0; +} + +/** + * ipr_ata_check_status - Return last ATA status + * @ap: ATA port + * + * Return value: + * ATA status + **/ +static u8 ipr_ata_check_status(struct ata_port *ap) +{ + struct ipr_sata_port *sata_port = ap->private_data; + return sata_port->ioasa.status; +} + +/** + * ipr_ata_check_altstatus - Return last ATA altstatus + * @ap: ATA port + * + * Return value: + * Alt ATA status + **/ +static u8 ipr_ata_check_altstatus(struct ata_port *ap) +{ + struct ipr_sata_port *sata_port = ap->private_data; + return sata_port->ioasa.alt_status; +} + +static struct ata_port_operations ipr_sata_ops = { + .port_disable = ata_port_disable, + .check_status = ipr_ata_check_status, + .check_altstatus = ipr_ata_check_altstatus, + .dev_select = ata_noop_dev_select, + .phy_reset = ipr_ata_phy_reset, + .post_internal_cmd = ipr_ata_post_internal, + .tf_read = ipr_tf_read, + .qc_prep = ata_noop_qc_prep, + .qc_issue = ipr_qc_issue, + .port_start = ata_sas_port_start, + .port_stop = ata_sas_port_stop +}; + +static struct ata_port_info sata_port_info = { + .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | ATA_FLAG_SATA_RESET | + ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA, + .pio_mask = 0x10, /* pio4 */ + .mwdma_mask = 0x07, + .udma_mask = 0x7f, /* udma0-6 */ + .port_ops = &ipr_sata_ops +}; + #ifdef CONFIG_PPC_PSERIES static const u16 ipr_blocked_processors[] = { PV_NORTHSTAR, @@ -4073,14 +5324,15 @@ static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd) ioa_cfg->in_reset_reload = 0; ioa_cfg->allow_cmds = 1; ioa_cfg->reset_cmd = NULL; + ioa_cfg->doorbell |= IPR_RUNTIME_RESET; list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { if (ioa_cfg->allow_ml_add_del && (res->add_to_ml || res->del_from_ml)) { ipr_trace; - schedule_work(&ioa_cfg->work_q); break; } } + schedule_work(&ioa_cfg->work_q); list_for_each_entry_safe(hostrcb, temp, &ioa_cfg->hostrcb_free_q, queue) { list_del(&hostrcb->queue); @@ -4146,7 +5398,7 @@ static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd) ipr_cmd->job_step = ipr_ioa_reset_done; list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) { - if (!ipr_is_af_dasd_device(res)) + if (!ipr_is_scsi_disk(res)) continue; ipr_cmd->u.res = res; @@ -4178,6 +5430,36 @@ static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd) return IPR_RC_JOB_CONTINUE; } +/** + * ipr_setup_write_cache - Disable write cache if needed + * @ipr_cmd: ipr command struct + * + * This function sets up adapters write cache to desired setting + * + * Return value: + * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN + **/ +static int ipr_setup_write_cache(struct ipr_cmnd *ipr_cmd) +{ + struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; + + ipr_cmd->job_step = ipr_set_supported_devs; + ipr_cmd->u.res = 
list_entry(ioa_cfg->used_res_q.next, + struct ipr_resource_entry, queue); + + if (ioa_cfg->cache_state != CACHE_DISABLED) + return IPR_RC_JOB_CONTINUE; + + ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE); + ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD; + ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN; + ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL; + + ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT); + + return IPR_RC_JOB_RETURN; +} + /** * ipr_get_mode_page - Locate specified mode page * @mode_pages: mode page buffer @@ -4336,7 +5618,7 @@ static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg, * none **/ static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd, - u32 res_handle, u8 parm, u32 dma_addr, + __be32 res_handle, u8 parm, u32 dma_addr, u8 xfer_len) { struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl; @@ -4373,26 +5655,17 @@ static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd) int length; ENTER; - if (ioa_cfg->saved_mode_pages) { - memcpy(mode_pages, ioa_cfg->saved_mode_pages, - ioa_cfg->saved_mode_page_len); - length = ioa_cfg->saved_mode_page_len; - } else { - ipr_scsi_bus_speed_limit(ioa_cfg); - ipr_check_term_power(ioa_cfg, mode_pages); - ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages); - length = mode_pages->hdr.length + 1; - mode_pages->hdr.length = 0; - } + ipr_scsi_bus_speed_limit(ioa_cfg); + ipr_check_term_power(ioa_cfg, mode_pages); + ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages); + length = mode_pages->hdr.length + 1; + mode_pages->hdr.length = 0; ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11, ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages), length); - ipr_cmd->job_step = ipr_set_supported_devs; - ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next, - struct ipr_resource_entry, queue); - + ipr_cmd->job_step = ipr_setup_write_cache; ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT); LEAVE; @@ -4411,7 +5684,7 @@ static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd) * none **/ static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd, - u32 res_handle, + __be32 res_handle, u8 parm, u32 dma_addr, u8 xfer_len) { struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl; @@ -4430,6 +5703,51 @@ static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd, ioarcb->read_data_transfer_length = cpu_to_be32(xfer_len); } +/** + * ipr_reset_cmd_failed - Handle failure of IOA reset command + * @ipr_cmd: ipr command struct + * + * This function handles the failure of an IOA bringup command. + * + * Return value: + * IPR_RC_JOB_RETURN + **/ +static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd) +{ + struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; + u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc); + + dev_err(&ioa_cfg->pdev->dev, + "0x%02X failed with IOASC: 0x%08X\n", + ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc); + + ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); + list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q); + return IPR_RC_JOB_RETURN; +} + +/** + * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense + * @ipr_cmd: ipr command struct + * + * This function handles the failure of a Mode Sense to the IOAFP. + * Some adapters do not handle all mode pages. 
+ * + * Return value: + * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN + **/ +static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd) +{ + u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc); + + if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) { + ipr_cmd->job_step = ipr_setup_write_cache; + return IPR_RC_JOB_CONTINUE; + } + + return ipr_reset_cmd_failed(ipr_cmd); +} + /** * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA * @ipr_cmd: ipr command struct @@ -4451,6 +5769,7 @@ static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd) sizeof(struct ipr_mode_pages)); ipr_cmd->job_step = ipr_ioafp_mode_select_page28; + ipr_cmd->job_step_failed = ipr_reset_mode_sense_failed; ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT); @@ -4519,6 +5838,7 @@ static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd) list_for_each_entry_safe(res, temp, &old_res, queue) { if (res->sdev) { res->del_from_ml = 1; + res->cfgte.res_handle = IPR_INVALID_RES_HANDLE; list_move_tail(&res->queue, &ioa_cfg->used_res_q); } else { list_move_tail(&res->queue, &ioa_cfg->free_res_q); @@ -4610,6 +5930,27 @@ static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page, LEAVE; } +/** + * ipr_inquiry_page_supported - Is the given inquiry page supported + * @page0: inquiry page 0 buffer + * @page: page code. + * + * This function determines if the specified inquiry page is supported. + * + * Return value: + * 1 if page is supported / 0 if not + **/ +static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page) +{ + int i; + + for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++) + if (page0->page[i] == page) + return 1; + + return 0; +} + /** * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter. * @ipr_cmd: ipr command struct @@ -4621,6 +5962,36 @@ static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page, * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN **/ static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd) +{ + struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; + struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data; + + ENTER; + + if (!ipr_inquiry_page_supported(page0, 1)) + ioa_cfg->cache_state = CACHE_NONE; + + ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg; + + ipr_ioafp_inquiry(ipr_cmd, 1, 3, + ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data), + sizeof(struct ipr_inquiry_page3)); + + LEAVE; + return IPR_RC_JOB_RETURN; +} + +/** + * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter. + * @ipr_cmd: ipr command struct + * + * This function sends a Page 0 inquiry to the adapter + * to retrieve supported inquiry pages. 
+ * + * Return value: + * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN + **/ +static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd) { struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; char type[5]; @@ -4632,11 +6003,11 @@ static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd) type[4] = '\0'; ioa_cfg->type = simple_strtoul((char *)type, NULL, 16); - ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg; + ipr_cmd->job_step = ipr_ioafp_page3_inquiry; - ipr_ioafp_inquiry(ipr_cmd, 1, 3, - ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data), - sizeof(struct ipr_inquiry_page3)); + ipr_ioafp_inquiry(ipr_cmd, 1, 0, + ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data), + sizeof(struct ipr_inquiry_page0)); LEAVE; return IPR_RC_JOB_RETURN; @@ -4656,7 +6027,7 @@ static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd) struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; ENTER; - ipr_cmd->job_step = ipr_ioafp_page3_inquiry; + ipr_cmd->job_step = ipr_ioafp_page0_inquiry; ipr_ioafp_inquiry(ipr_cmd, 0, 0, ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd), @@ -4814,7 +6185,7 @@ static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd) } /* Enable destructive diagnostics on IOA */ - writel(IPR_DOORBELL, ioa_cfg->regs.set_uproc_interrupt_reg); + writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg); writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg); int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); @@ -4822,8 +6193,8 @@ static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd) dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n"); ipr_cmd->timer.data = (unsigned long) ipr_cmd; - ipr_cmd->timer.expires = jiffies + IPR_OPERATIONAL_TIMEOUT; - ipr_cmd->timer.function = (void (*)(unsigned long))ipr_timeout; + ipr_cmd->timer.expires = jiffies + (ipr_transop_timeout * HZ); + ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout; ipr_cmd->done = ipr_reset_ioa_job; add_timer(&ipr_cmd->timer); list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q); @@ -4895,8 +6266,8 @@ static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg) } memset(&sdt, 0, sizeof(struct ipr_uc_sdt)); - rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (u32 *) &sdt, - (sizeof(struct ipr_uc_sdt)) / sizeof(u32)); + rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt, + (sizeof(struct ipr_uc_sdt)) / sizeof(__be32)); if (rc || (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE) || !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY)) { @@ -4915,8 +6286,8 @@ static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg) rc = ipr_get_ldump_data_section(ioa_cfg, be32_to_cpu(sdt.entry[0].bar_str_offset), - (u32 *)&hostrcb->hcam, - min(length, (int)sizeof(hostrcb->hcam)) / sizeof(u32)); + (__be32 *)&hostrcb->hcam, + min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32)); if (!rc) ipr_handle_log_data(ioa_cfg, hostrcb); @@ -4943,7 +6314,8 @@ static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd) int rc; ENTER; - rc = pci_restore_state(ioa_cfg->pdev, ioa_cfg->pci_cfg_buf); + pci_unblock_user_cfg_access(ioa_cfg->pdev); + rc = pci_restore_state(ioa_cfg->pdev); if (rc != PCIBIOS_SUCCESSFUL) { ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR); @@ -4997,6 +6369,7 @@ static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd) int rc; ENTER; + pci_block_user_cfg_access(ioa_cfg->pdev); rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START); if (rc != PCIBIOS_SUCCESSFUL) { @@ -5144,12 +6517,7 @@ static int 
ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd) ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8; ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff; - if (ipr_map_ucode_buffer(ipr_cmd, sglist, sglist->buffer_len)) { - dev_err(&ioa_cfg->pdev->dev, - "Failed to map microcode download buffer\n"); - return IPR_RC_JOB_CONTINUE; - } - + ipr_build_ucode_ioadl(ipr_cmd, sglist); ipr_cmd->job_step = ipr_reset_ucode_download_done; ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, @@ -5214,7 +6582,6 @@ static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd) static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd) { u32 rc, ioasc; - unsigned long scratch = ipr_cmd->u.scratch; struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; do { @@ -5230,17 +6597,13 @@ static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd) } if (IPR_IOASC_SENSE_KEY(ioasc)) { - dev_err(&ioa_cfg->pdev->dev, - "0x%02X failed with IOASC: 0x%08X\n", - ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc); - - ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); - list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q); - return; + rc = ipr_cmd->job_step_failed(ipr_cmd); + if (rc == IPR_RC_JOB_RETURN) + return; } ipr_reinit_ipr_cmnd(ipr_cmd); - ipr_cmd->u.scratch = scratch; + ipr_cmd->job_step_failed = ipr_reset_cmd_failed; rc = ipr_cmd->job_step(ipr_cmd); } while(rc == IPR_RC_JOB_CONTINUE); } @@ -5298,7 +6661,7 @@ static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg, if (ioa_cfg->in_reset_reload && ioa_cfg->sdt_state == GET_DUMP) ioa_cfg->sdt_state = ABORT_DUMP; - if (ioa_cfg->reset_retries++ > IPR_NUM_RESET_RELOAD_RETRIES) { + if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) { dev_err(&ioa_cfg->pdev->dev, "IOA taken offline - error recovery failed\n"); @@ -5325,6 +6688,109 @@ static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg, shutdown_type); } +/** + * ipr_reset_freeze - Hold off all I/O activity + * @ipr_cmd: ipr command struct + * + * Description: If the PCI slot is frozen, hold off all I/O + * activity; then, as soon as the slot is available again, + * initiate an adapter reset. + */ +static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd) +{ + /* Disallow new interrupts, avoid loop */ + ipr_cmd->ioa_cfg->allow_interrupts = 0; + list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q); + ipr_cmd->done = ipr_reset_ioa_job; + return IPR_RC_JOB_RETURN; +} + +/** + * ipr_pci_frozen - Called when slot has experienced a PCI bus error. + * @pdev: PCI device struct + * + * Description: This routine is called to tell us that the PCI bus + * is down. Can't do anything here, except put the device driver + * into a holding pattern, waiting for the PCI bus to come back. + */ +static void ipr_pci_frozen(struct pci_dev *pdev) +{ + unsigned long flags = 0; + struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev); + + spin_lock_irqsave(ioa_cfg->host->host_lock, flags); + _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE); + spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); +} + +/** + * ipr_pci_slot_reset - Called when PCI slot has been reset. + * @pdev: PCI device struct + * + * Description: This routine is called by the pci error recovery + * code after the PCI slot has been reset, just before we + * should resume normal operations. 
+ */ +static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev) +{ + unsigned long flags = 0; + struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev); + + spin_lock_irqsave(ioa_cfg->host->host_lock, flags); + _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space, + IPR_SHUTDOWN_NONE); + spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); + return PCI_ERS_RESULT_RECOVERED; +} + +/** + * ipr_pci_perm_failure - Called when PCI slot is dead for good. + * @pdev: PCI device struct + * + * Description: This routine is called when the PCI bus has + * permanently failed. + */ +static void ipr_pci_perm_failure(struct pci_dev *pdev) +{ + unsigned long flags = 0; + struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev); + + spin_lock_irqsave(ioa_cfg->host->host_lock, flags); + if (ioa_cfg->sdt_state == WAIT_FOR_DUMP) + ioa_cfg->sdt_state = ABORT_DUMP; + ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES; + ioa_cfg->in_ioa_bringdown = 1; + ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); + spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); +} + +/** + * ipr_pci_error_detected - Called when a PCI error is detected. + * @pdev: PCI device struct + * @state: PCI channel state + * + * Description: Called when a PCI error is detected. + * + * Return value: + * PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT + */ +static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev, + pci_channel_state_t state) +{ + switch (state) { + case pci_channel_io_frozen: + ipr_pci_frozen(pdev); + return PCI_ERS_RESULT_NEED_RESET; + case pci_channel_io_perm_failure: + ipr_pci_perm_failure(pdev); + return PCI_ERS_RESULT_DISCONNECT; + break; + default: + break; + } + return PCI_ERS_RESULT_NEED_RESET; +} + /** * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..) 
* @ioa_cfg: ioa cfg struct @@ -5344,7 +6810,12 @@ static int __devinit ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg) ENTER; spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags); dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg); - _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa, IPR_SHUTDOWN_NONE); + if (ioa_cfg->needs_hard_reset) { + ioa_cfg->needs_hard_reset = 0; + ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); + } else + _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa, + IPR_SHUTDOWN_NONE); spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags); wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); @@ -5421,7 +6892,6 @@ static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg) } ipr_free_dump(ioa_cfg); - kfree(ioa_cfg->saved_mode_pages); kfree(ioa_cfg->trace); } @@ -5437,13 +6907,15 @@ static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg) **/ static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg) { + struct pci_dev *pdev = ioa_cfg->pdev; + ENTER; - free_irq(ioa_cfg->pdev->irq, ioa_cfg); - iounmap((void *) ioa_cfg->hdw_dma_regs); - release_mem_region(ioa_cfg->hdw_dma_regs_pci, - pci_resource_len(ioa_cfg->pdev, 0)); + free_irq(pdev->irq, ioa_cfg); + iounmap(ioa_cfg->hdw_dma_regs); + pci_release_regions(pdev); ipr_free_mem(ioa_cfg); scsi_host_put(ioa_cfg->host); + pci_disable_device(pdev); LEAVE; } @@ -5458,7 +6930,7 @@ static int __devinit ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg) { struct ipr_cmnd *ipr_cmd; struct ipr_ioarcb *ioarcb; - u32 dma_addr; + dma_addr_t dma_addr; int i; ioa_cfg->ipr_cmd_pool = pci_pool_create (IPR_NAME, ioa_cfg->pdev, @@ -5468,7 +6940,7 @@ static int __devinit ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg) return -ENOMEM; for (i = 0; i < IPR_NUM_CMD_BLKS; i++) { - ipr_cmd = pci_pool_alloc (ioa_cfg->ipr_cmd_pool, SLAB_KERNEL, &dma_addr); + ipr_cmd = pci_pool_alloc (ioa_cfg->ipr_cmd_pool, GFP_KERNEL, &dma_addr); if (!ipr_cmd) { ipr_free_cmd_blks(ioa_cfg); @@ -5508,17 +6980,15 @@ static int __devinit ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg) **/ static int __devinit ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg) { - int i; + struct pci_dev *pdev = ioa_cfg->pdev; + int i, rc = -ENOMEM; ENTER; - ioa_cfg->res_entries = kmalloc(sizeof(struct ipr_resource_entry) * + ioa_cfg->res_entries = kzalloc(sizeof(struct ipr_resource_entry) * IPR_MAX_PHYSICAL_DEVS, GFP_KERNEL); if (!ioa_cfg->res_entries) - goto cleanup; - - memset(ioa_cfg->res_entries, 0, - sizeof(struct ipr_resource_entry) * IPR_MAX_PHYSICAL_DEVS); + goto out; for (i = 0; i < IPR_MAX_PHYSICAL_DEVS; i++) list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q); @@ -5528,24 +6998,24 @@ static int __devinit ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg) &ioa_cfg->vpd_cbs_dma); if (!ioa_cfg->vpd_cbs) - goto cleanup; + goto out_free_res_entries; if (ipr_alloc_cmd_blks(ioa_cfg)) - goto cleanup; + goto out_free_vpd_cbs; ioa_cfg->host_rrq = pci_alloc_consistent(ioa_cfg->pdev, sizeof(u32) * IPR_NUM_CMD_BLKS, &ioa_cfg->host_rrq_dma); if (!ioa_cfg->host_rrq) - goto cleanup; + goto out_ipr_free_cmd_blocks; ioa_cfg->cfg_table = pci_alloc_consistent(ioa_cfg->pdev, sizeof(struct ipr_config_table), &ioa_cfg->cfg_table_dma); if (!ioa_cfg->cfg_table) - goto cleanup; + goto out_free_host_rrq; for (i = 0; i < IPR_NUM_HCAMS; i++) { ioa_cfg->hostrcb[i] = pci_alloc_consistent(ioa_cfg->pdev, @@ -5553,31 +7023,44 @@ static int __devinit ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg) &ioa_cfg->hostrcb_dma[i]); if (!ioa_cfg->hostrcb[i]) - goto cleanup; + goto 
out_free_hostrcb_dma; - memset(ioa_cfg->hostrcb[i], 0, sizeof(struct ipr_hostrcb)); ioa_cfg->hostrcb[i]->hostrcb_dma = ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam); + ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg; list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q); } - ioa_cfg->trace = kmalloc(sizeof(struct ipr_trace_entry) * + ioa_cfg->trace = kzalloc(sizeof(struct ipr_trace_entry) * IPR_NUM_TRACE_ENTRIES, GFP_KERNEL); if (!ioa_cfg->trace) - goto cleanup; - - memset(ioa_cfg->trace, 0, - sizeof(struct ipr_trace_entry) * IPR_NUM_TRACE_ENTRIES); + goto out_free_hostrcb_dma; + rc = 0; +out: LEAVE; - return 0; - -cleanup: - ipr_free_mem(ioa_cfg); + return rc; - LEAVE; - return -ENOMEM; +out_free_hostrcb_dma: + while (i-- > 0) { + pci_free_consistent(pdev, sizeof(struct ipr_hostrcb), + ioa_cfg->hostrcb[i], + ioa_cfg->hostrcb_dma[i]); + } + pci_free_consistent(pdev, sizeof(struct ipr_config_table), + ioa_cfg->cfg_table, ioa_cfg->cfg_table_dma); +out_free_host_rrq: + pci_free_consistent(pdev, sizeof(u32) * IPR_NUM_CMD_BLKS, + ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma); +out_ipr_free_cmd_blocks: + ipr_free_cmd_blks(ioa_cfg); +out_free_vpd_cbs: + pci_free_consistent(pdev, sizeof(struct ipr_misc_cbs), + ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma); +out_free_res_entries: + kfree(ioa_cfg->res_entries); + goto out; } /** @@ -5614,9 +7097,16 @@ static void __devinit ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg) static void __devinit ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg, struct Scsi_Host *host, struct pci_dev *pdev) { + const struct ipr_interrupt_offsets *p; + struct ipr_interrupts *t; + void __iomem *base; + ioa_cfg->host = host; ioa_cfg->pdev = pdev; ioa_cfg->log_level = ipr_log_level; + ioa_cfg->doorbell = IPR_DOORBELL; + if (!ipr_auto_create) + ioa_cfg->doorbell |= IPR_RUNTIME_RESET; sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER); sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL); sprintf(ioa_cfg->ipr_free_label, IPR_FREEQ_LABEL); @@ -5632,9 +7122,13 @@ static void __devinit ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg, INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q); INIT_LIST_HEAD(&ioa_cfg->free_res_q); INIT_LIST_HEAD(&ioa_cfg->used_res_q); - INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread, ioa_cfg); + INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread); init_waitqueue_head(&ioa_cfg->reset_wait_q); ioa_cfg->sdt_state = INACTIVE; + if (ipr_enable_cache) + ioa_cfg->cache_state = CACHE_ENABLED; + else + ioa_cfg->cache_state = CACHE_DISABLED; ipr_initialize_bus_attr(ioa_cfg); @@ -5645,17 +7139,41 @@ static void __devinit ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg, host->max_cmd_len = IPR_MAX_CDB_LEN; pci_set_drvdata(pdev, ioa_cfg); - memcpy(&ioa_cfg->regs, &ioa_cfg->chip_cfg->regs, sizeof(ioa_cfg->regs)); + p = &ioa_cfg->chip_cfg->regs; + t = &ioa_cfg->regs; + base = ioa_cfg->hdw_dma_regs; + + t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg; + t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg; + t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg; + t->clr_interrupt_reg = base + p->clr_interrupt_reg; + t->sense_interrupt_reg = base + p->sense_interrupt_reg; + t->ioarrin_reg = base + p->ioarrin_reg; + t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg; + t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg; + t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg; +} + +/** + * ipr_get_chip_cfg - Find adapter chip configuration + * @dev_id: PCI device id struct + * + * Return value: + * ptr to chip 
+/**
+ * ipr_get_chip_cfg - Find adapter chip configuration
+ * @dev_id:	PCI device id struct
+ *
+ * Return value:
+ * 	ptr to chip config on success / NULL on failure
+ **/
+static const struct ipr_chip_cfg_t * __devinit
+ipr_get_chip_cfg(const struct pci_device_id *dev_id)
+{
+	int i;
+
+	if (dev_id->driver_data)
+		return (const struct ipr_chip_cfg_t *)dev_id->driver_data;
 
-	ioa_cfg->regs.set_interrupt_mask_reg += ioa_cfg->hdw_dma_regs;
-	ioa_cfg->regs.clr_interrupt_mask_reg += ioa_cfg->hdw_dma_regs;
-	ioa_cfg->regs.sense_interrupt_mask_reg += ioa_cfg->hdw_dma_regs;
-	ioa_cfg->regs.clr_interrupt_reg += ioa_cfg->hdw_dma_regs;
-	ioa_cfg->regs.sense_interrupt_reg += ioa_cfg->hdw_dma_regs;
-	ioa_cfg->regs.ioarrin_reg += ioa_cfg->hdw_dma_regs;
-	ioa_cfg->regs.sense_uproc_interrupt_reg += ioa_cfg->hdw_dma_regs;
-	ioa_cfg->regs.set_uproc_interrupt_reg += ioa_cfg->hdw_dma_regs;
-	ioa_cfg->regs.clr_uproc_interrupt_reg += ioa_cfg->hdw_dma_regs;
+	for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
+		if (ipr_chip[i].vendor == dev_id->vendor &&
+		    ipr_chip[i].device == dev_id->device)
+			return ipr_chip[i].cfg;
+	return NULL;
 }
 
 /**
@@ -5671,14 +7189,16 @@ static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
 {
 	struct ipr_ioa_cfg *ioa_cfg;
 	struct Scsi_Host *host;
-	unsigned long ipr_regs, ipr_regs_pci;
-	u32 rc = PCIBIOS_SUCCESSFUL;
+	unsigned long ipr_regs_pci;
+	void __iomem *ipr_regs;
+	int rc = PCIBIOS_SUCCESSFUL;
+	volatile u32 mask, uproc;
 
 	ENTER;
 
 	if ((rc = pci_enable_device(pdev))) {
 		dev_err(&pdev->dev, "Cannot enable adapter\n");
-		return rc;
+		goto out;
 	}
 
 	dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);
@@ -5687,33 +7207,39 @@ static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
 
 	if (!host) {
 		dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
-		return -ENOMEM;
+		rc = -ENOMEM;
+		goto out_disable;
 	}
 
 	ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
 	memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
+	ata_host_init(&ioa_cfg->ata_host, &pdev->dev,
+		      sata_port_info.flags, &ipr_sata_ops);
+
+	ioa_cfg->chip_cfg = ipr_get_chip_cfg(dev_id);
 
-	ioa_cfg->chip_cfg = (const struct ipr_chip_cfg_t *)dev_id->driver_data;
+	if (!ioa_cfg->chip_cfg) {
+		dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
+			dev_id->vendor, dev_id->device);
+		goto out_scsi_host_put;
+	}
 
 	ipr_regs_pci = pci_resource_start(pdev, 0);
 
-	if (!request_mem_region(ipr_regs_pci,
-				pci_resource_len(pdev, 0), IPR_NAME)) {
+	rc = pci_request_regions(pdev, IPR_NAME);
+	if (rc < 0) {
 		dev_err(&pdev->dev,
 			"Couldn't register memory range of registers\n");
-		scsi_host_put(host);
-		return -ENOMEM;
+		goto out_scsi_host_put;
 	}
 
-	ipr_regs = (unsigned long)ioremap(ipr_regs_pci,
-					  pci_resource_len(pdev, 0));
+	ipr_regs = ioremap(ipr_regs_pci, pci_resource_len(pdev, 0));
 
 	if (!ipr_regs) {
 		dev_err(&pdev->dev,
 			"Couldn't map memory range of registers\n");
-		release_mem_region(ipr_regs_pci, pci_resource_len(pdev, 0));
-		scsi_host_put(host);
-		return -ENOMEM;
+		rc = -ENOMEM;
+		goto out_release_regions;
 	}
 
 	ioa_cfg->hdw_dma_regs = ipr_regs;
@@ -5723,11 +7249,10 @@ static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
 	ipr_init_ioa_cfg(ioa_cfg, host, pdev);
 
 	pci_set_master(pdev);
-	rc = pci_set_dma_mask(pdev, 0xffffffff);
-	if (rc != PCIBIOS_SUCCESSFUL) {
+	rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
+	if (rc < 0) {
 		dev_err(&pdev->dev, "Failed to set PCI DMA mask\n");
-		rc = -EIO;
 		goto cleanup_nomem;
 	}
 
@@ -5741,7 +7266,7 @@ static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
 	}
 
 	/* Save away PCI config space for use following IOA reset */
-	rc = pci_save_state(pdev, ioa_cfg->pci_cfg_buf);
+	rc = pci_save_state(pdev);
 
 	if (rc != PCIBIOS_SUCCESSFUL) {
 		dev_err(&pdev->dev,
 			"Failed to save PCI config space\n");
@@ -5755,11 +7280,24 @@ static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
 	if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
 		goto cleanup_nomem;
 
-	if ((rc = ipr_alloc_mem(ioa_cfg)))
-		goto cleanup;
+	rc = ipr_alloc_mem(ioa_cfg);
+	if (rc < 0) {
+		dev_err(&pdev->dev,
+			"Couldn't allocate enough memory for device driver!\n");
+		goto cleanup_nomem;
+	}
+
+	/*
+	 * If HRRQ updated interrupt is not masked, or reset alert is set,
+	 * the card is in an unknown state and needs a hard reset
+	 */
+	mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
+	uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg);
+	if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
+		ioa_cfg->needs_hard_reset = 1;
 
 	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
-	rc = request_irq(pdev->irq, ipr_isr, SA_SHIRQ, IPR_NAME, ioa_cfg);
+	rc = request_irq(pdev->irq, ipr_isr, IRQF_SHARED, IPR_NAME, ioa_cfg);
 
 	if (rc) {
 		dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
@@ -5772,18 +7310,20 @@ static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
 	spin_unlock(&ipr_driver_lock);
 
 	LEAVE;
-	return 0;
+out:
+	return rc;
 
-cleanup:
-	dev_err(&pdev->dev, "Couldn't allocate enough memory for device driver!\n");
 cleanup_nolog:
 	ipr_free_mem(ioa_cfg);
 cleanup_nomem:
-	iounmap((void *) ipr_regs);
-	release_mem_region(ipr_regs_pci, pci_resource_len(pdev, 0));
+	iounmap(ipr_regs);
+out_release_regions:
+	pci_release_regions(pdev);
+out_scsi_host_put:
 	scsi_host_put(host);
-
-	return rc;
+out_disable:
+	pci_disable_device(pdev);
+	goto out;
 }
 
 /**
@@ -5851,6 +7391,7 @@ static void __ipr_remove(struct pci_dev *pdev)
 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
 	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
+	flush_scheduled_work();
 
 	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
 	spin_lock(&ipr_driver_lock);
@@ -5881,8 +7422,6 @@ static void ipr_remove(struct pci_dev *pdev)
 
 	ENTER;
 
-	ioa_cfg->allow_cmds = 0;
-	flush_scheduled_work();
 	ipr_remove_trace_file(&ioa_cfg->host->shost_classdev.kobj,
 			      &ipr_trace_attr);
 	ipr_remove_dump_file(&ioa_cfg->host->shost_classdev.kobj,
@@ -5950,13 +7489,14 @@ static int __devinit ipr_probe(struct pci_dev *pdev,
 	ipr_scan_vsets(ioa_cfg);
 	scsi_add_device(ioa_cfg->host, IPR_IOA_BUS, IPR_IOA_TARGET, IPR_IOA_LUN);
 	ioa_cfg->allow_ml_add_del = 1;
+	ioa_cfg->host->max_channel = IPR_VSET_BUS;
 	schedule_work(&ioa_cfg->work_q);
 	return 0;
 }
 
 /**
  * ipr_shutdown - Shutdown handler.
- * @dev:	device struct
+ * @pdev:	pci device struct
  *
  * This function is invoked upon system shutdown/reboot. It will issue
  * an adapter shutdown to the adapter to flush the write cache.
@@ -5964,9 +7504,9 @@ static int __devinit ipr_probe(struct pci_dev *pdev,
  * Return value:
  * 	none
  **/
-static void ipr_shutdown(struct device *dev)
+static void ipr_shutdown(struct pci_dev *pdev)
 {
-	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(to_pci_dev(dev));
+	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
 	unsigned long lock_flags = 0;
 
 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
@@ -5979,46 +7519,93 @@ static struct pci_device_id ipr_pci_table[] __devinitdata = {
 	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702,
 		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
-	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
-		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E,
-		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
 	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703,
 		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
 	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D,
 		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
+	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
+		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E,
+		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
+	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
+		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B,
+		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
+	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
+		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E,
+		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
+	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
+		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A,
+		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
+	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
+		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B,
+		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
+	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
+		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A,
+		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
+	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
+		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B,
+		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
+	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
+		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C,
+		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
+	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
+		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A,
+		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
+	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
+		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B,
+		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
+	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
+		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C,
+		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
+	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
+		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B8,
+		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
+	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
+		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7,
+		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780,
 		0, 0, (kernel_ulong_t)&ipr_chip_cfg[1] },
+	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
+		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E,
+		0, 0, (kernel_ulong_t)&ipr_chip_cfg[1] },
+	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
+		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F,
+		0, 0, (kernel_ulong_t)&ipr_chip_cfg[1] },
+	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
+		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F,
+		0, 0, (kernel_ulong_t)&ipr_chip_cfg[1] },
 	{ }
 };
 MODULE_DEVICE_TABLE(pci, ipr_pci_table);
 
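Each entry in the table above carries a pointer to the matching chip configuration in its driver_data slot, which the probe routine can read back through the pci_device_id it is handed. A hedged, illustrative sketch of the same idea with made-up IDs and types (none of these names come from the driver):

struct my_chip_cfg { int cache_line_size; };	/* hypothetical per-chip data */
static const struct my_chip_cfg my_chip_cfg_a = { .cache_line_size = 0x20 };

static struct pci_device_id my_pci_table[] = {
	{ PCI_VENDOR_ID_IBM, 0x1234,		/* 0x1234 is a made-up device ID */
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0,
	  (kernel_ulong_t)&my_chip_cfg_a },
	{ }					/* terminating entry */
};
MODULE_DEVICE_TABLE(pci, my_pci_table);

/* In the probe routine:
 *	const struct my_chip_cfg *cfg =
 *		(const struct my_chip_cfg *)id->driver_data;
 */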
+static struct pci_error_handlers ipr_err_handler = {
+	.error_detected = ipr_pci_error_detected,
+	.slot_reset = ipr_pci_slot_reset,
+};
+
 static struct pci_driver ipr_driver = {
 	.name = IPR_NAME,
 	.id_table = ipr_pci_table,
 	.probe = ipr_probe,
 	.remove = ipr_remove,
-	.driver = {
-		.shutdown = ipr_shutdown,
-	},
+	.shutdown = ipr_shutdown,
+	.err_handler = &ipr_err_handler,
 };
 
 /**
  * ipr_init - Module entry point
  *
  * Return value:
- * 	0 on success / non-zero on failure
+ * 	0 on success / negative value on failure
  **/
 static int __init ipr_init(void)
 {
 	ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
 		 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);
 
-	pci_register_driver(&ipr_driver);
-
-	return 0;
+	return pci_register_driver(&ipr_driver);
 }
 
 /**