* QLogic QLA1280 (Ultra2) and QLA12160 (Ultra3) SCSI driver
* Copyright (C) 2000 Qlogic Corporation (www.qlogic.com)
* Copyright (C) 2001-2004 Jes Sorensen, Wild Open Source Inc.
-* Copyright (C) 2003 Christoph Hellwig
+* Copyright (C) 2003-2004 Christoph Hellwig
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* General Public License for more details.
*
******************************************************************************/
-#define QLA1280_VERSION "3.24.3"
+#define QLA1280_VERSION "3.26"
/*****************************************************************************
Revision History:
+ Rev 3.26, January 16, 2006 Jes Sorensen
+ - Ditch all < 2.6 support
+ Rev 3.25.1, February 10, 2005 Christoph Hellwig
+ - use pci_map_single to map non-S/G requests
+ - remove qla1280_proc_info
+ Rev 3.25, September 28, 2004, Christoph Hellwig
+ - add support for ISP1020/1040
+ - don't include "scsi.h" anymore for 2.6.x
+ Rev 3.24.4 June 7, 2004 Christoph Hellwig
+ - restructure firmware loading, cleanup initialization code
+ - prepare support for ISP1020/1040 chips
Rev 3.24.3 January 19, 2004, Jes Sorensen
- Handle PCI DMA mask settings correctly
- Correct order of error handling in probe_one, free_irq should not
- Don't walk the entire list in qla1280_putq_t() just to directly
grab the pointer to the last element afterwards
Rev 3.23.5 Beta August 9, 2001, Jes Sorensen
- - Don't use SA_INTERRUPT, it's use is deprecated for this kinda driver
+ - Don't use IRQF_DISABLED, its use is deprecated for this kind of driver
Rev 3.23.4 Beta August 8, 2001, Jes Sorensen
- Set dev->max_sectors to 1024
Rev 3.23.3 Beta August 6, 2001, Jes Sorensen
*****************************************************************************/
-#include <linux/config.h>
#include <linux/module.h>
#include <linux/version.h>
#include <linux/pci_ids.h>
#include <linux/interrupt.h>
#include <linux/init.h>
+#include <linux/dma-mapping.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/types.h>
#include <asm/system.h>
-#if LINUX_VERSION_CODE >= 0x020545
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
-#include "scsi.h"
-#else
-#include <linux/blk.h>
-#include "scsi.h"
-#include <scsi/scsi_host.h>
-#include "sd.h"
+
+#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2)
+#include <asm/sn/io.h>
#endif
-#if LINUX_VERSION_CODE < 0x020407
-#error "Kernels older than 2.4.7 are no longer supported"
+#if LINUX_VERSION_CODE < 0x020600
+#error "Kernels older than 2.6.0 are no longer supported"
#endif
#include "qla1280.h"
#include "ql12160_fw.h" /* ISP RISC codes */
#include "ql1280_fw.h"
-
-
-/*
- * Missing PCI ID's
- */
-#ifndef PCI_DEVICE_ID_QLOGIC_ISP1080
-#define PCI_DEVICE_ID_QLOGIC_ISP1080 0x1080
-#endif
-#ifndef PCI_DEVICE_ID_QLOGIC_ISP1240
-#define PCI_DEVICE_ID_QLOGIC_ISP1240 0x1240
-#endif
-#ifndef PCI_DEVICE_ID_QLOGIC_ISP1280
-#define PCI_DEVICE_ID_QLOGIC_ISP1280 0x1280
-#endif
-#ifndef PCI_DEVICE_ID_QLOGIC_ISP10160
-#define PCI_DEVICE_ID_QLOGIC_ISP10160 0x1016
-#endif
-#ifndef PCI_DEVICE_ID_QLOGIC_ISP12160
-#define PCI_DEVICE_ID_QLOGIC_ISP12160 0x1216
-#endif
-
-#ifndef PCI_VENDOR_ID_AMI
-#define PCI_VENDOR_ID_AMI 0x101e
-#endif
+#include "ql1040_fw.h"
#ifndef BITS_PER_LONG
#error "BITS_PER_LONG not defined!"
#define QLA_64BIT_PTR 1
#endif
-#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2)
-#include <asm/sn/pci/pciio.h>
-/* Ugly hack needed for the virtual channel fix on SN2 */
-extern int snia_pcibr_rrb_alloc(struct pci_dev *pci_dev,
- int *count_vchan0, int *count_vchan1);
-#endif
-
#ifdef QLA_64BIT_PTR
#define pci_dma_hi32(a) ((a >> 16) >> 16)
#else
#define NVRAM_DELAY() udelay(500) /* 500 microseconds */
-#if LINUX_VERSION_CODE < 0x020500
-#define HOST_LOCK &io_request_lock
-#define irqreturn_t void
-#define IRQ_RETVAL(foo)
-#define MSG_ORDERED_TAG 1
-
-#define DMA_BIDIRECTIONAL SCSI_DATA_UNKNOWN
-#define DMA_TO_DEVICE SCSI_DATA_WRITE
-#define DMA_FROM_DEVICE SCSI_DATA_READ
-#define DMA_NONE SCSI_DATA_NONE
-
-#ifndef HAVE_SECTOR_T
-typedef unsigned int sector_t;
-#endif
-
-static inline void
-scsi_adjust_queue_depth(struct scsi_device *device, int tag, int depth)
-{
- if (tag) {
- device->tagged_queue = tag;
- device->current_tag = 0;
- }
- device->queue_depth = depth;
-}
-static inline struct Scsi_Host *scsi_host_alloc(Scsi_Host_Template *t, size_t s)
-{
- return scsi_register(t, s);
-}
-static inline void scsi_host_put(struct Scsi_Host *h)
-{
- scsi_unregister(h);
-}
-#else
-#define HOST_LOCK ha->host->host_lock
-#endif
-#if LINUX_VERSION_CODE < 0x020600
-#define DEV_SIMPLE_TAGS(device) device->tagged_queue
-/*
- * Hack around that qla1280_remove_one is called from
- * qla1280_release in 2.4
- */
-#undef __devexit
-#define __devexit
-#else
-#define DEV_SIMPLE_TAGS(device) device->simple_tags
-#endif
#if defined(__ia64__) && !defined(ia64_platform_is)
/*
 * Fallback for kernels that do not provide ia64_platform_is().
 * Compares the requested platform name against the global
 * platform_name string.  Note: the original macro referenced an
 * undeclared identifier `x` instead of its parameter, so it could
 * never have compiled if this branch were taken.
 */
#define ia64_platform_is(foo) (!strcmp((foo), platform_name))
#endif
+
+#define IS_ISP1040(ha) (ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP1020)
+#define IS_ISP1x40(ha) (ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP1020 || \
+ ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP1240)
+#define IS_ISP1x160(ha) (ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP10160 || \
+ ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP12160)
+
+
static int qla1280_probe_one(struct pci_dev *, const struct pci_device_id *);
static void qla1280_remove_one(struct pci_dev *);
* QLogic Driver Support Function Prototypes.
*/
static void qla1280_done(struct scsi_qla_host *);
-#if LINUX_VERSION_CODE < 0x020545
-static void qla1280_get_target_options(struct scsi_cmnd *, struct scsi_qla_host *);
-#endif
static int qla1280_get_token(char *);
static int qla1280_setup(char *s) __init;
/*
* QLogic ISP1280 Hardware Support Function Prototypes.
*/
-static int qla1280_isp_firmware(struct scsi_qla_host *);
-static int qla1280_chip_diag(struct scsi_qla_host *);
-static int qla1280_setup_chip(struct scsi_qla_host *);
+static int qla1280_load_firmware(struct scsi_qla_host *);
static int qla1280_init_rings(struct scsi_qla_host *);
static int qla1280_nvram_config(struct scsi_qla_host *);
static int qla1280_mailbox_command(struct scsi_qla_host *,
struct list_head *);
static uint16_t qla1280_get_nvram_word(struct scsi_qla_host *, uint32_t);
static uint16_t qla1280_nvram_request(struct scsi_qla_host *, uint32_t);
-static uint16_t qla1280_debounce_register(volatile uint16_t *);
+static uint16_t qla1280_debounce_register(volatile uint16_t __iomem *);
static request_t *qla1280_req_pkt(struct scsi_qla_host *);
static int qla1280_check_for_dead_scsi_bus(struct scsi_qla_host *,
unsigned int);
static int qla1280_set_target_parameters(struct scsi_qla_host *, int, int);
-static struct qla_driver_setup driver_setup __initdata;
+static struct qla_driver_setup driver_setup;
/*
* convert scsi data direction to request_t control flags
static char *qla1280;
/* insmod qla1280 options=verbose" */
-MODULE_PARM(qla1280, "s");
+module_param(qla1280, charp, 0);
#else
__setup("qla1280=", qla1280_setup);
#endif
#define CMD_SNSLEN(Cmnd) sizeof(Cmnd->sense_buffer)
#define CMD_RESULT(Cmnd) Cmnd->result
#define CMD_HANDLE(Cmnd) Cmnd->host_scribble
-#if LINUX_VERSION_CODE < 0x020545
-#define CMD_REQUEST(Cmnd) Cmnd->request.cmd
-#else
#define CMD_REQUEST(Cmnd) Cmnd->request->cmd
-#endif
#define CMD_HOST(Cmnd) Cmnd->device->host
#define SCSI_BUS_32(Cmnd) Cmnd->device->channel
unsigned char *fwver; /* Ptr to F/W version array */
};
-/* NOTE: qla1280_pci_tbl and ql1280_board_tbl must be in the same order */
+/* NOTE: the last argument in each entry is used to index ql1280_board_tbl */
static struct pci_device_id qla1280_pci_tbl[] = {
{PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP12160,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP1080,
+ {PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP1020,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1},
- {PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP1240,
+ {PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP1080,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2},
- {PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP1280,
+ {PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP1240,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 3},
- {PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP10160,
+ {PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP1280,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4},
+ {PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP10160,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5},
{0,}
};
MODULE_DEVICE_TABLE(pci, qla1280_pci_tbl);
/* Name , Number of ports, FW details */
{"QLA12160", 2, &fw12160i_code01[0], &fw12160i_length01,
&fw12160i_addr01, &fw12160i_version_str[0]},
+ {"QLA1040", 1, &risc_code01[0], &risc_code_length01,
+ &risc_code_addr01, &firmware_version[0]},
{"QLA1080", 1, &fw1280ei_code01[0], &fw1280ei_length01,
&fw1280ei_addr01, &fw1280ei_version_str[0]},
{"QLA1240", 2, &fw1280ei_code01[0], &fw1280ei_length01,
};
static int qla1280_verbose = 1;
-static int qla1280_buffer_size;
-static char *qla1280_buffer;
#if DEBUG_QLA1280
static int ql_debug_level = 1;
#define LEAVE_INTR(x) dprintk(4, "qla1280 : Leaving %s()\n", x);
-/*************************************************************************
- * qla1280_proc_info
- *
- * Description:
- * Return information to handle /proc support for the driver.
- *
- * buffer - ptrs to a page buffer
- *
- * Returns:
- *************************************************************************/
-#define PROC_BUF &qla1280_buffer[len]
-
-static int qla1280_proc_info(struct Scsi_Host *host, char *buffer,
- char **start, off_t offset, int length, int inout)
-{
- struct scsi_qla_host *ha = (struct scsi_qla_host *)host->hostdata;
- struct qla_boards *bdp = &ql1280_board_tbl[ha->devnum];
- int size = 0;
- int len = 0;
-
- if (inout)
- return -ENOSYS;
-
- /*
- * if our old buffer is the right size use it otherwise
- * allocate a new one.
- */
- if (qla1280_buffer_size != PAGE_SIZE) {
- /* deallocate this buffer and get a new one */
- if (qla1280_buffer != NULL) {
- free_page((unsigned long)qla1280_buffer);
- qla1280_buffer_size = 0;
- }
- qla1280_buffer = (char *)get_zeroed_page(GFP_KERNEL);
- }
- if (qla1280_buffer == NULL) {
- size = sprintf(buffer, "qla1280 - kmalloc error at line %d\n",
- __LINE__);
- return size;
- }
- /* save the size of our buffer */
- qla1280_buffer_size = PAGE_SIZE;
-
- /* 3.20 clear the buffer we use for proc display */
- memset(qla1280_buffer, 0, PAGE_SIZE);
-
- /* start building the print buffer */
- size = sprintf(PROC_BUF,
- "QLogic PCI to SCSI Adapter for ISP 1280/12160:\n"
- " Firmware version: %2d.%02d.%02d, Driver version %s\n",
- bdp->fwver[0], bdp->fwver[1], bdp->fwver[2],
- QLA1280_VERSION);
-
- len += size;
-
- size = sprintf(PROC_BUF, "SCSI Host Adapter Information: %s\n",
- bdp->name);
- len += size;
- size = sprintf(PROC_BUF, "Request Queue count= 0x%x, Response "
- "Queue count= 0x%x\n",
- REQUEST_ENTRY_CNT, RESPONSE_ENTRY_CNT);
- len += size;
- size = sprintf(PROC_BUF, "Number of pending commands = 0x%lx\n",
- ha->actthreads);
- len += size;
- size = sprintf(PROC_BUF, "Number of free request entries = %d\n",
- ha->req_q_cnt);
- len += size;
- size = sprintf(PROC_BUF, "\n"); /* 1 */
- len += size;
-
- if (len >= qla1280_buffer_size) {
- printk(KERN_WARNING
- "qla1280: Overflow buffer in qla1280_proc.c\n");
- }
-
- if (offset > len - 1) {
- free_page((unsigned long) qla1280_buffer);
- qla1280_buffer = NULL;
- qla1280_buffer_size = length = 0;
- *start = NULL;
- } else {
- *start = &qla1280_buffer[offset]; /* Start of wanted data */
- if (len - offset < length) {
- length = len - offset;
- }
- }
- return length;
-}
-
-
static int qla1280_read_nvram(struct scsi_qla_host *ha)
{
uint16_t *wptr;
static void qla1280_mailbox_timeout(unsigned long __data)
{
struct scsi_qla_host *ha = (struct scsi_qla_host *)__data;
- struct device_reg *reg;
+ struct device_reg __iomem *reg;
reg = ha->iobase;
ha->mailbox_out[0] = RD_REG_WORD(®->mailbox0);
uint16_t data;
unsigned char *handle;
int result, i;
- DECLARE_COMPLETION(wait);
+ DECLARE_COMPLETION_ONSTACK(wait);
struct timer_list timer;
ha = (struct scsi_qla_host *)(CMD_HOST(cmd)->hostdata);
break;
case ABORT_DEVICE:
- ha->flags.in_reset = 1;
if (qla1280_verbose)
printk(KERN_INFO
"scsi(%ld:%d:%d:%d): Queueing abort device "
printk(KERN_INFO
"scsi(%ld:%d:%d:%d): Queueing device reset "
"command.\n", ha->host_no, bus, target, lun);
- ha->flags.in_reset = 1;
if (qla1280_device_reset(ha, bus, target) == 0)
result = SUCCESS;
break;
case BUS_RESET:
if (qla1280_verbose)
- printk(KERN_INFO "qla1280(%ld:%d): Issuing BUS "
- "DEVICE RESET\n", ha->host_no, bus);
- ha->flags.in_reset = 1;
- if (qla1280_bus_reset(ha, bus == 0))
+ printk(KERN_INFO "qla1280(%ld:%d): Issued bus "
+ "reset.\n", ha->host_no, bus);
+ if (qla1280_bus_reset(ha, bus) == 0)
result = SUCCESS;
-
break;
case ADAPTER_RESET:
if (!list_empty(&ha->done_q))
qla1280_done(ha);
- ha->flags.in_reset = 0;
/* If we didn't manage to issue the action, or we have no
* command to wait for, exit here */
add_timer(&timer);
/* wait for the action to complete (or the timer to expire) */
- spin_unlock_irq(HOST_LOCK);
+ spin_unlock_irq(ha->host->host_lock);
wait_for_completion(&wait);
del_timer_sync(&timer);
- spin_lock_irq(HOST_LOCK);
+ spin_lock_irq(ha->host->host_lock);
sp->wait = NULL;
/* the only action we might get a fail for is abort */
static int
qla1280_eh_abort(struct scsi_cmnd * cmd)
{
- return qla1280_error_action(cmd, ABORT_COMMAND);
+ int rc;
+
+ spin_lock_irq(cmd->device->host->host_lock);
+ rc = qla1280_error_action(cmd, ABORT_COMMAND);
+ spin_unlock_irq(cmd->device->host->host_lock);
+
+ return rc;
}
/**************************************************************************
static int
qla1280_eh_device_reset(struct scsi_cmnd *cmd)
{
- return qla1280_error_action(cmd, DEVICE_RESET);
+ int rc;
+
+ spin_lock_irq(cmd->device->host->host_lock);
+ rc = qla1280_error_action(cmd, DEVICE_RESET);
+ spin_unlock_irq(cmd->device->host->host_lock);
+
+ return rc;
}
/**************************************************************************
static int
qla1280_eh_bus_reset(struct scsi_cmnd *cmd)
{
- return qla1280_error_action(cmd, BUS_RESET);
+ int rc;
+
+ spin_lock_irq(cmd->device->host->host_lock);
+ rc = qla1280_error_action(cmd, BUS_RESET);
+ spin_unlock_irq(cmd->device->host->host_lock);
+
+ return rc;
}
/**************************************************************************
static int
qla1280_eh_adapter_reset(struct scsi_cmnd *cmd)
{
- return qla1280_error_action(cmd, ADAPTER_RESET);
+ int rc;
+
+ spin_lock_irq(cmd->device->host->host_lock);
+ rc = qla1280_error_action(cmd, ADAPTER_RESET);
+ spin_unlock_irq(cmd->device->host->host_lock);
+
+ return rc;
}
static int
return 0;
}
-#if LINUX_VERSION_CODE < 0x020600
-static int
-qla1280_detect(Scsi_Host_Template *template)
-{
- struct pci_device_id *id = &qla1280_pci_tbl[0];
- struct pci_dev *pdev = NULL;
- int num_hosts = 0;
-
- if (sizeof(struct srb) > sizeof(Scsi_Pointer)) {
- printk(KERN_WARNING
- "qla1280: struct srb too big, aborting\n");
- return 0;
- }
-
- if ((DMA_BIDIRECTIONAL != PCI_DMA_BIDIRECTIONAL) ||
- (DMA_TO_DEVICE != PCI_DMA_TODEVICE) ||
- (DMA_FROM_DEVICE != PCI_DMA_FROMDEVICE) ||
- (DMA_NONE != PCI_DMA_NONE)) {
- printk(KERN_WARNING
- "qla1280: dma direction bits don't match\n");
- return 0;
- }
-
-#ifdef MODULE
- /*
- * If we are called as a module, the qla1280 pointer may not be null
- * and it would point to our bootup string, just like on the lilo
- * command line. IF not NULL, then process this config string with
- * qla1280_setup
- *
- * Boot time Options
- * To add options at boot time add a line to your lilo.conf file like:
- * append="qla1280=verbose,max_tags:{{255,255,255,255},{255,255,255,255}}"
- * which will result in the first four devices on the first two
- * controllers being set to a tagged queue depth of 32.
- */
- if (qla1280)
- qla1280_setup(qla1280);
-#endif
-
- /* First Initialize QLA12160 on PCI Bus 1 Dev 2 */
- while ((pdev = pci_find_device(id->vendor, id->device, pdev))) {
- if (pdev->bus->number == 1 && PCI_SLOT(pdev->devfn) == 2) {
- if (!qla1280_probe_one(pdev, id))
- num_hosts++;
- }
- }
-
- pdev = NULL;
- /* Try and find each different type of adapter we support */
- for (id = &qla1280_pci_tbl[0]; id->device; id++) {
- while ((pdev = pci_find_device(id->vendor, id->device, pdev))) {
- /*
- * skip QLA12160 already initialized on
- * PCI Bus 1 Dev 2 since we already initialized
- * and presented it
- */
- if (id->device == PCI_DEVICE_ID_QLOGIC_ISP12160 &&
- pdev->bus->number == 1 &&
- PCI_SLOT(pdev->devfn) == 2)
- continue;
-
- if (!qla1280_probe_one(pdev, id))
- num_hosts++;
- }
- }
-
- return num_hosts;
-}
-
-/*
- * This looks a bit ugly as we could just pass down host to
- * qla1280_remove_one, but I want to keep qla1280_release purely a wrapper
- * around pci_driver::remove as used from 2.6 onwards.
- */
-static int
-qla1280_release(struct Scsi_Host *host)
-{
- struct scsi_qla_host *ha = (struct scsi_qla_host *)host->hostdata;
-
- qla1280_remove_one(ha->pdev);
- return 0;
-}
-
-static int
-qla1280_biosparam_old(Disk * disk, kdev_t dev, int geom[])
+
+/* disable risc and host interrupts */
+static inline void
+qla1280_disable_intrs(struct scsi_qla_host *ha)
{
- return qla1280_biosparam(disk->device, NULL, disk->capacity, geom);
+ WRT_REG_WORD(&ha->iobase->ictrl, 0);
+ RD_REG_WORD(&ha->iobase->ictrl); /* PCI Posted Write flush */
}
-static int
-qla1280_proc_info_old(char *buffer, char **start, off_t offset, int length,
- int hostno, int inout)
+/* enable risc and host interrupts */
+static inline void
+qla1280_enable_intrs(struct scsi_qla_host *ha)
{
- struct Scsi_Host *host;
-
- for (host = scsi_hostlist; host; host = host->next) {
- if (host->host_no == hostno) {
- return qla1280_proc_info(host, buffer, start,
- offset, length, inout);
- }
- }
-
- return -ESRCH;
+ WRT_REG_WORD(&ha->iobase->ictrl, (ISP_EN_INT | ISP_EN_RISC));
+ RD_REG_WORD(&ha->iobase->ictrl); /* PCI Posted Write flush */
}
-#endif
/**************************************************************************
* qla1280_intr_handler
* Handles the H/W interrupt
**************************************************************************/
static irqreturn_t
-qla1280_intr_handler(int irq, void *dev_id, struct pt_regs *regs)
+qla1280_intr_handler(int irq, void *dev_id)
{
struct scsi_qla_host *ha;
- struct device_reg *reg;
+ struct device_reg __iomem *reg;
u16 data;
int handled = 0;
ENTER_INTR ("qla1280_intr_handler");
ha = (struct scsi_qla_host *)dev_id;
- spin_lock(HOST_LOCK);
+ spin_lock(ha->host->host_lock);
ha->isr_count++;
reg = ha->iobase;
- WRT_REG_WORD(®->ictrl, 0); /* disable our interrupt. */
+ qla1280_disable_intrs(ha);
data = qla1280_debounce_register(®->istatus);
/* Check for pending interrupts. */
if (!list_empty(&ha->done_q))
qla1280_done(ha);
- spin_unlock(HOST_LOCK);
+ spin_unlock(ha->host->host_lock);
- /* enable our interrupt. */
- WRT_REG_WORD(®->ictrl, (ISP_EN_INT | ISP_EN_RISC));
+ qla1280_enable_intrs(ha);
LEAVE_INTR("qla1280_intr_handler");
return IRQ_RETVAL(handled);
uint8_t mr;
uint16_t mb[MAILBOX_REGISTER_COUNT];
struct nvram *nv;
- int is1x160, status;
+ int status, lun;
nv = &ha->nvram;
- if (ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP12160 ||
- ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP10160)
- is1x160 = 1;
- else
- is1x160 = 0;
-
mr = BIT_3 | BIT_2 | BIT_1 | BIT_0;
/* Set Target Parameters. */
mb[0] = MBC_SET_TARGET_PARAMETERS;
- mb[1] = (uint16_t) (bus ? target | BIT_7 : target);
- mb[1] <<= 8;
-
- mb[2] = (nv->bus[bus].target[target].parameter.c << 8);
-
- if (is1x160)
- mb[3] = nv->bus[bus].target[target].flags.flags1x160.sync_offset << 8;
- else
- mb[3] = nv->bus[bus].target[target].flags.flags1x80.sync_offset << 8;
- mb[3] |= nv->bus[bus].target[target].sync_period;
-
- if (is1x160) {
+ mb[1] = (uint16_t)((bus ? target | BIT_7 : target) << 8);
+ mb[2] = nv->bus[bus].target[target].parameter.renegotiate_on_error << 8;
+ mb[2] |= nv->bus[bus].target[target].parameter.stop_queue_on_check << 9;
+ mb[2] |= nv->bus[bus].target[target].parameter.auto_request_sense << 10;
+ mb[2] |= nv->bus[bus].target[target].parameter.tag_queuing << 11;
+ mb[2] |= nv->bus[bus].target[target].parameter.enable_sync << 12;
+ mb[2] |= nv->bus[bus].target[target].parameter.enable_wide << 13;
+ mb[2] |= nv->bus[bus].target[target].parameter.parity_checking << 14;
+ mb[2] |= nv->bus[bus].target[target].parameter.disconnect_allowed << 15;
+
+ if (IS_ISP1x160(ha)) {
mb[2] |= nv->bus[bus].target[target].ppr_1x160.flags.enable_ppr << 5;
- mb[6] = nv->bus[bus].target[target].ppr_1x160.flags.ppr_options << 8;
- mb[6] |= nv->bus[bus].target[target].ppr_1x160.flags.ppr_bus_width;
+ mb[3] = (nv->bus[bus].target[target].flags.flags1x160.sync_offset << 8);
+ mb[6] = (nv->bus[bus].target[target].ppr_1x160.flags.ppr_options << 8) |
+ nv->bus[bus].target[target].ppr_1x160.flags.ppr_bus_width;
mr |= BIT_6;
+ } else {
+ mb[3] = (nv->bus[bus].target[target].flags.flags1x80.sync_offset << 8);
}
+ mb[3] |= nv->bus[bus].target[target].sync_period;
- status = qla1280_mailbox_command(ha, mr, &mb[0]);
+ status = qla1280_mailbox_command(ha, mr, mb);
+
+ /* Set Device Queue Parameters. */
+ for (lun = 0; lun < MAX_LUNS; lun++) {
+ mb[0] = MBC_SET_DEVICE_QUEUE;
+ mb[1] = (uint16_t)((bus ? target | BIT_7 : target) << 8);
+ mb[1] |= lun;
+ mb[2] = nv->bus[bus].max_queue_depth;
+ mb[3] = nv->bus[bus].target[target].execution_throttle;
+ status |= qla1280_mailbox_command(ha, 0x0f, mb);
+ }
if (status)
printk(KERN_WARNING "scsi(%ld:%i:%i): "
scsi_adjust_queue_depth(device, 0, default_depth);
}
-#if LINUX_VERSION_CODE > 0x020500
- nv->bus[bus].target[target].parameter.f.enable_sync = device->sdtr;
- nv->bus[bus].target[target].parameter.f.enable_wide = device->wdtr;
+ nv->bus[bus].target[target].parameter.enable_sync = device->sdtr;
+ nv->bus[bus].target[target].parameter.enable_wide = device->wdtr;
nv->bus[bus].target[target].ppr_1x160.flags.enable_ppr = device->ppr;
-#endif
if (driver_setup.no_sync ||
(driver_setup.sync_mask &&
(~driver_setup.sync_mask & (1 << target))))
- nv->bus[bus].target[target].parameter.f.enable_sync = 0;
+ nv->bus[bus].target[target].parameter.enable_sync = 0;
if (driver_setup.no_wide ||
(driver_setup.wide_mask &&
(~driver_setup.wide_mask & (1 << target))))
- nv->bus[bus].target[target].parameter.f.enable_wide = 0;
- if (ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP12160 ||
- ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP10160) {
+ nv->bus[bus].target[target].parameter.enable_wide = 0;
+ if (IS_ISP1x160(ha)) {
if (driver_setup.no_ppr ||
(driver_setup.ppr_mask &&
(~driver_setup.ppr_mask & (1 << target))))
nv->bus[bus].target[target].ppr_1x160.flags.enable_ppr = 0;
}
- spin_lock_irqsave(HOST_LOCK, flags);
- if (nv->bus[bus].target[target].parameter.f.enable_sync)
+ spin_lock_irqsave(ha->host->host_lock, flags);
+ if (nv->bus[bus].target[target].parameter.enable_sync)
status = qla1280_set_target_parameters(ha, bus, target);
qla1280_get_target_parameters(ha, device);
- spin_unlock_irqrestore(HOST_LOCK, flags);
+ spin_unlock_irqrestore(ha->host->host_lock, flags);
return status;
}
-#if LINUX_VERSION_CODE < 0x020545
-/**************************************************************************
- * qla1280_select_queue_depth
- *
- * Sets the queue depth for each SCSI device hanging off the input
- * host adapter. We use a queue depth of 2 for devices that do not
- * support tagged queueing.
- **************************************************************************/
-static void
-qla1280_select_queue_depth(struct Scsi_Host *host, struct scsi_device *sdev_q)
-{
- struct scsi_qla_host *ha = (struct scsi_qla_host *)host->hostdata;
- struct scsi_device *sdev;
-
- ENTER("qla1280_select_queue_depth");
- for (sdev = sdev_q; sdev; sdev = sdev->next)
- if (sdev->host == host)
- qla1280_slave_configure(sdev);
-
- if (sdev_q)
- qla1280_check_for_dead_scsi_bus(ha, sdev_q->channel);
- LEAVE("qla1280_select_queue_depth");
-}
-#endif
/*
* qla1280_done
*
* Input:
* ha = adapter block pointer.
- * done_q = done queue.
*/
static void
qla1280_done(struct scsi_qla_host *ha)
/* Release memory used for this I/O */
if (cmd->use_sg) {
- dprintk(3, "S/G unmap_sg cmd=%p\n", cmd);
-
pci_unmap_sg(ha->pdev, cmd->request_buffer,
- cmd->use_sg, cmd->sc_data_direction);
+ cmd->use_sg, cmd->sc_data_direction);
} else if (cmd->request_bufflen) {
- /*dprintk(1, "No S/G unmap_single cmd=%x saved_dma_handle=%lx\n",
- cmd, sp->saved_dma_handle); */
-
- pci_unmap_page(ha->pdev, sp->saved_dma_handle,
- cmd->request_bufflen, cmd->sc_data_direction);
+ pci_unmap_single(ha->pdev, sp->saved_dma_handle,
+ cmd->request_bufflen,
+ cmd->sc_data_direction);
}
/* Call the mid-level driver interrupt handler */
CMD_HANDLE(sp->cmd) = (unsigned char *)INVALID_HANDLE;
ha->actthreads--;
-#if LINUX_VERSION_CODE < 0x020500
- if (cmd->cmnd[0] == INQUIRY)
- qla1280_get_target_options(cmd, ha);
-#endif
(*(cmd)->scsi_done)(cmd);
if(sp->wait != NULL)
int host_status = DID_ERROR;
uint16_t comp_status = le16_to_cpu(sts->comp_status);
uint16_t state_flags = le16_to_cpu(sts->state_flags);
- uint16_t residual_length = le16_to_cpu(sts->residual_length);
+ uint32_t residual_length = le32_to_cpu(sts->residual_length);
uint16_t scsi_status = le16_to_cpu(sts->scsi_status);
#if DEBUG_QLA1280_INTR
static char *reason[] = {
case CS_DATA_OVERRUN:
dprintk(2, "Data overrun 0x%x\n", residual_length);
- dprintk(2, "qla1280_isr: response packet data\n");
+ dprintk(2, "qla1280_return_status: response packet data\n");
qla1280_dump_buffer(2, (char *)sts, RESPONSE_ENTRY_SIZE);
host_status = DID_ERROR;
break;
"scsi: Underflow detected - retrying "
"command.\n");
host_status = DID_ERROR;
- } else
+ } else {
+ cp->resid = residual_length;
host_status = DID_OK;
+ }
break;
default:
/* QLogic ISP1280 Hardware Support Functions. */
/****************************************************************************/
- /*
- * qla2100_enable_intrs
- * qla2100_disable_intrs
- *
- * Input:
- * ha = adapter block pointer.
- *
- * Returns:
- * None
- */
-static inline void
-qla1280_enable_intrs(struct scsi_qla_host *ha)
-{
- struct device_reg *reg;
-
- reg = ha->iobase;
- /* enable risc and host interrupts */
- WRT_REG_WORD(®->ictrl, (ISP_EN_INT | ISP_EN_RISC));
- RD_REG_WORD(®->ictrl); /* PCI Posted Write flush */
- ha->flags.ints_enabled = 1;
-}
-
-static inline void
-qla1280_disable_intrs(struct scsi_qla_host *ha)
-{
- struct device_reg *reg;
-
- reg = ha->iobase;
- /* disable risc and host interrupts */
- WRT_REG_WORD(®->ictrl, 0);
- RD_REG_WORD(®->ictrl); /* PCI Posted Write flush */
- ha->flags.ints_enabled = 0;
-}
-
/*
* qla1280_initialize_adapter
* Initialize board.
static int __devinit
qla1280_initialize_adapter(struct scsi_qla_host *ha)
{
- struct device_reg *reg;
+ struct device_reg __iomem *reg;
int status;
int bus;
-#if LINUX_VERSION_CODE > 0x020500
unsigned long flags;
-#endif
ENTER("qla1280_initialize_adapter");
ha->flags.reset_active = 0;
ha->flags.abort_isp_active = 0;
- ha->flags.ints_enabled = 0;
#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2)
if (ia64_platform_is("sn2")) {
- int count1, count2;
- int c;
-
- count1 = 3;
- count2 = 3;
printk(KERN_INFO "scsi(%li): Enabling SN2 PCI DMA "
"dual channel lockup workaround\n", ha->host_no);
- if ((c = snia_pcibr_rrb_alloc(ha->pdev, &count1, &count2)) < 0)
- printk(KERN_ERR "scsi(%li): Unable to allocate SN2 "
- "virtual DMA channels\n", ha->host_no);
- else
- ha->flags.use_pci_vchannel = 1;
-
+ ha->flags.use_pci_vchannel = 1;
driver_setup.no_nvram = 1;
}
#endif
+ /* TODO: implement support for the 1040 nvram format */
+ if (IS_ISP1040(ha))
+ driver_setup.no_nvram = 1;
+
dprintk(1, "Configure PCI space for adapter...\n");
reg = ha->iobase;
"NVRAM\n");
}
-#if LINUX_VERSION_CODE >= 0x020500
/*
* It's necessary to grab the spin here as qla1280_mailbox_command
* needs to be able to drop the lock unconditionally to wait
* for completion.
- * In 2.4 ->detect is called with the io_request_lock held.
*/
- spin_lock_irqsave(HOST_LOCK, flags);
-#endif
- /* If firmware needs to be loaded */
- if (qla1280_isp_firmware(ha)) {
- if (!(status = qla1280_chip_diag(ha))) {
- status = qla1280_setup_chip(ha);
- }
- } else {
- printk(KERN_ERR "scsi(%li): isp_firmware() failed!\n",
- ha->host_no);
- status = 1;
- }
+ spin_lock_irqsave(ha->host->host_lock, flags);
+ status = qla1280_load_firmware(ha);
if (status) {
printk(KERN_ERR "scsi(%li): initialize: pci probe failed!\n",
ha->host_no);
dprintk(1, "scsi(%ld): Configure NVRAM parameters\n", ha->host_no);
qla1280_nvram_config(ha);
- if (!ha->flags.disable_host_adapter && !qla1280_init_rings(ha)) {
- /* Issue SCSI reset. */
- /* dg 03/13 if we can't reset twice then bus is dead */
- for (bus = 0; bus < ha->ports; bus++) {
- if (!ha->bus_settings[bus].disable_scsi_reset){
- if (qla1280_bus_reset(ha, bus)) {
- if (qla1280_bus_reset(ha, bus)) {
- ha->bus_settings[bus].scsi_bus_dead = 1;
- }
- }
- }
- }
-
- /*
- * qla1280_bus_reset() will take care of issueing markers,
- * no need to do that here as well!
- */
-#if 0
- /* Issue marker command. */
- ha->flags.reset_marker = 0;
- for (bus = 0; bus < ha->ports; bus++) {
- ha->bus_settings[bus].reset_marker = 0;
- qla1280_marker(ha, bus, 0, 0, MK_SYNC_ALL);
- }
-#endif
-
- ha->flags.online = 1;
- } else
+ if (ha->flags.disable_host_adapter) {
status = 1;
+ goto out;
+ }
- out:
-#if LINUX_VERSION_CODE >= 0x020500
- spin_unlock_irqrestore(HOST_LOCK, flags);
-#endif
+ status = qla1280_init_rings(ha);
if (status)
- dprintk(2, "qla1280_initialize_adapter: **** FAILED ****\n");
-
- LEAVE("qla1280_initialize_adapter");
- return status;
-}
-
-
-/*
- * ISP Firmware Test
- * Checks if present version of RISC firmware is older than
- * driver firmware.
- *
- * Input:
- * ha = adapter block pointer.
- *
- * Returns:
- * 0 = firmware does not need to be loaded.
- */
-static int
-qla1280_isp_firmware(struct scsi_qla_host *ha)
-{
- struct nvram *nv = (struct nvram *) ha->response_ring;
- int status = 0; /* dg 2/27 always loads RISC */
- uint16_t mb[MAILBOX_REGISTER_COUNT];
-
- ENTER("qla1280_isp_firmware");
-
- dprintk(1, "scsi(%li): Determining if RISC is loaded\n", ha->host_no);
+ goto out;
- /* Bad NVRAM data, load RISC code. */
- if (!ha->nvram_valid) {
- ha->flags.disable_risc_code_load = 0;
- } else
- ha->flags.disable_risc_code_load =
- nv->cntr_flags_1.disable_loading_risc_code;
-
- if (ha->flags.disable_risc_code_load) {
- dprintk(3, "qla1280_isp_firmware: Telling RISC to verify "
- "checksum of loaded BIOS code.\n");
-
- /* Verify checksum of loaded RISC code. */
- mb[0] = MBC_VERIFY_CHECKSUM;
- /* mb[1] = ql12_risc_code_addr01; */
- mb[1] = *ql1280_board_tbl[ha->devnum].fwstart;
-
- if (!(status =
- qla1280_mailbox_command(ha, BIT_1 | BIT_0, &mb[0]))) {
- /* Start firmware execution. */
- dprintk(3, "qla1280_isp_firmware: Startng F/W "
- "execution.\n");
-
- mb[0] = MBC_EXECUTE_FIRMWARE;
- /* mb[1] = ql12_risc_code_addr01; */
- mb[1] = *ql1280_board_tbl[ha->devnum].fwstart;
- qla1280_mailbox_command(ha, BIT_1 | BIT_0, &mb[0]);
- } else
- printk(KERN_INFO "qla1280: RISC checksum failed.\n");
- } else {
- dprintk(1, "qla1280: NVRAM configured to load RISC load.\n");
- status = 1;
+ /* Issue SCSI reset, if we can't reset twice then bus is dead */
+ for (bus = 0; bus < ha->ports; bus++) {
+ if (!ha->bus_settings[bus].disable_scsi_reset &&
+ qla1280_bus_reset(ha, bus) &&
+ qla1280_bus_reset(ha, bus))
+ ha->bus_settings[bus].scsi_bus_dead = 1;
}
+ ha->flags.online = 1;
+ out:
+ spin_unlock_irqrestore(ha->host->host_lock, flags);
+
if (status)
- dprintk(2, "qla1280_isp_firmware: **** Load RISC code ****\n");
+ dprintk(2, "qla1280_initialize_adapter: **** FAILED ****\n");
- LEAVE("qla1280_isp_firmware");
+ LEAVE("qla1280_initialize_adapter");
return status;
}
qla1280_chip_diag(struct scsi_qla_host *ha)
{
uint16_t mb[MAILBOX_REGISTER_COUNT];
- struct device_reg *reg = ha->iobase;
+ struct device_reg __iomem *reg = ha->iobase;
int status = 0;
int cnt;
uint16_t data;
-
dprintk(3, "qla1280_chip_diag: testing device at 0x%p \n", ®->id_l);
dprintk(1, "scsi(%ld): Verifying chip\n", ha->host_no);
/* Soft reset chip and wait for it to finish. */
WRT_REG_WORD(®->ictrl, ISP_RESET);
+
/*
* We can't do a traditional PCI write flush here by reading
* back the register. The card will not respond once the reset
data = RD_REG_WORD(®->ictrl);
}
- if (cnt) {
- /* Reset register cleared by chip reset. */
- dprintk(3, "qla1280_chip_diag: reset register cleared by "
- "chip reset\n");
+ if (!cnt)
+ goto fail;
- WRT_REG_WORD(®->cfg_1, 0);
+ /* Reset register cleared by chip reset. */
+ dprintk(3, "qla1280_chip_diag: reset register cleared by chip reset\n");
- /* Reset RISC and disable BIOS which
- allows RISC to execute out of RAM. */
-#if 0
- WRT_REG_WORD(®->host_cmd, HC_RESET_RISC);
- RD_REG_WORD(®->id_l); /* Flush PCI write */
- WRT_REG_WORD(®->host_cmd, HC_RELEASE_RISC);
- RD_REG_WORD(®->id_l); /* Flush PCI write */
- WRT_REG_WORD(®->host_cmd, HC_DISABLE_BIOS);
-#else
- WRT_REG_WORD(®->host_cmd, HC_RESET_RISC |
- HC_RELEASE_RISC | HC_DISABLE_BIOS);
-#endif
- RD_REG_WORD(®->id_l); /* Flush PCI write */
- data = qla1280_debounce_register(®->mailbox0);
- /*
- * I *LOVE* this code!
- */
- for (cnt = 1000000; cnt && data == MBS_BUSY; cnt--) {
- udelay(5);
- data = RD_REG_WORD(®->mailbox0);
- }
+ WRT_REG_WORD(®->cfg_1, 0);
- if (cnt) {
- /* Check product ID of chip */
- dprintk(3, "qla1280_chip_diag: Checking product "
- "ID of chip\n");
-
- if (RD_REG_WORD(®->mailbox1) != PROD_ID_1 ||
- (RD_REG_WORD(®->mailbox2) != PROD_ID_2 &&
- RD_REG_WORD(®->mailbox2) != PROD_ID_2a) ||
- RD_REG_WORD(®->mailbox3) != PROD_ID_3 ||
- RD_REG_WORD(®->mailbox4) != PROD_ID_4) {
- printk(KERN_INFO "qla1280: Wrong product ID = "
- "0x%x,0x%x,0x%x,0x%x\n",
- RD_REG_WORD(®->mailbox1),
- RD_REG_WORD(®->mailbox2),
- RD_REG_WORD(®->mailbox3),
- RD_REG_WORD(®->mailbox4));
- status = 1;
- } else {
- /*
- * Enable ints early!!!
- */
- qla1280_enable_intrs(ha);
-
- dprintk(1, "qla1280_chip_diag: Checking "
- "mailboxes of chip\n");
- /* Wrap Incoming Mailboxes Test. */
- mb[0] = MBC_MAILBOX_REGISTER_TEST;
- mb[1] = 0xAAAA;
- mb[2] = 0x5555;
- mb[3] = 0xAA55;
- mb[4] = 0x55AA;
- mb[5] = 0xA5A5;
- mb[6] = 0x5A5A;
- mb[7] = 0x2525;
- if (!(status = qla1280_mailbox_command(ha,
- 0xff,
- &mb
- [0]))) {
- if (mb[1] != 0xAAAA ||
- mb[2] != 0x5555 ||
- mb[3] != 0xAA55 ||
- mb[4] != 0x55AA ||
- mb[5] != 0xA5A5 ||
- mb[6] != 0x5A5A ||
- mb[7] != 0x2525) {
- status = 1;
- printk(KERN_INFO "qla1280: "
- "Failed mbox check\n");
- }
- }
- }
- } else
- status = 1;
- } else
- status = 1;
+ /* Reset RISC and disable BIOS which
+ allows RISC to execute out of RAM. */
+ WRT_REG_WORD(®->host_cmd, HC_RESET_RISC |
+ HC_RELEASE_RISC | HC_DISABLE_BIOS);
+
+ RD_REG_WORD(®->id_l); /* Flush PCI write */
+ data = qla1280_debounce_register(®->mailbox0);
+
+ /*
+ * I *LOVE* this code!
+ */
+ for (cnt = 1000000; cnt && data == MBS_BUSY; cnt--) {
+ udelay(5);
+ data = RD_REG_WORD(®->mailbox0);
+ }
+ if (!cnt)
+ goto fail;
+
+ /* Check product ID of chip */
+ dprintk(3, "qla1280_chip_diag: Checking product ID of chip\n");
+
+ if (RD_REG_WORD(®->mailbox1) != PROD_ID_1 ||
+ (RD_REG_WORD(®->mailbox2) != PROD_ID_2 &&
+ RD_REG_WORD(®->mailbox2) != PROD_ID_2a) ||
+ RD_REG_WORD(®->mailbox3) != PROD_ID_3 ||
+ RD_REG_WORD(®->mailbox4) != PROD_ID_4) {
+ printk(KERN_INFO "qla1280: Wrong product ID = "
+ "0x%x,0x%x,0x%x,0x%x\n",
+ RD_REG_WORD(®->mailbox1),
+ RD_REG_WORD(®->mailbox2),
+ RD_REG_WORD(®->mailbox3),
+ RD_REG_WORD(®->mailbox4));
+ goto fail;
+ }
+
+ /*
+ * Enable ints early!!!
+ */
+ qla1280_enable_intrs(ha);
+
+ dprintk(1, "qla1280_chip_diag: Checking mailboxes of chip\n");
+ /* Wrap Incoming Mailboxes Test. */
+ mb[0] = MBC_MAILBOX_REGISTER_TEST;
+ mb[1] = 0xAAAA;
+ mb[2] = 0x5555;
+ mb[3] = 0xAA55;
+ mb[4] = 0x55AA;
+ mb[5] = 0xA5A5;
+ mb[6] = 0x5A5A;
+ mb[7] = 0x2525;
+
+ status = qla1280_mailbox_command(ha, 0xff, mb);
if (status)
- dprintk(2, "qla1280_chip_diag: **** FAILED ****\n");
- else
- dprintk(3, "qla1280_chip_diag: exiting normally\n");
+ goto fail;
+
+ if (mb[1] != 0xAAAA || mb[2] != 0x5555 || mb[3] != 0xAA55 ||
+ mb[4] != 0x55AA || mb[5] != 0xA5A5 || mb[6] != 0x5A5A ||
+ mb[7] != 0x2525) {
+ printk(KERN_INFO "qla1280: Failed mbox check\n");
+ goto fail;
+ }
+	dprintk(3, "qla1280_chip_diag: exiting normally\n");
+	return 0;
+ fail:
+	/*
+	 * The reset-timeout, wrong-product-ID and mailbox wrap-test
+	 * failure paths jump here without any mailbox error having set
+	 * 'status'; force a non-zero return so qla1280_load_firmware()
+	 * sees the failure instead of a bogus success.
+	 */
+	if (!status)
+		status = 1;
+	dprintk(2, "qla1280_chip_diag: **** FAILED ****\n");
return status;
}
-/*
- * Setup chip
- * Load and start RISC firmware.
- *
- * Input:
- * ha = adapter block pointer.
- *
- * Returns:
- * 0 = success.
- */
-#define DUMP_IT_BACK 0 /* for debug of RISC loading */
static int
-qla1280_setup_chip(struct scsi_qla_host *ha)
+qla1280_load_firmware_pio(struct scsi_qla_host *ha)
{
- int status = 0;
- uint16_t risc_address;
- uint16_t *risc_code_address;
- int risc_code_size;
- uint16_t mb[MAILBOX_REGISTER_COUNT];
- uint16_t cnt;
- int num, i;
-#if DUMP_IT_BACK
- uint8_t *sp;
- uint8_t *tbuf;
- dma_addr_t p_tbuf;
-#endif
+	uint16_t risc_address, *risc_code_address, risc_code_size;
+	uint16_t mb[MAILBOX_REGISTER_COUNT], i;
+	int err;
-	ENTER("qla1280_setup_chip");
+	/* Load RISC code. */
+	risc_address = *ql1280_board_tbl[ha->devnum].fwstart;
+	risc_code_address = ql1280_board_tbl[ha->devnum].fwcode;
+	risc_code_size = *ql1280_board_tbl[ha->devnum].fwlen;
+
+	/*
+	 * Download the firmware image into RISC RAM one word at a time
+	 * via MBC_WRITE_RAM_WORD.  This path is used for ISP1020/1040
+	 * boards (see qla1280_load_firmware); later chips use the DMA
+	 * download path instead.
+	 */
+	for (i = 0; i < risc_code_size; i++) {
+		mb[0] = MBC_WRITE_RAM_WORD;
+		mb[1] = risc_address + i;
+		mb[2] = risc_code_address[i];
+
+		err = qla1280_mailbox_command(ha, BIT_0 | BIT_1 | BIT_2, mb);
+		if (err) {
+			printk(KERN_ERR "scsi(%li): Failed to load firmware\n",
+					ha->host_no);
+			return err;
+		}
+	}
-	dprintk(1, "scsi(%ld): Setup chip\n", ha->host_no);
+	return 0;
+}
+#define DUMP_IT_BACK 0 /* for debug of RISC loading */
+static int
+qla1280_load_firmware_dma(struct scsi_qla_host *ha)
+{
+ uint16_t risc_address, *risc_code_address, risc_code_size;
+ uint16_t mb[MAILBOX_REGISTER_COUNT], cnt;
+ int err = 0, num, i;
#if DUMP_IT_BACK
- /* get consistent memory allocated for setup_chip */
+ uint8_t *sp, *tbuf;
+ dma_addr_t p_tbuf;
+
tbuf = pci_alloc_consistent(ha->pdev, 8000, &p_tbuf);
+ if (!tbuf)
+ return -ENOMEM;
#endif
/* Load RISC code. */
risc_address = *ql1280_board_tbl[ha->devnum].fwstart;
risc_code_address = ql1280_board_tbl[ha->devnum].fwcode;
- risc_code_size = (int) *ql1280_board_tbl[ha->devnum].fwlen;
+ risc_code_size = *ql1280_board_tbl[ha->devnum].fwlen;
- dprintk(1, "qla1280_setup_chip: DMA RISC code (%i) words\n",
- risc_code_size);
+ dprintk(1, "%s: DMA RISC code (%i) words\n",
+ __FUNCTION__, risc_code_size);
num = 0;
- while (risc_code_size > 0 && !status) {
+ while (risc_code_size > 0) {
int warn __attribute__((unused)) = 0;
cnt = 2000 >> 1;
"%d,%d(0x%x)\n",
risc_code_address, cnt, num, risc_address);
for(i = 0; i < cnt; i++)
- ((uint16_t *)ha->request_ring)[i] =
+ ((__le16 *)ha->request_ring)[i] =
cpu_to_le16(risc_code_address[i]);
mb[0] = MBC_LOAD_RAM;
mb[2] = (ha->request_dma >> 16) & 0xffff;
mb[7] = pci_dma_hi32(ha->request_dma) & 0xffff;
mb[6] = pci_dma_hi32(ha->request_dma) >> 16;
- dprintk(2, "qla1280_setup_chip: op=%d 0x%p = 0x%4x,0x%4x,"
- "0x%4x,0x%4x\n", mb[0], (void *)(long)ha->request_dma,
- mb[6], mb[7], mb[2], mb[3]);
- if ((status = qla1280_mailbox_command(ha, BIT_4 | BIT_3 |
- BIT_2 | BIT_1 | BIT_0,
- &mb[0]))) {
+ dprintk(2, "%s: op=%d 0x%p = 0x%4x,0x%4x,0x%4x,0x%4x\n",
+ __FUNCTION__, mb[0],
+ (void *)(long)ha->request_dma,
+ mb[6], mb[7], mb[2], mb[3]);
+ err = qla1280_mailbox_command(ha, BIT_4 | BIT_3 | BIT_2 |
+ BIT_1 | BIT_0, mb);
+ if (err) {
printk(KERN_ERR "scsi(%li): Failed to load partial "
"segment of f\n", ha->host_no);
- break;
+ goto out;
}
#if DUMP_IT_BACK
mb[7] = pci_dma_hi32(p_tbuf) & 0xffff;
mb[6] = pci_dma_hi32(p_tbuf) >> 16;
- if ((status = qla1280_mailbox_command(ha,
- BIT_4 | BIT_3 | BIT_2 |
- BIT_1 | BIT_0,
- &mb[0]))) {
+ err = qla1280_mailbox_command(ha, BIT_4 | BIT_3 | BIT_2 |
+ BIT_1 | BIT_0, mb);
+ if (err) {
printk(KERN_ERR
"Failed to dump partial segment of f/w\n");
- break;
+ goto out;
}
sp = (uint8_t *)ha->request_ring;
for (i = 0; i < (cnt << 1); i++) {
if (tbuf[i] != sp[i] && warn++ < 10) {
- printk(KERN_ERR "qla1280_setup_chip: FW "
- "compare error @ byte(0x%x) loop#=%x\n",
- i, num);
- printk(KERN_ERR "setup_chip: FWbyte=%x "
- "FWfromChip=%x\n", sp[i], tbuf[i]);
+ printk(KERN_ERR "%s: FW compare error @ "
+ "byte(0x%x) loop#=%x\n",
+ __FUNCTION__, i, num);
+ printk(KERN_ERR "%s: FWbyte=%x "
+ "FWfromChip=%x\n",
+ __FUNCTION__, sp[i], tbuf[i]);
/*break; */
}
}
num++;
}
- /* Verify checksum of loaded RISC code. */
- if (!status) {
- dprintk(1, "qla1280_setup_chip: Verifying checksum of "
- "loaded RISC code.\n");
- mb[0] = MBC_VERIFY_CHECKSUM;
- /* mb[1] = ql12_risc_code_addr01; */
- mb[1] = *ql1280_board_tbl[ha->devnum].fwstart;
-
- if (!(status =
- qla1280_mailbox_command(ha, BIT_1 | BIT_0, &mb[0]))) {
- /* Start firmware execution. */
- dprintk(1,
- "qla1280_setup_chip: start firmware running.\n");
- mb[0] = MBC_EXECUTE_FIRMWARE;
- mb[1] = *ql1280_board_tbl[ha->devnum].fwstart;
- qla1280_mailbox_command(ha, BIT_1 | BIT_0, &mb[0]);
- } else
- printk(KERN_ERR "scsi(%li): qla1280_setup_chip: "
- "Failed checksum\n", ha->host_no);
- }
-
+ out:
#if DUMP_IT_BACK
- /* free consistent memory allocated for setup_chip */
pci_free_consistent(ha->pdev, 8000, tbuf, p_tbuf);
#endif
+ return err;
+}
- if (status)
- dprintk(2, "qla1280_setup_chip: **** FAILED ****\n");
+/*
+ * qla1280_start_firmware
+ *      Ask the RISC to verify the checksum of the downloaded firmware
+ *      and, if the checksum passes, start it executing.
+ *
+ * Returns 0 on success, the non-zero mailbox status on failure.
+ */
+static int
+qla1280_start_firmware(struct scsi_qla_host *ha)
+{
+	uint16_t mb[MAILBOX_REGISTER_COUNT];
+	int err;
-	LEAVE("qla1280_setup_chip");
-	return status;
+	dprintk(1, "%s: Verifying checksum of loaded RISC code.\n",
+			__FUNCTION__);
+
+	/* Verify checksum of loaded RISC code. */
+	mb[0] = MBC_VERIFY_CHECKSUM;
+	/* mb[1] = ql12_risc_code_addr01; */
+	mb[1] = *ql1280_board_tbl[ha->devnum].fwstart;
+	err = qla1280_mailbox_command(ha, BIT_1 | BIT_0, mb);
+	if (err) {
+		printk(KERN_ERR "scsi(%li): RISC checksum failed.\n", ha->host_no);
+		return err;
+	}
+
+	/* Start firmware execution. */
+	dprintk(1, "%s: start firmware running.\n", __FUNCTION__);
+	mb[0] = MBC_EXECUTE_FIRMWARE;
+	mb[1] = *ql1280_board_tbl[ha->devnum].fwstart;
+	err = qla1280_mailbox_command(ha, BIT_1 | BIT_0, mb);
+	if (err) {
+		printk(KERN_ERR "scsi(%li): Failed to start firmware\n",
+				ha->host_no);
+	}
+
+	return err;
+}
+
+/*
+ * qla1280_load_firmware
+ *      Run the chip diagnostic, download the RISC firmware image
+ *      (programmed I/O on ISP1020/1040, DMA on later chips) and
+ *      kick it off.
+ *
+ * Returns 0 on success, non-zero on any failure.
+ */
+static int
+qla1280_load_firmware(struct scsi_qla_host *ha)
+{
+	int err;
+
+	err = qla1280_chip_diag(ha);
+	if (err)
+		goto out;
+	if (IS_ISP1040(ha))
+		err = qla1280_load_firmware_pio(ha);
+	else
+		err = qla1280_load_firmware_dma(ha);
+	if (err)
+		goto out;
+	err = qla1280_start_firmware(ha);
+ out:
+	return err;
+}
/*
/* mb[0] = MBC_INIT_REQUEST_QUEUE; */
mb[0] = MBC_INIT_REQUEST_QUEUE_A64;
mb[1] = REQUEST_ENTRY_CNT;
- mb[3] = ha->request_dma & 0xffff;
- mb[2] = (ha->request_dma >> 16) & 0xffff;
- mb[4] = 0;
- mb[7] = pci_dma_hi32(ha->request_dma) & 0xffff;
- mb[6] = pci_dma_hi32(ha->request_dma) >> 16;
- if (!(status = qla1280_mailbox_command(ha, BIT_7 | BIT_6 | BIT_4 |
- BIT_3 | BIT_2 | BIT_1 | BIT_0,
- &mb[0]))) {
- /* Initialize response queue. */
- ha->response_ring_ptr = ha->response_ring;
- ha->rsp_ring_index = 0;
- /* mb[0] = MBC_INIT_RESPONSE_QUEUE; */
- mb[0] = MBC_INIT_RESPONSE_QUEUE_A64;
- mb[1] = RESPONSE_ENTRY_CNT;
- mb[3] = ha->response_dma & 0xffff;
- mb[2] = (ha->response_dma >> 16) & 0xffff;
- mb[5] = 0;
- mb[7] = pci_dma_hi32(ha->response_dma) & 0xffff;
- mb[6] = pci_dma_hi32(ha->response_dma) >> 16;
- status = qla1280_mailbox_command(ha, BIT_7 | BIT_6 | BIT_5 |
- BIT_3 | BIT_2 | BIT_1 | BIT_0,
- &mb[0]);
- }
-
- if (status)
- dprintk(2, "qla1280_init_rings: **** FAILED ****\n");
-
- LEAVE("qla1280_init_rings");
- return status;
-}
-
-/*
- * NVRAM configuration.
- *
- * Input:
- * ha = adapter block pointer.
- * ha->request_ring = request ring virtual address
- *
- * Output:
- * host adapters parameters in host adapter block
- *
- * Returns:
- * 0 = success.
- */
-static int
-qla1280_nvram_config(struct scsi_qla_host *ha)
-{
- struct device_reg *reg = ha->iobase;
- struct nvram *nv;
- int is1x160, status = 0;
- int bus, target, lun;
- uint16_t mb[MAILBOX_REGISTER_COUNT];
- uint16_t mask;
-
- ENTER("qla1280_nvram_config");
-
- if (ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP12160 ||
- ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP10160)
- is1x160 = 1;
- else
- is1x160 = 0;
-
- nv = &ha->nvram;
- if (!ha->nvram_valid) {
- dprintk(1, "Using defaults for NVRAM: \n");
- memset(nv, 0, sizeof(struct nvram));
-
- /* nv->cntr_flags_1.disable_loading_risc_code = 1; */
- nv->firmware_feature.f.enable_fast_posting = 1;
- nv->firmware_feature.f.disable_synchronous_backoff = 1;
-
- nv->termination.f.scsi_bus_0_control = 3;
- nv->termination.f.scsi_bus_1_control = 3;
- nv->termination.f.auto_term_support = 1;
-
- /*
- * Set default FIFO magic - What appropriate values
- * would be here is unknown. This is what I have found
- * testing with 12160s.
- * Now, I would love the magic decoder ring for this one,
- * the header file provided by QLogic seems to be bogus
- * or incomplete at best.
- */
- nv->isp_config.c = 0x44;
+ mb[3] = ha->request_dma & 0xffff;
+ mb[2] = (ha->request_dma >> 16) & 0xffff;
+ mb[4] = 0;
+ mb[7] = pci_dma_hi32(ha->request_dma) & 0xffff;
+ mb[6] = pci_dma_hi32(ha->request_dma) >> 16;
+ if (!(status = qla1280_mailbox_command(ha, BIT_7 | BIT_6 | BIT_4 |
+ BIT_3 | BIT_2 | BIT_1 | BIT_0,
+ &mb[0]))) {
+ /* Initialize response queue. */
+ ha->response_ring_ptr = ha->response_ring;
+ ha->rsp_ring_index = 0;
+ /* mb[0] = MBC_INIT_RESPONSE_QUEUE; */
+ mb[0] = MBC_INIT_RESPONSE_QUEUE_A64;
+ mb[1] = RESPONSE_ENTRY_CNT;
+ mb[3] = ha->response_dma & 0xffff;
+ mb[2] = (ha->response_dma >> 16) & 0xffff;
+ mb[5] = 0;
+ mb[7] = pci_dma_hi32(ha->response_dma) & 0xffff;
+ mb[6] = pci_dma_hi32(ha->response_dma) >> 16;
+ status = qla1280_mailbox_command(ha, BIT_7 | BIT_6 | BIT_5 |
+ BIT_3 | BIT_2 | BIT_1 | BIT_0,
+ &mb[0]);
+ }
- if (is1x160)
- nv->isp_parameter = 0x01;
+ if (status)
+ dprintk(2, "qla1280_init_rings: **** FAILED ****\n");
- for (bus = 0; bus < MAX_BUSES; bus++) {
- nv->bus[bus].config_1.initiator_id = 7;
- nv->bus[bus].bus_reset_delay = 5;
- /* 8 = 5.0 clocks */
- nv->bus[bus].config_2.async_data_setup_time = 8;
- nv->bus[bus].config_2.req_ack_active_negation = 1;
- nv->bus[bus].config_2.data_line_active_negation = 1;
- nv->bus[bus].selection_timeout = 250;
- nv->bus[bus].max_queue_depth = 256;
+ LEAVE("qla1280_init_rings");
+ return status;
+}
- for (target = 0; target < MAX_TARGETS; target++) {
- nv->bus[bus].target[target].parameter.f.
- renegotiate_on_error = 1;
- nv->bus[bus].target[target].parameter.f.
- auto_request_sense = 1;
- nv->bus[bus].target[target].parameter.f.
- tag_queuing = 1;
- nv->bus[bus].target[target].parameter.f.
- enable_sync = 1;
-#if 1 /* Some SCSI Processors do not seem to like this */
- nv->bus[bus].target[target].parameter.f.
- enable_wide = 1;
-#endif
- nv->bus[bus].target[target].parameter.f.
- parity_checking = 1;
- nv->bus[bus].target[target].parameter.f.
- disconnect_allowed = 1;
- nv->bus[bus].target[target].execution_throttle=
- nv->bus[bus].max_queue_depth - 1;
- if (is1x160) {
- nv->bus[bus].target[target].flags.
- flags1x160.device_enable = 1;
- nv->bus[bus].target[target].flags.
- flags1x160.sync_offset = 0x0e;
- nv->bus[bus].target[target].
- sync_period = 9;
- nv->bus[bus].target[target].
- ppr_1x160.flags.enable_ppr = 1;
- nv->bus[bus].target[target].ppr_1x160.
- flags.ppr_options = 2;
- nv->bus[bus].target[target].ppr_1x160.
- flags.ppr_bus_width = 1;
- } else {
- nv->bus[bus].target[target].flags.
- flags1x80.device_enable = 1;
- nv->bus[bus].target[target].flags.
- flags1x80.sync_offset = 0x8;
- nv->bus[bus].target[target].
- sync_period = 10;
- }
- }
- }
- } else {
- /* Always force AUTO sense for LINUX SCSI */
- for (bus = 0; bus < MAX_BUSES; bus++)
- for (target = 0; target < MAX_TARGETS; target++) {
- nv->bus[bus].target[target].parameter.f.
- auto_request_sense = 1;
- }
- }
+static void
+qla1280_print_settings(struct nvram *nv)
+{
dprintk(1, "qla1280 : initiator scsi id bus[0]=%d\n",
nv->bus[0].config_1.initiator_id);
dprintk(1, "qla1280 : initiator scsi id bus[1]=%d\n",
nv->bus[0].max_queue_depth);
dprintk(1, "qla1280 : max queue depth[1]=%d\n",
nv->bus[1].max_queue_depth);
+}
+
+/*
+ * qla1280_set_target_defaults
+ *      Fill in default per-target settings in the in-core NVRAM image;
+ *      used when the on-board NVRAM contents are invalid.
+ */
+static void
+qla1280_set_target_defaults(struct scsi_qla_host *ha, int bus, int target)
+{
+	struct nvram *nv = &ha->nvram;
+
+	nv->bus[bus].target[target].parameter.renegotiate_on_error = 1;
+	nv->bus[bus].target[target].parameter.auto_request_sense = 1;
+	nv->bus[bus].target[target].parameter.tag_queuing = 1;
+	nv->bus[bus].target[target].parameter.enable_sync = 1;
+#if 1	/* Some SCSI Processors do not seem to like this */
+	nv->bus[bus].target[target].parameter.enable_wide = 1;
+#endif
+	nv->bus[bus].target[target].execution_throttle =
+		nv->bus[bus].max_queue_depth - 1;
+	nv->bus[bus].target[target].parameter.parity_checking = 1;
+	nv->bus[bus].target[target].parameter.disconnect_allowed = 1;
+
+	if (IS_ISP1x160(ha)) {
+		nv->bus[bus].target[target].flags.flags1x160.device_enable = 1;
+		nv->bus[bus].target[target].flags.flags1x160.sync_offset = 0x0e;
+		nv->bus[bus].target[target].sync_period = 9;
+		nv->bus[bus].target[target].ppr_1x160.flags.enable_ppr = 1;
+		nv->bus[bus].target[target].ppr_1x160.flags.ppr_options = 2;
+		nv->bus[bus].target[target].ppr_1x160.flags.ppr_bus_width = 1;
+	} else {
+		/*
+		 * NOTE(review): the pre-refactor default sync_offset for
+		 * 1x80 boards was 0x8; it is 12 here — confirm intentional.
+		 */
+		nv->bus[bus].target[target].flags.flags1x80.device_enable = 1;
+		nv->bus[bus].target[target].flags.flags1x80.sync_offset = 12;
+		nv->bus[bus].target[target].sync_period = 10;
+	}
+}
+
+/*
+ * qla1280_set_defaults
+ *      Build a complete default NVRAM image (adapter-wide and per-bus
+ *      settings, then per-target via qla1280_set_target_defaults);
+ *      called when the NVRAM checksum/content is invalid.
+ */
+static void
+qla1280_set_defaults(struct scsi_qla_host *ha)
+{
+	struct nvram *nv = &ha->nvram;
+	int bus, target;
+
+	dprintk(1, "Using defaults for NVRAM: \n");
+	memset(nv, 0, sizeof(struct nvram));
+
+	/* nv->cntr_flags_1.disable_loading_risc_code = 1; */
+	nv->firmware_feature.f.enable_fast_posting = 1;
+	nv->firmware_feature.f.disable_synchronous_backoff = 1;
+	nv->termination.scsi_bus_0_control = 3;
+	nv->termination.scsi_bus_1_control = 3;
+	nv->termination.auto_term_support = 1;
+
+	/*
+	 * Set default FIFO magic - What appropriate values would be here
+	 * is unknown. This is what I have found testing with 12160s.
+	 *
+	 * Now, I would love the magic decoder ring for this one, the
+	 * header file provided by QLogic seems to be bogus or incomplete
+	 * at best.
+	 */
+	/*
+	 * burst_enable=1 with fifo_threshold=4 appears to correspond to
+	 * the old combined default isp_config.c = 0x44 — TODO confirm
+	 * against the isp_config bitfield layout.
+	 */
+	nv->isp_config.burst_enable = 1;
+	if (IS_ISP1040(ha))
+		nv->isp_config.fifo_threshold |= 3;
+	else
+		nv->isp_config.fifo_threshold |= 4;
+
+	if (IS_ISP1x160(ha))
+		nv->isp_parameter = 0x01; /* fast memory enable */
+
+	for (bus = 0; bus < MAX_BUSES; bus++) {
+		nv->bus[bus].config_1.initiator_id = 7;
+		nv->bus[bus].config_2.req_ack_active_negation = 1;
+		nv->bus[bus].config_2.data_line_active_negation = 1;
+		nv->bus[bus].selection_timeout = 250;
+		nv->bus[bus].max_queue_depth = 256;
+
+		if (IS_ISP1040(ha)) {
+			nv->bus[bus].bus_reset_delay = 3;
+			nv->bus[bus].config_2.async_data_setup_time = 6;
+			nv->bus[bus].retry_delay = 1;
+		} else {
+			nv->bus[bus].bus_reset_delay = 5;
+			nv->bus[bus].config_2.async_data_setup_time = 8;
+		}
+
+		for (target = 0; target < MAX_TARGETS; target++)
+			qla1280_set_target_defaults(ha, bus, target);
+	}
+}
+
+/*
+ * qla1280_config_target
+ *      Program target parameters and per-lun queue settings for one
+ *      (bus, target) pair, and record the tag-queuing / device-enable /
+ *      lun-disable bits in ha->bus_settings.
+ *
+ * Returns 0 on success, non-zero if any mailbox command failed.
+ */
+static int
+qla1280_config_target(struct scsi_qla_host *ha, int bus, int target)
+{
+	struct nvram *nv = &ha->nvram;
+	uint16_t mb[MAILBOX_REGISTER_COUNT];
+	int status, lun;
+	uint16_t flag;
+
+	/* Set Target Parameters. */
+	mb[0] = MBC_SET_TARGET_PARAMETERS;
+	mb[1] = (uint16_t)((bus ? target | BIT_7 : target) << 8);
+
+	/*
+	 * Do not enable sync and ppr for the initial INQUIRY run. We
+	 * enable this later if we determine the target actually
+	 * supports it.
+	 */
+	mb[2] = (TP_RENEGOTIATE | TP_AUTO_REQUEST_SENSE | TP_TAGGED_QUEUE
+		 | TP_WIDE | TP_PARITY | TP_DISCONNECT);
+
+	if (IS_ISP1x160(ha))
+		mb[3] = nv->bus[bus].target[target].flags.flags1x160.sync_offset << 8;
+	else
+		mb[3] = nv->bus[bus].target[target].flags.flags1x80.sync_offset << 8;
+	mb[3] |= nv->bus[bus].target[target].sync_period;
+
+	status = qla1280_mailbox_command(ha, 0x0f, mb);
+
+	/*
+	 * Save Tag queuing enable flag.  Note that the target bit must
+	 * NOT be masked with mb[0] here: qla1280_mailbox_command()
+	 * overwrites mb[] with the mailbox output registers, so mb[0]
+	 * now holds the completion status, not the bit we loaded above.
+	 */
+	flag = BIT_0 << target;
+	if (nv->bus[bus].target[target].parameter.tag_queuing)
+		ha->bus_settings[bus].qtag_enables |= flag;
+
+	/* Save Device enable flag. */
+	if (IS_ISP1x160(ha)) {
+		if (nv->bus[bus].target[target].flags.flags1x160.device_enable)
+			ha->bus_settings[bus].device_enables |= flag;
+		ha->bus_settings[bus].lun_disables |= 0;
+	} else {
+		if (nv->bus[bus].target[target].flags.flags1x80.device_enable)
+			ha->bus_settings[bus].device_enables |= flag;
+		/* Save LUN disable flag. */
+		if (nv->bus[bus].target[target].flags.flags1x80.lun_disable)
+			ha->bus_settings[bus].lun_disables |= flag;
+	}
+
+	/* Set Device Queue Parameters. */
+	for (lun = 0; lun < MAX_LUNS; lun++) {
+		mb[0] = MBC_SET_DEVICE_QUEUE;
+		mb[1] = (uint16_t)((bus ? target | BIT_7 : target) << 8);
+		mb[1] |= lun;
+		mb[2] = nv->bus[bus].max_queue_depth;
+		mb[3] = nv->bus[bus].target[target].execution_throttle;
+		status |= qla1280_mailbox_command(ha, 0x0f, mb);
+	}
+
+	return status;
+}
+
+/*
+ * qla1280_config_bus
+ *      Apply NVRAM-derived settings for one SCSI bus: reset-disable
+ *      flag, initiator ID, reset delay, queue depth, and the
+ *      per-target parameters via qla1280_config_target().
+ *
+ * Returns 0 on success, non-zero if any mailbox command failed.
+ */
+static int
+qla1280_config_bus(struct scsi_qla_host *ha, int bus)
+{
+	struct nvram *nv = &ha->nvram;
+	uint16_t mb[MAILBOX_REGISTER_COUNT];
+	int target, status;
+
+	/* SCSI Reset Disable. */
+	ha->bus_settings[bus].disable_scsi_reset =
+		nv->bus[bus].config_1.scsi_reset_disable;
+
+	/* Initiator ID. */
+	ha->bus_settings[bus].id = nv->bus[bus].config_1.initiator_id;
+	mb[0] = MBC_SET_INITIATOR_ID;
+	mb[1] = bus ? ha->bus_settings[bus].id | BIT_7 :
+		ha->bus_settings[bus].id;
+	status = qla1280_mailbox_command(ha, BIT_1 | BIT_0, mb);
+
+	/* Reset Delay. */
+	ha->bus_settings[bus].bus_reset_delay =
+		nv->bus[bus].bus_reset_delay;
+
+	/* Command queue depth per device. */
+	ha->bus_settings[bus].hiwat = nv->bus[bus].max_queue_depth - 1;
+
+	/* Set target parameters. */
+	for (target = 0; target < MAX_TARGETS; target++)
+		status |= qla1280_config_target(ha, bus, target);
+
+	return status;
+}
+
+static int
+qla1280_nvram_config(struct scsi_qla_host *ha)
+{
+ struct device_reg __iomem *reg = ha->iobase;
+ struct nvram *nv = &ha->nvram;
+ int bus, target, status = 0;
+ uint16_t mb[MAILBOX_REGISTER_COUNT];
+
+ ENTER("qla1280_nvram_config");
+
+ if (ha->nvram_valid) {
+ /* Always force AUTO sense for LINUX SCSI */
+ for (bus = 0; bus < MAX_BUSES; bus++)
+ for (target = 0; target < MAX_TARGETS; target++) {
+ nv->bus[bus].target[target].parameter.
+ auto_request_sense = 1;
+ }
+ } else {
+ qla1280_set_defaults(ha);
+ }
+
+ qla1280_print_settings(nv);
/* Disable RISC load of firmware. */
ha->flags.disable_risc_code_load =
nv->cntr_flags_1.disable_loading_risc_code;
- /* Set ISP hardware DMA burst */
- mb[0] = nv->isp_config.c;
- /* Enable DMA arbitration on dual channel controllers */
- if (ha->ports > 1)
- mb[0] |= BIT_13;
- WRT_REG_WORD(®->cfg_1, mb[0]);
-
-#if 1 /* Is this safe? */
- /* Set SCSI termination. */
- WRT_REG_WORD(®->gpio_enable, (BIT_3 + BIT_2 + BIT_1 + BIT_0));
- mb[0] = nv->termination.c & (BIT_3 + BIT_2 + BIT_1 + BIT_0);
- WRT_REG_WORD(®->gpio_data, mb[0]);
-#endif
+ if (IS_ISP1040(ha)) {
+ uint16_t hwrev, cfg1, cdma_conf, ddma_conf;
+
+ hwrev = RD_REG_WORD(®->cfg_0) & ISP_CFG0_HWMSK;
+
+ cfg1 = RD_REG_WORD(®->cfg_1) & ~(BIT_4 | BIT_5 | BIT_6);
+ cdma_conf = RD_REG_WORD(®->cdma_cfg);
+ ddma_conf = RD_REG_WORD(®->ddma_cfg);
+
+ /* Busted fifo, says mjacob. */
+ if (hwrev != ISP_CFG0_1040A)
+ cfg1 |= nv->isp_config.fifo_threshold << 4;
+
+ cfg1 |= nv->isp_config.burst_enable << 2;
+ WRT_REG_WORD(®->cfg_1, cfg1);
+
+		WRT_REG_WORD(&reg->cdma_cfg, cdma_conf | CDMA_CONF_BENAB);
+		/*
+		 * The data-DMA config register must get the ddma value;
+		 * writing cdma_conf here would clobber ddma_cfg with the
+		 * command-DMA configuration (ddma_conf was read above and
+		 * otherwise never used).
+		 */
+		WRT_REG_WORD(&reg->ddma_cfg, ddma_conf | DDMA_CONF_BENAB);
+ } else {
+ uint16_t cfg1, term;
+
+ /* Set ISP hardware DMA burst */
+ cfg1 = nv->isp_config.fifo_threshold << 4;
+ cfg1 |= nv->isp_config.burst_enable << 2;
+ /* Enable DMA arbitration on dual channel controllers */
+ if (ha->ports > 1)
+ cfg1 |= BIT_13;
+ WRT_REG_WORD(®->cfg_1, cfg1);
+
+ /* Set SCSI termination. */
+ WRT_REG_WORD(®->gpio_enable,
+ BIT_7 | BIT_3 | BIT_2 | BIT_1 | BIT_0);
+ term = nv->termination.scsi_bus_1_control;
+ term |= nv->termination.scsi_bus_0_control << 2;
+ term |= nv->termination.auto_term_support << 7;
+ RD_REG_WORD(®->id_l); /* Flush PCI write */
+ WRT_REG_WORD(®->gpio_data, term);
+ }
+ RD_REG_WORD(®->id_l); /* Flush PCI write */
/* ISP parameter word. */
mb[0] = MBC_SET_SYSTEM_PARAMETER;
mb[1] = nv->isp_parameter;
status |= qla1280_mailbox_command(ha, BIT_1 | BIT_0, &mb[0]);
-#if 0
- /* clock rate - for qla1240 and older, only */
- mb[0] = MBC_SET_CLOCK_RATE;
- mb[1] = 0x50;
- status |= qla1280_mailbox_command(ha, BIT_1 | BIT_0, &mb[0]);
-#endif
+ if (IS_ISP1x40(ha)) {
+ /* clock rate - for qla1240 and older, only */
+ mb[0] = MBC_SET_CLOCK_RATE;
+ mb[1] = 40;
+ status |= qla1280_mailbox_command(ha, BIT_1 | BIT_0, mb);
+ }
+
/* Firmware feature word. */
mb[0] = MBC_SET_FIRMWARE_FEATURES;
- mask = BIT_5 | BIT_1 | BIT_0;
- mb[1] = le16_to_cpu(nv->firmware_feature.w) & (mask);
+ mb[1] = nv->firmware_feature.f.enable_fast_posting;
+ mb[1] |= nv->firmware_feature.f.report_lvd_bus_transition << 1;
+ mb[1] |= nv->firmware_feature.f.disable_synchronous_backoff << 5;
#if defined(CONFIG_IA64_GENERIC) || defined (CONFIG_IA64_SGI_SN2)
if (ia64_platform_is("sn2")) {
printk(KERN_INFO "scsi(%li): Enabling SN2 PCI DMA "
"workaround\n", ha->host_no);
-		mb[1] |= BIT_9;
+		/*
+		 * Keep the SN2 PCI DMA workaround unconditional, as in the
+		 * original code.  Gating it on the NVRAM "unused_9" bit
+		 * would silently drop the workaround whenever that bit
+		 * happens to be clear in NVRAM.
+		 */
+		mb[1] |= BIT_9;	/* firmware feature: DMA workaround */
}
#endif
- status |= qla1280_mailbox_command(ha, mask, &mb[0]);
+ status |= qla1280_mailbox_command(ha, BIT_1 | BIT_0, mb);
/* Retry count and delay. */
mb[0] = MBC_SET_RETRY_COUNT;
mb[2] |= BIT_5;
if (nv->bus[1].config_2.data_line_active_negation)
mb[2] |= BIT_4;
- status |= qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, &mb[0]);
+ status |= qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, mb);
mb[0] = MBC_SET_DATA_OVERRUN_RECOVERY;
mb[1] = 2; /* Reset SCSI bus and return all outstanding IO */
- status |= qla1280_mailbox_command(ha, BIT_1 | BIT_0, &mb[0]);
+ status |= qla1280_mailbox_command(ha, BIT_1 | BIT_0, mb);
/* thingy */
mb[0] = MBC_SET_PCI_CONTROL;
- mb[1] = 2; /* Data DMA Channel Burst Enable */
- mb[2] = 2; /* Command DMA Channel Burst Enable */
- status |= qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, &mb[0]);
+ mb[1] = BIT_1; /* Data DMA Channel Burst Enable */
+ mb[2] = BIT_1; /* Command DMA Channel Burst Enable */
+ status |= qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, mb);
+
+ mb[0] = MBC_SET_TAG_AGE_LIMIT;
+ mb[1] = 8;
+ status |= qla1280_mailbox_command(ha, BIT_1 | BIT_0, mb);
/* Selection timeout. */
mb[0] = MBC_SET_SELECTION_TIMEOUT;
mb[1] = nv->bus[0].selection_timeout;
mb[2] = nv->bus[1].selection_timeout;
- status |= qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, &mb[0]);
-
- for (bus = 0; bus < ha->ports; bus++) {
- /* SCSI Reset Disable. */
- ha->bus_settings[bus].disable_scsi_reset =
- nv->bus[bus].config_1.scsi_reset_disable;
-
- /* Initiator ID. */
- ha->bus_settings[bus].id = nv->bus[bus].config_1.initiator_id;
- mb[0] = MBC_SET_INITIATOR_ID;
- mb[1] = bus ? ha->bus_settings[bus].id | BIT_7 :
- ha->bus_settings[bus].id;
- status |= qla1280_mailbox_command(ha, BIT_1 | BIT_0, &mb[0]);
-
- /* Reset Delay. */
- ha->bus_settings[bus].bus_reset_delay =
- nv->bus[bus].bus_reset_delay;
-
- /* Command queue depth per device. */
- ha->bus_settings[bus].hiwat = nv->bus[bus].max_queue_depth - 1;
-
- /* Set target parameters. */
- for (target = 0; target < MAX_TARGETS; target++) {
- uint8_t mr = BIT_2 | BIT_1 | BIT_0;
-
- /* Set Target Parameters. */
- mb[0] = MBC_SET_TARGET_PARAMETERS;
- mb[1] = (uint16_t) (bus ? target | BIT_7 : target);
- mb[1] <<= 8;
- /*
- * Do not enable wide, sync, and ppr for the initial
- * INQUIRY run. We enable this later if we determine
- * the target actually supports it.
- */
- nv->bus[bus].target[target].parameter.f.
- auto_request_sense = 1;
- nv->bus[bus].target[target].parameter.f.
- stop_queue_on_check = 0;
-
- if (is1x160)
- nv->bus[bus].target[target].ppr_1x160.
- flags.enable_ppr = 0;
- /*
- * No sync, wide, etc. while probing
- */
- mb[2] = (nv->bus[bus].target[target].parameter.c << 8)&
- ~(TP_SYNC /*| TP_WIDE | TP_PPR*/);
-
- if (is1x160)
- mb[3] = nv->bus[bus].target[target].flags.flags1x160.sync_offset << 8;
- else
- mb[3] = nv->bus[bus].target[target].flags.flags1x80.sync_offset << 8;
- mb[3] |= nv->bus[bus].target[target].sync_period;
- mr |= BIT_3;
-
- /*
- * We don't want to enable ppr etc. before we have
- * determined that the target actually supports it
- */
-#if 0
- if (is1x160) {
- mb[2] |= nv->bus[bus].target[target].ppr_1x160.flags.enable_ppr << 5;
-
- mb[6] = nv->bus[bus].target[target].ppr_1x160.flags.ppr_options << 8;
- mb[6] |= nv->bus[bus].target[target].ppr_1x160.flags.ppr_bus_width;
- mr |= BIT_6;
- }
-#endif
-
- status = qla1280_mailbox_command(ha, mr, &mb[0]);
-
- /* Save Tag queuing enable flag. */
- mb[0] = BIT_0 << target;
- if (nv->bus[bus].target[target].parameter.f.tag_queuing)
- ha->bus_settings[bus].qtag_enables |= mb[0];
-
- /* Save Device enable flag. */
- if (is1x160) {
- if (nv->bus[bus].target[target].flags.flags1x160.device_enable)
- ha->bus_settings[bus].device_enables |= mb[0];
- ha->bus_settings[bus].lun_disables |= 0;
- } else {
- if (nv->bus[bus].target[target].flags.flags1x80.device_enable)
- ha->bus_settings[bus].device_enables |= mb[0];
- /* Save LUN disable flag. */
- if (nv->bus[bus].target[target].flags.flags1x80.lun_disable)
- ha->bus_settings[bus].lun_disables |= mb[0];
- }
-
+ status |= qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, mb);
- /* Set Device Queue Parameters. */
- for (lun = 0; lun < MAX_LUNS; lun++) {
- mb[0] = MBC_SET_DEVICE_QUEUE;
- mb[1] = (uint16_t)(bus ? target | BIT_7 : target);
- mb[1] = mb[1] << 8 | lun;
- mb[2] = nv->bus[bus].max_queue_depth;
- mb[3] = nv->bus[bus].target[target].execution_throttle;
- status |= qla1280_mailbox_command(ha, 0x0f,
- &mb[0]);
- }
- }
- }
+ for (bus = 0; bus < ha->ports; bus++)
+ status |= qla1280_config_bus(ha, bus);
if (status)
dprintk(2, "qla1280_nvram_config: **** FAILED ****\n");
static uint16_t
qla1280_nvram_request(struct scsi_qla_host *ha, uint32_t nv_cmd)
{
- struct device_reg *reg = ha->iobase;
+ struct device_reg __iomem *reg = ha->iobase;
int cnt;
uint16_t data = 0;
uint16_t reg_data;
static void
qla1280_nv_write(struct scsi_qla_host *ha, uint16_t data)
{
- struct device_reg *reg = ha->iobase;
+ struct device_reg __iomem *reg = ha->iobase;
WRT_REG_WORD(®->nvram, data | NV_SELECT);
RD_REG_WORD(®->id_l); /* Flush PCI write */
static int
qla1280_mailbox_command(struct scsi_qla_host *ha, uint8_t mr, uint16_t *mb)
{
- struct device_reg *reg = ha->iobase;
+ struct device_reg __iomem *reg = ha->iobase;
#if 0
LIST_HEAD(done_q);
#endif
int status = 0;
int cnt;
uint16_t *optr, *iptr;
+ uint16_t __iomem *mptr;
uint16_t data;
- DECLARE_COMPLETION(wait);
+ DECLARE_COMPLETION_ONSTACK(wait);
struct timer_list timer;
ENTER("qla1280_mailbox_command");
* available before starting sending the command data
*/
/* Load mailbox registers. */
- optr = (uint16_t *) ®->mailbox0;
+ mptr = (uint16_t __iomem *) ®->mailbox0;
iptr = mb;
for (cnt = 0; cnt < MAILBOX_REGISTER_COUNT; cnt++) {
if (mr & BIT_0) {
- WRT_REG_WORD(optr, (*iptr));
+ WRT_REG_WORD(mptr, (*iptr));
}
mr >>= 1;
- optr++;
+ mptr++;
iptr++;
}
timer.function = qla1280_mailbox_timeout;
add_timer(&timer);
- spin_unlock_irq(HOST_LOCK);
+ spin_unlock_irq(ha->host->host_lock);
WRT_REG_WORD(®->host_cmd, HC_SET_HOST_INT);
data = qla1280_debounce_register(®->istatus);
wait_for_completion(&wait);
del_timer_sync(&timer);
- spin_lock_irq(HOST_LOCK);
+ spin_lock_irq(ha->host->host_lock);
ha->mailbox_wait = NULL;
static void
qla1280_poll(struct scsi_qla_host *ha)
{
- struct device_reg *reg = ha->iobase;
+ struct device_reg __iomem *reg = ha->iobase;
uint16_t data;
LIST_HEAD(done_q);
ha->bus_settings[bus].scsi_bus_dead = 1;
ha->bus_settings[bus].failed_reset_count++;
} else {
- spin_unlock_irq(HOST_LOCK);
- schedule_timeout(reset_delay * HZ);
- spin_lock_irq(HOST_LOCK);
+ spin_unlock_irq(ha->host->host_lock);
+ ssleep(reset_delay);
+ spin_lock_irq(ha->host->host_lock);
ha->bus_settings[bus].scsi_bus_dead = 0;
ha->bus_settings[bus].failed_reset_count = 0;
static void
qla1280_reset_adapter(struct scsi_qla_host *ha)
{
- struct device_reg *reg = ha->iobase;
+ struct device_reg __iomem *reg = ha->iobase;
ENTER("qla1280_reset_adapter");
static int
qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
{
- struct device_reg *reg = ha->iobase;
+ struct device_reg __iomem *reg = ha->iobase;
struct scsi_cmnd *cmd = sp->cmd;
cmd_a64_entry_t *pkt;
struct scatterlist *sg = NULL;
- u32 *dword_ptr;
+ __le32 *dword_ptr;
dma_addr_t dma_handle;
int status = 0;
int cnt;
REQUEST_ENTRY_CNT - (ha->req_ring_index - cnt);
}
+ dprintk(3, "Number of free entries=(%d) seg_cnt=0x%x\n",
+ ha->req_q_cnt, seg_cnt);
+
/* If room for request in request ring. */
if ((req_cnt + 2) >= ha->req_q_cnt) {
status = 1;
- dprintk(2, "qla1280_64bit_start_scsi: in-ptr=0x%x req_q_cnt="
+ dprintk(2, "qla1280_start_scsi: in-ptr=0x%x req_q_cnt="
"0x%xreq_cnt=0x%x", ha->req_ring_index, ha->req_q_cnt,
req_cnt);
goto out;
if (cnt >= MAX_OUTSTANDING_COMMANDS) {
status = 1;
- dprintk(2, "qla1280_64bit_start_scsi: NO ROOM IN "
+ dprintk(2, "qla1280_start_scsi: NO ROOM IN "
"OUTSTANDING ARRAY, req_q_cnt=0x%x", ha->req_q_cnt);
goto out;
}
ha->req_q_cnt -= req_cnt;
CMD_HANDLE(sp->cmd) = (unsigned char *)(unsigned long)(cnt + 1);
- dprintk(2, "64bit_start: cmd=%p sp=%p CDB=%xm, handle %lx\n", cmd, sp,
+ dprintk(2, "start: cmd=%p sp=%p CDB=%xm, handle %lx\n", cmd, sp,
cmd->cmnd[0], (long)CMD_HANDLE(sp->cmd));
dprintk(2, " bus %i, target %i, lun %i\n",
SCSI_BUS_32(cmd), SCSI_TCN_32(cmd), SCSI_LUN_32(cmd));
memset(((char *)pkt + 8), 0, (REQUEST_ENTRY_SIZE - 8));
/* Set ISP command timeout. */
- pkt->timeout = cpu_to_le16(30);
+ pkt->timeout = cpu_to_le16(cmd->timeout_per_command/HZ);
/* Set device target ID and LUN */
pkt->lun = SCSI_LUN_32(cmd);
(SCSI_TCN_32(cmd) | BIT_7) : SCSI_TCN_32(cmd);
/* Enable simple tag queuing if device supports it. */
- if (DEV_SIMPLE_TAGS(cmd->device))
+ if (cmd->device->simple_tags)
pkt->control_flags |= cpu_to_le16(BIT_3);
/* Load SCSI command packet. */
dma_handle = sg_dma_address(sg);
#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2)
if (ha->flags.use_pci_vchannel)
- sn_pci_set_vchan(ha->pdev, &dma_handle,
+ sn_pci_set_vchan(ha->pdev,
+ (unsigned long *)&dma_handle,
SCSI_BUS_32(cmd));
#endif
*dword_ptr++ =
dma_handle = sg_dma_address(sg);
#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2)
if (ha->flags.use_pci_vchannel)
- sn_pci_set_vchan(ha->pdev, &dma_handle,
+ sn_pci_set_vchan(ha->pdev,
+ (unsigned long *)&dma_handle,
SCSI_BUS_32(cmd));
#endif
*dword_ptr++ =
REQUEST_ENTRY_SIZE);
}
} else { /* No scatter gather data transfer */
- struct page *page = virt_to_page(cmd->request_buffer);
- unsigned long off = (unsigned long)cmd->request_buffer & ~PAGE_MASK;
+ dma_handle = pci_map_single(ha->pdev,
+ cmd->request_buffer,
+ cmd->request_bufflen,
+ cmd->sc_data_direction);
- dma_handle = pci_map_page(ha->pdev, page, off,
- cmd->request_bufflen,
- cmd->sc_data_direction);
-
- /* save dma_handle for pci_unmap_page */
sp->saved_dma_handle = dma_handle;
#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2)
if (ha->flags.use_pci_vchannel)
- sn_pci_set_vchan(ha->pdev, &dma_handle,
+ sn_pci_set_vchan(ha->pdev,
+ (unsigned long *)&dma_handle,
SCSI_BUS_32(cmd));
#endif
*dword_ptr++ = cpu_to_le32(pci_dma_lo32(dma_handle));
sp->flags |= SRB_SENT;
ha->actthreads++;
WRT_REG_WORD(&reg->mailbox4, ha->req_ring_index);
- (void) RD_REG_WORD(&reg->mailbox4); /* PCI posted write flush */
+ /* Enforce mmio write ordering; see comment in qla1280_isp_cmd(). */
+ mmiowb();
out:
if (status)
static int
qla1280_32bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
{
- struct device_reg *reg = ha->iobase;
+ struct device_reg __iomem *reg = ha->iobase;
struct scsi_cmnd *cmd = sp->cmd;
struct cmd_entry *pkt;
struct scatterlist *sg = NULL;
- uint32_t *dword_ptr;
+ __le32 *dword_ptr;
int status = 0;
int cnt;
int req_cnt;
memset(((char *)pkt + 8), 0, (REQUEST_ENTRY_SIZE - 8));
/* Set ISP command timeout. */
- pkt->timeout = cpu_to_le16(30);
+ pkt->timeout = cpu_to_le16(cmd->timeout_per_command/HZ);
/* Set device target ID and LUN */
pkt->lun = SCSI_LUN_32(cmd);
(SCSI_TCN_32(cmd) | BIT_7) : SCSI_TCN_32(cmd);
/* Enable simple tag queuing if device supports it. */
- if (DEV_SIMPLE_TAGS(cmd->device))
+ if (cmd->device->simple_tags)
pkt->control_flags |= cpu_to_le16(BIT_3);
/* Load SCSI command packet. */
REQUEST_ENTRY_SIZE);
}
} else { /* No S/G data transfer */
- struct page *page = virt_to_page(cmd->request_buffer);
- unsigned long off = (unsigned long)cmd->request_buffer & ~PAGE_MASK;
- dma_handle = pci_map_page(ha->pdev, page, off,
- cmd->request_bufflen,
- cmd->sc_data_direction);
+ dma_handle = pci_map_single(ha->pdev,
+ cmd->request_buffer,
+ cmd->request_bufflen,
+ cmd->sc_data_direction);
sp->saved_dma_handle = dma_handle;
*dword_ptr++ = cpu_to_le32(pci_dma_lo32(dma_handle));
sp->flags |= SRB_SENT;
ha->actthreads++;
WRT_REG_WORD(&reg->mailbox4, ha->req_ring_index);
- (void) RD_REG_WORD(&reg->mailbox4); /* PCI posted write flush */
+ /* Enforce mmio write ordering; see comment in qla1280_isp_cmd(). */
+ mmiowb();
out:
if (status)
static request_t *
qla1280_req_pkt(struct scsi_qla_host *ha)
{
- struct device_reg *reg = ha->iobase;
+ struct device_reg __iomem *reg = ha->iobase;
request_t *pkt = NULL;
int cnt;
uint32_t timer;
static void
qla1280_isp_cmd(struct scsi_qla_host *ha)
{
- struct device_reg *reg = ha->iobase;
+ struct device_reg __iomem *reg = ha->iobase;
ENTER("qla1280_isp_cmd");
} else
ha->request_ring_ptr++;
- /* Set chip new ring index. */
+ /*
+ * Update request index to mailbox4 (Request Queue In).
+ * The mmiowb() ensures that this write is ordered with writes by other
+ * CPUs. Without the mmiowb(), it is possible for the following:
+ * CPUA posts write of index 5 to mailbox4
+ * CPUA releases host lock
+ * CPUB acquires host lock
+ * CPUB posts write of index 6 to mailbox4
+ * On PCI bus, order reverses and write of 6 posts, then index 5,
+ * causing chip to issue full queue of stale commands
+ * The mmiowb() prevents future writes from crossing the barrier.
+ * See Documentation/DocBook/deviceiobook.tmpl for more information.
+ */
WRT_REG_WORD(&reg->mailbox4, ha->req_ring_index);
- (void) RD_REG_WORD(&reg->mailbox4); /* PCI posted write flush */
+ mmiowb();
LEAVE("qla1280_isp_cmd");
}
static void
qla1280_isr(struct scsi_qla_host *ha, struct list_head *done_q)
{
- struct device_reg *reg = ha->iobase;
+ struct device_reg __iomem *reg = ha->iobase;
struct response *pkt;
struct srb *sp = NULL;
uint16_t mailbox[MAILBOX_REGISTER_COUNT];
}
-#if LINUX_VERSION_CODE < 0x020500
-/*
- *
- */
-static void
-qla1280_get_target_options(struct scsi_cmnd *cmd, struct scsi_qla_host *ha)
-{
- unsigned char *result;
- struct nvram *n;
- int bus, target, lun;
-
- bus = SCSI_BUS_32(cmd);
- target = SCSI_TCN_32(cmd);
- lun = SCSI_LUN_32(cmd);
-
- /*
- * Make sure to not touch anything if someone is using the
- * sg interface.
- */
- if (cmd->use_sg || (CMD_RESULT(cmd) >> 16) != DID_OK || lun)
- return;
-
- result = cmd->request_buffer;
- n = &ha->nvram;
-
- n->bus[bus].target[target].parameter.f.enable_wide = 0;
- n->bus[bus].target[target].parameter.f.enable_sync = 0;
- n->bus[bus].target[target].ppr_1x160.flags.enable_ppr = 0;
-
- if (result[7] & 0x60)
- n->bus[bus].target[target].parameter.f.enable_wide = 1;
- if (result[7] & 0x10)
- n->bus[bus].target[target].parameter.f.enable_sync = 1;
- if ((result[2] >= 3) && (result[4] + 5 > 56) &&
- (result[56] & 0x4))
- n->bus[bus].target[target].ppr_1x160.flags.enable_ppr = 1;
-
- dprintk(2, "get_target_options(): wide %i, sync %i, ppr %i\n",
- n->bus[bus].target[target].parameter.f.enable_wide,
- n->bus[bus].target[target].parameter.f.enable_sync,
- n->bus[bus].target[target].ppr_1x160.flags.enable_ppr);
-}
-#endif
-
/*
* qla1280_status_entry
* Processes received ISP status entry.
scsi_status, handle);
}
- /* Target busy */
- if (scsi_status & SS_BUSY_CONDITION &&
- scsi_status != SS_RESERVE_CONFLICT) {
- CMD_RESULT(cmd) =
- DID_BUS_BUSY << 16 | (scsi_status & 0xff);
+ /* Target busy or queue full */
+ if ((scsi_status & 0xFF) == SAM_STAT_TASK_SET_FULL ||
+ (scsi_status & 0xFF) == SAM_STAT_BUSY) {
+ CMD_RESULT(cmd) = scsi_status & 0xff;
} else {
/* Save ISP completion status */
CMD_RESULT(cmd) = qla1280_return_status(pkt, cmd);
- if (scsi_status & SS_CHECK_CONDITION) {
+ if (scsi_status & SAM_STAT_CHECK_CONDITION) {
if (comp_status != CS_ARS_FAILED) {
uint16_t req_sense_length =
le16_to_cpu(pkt->req_sense_length);
static int
qla1280_abort_isp(struct scsi_qla_host *ha)
{
+ struct device_reg __iomem *reg = ha->iobase;
struct srb *sp;
int status = 0;
int cnt;
ENTER("qla1280_abort_isp");
- if (!ha->flags.abort_isp_active && ha->flags.online) {
- struct device_reg *reg = ha->iobase;
- ha->flags.abort_isp_active = 1;
-
- /* Disable ISP interrupts. */
- qla1280_disable_intrs(ha);
- WRT_REG_WORD(&reg->host_cmd, HC_PAUSE_RISC);
- RD_REG_WORD(&reg->id_l);
+ if (ha->flags.abort_isp_active || !ha->flags.online)
+ goto out;
+
+ ha->flags.abort_isp_active = 1;
- printk(KERN_INFO "scsi(%li): dequeuing outstanding commands\n",
- ha->host_no);
- /* Dequeue all commands in outstanding command list. */
- for (cnt = 0; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
- struct scsi_cmnd *cmd;
- sp = ha->outstanding_cmds[cnt];
- if (sp) {
+ /* Disable ISP interrupts. */
+ qla1280_disable_intrs(ha);
+ WRT_REG_WORD(&reg->host_cmd, HC_PAUSE_RISC);
+ RD_REG_WORD(&reg->id_l);
- cmd = sp->cmd;
- CMD_RESULT(cmd) = DID_RESET << 16;
+ printk(KERN_INFO "scsi(%li): dequeuing outstanding commands\n",
+ ha->host_no);
+ /* Dequeue all commands in outstanding command list. */
+ for (cnt = 0; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
+ struct scsi_cmnd *cmd;
+ sp = ha->outstanding_cmds[cnt];
+ if (sp) {
- sp->cmd = NULL;
- ha->outstanding_cmds[cnt] = NULL;
+ cmd = sp->cmd;
+ CMD_RESULT(cmd) = DID_RESET << 16;
- (*cmd->scsi_done)(cmd);
+ sp->cmd = NULL;
+ ha->outstanding_cmds[cnt] = NULL;
- sp->flags = 0;
- }
- }
+ (*cmd->scsi_done)(cmd);
- /* If firmware needs to be loaded */
- if (qla1280_isp_firmware (ha)) {
- if (!(status = qla1280_chip_diag(ha)))
- status = qla1280_setup_chip(ha);
+ sp->flags = 0;
}
+ }
- if (!status) {
- /* Setup adapter based on NVRAM parameters. */
- qla1280_nvram_config (ha);
+ status = qla1280_load_firmware(ha);
+ if (status)
+ goto out;
- if (!(status = qla1280_init_rings(ha))) {
- /* Issue SCSI reset. */
- for (bus = 0; bus < ha->ports; bus++) {
- qla1280_bus_reset(ha, bus);
- }
- /*
- * qla1280_bus_reset() will do the marker
- * dance - no reason to repeat here!
- */
-#if 0
- /* Issue marker command. */
- ha->flags.reset_marker = 0;
- for (bus = 0; bus < ha->ports; bus++) {
- ha->bus_settings[bus].
- reset_marker = 0;
- qla1280_marker(ha, bus, 0, 0,
- MK_SYNC_ALL);
- }
-#endif
- ha->flags.abort_isp_active = 0;
- }
- }
- }
+ /* Setup adapter based on NVRAM parameters. */
+ qla1280_nvram_config (ha);
+ status = qla1280_init_rings(ha);
+ if (status)
+ goto out;
+
+ /* Issue SCSI reset. */
+ for (bus = 0; bus < ha->ports; bus++)
+ qla1280_bus_reset(ha, bus);
+
+ ha->flags.abort_isp_active = 0;
+ out:
if (status) {
printk(KERN_WARNING
"qla1280: ISP error recovery failed, board disabled");
* register value.
*/
static u16
-qla1280_debounce_register(volatile u16 * addr)
+qla1280_debounce_register(volatile u16 __iomem * addr)
{
volatile u16 ret;
volatile u16 ret2;
qla1280_check_for_dead_scsi_bus(struct scsi_qla_host *ha, unsigned int bus)
{
uint16_t config_reg, scsi_control;
- struct device_reg *reg = ha->iobase;
+ struct device_reg __iomem *reg = ha->iobase;
if (ha->bus_settings[bus].scsi_bus_dead) {
WRT_REG_WORD(&reg->host_cmd, HC_PAUSE_RISC);
} else
printk(" Async");
- if (DEV_SIMPLE_TAGS(device))
+ if (device->simple_tags)
printk(", Tagged queuing: depth %d", device->queue_depth);
printk("\n");
}
}
-static int
+static int __init
qla1280_get_token(char *str)
{
char *sep;
long ret = -1;
- int i, len;
-
- len = sizeof(setup_token)/sizeof(struct setup_tokens);
+ int i;
sep = strchr(str, ':');
if (sep) {
- for (i = 0; i < len; i++){
-
+ for (i = 0; i < ARRAY_SIZE(setup_token); i++) {
if (!strncmp(setup_token[i].token, str, (sep - str))) {
ret = setup_token[i].val;
break;
return ret;
}
-#if LINUX_VERSION_CODE >= 0x020600
+
static struct scsi_host_template qla1280_driver_template = {
+ .module = THIS_MODULE,
.proc_name = "qla1280",
.name = "Qlogic ISP 1280/12160",
.info = qla1280_info,
.eh_bus_reset_handler = qla1280_eh_bus_reset,
.eh_host_reset_handler = qla1280_eh_adapter_reset,
.bios_param = qla1280_biosparam,
- .proc_info = qla1280_proc_info,
- .can_queue = 0xfffff,
- .this_id = -1,
- .sg_tablesize = SG_ALL,
- .cmd_per_lun = 1,
- .use_clustering = ENABLE_CLUSTERING,
-};
-#else
-static Scsi_Host_Template qla1280_driver_template = {
- .proc_name = "qla1280",
- .name = "Qlogic ISP 1280/12160",
- .detect = qla1280_detect,
- .release = qla1280_release,
- .info = qla1280_info,
- .queuecommand = qla1280_queuecommand,
- .eh_abort_handler = qla1280_eh_abort,
- .eh_device_reset_handler= qla1280_eh_device_reset,
- .eh_bus_reset_handler = qla1280_eh_bus_reset,
- .eh_host_reset_handler = qla1280_eh_adapter_reset,
- .bios_param = qla1280_biosparam_old,
- .proc_info = qla1280_proc_info_old,
.can_queue = 0xfffff,
.this_id = -1,
.sg_tablesize = SG_ALL,
.cmd_per_lun = 1,
.use_clustering = ENABLE_CLUSTERING,
- .use_new_eh_code = 1,
};
-#endif
+
static int __devinit
qla1280_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
#ifdef QLA_64BIT_PTR
if (pci_set_dma_mask(ha->pdev, (dma_addr_t) ~ 0ULL)) {
- if (pci_set_dma_mask(ha->pdev, 0xffffffff)) {
+ if (pci_set_dma_mask(ha->pdev, DMA_32BIT_MASK)) {
printk(KERN_WARNING "scsi(%li): Unable to set a "
- " suitable DMA mask - aboring\n", ha->host_no);
+ "suitable DMA mask - aborting\n", ha->host_no);
error = -ENODEV;
goto error_free_irq;
}
dprintk(2, "scsi(%li): 64 Bit PCI Addressing Enabled\n",
ha->host_no);
#else
- if (pci_set_dma_mask(ha->pdev, 0xffffffff)) {
+ if (pci_set_dma_mask(ha->pdev, DMA_32BIT_MASK)) {
printk(KERN_WARNING "scsi(%li): Unable to set a "
- " suitable DMA mask - aboring\n", ha->host_no);
+ "suitable DMA mask - aborting\n", ha->host_no);
error = -ENODEV;
goto error_free_irq;
}
#endif
ha->request_ring = pci_alloc_consistent(ha->pdev,
- ((REQUEST_ENTRY_CNT + 1) * (sizeof(request_t))),
+ ((REQUEST_ENTRY_CNT + 1) * sizeof(request_t)),
&ha->request_dma);
if (!ha->request_ring) {
printk(KERN_INFO "qla1280: Failed to get request memory\n");
}
ha->response_ring = pci_alloc_consistent(ha->pdev,
- ((RESPONSE_ENTRY_CNT + 1) * (sizeof(struct response))),
+ ((RESPONSE_ENTRY_CNT + 1) * sizeof(struct response)),
&ha->response_dma);
if (!ha->response_ring) {
printk(KERN_INFO "qla1280: Failed to get response memory\n");
host->max_sectors = 1024;
host->unique_id = host->host_no;
-#if LINUX_VERSION_CODE < 0x020545
- host->select_queue_depths = qla1280_select_queue_depth;
-#endif
-
error = -ENODEV;
#if MEMORY_MAPPED_IO
}
host->base = (unsigned long)ha->mmpbase;
- ha->iobase = (struct device_reg *)ha->mmpbase;
+ ha->iobase = (struct device_reg __iomem *)ha->mmpbase;
#else
host->io_port = pci_resource_start(ha->pdev, 0);
if (!request_region(host->io_port, 0xff, "qla1280")) {
/* Disable ISP interrupts. */
qla1280_disable_intrs(ha);
- if (request_irq(pdev->irq, qla1280_intr_handler, SA_SHIRQ,
+ if (request_irq(pdev->irq, qla1280_intr_handler, IRQF_SHARED,
"qla1280", ha)) {
printk("qla1280 : Failed to reserve interrupt %d already "
"in use\n", pdev->irq);
pci_set_drvdata(pdev, host);
-#if LINUX_VERSION_CODE >= 0x020600
error = scsi_add_host(host, &pdev->dev);
if (error)
goto error_disable_adapter;
scsi_scan_host(host);
-#else
- scsi_set_pci_device(host, pdev);
-#endif
return 0;
-#if LINUX_VERSION_CODE >= 0x020600
error_disable_adapter:
- WRT_REG_WORD(&ha->iobase->ictrl, 0);
-#endif
+ qla1280_disable_intrs(ha);
error_free_irq:
free_irq(pdev->irq, ha);
error_release_region:
#endif
error_free_response_ring:
pci_free_consistent(ha->pdev,
- ((RESPONSE_ENTRY_CNT + 1) * (sizeof(struct response))),
+ ((RESPONSE_ENTRY_CNT + 1) * sizeof(struct response)),
ha->response_ring, ha->response_dma);
error_free_request_ring:
pci_free_consistent(ha->pdev,
- ((REQUEST_ENTRY_CNT + 1) * (sizeof(request_t))),
+ ((REQUEST_ENTRY_CNT + 1) * sizeof(request_t)),
ha->request_ring, ha->request_dma);
error_put_host:
scsi_host_put(host);
struct Scsi_Host *host = pci_get_drvdata(pdev);
struct scsi_qla_host *ha = (struct scsi_qla_host *)host->hostdata;
-#if LINUX_VERSION_CODE >= 0x020600
scsi_remove_host(host);
-#endif
- WRT_REG_WORD(&ha->iobase->ictrl, 0);
+ qla1280_disable_intrs(ha);
free_irq(pdev->irq, ha);
scsi_host_put(host);
}
-#if LINUX_VERSION_CODE >= 0x020600
static struct pci_driver qla1280_pci_driver = {
.name = "qla1280",
.id_table = qla1280_pci_tbl,
qla1280_setup(qla1280);
#endif
- return pci_module_init(&qla1280_pci_driver);
+ return pci_register_driver(&qla1280_pci_driver);
}
static void __exit
module_init(qla1280_init);
module_exit(qla1280_exit);
-#else
-# define driver_template qla1280_driver_template
-# include "scsi_module.c"
-#endif
MODULE_AUTHOR("Qlogic & Jes Sorensen");
MODULE_DESCRIPTION("Qlogic ISP SCSI (qla1x80/qla1x160) driver");
MODULE_LICENSE("GPL");
+MODULE_VERSION(QLA1280_VERSION);
/*
* Overrides for Emacs so that we almost follow Linus's tabbing style.