--- /dev/null
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000-2003 Silicon Graphics, Inc. All rights reserved.
+ */
+#ifndef _ASM_IA64_SN_IOERROR_H
+#define _ASM_IA64_SN_IOERROR_H
+
+/*
+ * IO error structure.
+ *
+ * This structure would expand to hold the information retrieved from
+ * all IO related error registers.
+ *
+ * This structure is defined to hold all system specific
+ * information related to a single error.
+ *
+ * This serves a couple of purposes.
+ * - Error handling often involves translating one form of address to other
+ * form. So, instead of having different data structures at each level,
+ * we have a single structure, and the appropriate fields get filled in
+ * at each layer.
+ * - This provides a way to dump all error related information in any layer
+ * of error handling (debugging aid).
+ *
+ * A second possibility is to allow each layer to define its own error
+ * data structure, and fill in the proper fields. This has the advantage
+ * of isolating the layers.
+ * A big concern is the potential stack usage (and overflow), if each layer
+ * defines these structures on stack (assuming we don't want to do kmalloc).
+ *
+ * Any layer wishing to pass extra information to a layer next to it in
+ * error handling hierarchy, can do so as a separate parameter.
+ */
+
+/*
+ * Everything known about a single IO error.  ie_v records, one bit per
+ * field, which of the ie_* members below have actually been filled in;
+ * layers fill fields incrementally as the error is translated upward
+ * (see IOERROR_SETVALUE / IOERROR_INIT below).
+ */
+typedef struct io_error_s {
+ /* Bit fields indicating which structure fields are valid */
+ union {
+ struct {
+ unsigned ievb_errortype:1;
+ unsigned ievb_widgetnum:1;
+ unsigned ievb_widgetdev:1;
+ unsigned ievb_srccpu:1;
+ unsigned ievb_srcnode:1;
+ unsigned ievb_errnode:1;
+ unsigned ievb_sysioaddr:1;
+ unsigned ievb_xtalkaddr:1;
+ unsigned ievb_busspace:1;
+ unsigned ievb_busaddr:1;
+ unsigned ievb_vaddr:1;
+ unsigned ievb_memaddr:1;
+ unsigned ievb_epc:1;
+ unsigned ievb_ef:1;
+ unsigned ievb_tnum:1;
+ } iev_b; /* per-field valid bits */
+ unsigned iev_a; /* all valid bits at once (cleared by IOERROR_INIT) */
+ } ie_v;
+
+ short ie_errortype; /* error type: extra info about error */
+ short ie_widgetnum; /* Widget number that's in error */
+ short ie_widgetdev; /* Device within widget in error */
+ cpuid_t ie_srccpu; /* CPU on srcnode generating error */
+ cnodeid_t ie_srcnode; /* Node which caused the error */
+ cnodeid_t ie_errnode; /* Node where error was noticed */
+ iopaddr_t ie_sysioaddr; /* Sys specific IO address */
+ iopaddr_t ie_xtalkaddr; /* Xtalk (48bit) addr of Error */
+ iopaddr_t ie_busspace; /* Bus specific address space */
+ iopaddr_t ie_busaddr; /* Bus specific address */
+ caddr_t ie_vaddr; /* Virtual address of error */
+ iopaddr_t ie_memaddr; /* Physical memory address */
+ caddr_t ie_epc; /* pc when error reported */
+ caddr_t ie_ef; /* eframe when error reported */
+ short ie_tnum; /* Xtalk TNUM field */
+} ioerror_t;
+
+#define IOERROR_INIT(e) do { (e)->ie_v.iev_a = 0; } while (0)
+#define IOERROR_SETVALUE(e,f,v) do { (e)->ie_ ## f = (v); (e)->ie_v.iev_b.ievb_ ## f = 1; } while (0)
+
+#endif /* _ASM_IA64_SN_IOERROR_H */
--- /dev/null
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2000-2004 Silicon Graphics, Inc. All rights reserved.
+ */
+
+#ifndef _ASM_IA64_SN_TIO_H
+#define _ASM_IA64_SN_TIO_H
+
+#define TIO_MMR_ADDR_MOD
+
+#define TIO_NODE_ID TIO_MMR_ADDR_MOD(0x0000000090060e80)
+
+#define TIO_ITTE_BASE 0xb0008800 /* base of translation table entries */
+#define TIO_ITTE(bigwin) (TIO_ITTE_BASE + 8*(bigwin))
+
+#define TIO_ITTE_OFFSET_BITS 8 /* size of offset field */
+#define TIO_ITTE_OFFSET_MASK ((1<<TIO_ITTE_OFFSET_BITS)-1)
+#define TIO_ITTE_OFFSET_SHIFT 0
+
+#define TIO_ITTE_WIDGET_BITS 2 /* size of widget field */
+#define TIO_ITTE_WIDGET_MASK ((1<<TIO_ITTE_WIDGET_BITS)-1)
+#define TIO_ITTE_WIDGET_SHIFT 12
+#define TIO_ITTE_VALID_MASK 0x1
+#define TIO_ITTE_VALID_SHIFT 16
+
+
+#define TIO_ITTE_PUT(nasid, bigwin, widget, addr, valid) \
+ REMOTE_HUB_S((nasid), TIO_ITTE(bigwin), \
+ (((((addr) >> TIO_BWIN_SIZE_BITS) & \
+ TIO_ITTE_OFFSET_MASK) << TIO_ITTE_OFFSET_SHIFT) | \
+ (((widget) & TIO_ITTE_WIDGET_MASK) << TIO_ITTE_WIDGET_SHIFT)) | \
+ (( (valid) & TIO_ITTE_VALID_MASK) << TIO_ITTE_VALID_SHIFT))
+
+#endif /* _ASM_IA64_SN_TIO_H */
--- /dev/null
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000-2004 Silicon Graphics, Inc. All rights reserved.
+ */
+#ifndef _ASM_IA64_SN_XTALK_HUBDEV_H
+#define _ASM_IA64_SN_XTALK_HUBDEV_H
+
+#define HUB_WIDGET_ID_MAX 0xf
+#define DEV_PER_WIDGET (2*2*8)
+#define IIO_ITTE_WIDGET_BITS 4 /* size of widget field */
+#define IIO_ITTE_WIDGET_MASK ((1<<IIO_ITTE_WIDGET_BITS)-1)
+#define IIO_ITTE_WIDGET_SHIFT 8
+
+/*
+ * Use the top big window as a surrogate for the first small window
+ */
+#define SWIN0_BIGWIN HUB_NUM_BIG_WINDOW
+#define IIO_NUM_ITTES 7
+#define HUB_NUM_BIG_WINDOW (IIO_NUM_ITTES - 1)
+
+/*
+ * Per-device DMA write-buffer flush bookkeeping.  One entry describes a
+ * single PCI device (bus/slot/pin), the BAR ranges it decodes, and the
+ * force-interrupt address / flush word used to drain in-flight writes.
+ * NOTE(review): exact flush protocol is defined by the users of this
+ * structure, not visible in this header.
+ */
+struct sn_flush_device_list {
+ int sfdl_bus;
+ int sfdl_slot;
+ int sfdl_pin;
+ struct bar_list {
+ unsigned long start; /* BAR range start */
+ unsigned long end; /* BAR range end */
+ } sfdl_bar_list[6];
+ unsigned long sfdl_force_int_addr;
+ unsigned long sfdl_flush_value;
+ volatile unsigned long *sfdl_flush_addr;
+ uint64_t sfdl_persistent_busnum;
+ struct pcibus_info *sfdl_pcibus_info;
+ spinlock_t sfdl_flush_lock;
+};
+
+/*
+ * **widget_p - Used as an array[wid_num][device] of sn_flush_device_list.
+ */
+/*
+ * Per-NASID flush state: widget_p is used as a 2-D
+ * array[wid_num][device] of sn_flush_device_list (see comment above),
+ * and iio_itte caches the hub's 8 ITTE mapping registers.
+ */
+struct sn_flush_nasid_entry {
+ struct sn_flush_device_list **widget_p; /* Used as a array of wid_num */
+ uint64_t iio_itte[8];
+};
+
+/*
+ * Per-hub IO bookkeeping: geography/NASID identity, flush-device state
+ * for this NASID, and info for each attached crosstalk widget.
+ */
+struct hubdev_info {
+ geoid_t hdi_geoid;
+ short hdi_nasid;
+ short hdi_peer_nasid; /* Dual Porting Peer */
+
+ struct sn_flush_nasid_entry hdi_flush_nasid_list;
+ struct xwidget_info hdi_xwidget_info[HUB_WIDGET_ID_MAX + 1];
+
+
+ /* Opaque back-pointers into the node topology (types owned elsewhere) */
+ void *hdi_nodepda;
+ void *hdi_node_vertex;
+ void *hdi_xtalk_vertex;
+};
+
+extern void hubdev_init_node(nodepda_t *, cnodeid_t);
+extern void hub_error_init(struct hubdev_info *);
+extern void ice_error_init(struct hubdev_info *);
+
+
+#endif /* _ASM_IA64_SN_XTALK_HUBDEV_H */
--- /dev/null
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992-1997,2000-2004 Silicon Graphics, Inc. All Rights Reserved.
+ */
+#ifndef _ASM_IA64_SN_XTALK_XBOW_H
+#define _ASM_IA64_SN_XTALK_XBOW_H
+
+#define XBOW_PORT_8 0x8
+#define XBOW_PORT_C 0xc
+#define XBOW_PORT_F 0xf
+
+#define MAX_XBOW_PORTS 8 /* number of ports on xbow chip */
+#define BASE_XBOW_PORT XBOW_PORT_8 /* Lowest external port */
+
+#define XBOW_CREDIT 4
+
+#define MAX_XBOW_NAME 16
+
+/* Register set for each xbow link */
+typedef volatile struct xb_linkregs_s {
+/*
+ * we access these through synergy unswizzled space, so the address
+ * gets twiddled (i.e. references to 0x4 actually go to 0x0 and vv.)
+ * That's why we put the register first and filler second.
+ */
+ /* Hardware offsets below are per the XB_LINK_* macros later in file */
+ uint32_t link_ibf; /* +0x04 input buffer flush */
+ uint32_t filler0; /* filler for proper alignment */
+ uint32_t link_control; /* +0x0c */
+ uint32_t filler1;
+ uint32_t link_status; /* +0x14 */
+ uint32_t filler2;
+ uint32_t link_arb_upper; /* +0x1c */
+ uint32_t filler3;
+ uint32_t link_arb_lower; /* +0x24 */
+ uint32_t filler4;
+ uint32_t link_status_clr; /* +0x2c */
+ uint32_t filler5;
+ uint32_t link_reset; /* +0x34 */
+ uint32_t filler6;
+ uint32_t link_aux_status; /* +0x3c */
+ uint32_t filler7;
+} xb_linkregs_t;
+
+/*
+ * Complete xbow crossbar MMIO layout: the standard widget configuration
+ * block, xbow/Xbridge-specific registers, then one xb_linkregs_t set
+ * per external port (ports 8..f).  Field order pairs register-before-
+ * filler because of the unswizzled access noted below.
+ */
+typedef volatile struct xbow_s {
+ /* standard widget configuration 0x000000-0x000057 */
+ struct widget_cfg xb_widget; /* 0x000000 */
+
+ /* helper fieldnames for accessing bridge widget */
+
+#define xb_wid_id xb_widget.w_id
+#define xb_wid_stat xb_widget.w_status
+#define xb_wid_err_upper xb_widget.w_err_upper_addr
+#define xb_wid_err_lower xb_widget.w_err_lower_addr
+#define xb_wid_control xb_widget.w_control
+#define xb_wid_req_timeout xb_widget.w_req_timeout
+#define xb_wid_int_upper xb_widget.w_intdest_upper_addr
+#define xb_wid_int_lower xb_widget.w_intdest_lower_addr
+#define xb_wid_err_cmdword xb_widget.w_err_cmd_word
+#define xb_wid_llp xb_widget.w_llp_cfg
+#define xb_wid_stat_clr xb_widget.w_tflush
+
+/*
+ * we access these through synergy unswizzled space, so the address
+ * gets twiddled (i.e. references to 0x4 actually go to 0x0 and vv.)
+ * That's why we put the register first and filler second.
+ */
+ /* xbow-specific widget configuration 0x000058-0x0000FF */
+ uint32_t xb_wid_arb_reload; /* 0x00005C */
+ uint32_t _pad_000058;
+ uint32_t xb_perf_ctr_a; /* 0x000064 */
+ uint32_t _pad_000060;
+ uint32_t xb_perf_ctr_b; /* 0x00006c */
+ uint32_t _pad_000068;
+ uint32_t xb_nic; /* 0x000074 */
+ uint32_t _pad_000070;
+
+ /* Xbridge only */
+ uint32_t xb_w0_rst_fnc; /* 0x00007C */
+ uint32_t _pad_000078;
+ uint32_t xb_l8_rst_fnc; /* 0x000084 */
+ uint32_t _pad_000080;
+ uint32_t xb_l9_rst_fnc; /* 0x00008c */
+ uint32_t _pad_000088;
+ uint32_t xb_la_rst_fnc; /* 0x000094 */
+ uint32_t _pad_000090;
+ uint32_t xb_lb_rst_fnc; /* 0x00009c */
+ uint32_t _pad_000098;
+ uint32_t xb_lc_rst_fnc; /* 0x0000a4 */
+ uint32_t _pad_0000a0;
+ uint32_t xb_ld_rst_fnc; /* 0x0000ac */
+ uint32_t _pad_0000a8;
+ uint32_t xb_le_rst_fnc; /* 0x0000b4 */
+ uint32_t _pad_0000b0;
+ uint32_t xb_lf_rst_fnc; /* 0x0000bc */
+ uint32_t _pad_0000b8;
+ uint32_t xb_lock; /* 0x0000c4 */
+ uint32_t _pad_0000c0;
+ uint32_t xb_lock_clr; /* 0x0000cc */
+ uint32_t _pad_0000c8;
+ /* end of Xbridge only */
+ uint32_t _pad_0000d0[12];
+
+ /* Link Specific Registers, port 8..15 0x000100-0x000300 */
+ xb_linkregs_t xb_link_raw[MAX_XBOW_PORTS];
+#define xb_link(p) xb_link_raw[(p) & (MAX_XBOW_PORTS - 1)]
+
+} xbow_t;
+
+#define XB_FLAGS_EXISTS 0x1 /* device exists */
+#define XB_FLAGS_MASTER 0x2
+#define XB_FLAGS_SLAVE 0x0
+#define XB_FLAGS_GBR 0x4
+#define XB_FLAGS_16BIT 0x8
+#define XB_FLAGS_8BIT 0x0
+
+/* is widget port number valid? (based on version 7.0 of xbow spec) */
+#define XBOW_WIDGET_IS_VALID(wid) ((wid) >= XBOW_PORT_8 && (wid) <= XBOW_PORT_F)
+
+/* whether to use upper or lower arbitration register, given source widget id */
+#define XBOW_ARB_IS_UPPER(wid) ((wid) >= XBOW_PORT_8 && (wid) <= XBOW_PORT_B)
+#define XBOW_ARB_IS_LOWER(wid) ((wid) >= XBOW_PORT_C && (wid) <= XBOW_PORT_F)
+
+/* offset of arbitration register, given source widget id */
+#define XBOW_ARB_OFF(wid) (XBOW_ARB_IS_UPPER(wid) ? 0x1c : 0x24)
+
+#define XBOW_WID_ID WIDGET_ID
+#define XBOW_WID_STAT WIDGET_STATUS
+#define XBOW_WID_ERR_UPPER WIDGET_ERR_UPPER_ADDR
+#define XBOW_WID_ERR_LOWER WIDGET_ERR_LOWER_ADDR
+#define XBOW_WID_CONTROL WIDGET_CONTROL
+#define XBOW_WID_REQ_TO WIDGET_REQ_TIMEOUT
+#define XBOW_WID_INT_UPPER WIDGET_INTDEST_UPPER_ADDR
+#define XBOW_WID_INT_LOWER WIDGET_INTDEST_LOWER_ADDR
+#define XBOW_WID_ERR_CMDWORD WIDGET_ERR_CMD_WORD
+#define XBOW_WID_LLP WIDGET_LLP_CFG
+#define XBOW_WID_STAT_CLR WIDGET_TFLUSH
+#define XBOW_WID_ARB_RELOAD 0x5c
+#define XBOW_WID_PERF_CTR_A 0x64
+#define XBOW_WID_PERF_CTR_B 0x6c
+#define XBOW_WID_NIC 0x74
+
+/* Xbridge only */
+#define XBOW_W0_RST_FNC 0x00007C
+#define XBOW_L8_RST_FNC 0x000084
+#define XBOW_L9_RST_FNC 0x00008c
+#define XBOW_LA_RST_FNC 0x000094
+#define XBOW_LB_RST_FNC 0x00009c
+#define XBOW_LC_RST_FNC 0x0000a4
+#define XBOW_LD_RST_FNC 0x0000ac
+#define XBOW_LE_RST_FNC 0x0000b4
+#define XBOW_LF_RST_FNC 0x0000bc
+/*
+ * Reset-fence register offset for widget port x (Xbridge only):
+ * ports 8..15 map to XBOW_L8_RST_FNC..XBOW_LF_RST_FNC, port 0 maps to
+ * XBOW_W0_RST_FNC, anything else yields 0.  Fully parenthesized so the
+ * macro is safe when embedded in a larger expression (the original
+ * expansion began with an unparenthesized ternary).
+ */
+#define XBOW_RESET_FENCE(x) (((x) > 7 && (x) < 16) ? \
+ (XBOW_W0_RST_FNC + ((x) - 7) * 8) : \
+ ((x) == 0) ? XBOW_W0_RST_FNC : 0)
+#define XBOW_LOCK 0x0000c4
+#define XBOW_LOCK_CLR 0x0000cc
+/* End of Xbridge only */
+
+/* used only in ide, but defined here within the reserved portion */
+/* of the widget0 address space (before 0xf4) */
+#define XBOW_WID_UNDEF 0xe4
+
+/* xbow link register set base, legal value for x is 0x8..0xf */
+#define XB_LINK_BASE 0x100
+#define XB_LINK_OFFSET 0x40
+#define XB_LINK_REG_BASE(x) (XB_LINK_BASE + ((x) & (MAX_XBOW_PORTS - 1)) * XB_LINK_OFFSET)
+
+#define XB_LINK_IBUF_FLUSH(x) (XB_LINK_REG_BASE(x) + 0x4)
+#define XB_LINK_CTRL(x) (XB_LINK_REG_BASE(x) + 0xc)
+#define XB_LINK_STATUS(x) (XB_LINK_REG_BASE(x) + 0x14)
+#define XB_LINK_ARB_UPPER(x) (XB_LINK_REG_BASE(x) + 0x1c)
+#define XB_LINK_ARB_LOWER(x) (XB_LINK_REG_BASE(x) + 0x24)
+#define XB_LINK_STATUS_CLR(x) (XB_LINK_REG_BASE(x) + 0x2c)
+#define XB_LINK_RESET(x) (XB_LINK_REG_BASE(x) + 0x34)
+#define XB_LINK_AUX_STATUS(x) (XB_LINK_REG_BASE(x) + 0x3c)
+
+/* link_control(x) */
+#define XB_CTRL_LINKALIVE_IE 0x80000000 /* link comes alive */
+ /* reserved: 0x40000000 */
+#define XB_CTRL_PERF_CTR_MODE_MSK 0x30000000 /* perf counter mode */
+#define XB_CTRL_IBUF_LEVEL_MSK 0x0e000000 /* input packet buffer level */
+#define XB_CTRL_8BIT_MODE 0x01000000 /* force link into 8 bit mode */
+#define XB_CTRL_BAD_LLP_PKT 0x00800000 /* force bad LLP packet */
+#define XB_CTRL_WIDGET_CR_MSK 0x007c0000 /* LLP widget credit mask */
+#define XB_CTRL_WIDGET_CR_SHFT 18 /* LLP widget credit shift */
+#define XB_CTRL_ILLEGAL_DST_IE 0x00020000 /* illegal destination */
+#define XB_CTRL_OALLOC_IBUF_IE 0x00010000 /* overallocated input buffer */
+ /* reserved: 0x0000fe00 */
+#define XB_CTRL_BNDWDTH_ALLOC_IE 0x00000100 /* bandwidth alloc */
+#define XB_CTRL_RCV_CNT_OFLOW_IE 0x00000080 /* rcv retry overflow */
+#define XB_CTRL_XMT_CNT_OFLOW_IE 0x00000040 /* xmt retry overflow */
+#define XB_CTRL_XMT_MAX_RTRY_IE 0x00000020 /* max transmit retry */
+#define XB_CTRL_RCV_IE 0x00000010 /* receive */
+#define XB_CTRL_XMT_RTRY_IE 0x00000008 /* transmit retry */
+ /* reserved: 0x00000004 */
+#define XB_CTRL_MAXREQ_TOUT_IE 0x00000002 /* maximum request timeout */
+#define XB_CTRL_SRC_TOUT_IE 0x00000001 /* source timeout */
+
+/* link_status(x) */
+#define XB_STAT_LINKALIVE XB_CTRL_LINKALIVE_IE
+ /* reserved: 0x7ff80000 */
+#define XB_STAT_MULTI_ERR 0x00040000 /* multi error */
+#define XB_STAT_ILLEGAL_DST_ERR XB_CTRL_ILLEGAL_DST_IE
+#define XB_STAT_OALLOC_IBUF_ERR XB_CTRL_OALLOC_IBUF_IE
+#define XB_STAT_BNDWDTH_ALLOC_ID_MSK 0x0000ff00 /* port bitmask */
+#define XB_STAT_RCV_CNT_OFLOW_ERR XB_CTRL_RCV_CNT_OFLOW_IE
+#define XB_STAT_XMT_CNT_OFLOW_ERR XB_CTRL_XMT_CNT_OFLOW_IE
+#define XB_STAT_XMT_MAX_RTRY_ERR XB_CTRL_XMT_MAX_RTRY_IE
+#define XB_STAT_RCV_ERR XB_CTRL_RCV_IE
+#define XB_STAT_XMT_RTRY_ERR XB_CTRL_XMT_RTRY_IE
+ /* reserved: 0x00000004 */
+#define XB_STAT_MAXREQ_TOUT_ERR XB_CTRL_MAXREQ_TOUT_IE
+#define XB_STAT_SRC_TOUT_ERR XB_CTRL_SRC_TOUT_IE
+
+/* link_aux_status(x) */
+#define XB_AUX_STAT_RCV_CNT 0xff000000
+#define XB_AUX_STAT_XMT_CNT 0x00ff0000
+#define XB_AUX_STAT_TOUT_DST 0x0000ff00
+#define XB_AUX_LINKFAIL_RST_BAD 0x00000040
+#define XB_AUX_STAT_PRESENT 0x00000020
+#define XB_AUX_STAT_PORT_WIDTH 0x00000010
+ /* reserved: 0x0000000f */
+
+/*
+ * link_arb_upper/link_arb_lower(x), (reg) should be the link_arb_upper
+ * register if (x) is 0x8..0xb, link_arb_lower if (x) is 0xc..0xf
+ */
+#define XB_ARB_GBR_MSK 0x1f
+#define XB_ARB_RR_MSK 0x7
+#define XB_ARB_GBR_SHFT(x) (((x) & 0x3) * 8)
+#define XB_ARB_RR_SHFT(x) (((x) & 0x3) * 8 + 5)
+#define XB_ARB_GBR_CNT(reg,x) ((reg) >> XB_ARB_GBR_SHFT(x) & XB_ARB_GBR_MSK)
+#define XB_ARB_RR_CNT(reg,x) ((reg) >> XB_ARB_RR_SHFT(x) & XB_ARB_RR_MSK)
+
+/* XBOW_WID_STAT */
+#define XB_WID_STAT_LINK_INTR_SHFT (24)
+#define XB_WID_STAT_LINK_INTR_MASK (0xFF << XB_WID_STAT_LINK_INTR_SHFT)
+#define XB_WID_STAT_LINK_INTR(x) (0x1 << (((x)&7) + XB_WID_STAT_LINK_INTR_SHFT))
+#define XB_WID_STAT_WIDGET0_INTR 0x00800000
+#define XB_WID_STAT_SRCID_MASK 0x000003c0 /* Xbridge only */
+#define XB_WID_STAT_REG_ACC_ERR 0x00000020
+#define XB_WID_STAT_RECV_TOUT 0x00000010 /* Xbridge only */
+#define XB_WID_STAT_ARB_TOUT 0x00000008 /* Xbridge only */
+#define XB_WID_STAT_XTALK_ERR 0x00000004
+#define XB_WID_STAT_DST_TOUT 0x00000002 /* Xbridge only */
+#define XB_WID_STAT_MULTI_ERR 0x00000001
+
+#define XB_WID_STAT_SRCID_SHFT 6
+
+/* XBOW_WID_CONTROL */
+#define XB_WID_CTRL_REG_ACC_IE XB_WID_STAT_REG_ACC_ERR
+#define XB_WID_CTRL_RECV_TOUT XB_WID_STAT_RECV_TOUT
+#define XB_WID_CTRL_ARB_TOUT XB_WID_STAT_ARB_TOUT
+#define XB_WID_CTRL_XTALK_IE XB_WID_STAT_XTALK_ERR
+
+/* XBOW_WID_INT_UPPER */
+/* defined in xwidget.h for WIDGET_INTDEST_UPPER_ADDR */
+
+/* XBOW WIDGET part number, in the ID register */
+#define XBOW_WIDGET_PART_NUM 0x0 /* crossbow */
+#define XXBOW_WIDGET_PART_NUM 0xd000 /* Xbridge */
+#define XBOW_WIDGET_MFGR_NUM 0x0
+#define XXBOW_WIDGET_MFGR_NUM 0x0
+#define PXBOW_WIDGET_PART_NUM 0xd100 /* PIC */
+
+#define XBOW_REV_1_0 0x1 /* xbow rev 1.0 is "1" */
+#define XBOW_REV_1_1 0x2 /* xbow rev 1.1 is "2" */
+#define XBOW_REV_1_2 0x3 /* xbow rev 1.2 is "3" */
+#define XBOW_REV_1_3 0x4 /* xbow rev 1.3 is "4" */
+#define XBOW_REV_2_0 0x5 /* xbow rev 2.0 is "5" */
+
+#define XXBOW_PART_REV_1_0 (XXBOW_WIDGET_PART_NUM << 4 | 0x1 )
+#define XXBOW_PART_REV_2_0 (XXBOW_WIDGET_PART_NUM << 4 | 0x2 )
+
+/* XBOW_WID_ARB_RELOAD */
+#define XBOW_WID_ARB_RELOAD_INT 0x3f /* GBR reload interval */
+
+#define IS_XBRIDGE_XBOW(wid) \
+ (XWIDGET_PART_NUM(wid) == XXBOW_WIDGET_PART_NUM && \
+ XWIDGET_MFG_NUM(wid) == XXBOW_WIDGET_MFGR_NUM)
+
+#define IS_PIC_XBOW(wid) \
+ (XWIDGET_PART_NUM(wid) == PXBOW_WIDGET_PART_NUM && \
+ XWIDGET_MFG_NUM(wid) == XXBOW_WIDGET_MFGR_NUM)
+
+#define XBOW_WAR_ENABLED(pv, widid) ((1 << XWIDGET_REV_NUM(widid)) & pv)
+
+#endif /* _ASM_IA64_SN_XTALK_XBOW_H */
--- /dev/null
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992-1997,2000-2003 Silicon Graphics, Inc. All Rights Reserved.
+ */
+#ifndef _ASM_IA64_SN_XTALK_XWIDGET_H
+#define _ASM_IA64_SN_XTALK_XWIDGET_H
+
+/* WIDGET_ID */
+#define WIDGET_REV_NUM 0xf0000000
+#define WIDGET_PART_NUM 0x0ffff000
+#define WIDGET_MFG_NUM 0x00000ffe
+#define WIDGET_REV_NUM_SHFT 28
+#define WIDGET_PART_NUM_SHFT 12
+#define WIDGET_MFG_NUM_SHFT 1
+
+#define XWIDGET_PART_NUM(widgetid) (((widgetid) & WIDGET_PART_NUM) >> WIDGET_PART_NUM_SHFT)
+#define XWIDGET_REV_NUM(widgetid) (((widgetid) & WIDGET_REV_NUM) >> WIDGET_REV_NUM_SHFT)
+#define XWIDGET_MFG_NUM(widgetid) (((widgetid) & WIDGET_MFG_NUM) >> WIDGET_MFG_NUM_SHFT)
+#define XWIDGET_PART_REV_NUM(widgetid) ((XWIDGET_PART_NUM(widgetid) << 4) | \
+ XWIDGET_REV_NUM(widgetid))
+#define XWIDGET_PART_REV_NUM_REV(partrev) (partrev & 0xf)
+
+/* widget configuration registers */
+/*
+ * Standard crosstalk widget configuration registers.  The hardware
+ * offset of each register is given in its trailing comment; fields are
+ * stored register-first / pad-second because the registers are accessed
+ * through unswizzled space (each 32-bit access has bit 2 of the address
+ * flipped, so the word at struct offset 0x00 is hardware offset 0x04).
+ */
+struct widget_cfg{
+ uint32_t w_id; /* 0x04 */
+ uint32_t w_pad_0; /* 0x00 */
+ uint32_t w_status; /* 0x0c */
+ uint32_t w_pad_1; /* 0x08 */
+ uint32_t w_err_upper_addr; /* 0x14 */
+ uint32_t w_pad_2; /* 0x10 */
+ uint32_t w_err_lower_addr; /* 0x1c */
+ uint32_t w_pad_3; /* 0x18 */
+ uint32_t w_control; /* 0x24 */
+ uint32_t w_pad_4; /* 0x20 */
+ uint32_t w_req_timeout; /* 0x2c */
+ uint32_t w_pad_5; /* 0x28 */
+ uint32_t w_intdest_upper_addr; /* 0x34 */
+ uint32_t w_pad_6; /* 0x30 */
+ uint32_t w_intdest_lower_addr; /* 0x3c */
+ uint32_t w_pad_7; /* 0x38 */
+ uint32_t w_err_cmd_word; /* 0x44 */
+ uint32_t w_pad_8; /* 0x40 */
+ uint32_t w_llp_cfg; /* 0x4c */
+ uint32_t w_pad_9; /* 0x48 */
+ uint32_t w_tflush; /* 0x54 */
+ uint32_t w_pad_10; /* 0x50 */
+};
+
+/*
+ * Crosstalk Widget Hardware Identification, as defined in the Crosstalk spec.
+ */
+struct xwidget_hwid{
+ int mfg_num; /* manufacturer number (WIDGET_MFG_NUM field of w_id) */
+ int rev_num; /* revision number (WIDGET_REV_NUM field) */
+ int part_num; /* part number (WIDGET_PART_NUM field) */
+};
+
+/*
+ * Software state kept for one attached crosstalk widget: its hardware
+ * identification plus back-pointers to the hub that owns it.
+ */
+struct xwidget_info{
+
+ struct xwidget_hwid xwi_hwid; /* Widget Identification */
+ char xwi_masterxid; /* Hub's Widget Port Number */
+ void *xwi_hubinfo; /* Hub's provider private info */
+ uint64_t *xwi_hub_provider; /* prom provider functions */
+ void *xwi_vertex; /* opaque topology vertex -- owned elsewhere */
+};
+
+#endif /* _ASM_IA64_SN_XTALK_XWIDGET_H */
--- /dev/null
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 2000-2004 Silicon Graphics, Inc. All Rights Reserved.
+ */
+
+#include <linux/types.h>
+#include <asm/sn/sn_sal.h>
+#include "ioerror.h"
+#include <asm/sn/addrs.h>
+#include "shubio.h"
+#include <asm/sn/geo.h>
+#include "xtalk/xwidgetdev.h"
+#include "xtalk/hubdev.h"
+#include <asm/sn/bte.h>
+
+/*
+ * Bte error handling is done in two parts. The first captures
+ * any crb related errors. Since there can be multiple crbs per
+ * interface and multiple interfaces active, we need to wait until
+ * all active crbs are completed. This is the first job of the
+ * second part error handler. When all bte related CRBs are cleanly
+ * completed, it resets the interfaces and gets them ready for new
+ * transfers to be queued.
+ */
+
+void bte_error_handler(unsigned long);
+
+/*
+ * Wait until all BTE related CRBs are completed
+ * and then reset the interfaces.
+ */
+/*
+ * Second-part BTE error handler (also runs as the recovery timer
+ * callback): wait until all BTE related CRBs are completed (or timed
+ * out), then reset both BTE interfaces and report the saved error to
+ * any thread waiting on a notification word.
+ *
+ * _nodepda: the node's nodepda_s pointer, cast to unsigned long
+ *           (timer-callback calling convention).
+ */
+void bte_error_handler(unsigned long _nodepda)
+{
+	struct nodepda_s *err_nodepda = (struct nodepda_s *)_nodepda;
+	spinlock_t *recovery_lock = &err_nodepda->bte_recovery_lock;
+	struct timer_list *recovery_timer = &err_nodepda->bte_recovery_timer;
+	nasid_t nasid;
+	int i;
+	int valid_crbs;
+	unsigned long irq_flags;
+	volatile u64 *notify;
+	bte_result_t bh_error;
+	ii_imem_u_t imem;	/* II IMEM Register */
+	ii_icrb0_d_u_t icrbd;	/* II CRB Register D */
+	ii_ibcr_u_t ibcr;
+	ii_icmr_u_t icmr;
+
+	BTE_PRINTK(("bte_error_handler(%p) - %d\n", err_nodepda,
+		    smp_processor_id()));
+
+	spin_lock_irqsave(recovery_lock, irq_flags);
+
+	if ((err_nodepda->bte_if[0].bh_error == BTE_SUCCESS) &&
+	    (err_nodepda->bte_if[1].bh_error == BTE_SUCCESS)) {
+		BTE_PRINTK(("eh:%p:%d Nothing to do.\n", err_nodepda,
+			    smp_processor_id()));
+		spin_unlock_irqrestore(recovery_lock, irq_flags);
+		return;
+	}
+	/*
+	 * Lock all interfaces on this node to prevent new transfers
+	 * from being queued.
+	 */
+	for (i = 0; i < BTES_PER_NODE; i++) {
+		if (err_nodepda->bte_if[i].cleanup_active) {
+			continue;
+		}
+		spin_lock(&err_nodepda->bte_if[i].spinlock);
+		BTE_PRINTK(("eh:%p:%d locked %d\n", err_nodepda,
+			    smp_processor_id(), i));
+		err_nodepda->bte_if[i].cleanup_active = 1;
+	}
+
+	/* Determine information about our hub */
+	nasid = cnodeid_to_nasid(err_nodepda->bte_if[0].bte_cnode);
+
+	/*
+	 * A BTE transfer can use multiple CRBs.  We need to make sure
+	 * that all the BTE CRBs are complete (or timed out) before
+	 * attempting to clean up the error.  Resetting the BTE while
+	 * there are still BTE CRBs active will hang the BTE.
+	 * We should look at all the CRBs to see if they are allocated
+	 * to the BTE and see if they are still active.  When none
+	 * are active, we can continue with the cleanup.
+	 *
+	 * We also want to make sure that the local NI port is up.
+	 * When a router resets the NI port can go down, while it
+	 * goes through the LLP handshake, but then comes back up.
+	 */
+	icmr.ii_icmr_regval = REMOTE_HUB_L(nasid, IIO_ICMR);
+	if (icmr.ii_icmr_fld_s.i_crb_mark != 0) {
+		/*
+		 * There are errors which still need to be cleaned up by
+		 * hubiio_crb_error_handler.  Re-arm the timer; mod_timer()
+		 * takes an absolute expiry, so it must be jiffies-relative
+		 * (the original passed bare HZ * 5, a time near boot).
+		 */
+		mod_timer(recovery_timer, jiffies + (HZ * 5));
+		BTE_PRINTK(("eh:%p:%d Marked Giving up\n", err_nodepda,
+			    smp_processor_id()));
+		spin_unlock_irqrestore(recovery_lock, irq_flags);
+		return;
+	}
+	if (icmr.ii_icmr_fld_s.i_crb_vld != 0) {
+
+		valid_crbs = icmr.ii_icmr_fld_s.i_crb_vld;
+
+		for (i = 0; i < IIO_NUM_CRBS; i++) {
+			if (!((1 << i) & valid_crbs)) {
+				/* This crb was not marked as valid, ignore */
+				continue;
+			}
+			icrbd.ii_icrb0_d_regval =
+			    REMOTE_HUB_L(nasid, IIO_ICRB_D(i));
+			if (icrbd.d_bteop) {
+				/* A BTE CRB is still active -- retry later */
+				mod_timer(recovery_timer, jiffies + (HZ * 5));
+				BTE_PRINTK(("eh:%p:%d Valid %d, Giving up\n",
+					    err_nodepda, smp_processor_id(),
+					    i));
+				spin_unlock_irqrestore(recovery_lock,
+						       irq_flags);
+				return;
+			}
+		}
+	}
+
+	BTE_PRINTK(("eh:%p:%d Cleaning up\n", err_nodepda, smp_processor_id()));
+	/* Reenable both bte interfaces */
+	imem.ii_imem_regval = REMOTE_HUB_L(nasid, IIO_IMEM);
+	imem.ii_imem_fld_s.i_b0_esd = imem.ii_imem_fld_s.i_b1_esd = 1;
+	REMOTE_HUB_S(nasid, IIO_IMEM, imem.ii_imem_regval);
+
+	/* Reinitialize both BTE state machines. */
+	ibcr.ii_ibcr_regval = REMOTE_HUB_L(nasid, IIO_IBCR);
+	ibcr.ii_ibcr_fld_s.i_soft_reset = 1;
+	REMOTE_HUB_S(nasid, IIO_IBCR, ibcr.ii_ibcr_regval);
+
+	for (i = 0; i < BTES_PER_NODE; i++) {
+		bh_error = err_nodepda->bte_if[i].bh_error;
+		if (bh_error != BTE_SUCCESS) {
+			/* There is an error which needs to be notified */
+			notify = err_nodepda->bte_if[i].most_rcnt_na;
+			BTE_PRINTK(("cnode %d bte %d error=0x%lx\n",
+				    err_nodepda->bte_if[i].bte_cnode,
+				    err_nodepda->bte_if[i].bte_num,
+				    IBLS_ERROR | (u64) bh_error));
+			*notify = IBLS_ERROR | bh_error;
+			err_nodepda->bte_if[i].bh_error = BTE_SUCCESS;
+		}
+
+		err_nodepda->bte_if[i].cleanup_active = 0;
+		BTE_PRINTK(("eh:%p:%d Unlocked %d\n", err_nodepda,
+			    smp_processor_id(), i));
+		/*
+		 * Release the same lock that was taken above.  The
+		 * original unlocked &pda->cpu_bte_if[i]->spinlock, which
+		 * is this CPU's BTE -- not necessarily the interface
+		 * locked on err_nodepda.
+		 */
+		spin_unlock(&err_nodepda->bte_if[i].spinlock);
+	}
+
+	del_timer(recovery_timer);
+
+	spin_unlock_irqrestore(recovery_lock, irq_flags);
+}
+
+/*
+ * First part error handler. This is called whenever any error CRB interrupt
+ * is generated by the II.
+ */
+/*
+ * First-part error handler, invoked for every error CRB interrupt
+ * raised by the II.  Records the failure on the affected BTE interface,
+ * then kicks the second-part handler to drain CRBs and reset the BTEs.
+ *
+ * cnode/btenum select the interface; ioe->ie_errortype carries the HW
+ * error type.  crbnum and bteop are accepted but not used here.
+ */
+void
+bte_crb_error_handler(cnodeid_t cnode, int btenum,
+ int crbnum, ioerror_t * ioe, int bteop)
+{
+ struct bteinfo_s *bte;
+
+
+ bte = &(NODEPDA(cnode)->bte_if[btenum]);
+
+ /*
+ * The caller has already figured out the error type, we save that
+ * in the bte handle structure for the thread exercising the
+ * interface to consume.
+ */
+ bte->bh_error = ioe->ie_errortype + BTEFAIL_OFFSET;
+ bte->bte_error_count++;
+
+ BTE_PRINTK(("Got an error on cnode %d bte %d: HW error type 0x%x\n",
+ bte->bte_cnode, bte->bte_num, ioe->ie_errortype));
+ bte_error_handler((unsigned long) NODEPDA(cnode));
+}
+
--- /dev/null
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000-2004 Silicon Graphics, Inc. All rights reserved.
+ */
+
+#include <linux/types.h>
+#include <linux/ctype.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <asm/sn/types.h>
+#include <asm/sn/module.h>
+#include <asm/sn/l1.h>
+
+char brick_types[MAX_BRICK_TYPES + 1] = "cri.xdpn%#=vo^kjbf890123456789...";
+/*
+ * Format a module id for printing.
+ *
+ * There are three possible formats:
+ *
+ * MODULE_FORMAT_BRIEF is the brief 6-character format, including
+ * the actual brick-type as recorded in the
+ * moduleid_t, eg. 002c15 for a C-brick, or
+ * 101#17 for a PX-brick.
+ *
+ * MODULE_FORMAT_LONG is the hwgraph format, eg. rack/002/bay/15
+ * or rack/101/bay/17 (note that the brick
+ * type does not appear in this format).
+ *
+ * MODULE_FORMAT_LCD is like MODULE_FORMAT_BRIEF, except that it
+ * ensures that the module id provided appears
+ * exactly as it would on the LCD display of
+ * the corresponding brick, eg. still 002c15
+ * for a C-brick, but 101p17 for a PX-brick.
+ *
+ * maule (9/13/04): Removed top-level check for (fmt == MODULE_FORMAT_LCD)
+ * making MODULE_FORMAT_LCD equivalent to MODULE_FORMAT_BRIEF. It was
+ * decided that all callers should assume the returned string should be what
+ * is displayed on the brick L1 LCD.
+ */
+/*
+ * Format moduleid_t m into buffer per fmt (see the format discussion
+ * above).  MODULE_FORMAT_BRIEF and MODULE_FORMAT_LCD produce the
+ * 6-character brick form (e.g. 002c15); MODULE_FORMAT_LONG produces
+ * the hwgraph form (e.g. rack/002/bay/15).
+ *
+ * buffer: caller-supplied, NUL-terminated on return.  NOTE(review):
+ * the writes are unbounded (sprintf); the caller must size buffer for
+ * the longest form -- "rack/NNN/bay/NN" plus NUL.
+ */
+void
+format_module_id(char *buffer, moduleid_t m, int fmt)
+{
+ int rack, position;
+ unsigned char brickchar;
+
+ rack = MODULE_GET_RACK(m);
+ brickchar = MODULE_GET_BTCHAR(m);
+
+ /* Be sure we use the same brick type character as displayed
+ * on the brick's LCD
+ */
+ switch (brickchar)
+ {
+ case L1_BRICKTYPE_GA:
+ case L1_BRICKTYPE_OPUS_TIO:
+ brickchar = L1_BRICKTYPE_C;
+ break;
+
+ case L1_BRICKTYPE_PX:
+ case L1_BRICKTYPE_PE:
+ case L1_BRICKTYPE_PA:
+ case L1_BRICKTYPE_SA: /* we can move this to the "I's" later
+ * if that makes more sense
+ */
+ brickchar = L1_BRICKTYPE_P;
+ break;
+
+ case L1_BRICKTYPE_IX:
+ case L1_BRICKTYPE_IA:
+
+ brickchar = L1_BRICKTYPE_I;
+ break;
+ }
+ /* any other brick type character passes through unchanged */
+
+ position = MODULE_GET_BPOS(m);
+
+ if ((fmt == MODULE_FORMAT_BRIEF) || (fmt == MODULE_FORMAT_LCD)) {
+ /* Brief module number format, eg. 002c15 */
+
+ /* Decompress the rack number */
+ *buffer++ = '0' + RACK_GET_CLASS(rack);
+ *buffer++ = '0' + RACK_GET_GROUP(rack);
+ *buffer++ = '0' + RACK_GET_NUM(rack);
+
+ /* Add the brick type */
+ *buffer++ = brickchar;
+ }
+ else if (fmt == MODULE_FORMAT_LONG) {
+ /* Fuller hwgraph format, eg. rack/002/bay/15 */
+
+ strcpy(buffer, "rack" "/"); buffer += strlen(buffer);
+
+ *buffer++ = '0' + RACK_GET_CLASS(rack);
+ *buffer++ = '0' + RACK_GET_GROUP(rack);
+ *buffer++ = '0' + RACK_GET_NUM(rack);
+
+ strcpy(buffer, "/" "bay" "/"); buffer += strlen(buffer);
+ }
+
+ /* Add the bay position, using at least two digits */
+ if (position < 10)
+ *buffer++ = '0';
+ sprintf(buffer, "%d", position);
+
+}
--- /dev/null
+#
+# This file is subject to the terms and conditions of the GNU General Public
+# License. See the file "COPYING" in the main directory of this archive
+# for more details.
+#
+# Copyright (C) 2000-2004 Silicon Graphics, Inc. All Rights Reserved.
+#
+# Makefile for the sn pci general routines.
+
+obj-y := pci_dma.o pcibr/
--- /dev/null
+#
+# This file is subject to the terms and conditions of the GNU General Public
+# License. See the file "COPYING" in the main directory of this archive
+# for more details.
+#
+# Copyright (C) 2002-2004 Silicon Graphics, Inc. All Rights Reserved.
+#
+# Makefile for the sn2 io routines.
+
+obj-y += pcibr_dma.o pcibr_reg.o \
+ pcibr_ate.o pcibr_provider.o
--- /dev/null
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2004 Silicon Graphics, Inc. All rights reserved.
+ */
+
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include "pci/pcibus_provider_defs.h"
+#include "pci/pcidev.h"
+#include "pci/tiocp.h"
+#include "pci/pic.h"
+#include "pci/pcibr_provider.h"
+
+/*
+ * A PCI bridge ASIC's register space viewed as either a TIOCP or a PIC
+ * layout; which member is live is selected by pbi_bridge_type.
+ */
+union br_ptr {
+ struct tiocp tio;
+ struct pic pic;
+};
+
+/*
+ * Control Register Access -- Read/Write 0000_0020
+ */
+/*
+ * Clear "bits" in the bridge control register.  Silently does nothing
+ * when pcibus_info is NULL; the base pointer is only read after the
+ * NULL check (the original dereferenced pcibus_info in the initializer,
+ * before checking it).
+ */
+void pcireg_control_bit_clr(struct pcibus_info *pcibus_info, uint64_t bits)
+{
+	union br_ptr *ptr;
+
+	if (pcibus_info) {
+		ptr = (union br_ptr *)pcibus_info->pbi_buscommon.bs_base;
+		switch (pcibus_info->pbi_bridge_type) {
+		case PCIBR_BRIDGETYPE_TIOCP:
+			ptr->tio.cp_control &= ~bits;
+			break;
+		case PCIBR_BRIDGETYPE_PIC:
+			ptr->pic.p_wid_control &= ~bits;
+			break;
+		default:
+			panic
+			    ("pcireg_control_bit_clr: unknown bridgetype bridge 0x%p",
+			     (void *)ptr);
+		}
+	}
+}
+
+/*
+ * Set "bits" in the bridge control register.  No-op when pcibus_info is
+ * NULL; the base pointer is read only after the NULL check.
+ */
+void pcireg_control_bit_set(struct pcibus_info *pcibus_info, uint64_t bits)
+{
+	union br_ptr *ptr;
+
+	if (pcibus_info) {
+		ptr = (union br_ptr *)pcibus_info->pbi_buscommon.bs_base;
+		switch (pcibus_info->pbi_bridge_type) {
+		case PCIBR_BRIDGETYPE_TIOCP:
+			ptr->tio.cp_control |= bits;
+			break;
+		case PCIBR_BRIDGETYPE_PIC:
+			ptr->pic.p_wid_control |= bits;
+			break;
+		default:
+			panic
+			    ("pcireg_control_bit_set: unknown bridgetype bridge 0x%p",
+			     (void *)ptr);
+		}
+	}
+}
+
+/*
+ * PCI/PCIX Target Flush Register Access -- Read Only 0000_0050
+ */
+/*
+ * Read the target-flush register.  The hardware guarantees the read
+ * returns zero once all in-flight writes are flushed, so anything else
+ * is fatal.  Returns 0 (trivially "flushed") when pcibus_info is NULL;
+ * the base pointer is read only after the NULL check.
+ */
+uint64_t pcireg_tflush_get(struct pcibus_info *pcibus_info)
+{
+	union br_ptr *ptr;
+	uint64_t ret = 0;
+
+	if (pcibus_info) {
+		ptr = (union br_ptr *)pcibus_info->pbi_buscommon.bs_base;
+		switch (pcibus_info->pbi_bridge_type) {
+		case PCIBR_BRIDGETYPE_TIOCP:
+			ret = ptr->tio.cp_tflush;
+			break;
+		case PCIBR_BRIDGETYPE_PIC:
+			ret = ptr->pic.p_wid_tflush;
+			break;
+		default:
+			panic
+			    ("pcireg_tflush_get: unknown bridgetype bridge 0x%p",
+			     (void *)ptr);
+		}
+	}
+
+	/* Read of the Target Flush should always return zero */
+	if (ret != 0)
+		panic("pcireg_tflush_get:Target Flush failed\n");
+
+	return ret;
+}
+
+/*
+ * Interrupt Status Register Access -- Read Only 0000_0100
+ */
+/*
+ * Read the bridge interrupt status register.  Returns 0 when
+ * pcibus_info is NULL; the base pointer is read only after the NULL
+ * check.
+ */
+uint64_t pcireg_intr_status_get(struct pcibus_info * pcibus_info)
+{
+	union br_ptr *ptr;
+	uint64_t ret = 0;
+
+	if (pcibus_info) {
+		ptr = (union br_ptr *)pcibus_info->pbi_buscommon.bs_base;
+		switch (pcibus_info->pbi_bridge_type) {
+		case PCIBR_BRIDGETYPE_TIOCP:
+			ret = ptr->tio.cp_int_status;
+			break;
+		case PCIBR_BRIDGETYPE_PIC:
+			ret = ptr->pic.p_int_status;
+			break;
+		default:
+			panic
+			    ("pcireg_intr_status_get: unknown bridgetype bridge 0x%p",
+			     (void *)ptr);
+		}
+	}
+	return ret;
+}
+
+/*
+ * Interrupt Enable Register Access -- Read/Write 0000_0108
+ */
+void pcireg_intr_enable_bit_clr(struct pcibus_info *pcibus_info, uint64_t bits)
+{
+ union br_ptr *ptr = (union br_ptr *)pcibus_info->pbi_buscommon.bs_base;
+
+ if (pcibus_info) {
+ switch (pcibus_info->pbi_bridge_type) {
+ case PCIBR_BRIDGETYPE_TIOCP:
+ ptr->tio.cp_int_enable &= ~bits;
+ break;
+ case PCIBR_BRIDGETYPE_PIC:
+ ptr->pic.p_int_enable &= ~bits;
+ break;
+ default:
+ panic
+ ("pcireg_intr_enable_bit_clr: unknown bridgetype bridge 0x%p",
+ (void *)ptr);
+ }
+ }
+}
+
+void pcireg_intr_enable_bit_set(struct pcibus_info *pcibus_info, uint64_t bits)
+{
+ union br_ptr *ptr = (union br_ptr *)pcibus_info->pbi_buscommon.bs_base;
+
+ if (pcibus_info) {
+ switch (pcibus_info->pbi_bridge_type) {
+ case PCIBR_BRIDGETYPE_TIOCP:
+ ptr->tio.cp_int_enable |= bits;
+ break;
+ case PCIBR_BRIDGETYPE_PIC:
+ ptr->pic.p_int_enable |= bits;
+ break;
+ default:
+ panic
+ ("pcireg_intr_enable_bit_set: unknown bridgetype bridge 0x%p",
+ (void *)ptr);
+ }
+ }
+}
+
+/*
+ * Intr Host Address Register (int_addr) -- Read/Write 0000_0130 - 0000_0168
+ */
+void pcireg_intr_addr_addr_set(struct pcibus_info *pcibus_info, int int_n,
+ uint64_t addr)
+{
+ union br_ptr *ptr = (union br_ptr *)pcibus_info->pbi_buscommon.bs_base;
+
+ if (pcibus_info) {
+ switch (pcibus_info->pbi_bridge_type) {
+ case PCIBR_BRIDGETYPE_TIOCP:
+ ptr->tio.cp_int_addr[int_n] &= ~TIOCP_HOST_INTR_ADDR;
+ ptr->tio.cp_int_addr[int_n] |=
+ (addr & TIOCP_HOST_INTR_ADDR);
+ break;
+ case PCIBR_BRIDGETYPE_PIC:
+ ptr->pic.p_int_addr[int_n] &= ~PIC_HOST_INTR_ADDR;
+ ptr->pic.p_int_addr[int_n] |=
+ (addr & PIC_HOST_INTR_ADDR);
+ break;
+ default:
+ panic
+ ("pcireg_intr_addr_addr_get: unknown bridgetype bridge 0x%p",
+ (void *)ptr);
+ }
+ }
+}
+
+/*
+ * Force Interrupt Register Access -- Write Only 0000_01C0 - 0000_01F8
+ */
+void pcireg_force_intr_set(struct pcibus_info *pcibus_info, int int_n)
+{
+ union br_ptr *ptr = (union br_ptr *)pcibus_info->pbi_buscommon.bs_base;
+
+ if (pcibus_info) {
+ switch (pcibus_info->pbi_bridge_type) {
+ case PCIBR_BRIDGETYPE_TIOCP:
+ ptr->tio.cp_force_pin[int_n] = 1;
+ break;
+ case PCIBR_BRIDGETYPE_PIC:
+ ptr->pic.p_force_pin[int_n] = 1;
+ break;
+ default:
+ panic
+ ("pcireg_force_intr_set: unknown bridgetype bridge 0x%p",
+ (void *)ptr);
+ }
+ }
+}
+
+/*
+ * Device(x) Write Buffer Flush Reg Access -- Read Only 0000_0240 - 0000_0258
+ */
+uint64_t pcireg_wrb_flush_get(struct pcibus_info *pcibus_info, int device)
+{
+ union br_ptr *ptr = (union br_ptr *)pcibus_info->pbi_buscommon.bs_base;
+ uint64_t ret = 0;
+
+ if (pcibus_info) {
+ switch (pcibus_info->pbi_bridge_type) {
+ case PCIBR_BRIDGETYPE_TIOCP:
+ ret = ptr->tio.cp_wr_req_buf[device];
+ break;
+ case PCIBR_BRIDGETYPE_PIC:
+ ret = ptr->pic.p_wr_req_buf[device];
+ break;
+ default:
+ panic("pcireg_wrb_flush_get: unknown bridgetype bridge 0x%p", (void *)ptr);
+ }
+
+ }
+ /* Read of the Write Buffer Flush should always return zero */
+ return ret;
+}
+
+void pcireg_int_ate_set(struct pcibus_info *pcibus_info, int ate_index,
+ uint64_t val)
+{
+ union br_ptr *ptr = (union br_ptr *)pcibus_info->pbi_buscommon.bs_base;
+
+ if (pcibus_info) {
+ switch (pcibus_info->pbi_bridge_type) {
+ case PCIBR_BRIDGETYPE_TIOCP:
+ ptr->tio.cp_int_ate_ram[ate_index] = (uint64_t) val;
+ break;
+ case PCIBR_BRIDGETYPE_PIC:
+ ptr->pic.p_int_ate_ram[ate_index] = (uint64_t) val;
+ break;
+ default:
+ panic
+ ("pcireg_int_ate_set: unknown bridgetype bridge 0x%p",
+ (void *)ptr);
+ }
+ }
+}
+
+uint64_t *pcireg_int_ate_addr(struct pcibus_info *pcibus_info, int ate_index)
+{
+ union br_ptr *ptr = (union br_ptr *)pcibus_info->pbi_buscommon.bs_base;
+ uint64_t *ret = (uint64_t *) 0;
+
+ if (pcibus_info) {
+ switch (pcibus_info->pbi_bridge_type) {
+ case PCIBR_BRIDGETYPE_TIOCP:
+ ret =
+ (uint64_t *) & (ptr->tio.cp_int_ate_ram[ate_index]);
+ break;
+ case PCIBR_BRIDGETYPE_PIC:
+ ret =
+ (uint64_t *) & (ptr->pic.p_int_ate_ram[ate_index]);
+ break;
+ default:
+ panic
+ ("pcireg_int_ate_addr: unknown bridgetype bridge 0x%p",
+ (void *)ptr);
+ }
+ }
+ return ret;
+}
--- /dev/null
+/*
+ * ahci.c - AHCI SATA support
+ *
+ * Copyright 2004 Red Hat, Inc.
+ *
+ * The contents of this file are subject to the Open
+ * Software License version 1.1 that can be found at
+ * http://www.opensource.org/licenses/osl-1.1.txt and is included herein
+ * by reference.
+ *
+ * Alternatively, the contents of this file may be used under the terms
+ * of the GNU General Public License version 2 (the "GPL") as distributed
+ * in the kernel source COPYING file, in which case the provisions of
+ * the GPL are applicable instead of the above. If you wish to allow
+ * the use of your version of this file only under the terms of the
+ * GPL and not to allow others to use your version of this file under
+ * the OSL, indicate your decision by deleting the provisions above and
+ * replace them with the notice and other provisions required by the GPL.
+ * If you do not delete the provisions above, a recipient may use your
+ * version of this file under either the OSL or the GPL.
+ *
+ * Version 1.0 of the AHCI specification:
+ * http://www.intel.com/technology/serialata/pdf/rev1_0.pdf
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include "scsi.h"
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+#include <asm/io.h>
+
+#define DRV_NAME "ahci"
+#define DRV_VERSION "1.00"
+
+
/*
 * Driver tunables, global HBA register offsets, per-port register
 * offsets (relative to each port's 0x80-byte register bank), and
 * register bit definitions, following the AHCI 1.0 layout.
 */
enum {
	AHCI_PCI_BAR = 5,	/* AHCI registers are mapped via PCI BAR 5 (ABAR) */
	AHCI_MAX_SG = 168, /* hardware max is 64K */
	AHCI_DMA_BOUNDARY = 0xffffffff,
	AHCI_USE_CLUSTERING = 0,
	AHCI_CMD_SLOT_SZ = 32 * 32,	/* 32 command slots, 32 bytes each */
	AHCI_RX_FIS_SZ = 256,
	AHCI_CMD_TBL_HDR = 0x80,	/* command-FIS header size within a table */
	AHCI_CMD_TBL_SZ = AHCI_CMD_TBL_HDR + (AHCI_MAX_SG * 16),
	AHCI_PORT_PRIV_DMA_SZ = AHCI_CMD_SLOT_SZ + AHCI_CMD_TBL_SZ +
	AHCI_RX_FIS_SZ,	/* total per-port DMA area (see ahci_port_start) */
	AHCI_IRQ_ON_SG = (1 << 31),
	AHCI_CMD_ATAPI = (1 << 5),
	AHCI_CMD_WRITE = (1 << 6),

	RX_FIS_D2H_REG = 0x40, /* offset of D2H Register FIS data */

	board_ahci = 0,	/* index into ahci_port_info[] */

	/* global controller registers */
	HOST_CAP = 0x00, /* host capabilities */
	HOST_CTL = 0x04, /* global host control */
	HOST_IRQ_STAT = 0x08, /* interrupt status */
	HOST_PORTS_IMPL = 0x0c, /* bitmap of implemented ports */
	HOST_VERSION = 0x10, /* AHCI spec. version compliancy */

	/* HOST_CTL bits */
	HOST_RESET = (1 << 0), /* reset controller; self-clear */
	HOST_IRQ_EN = (1 << 1), /* global IRQ enable */
	HOST_AHCI_EN = (1 << 31), /* AHCI enabled */

	/* HOST_CAP bits */
	HOST_CAP_64 = (1 << 31), /* PCI DAC (64-bit DMA) support */

	/* registers for each SATA port */
	PORT_LST_ADDR = 0x00, /* command list DMA addr */
	PORT_LST_ADDR_HI = 0x04, /* command list DMA addr hi */
	PORT_FIS_ADDR = 0x08, /* FIS rx buf addr */
	PORT_FIS_ADDR_HI = 0x0c, /* FIS rx buf addr hi */
	PORT_IRQ_STAT = 0x10, /* interrupt status */
	PORT_IRQ_MASK = 0x14, /* interrupt enable/disable mask */
	PORT_CMD = 0x18, /* port command */
	PORT_TFDATA = 0x20, /* taskfile data */
	PORT_SIG = 0x24, /* device TF signature */
	PORT_CMD_ISSUE = 0x38, /* command issue */
	PORT_SCR = 0x28, /* SATA phy register block */
	PORT_SCR_STAT = 0x28, /* SATA phy register: SStatus */
	PORT_SCR_CTL = 0x2c, /* SATA phy register: SControl */
	PORT_SCR_ERR = 0x30, /* SATA phy register: SError */
	PORT_SCR_ACT = 0x34, /* SATA phy register: SActive */

	/* PORT_IRQ_{STAT,MASK} bits */
	PORT_IRQ_COLD_PRES = (1 << 31), /* cold presence detect */
	PORT_IRQ_TF_ERR = (1 << 30), /* task file error */
	PORT_IRQ_HBUS_ERR = (1 << 29), /* host bus fatal error */
	PORT_IRQ_HBUS_DATA_ERR = (1 << 28), /* host bus data error */
	PORT_IRQ_IF_ERR = (1 << 27), /* interface fatal error */
	PORT_IRQ_IF_NONFATAL = (1 << 26), /* interface non-fatal error */
	PORT_IRQ_OVERFLOW = (1 << 24), /* xfer exhausted available S/G */
	PORT_IRQ_BAD_PMP = (1 << 23), /* incorrect port multiplier */

	PORT_IRQ_PHYRDY = (1 << 22), /* PhyRdy changed */
	PORT_IRQ_DEV_ILCK = (1 << 7), /* device interlock */
	PORT_IRQ_CONNECT = (1 << 6), /* port connect change status */
	PORT_IRQ_SG_DONE = (1 << 5), /* descriptor processed */
	PORT_IRQ_UNK_FIS = (1 << 4), /* unknown FIS rx'd */
	PORT_IRQ_SDB_FIS = (1 << 3), /* Set Device Bits FIS rx'd */
	PORT_IRQ_DMAS_FIS = (1 << 2), /* DMA Setup FIS rx'd */
	PORT_IRQ_PIOS_FIS = (1 << 1), /* PIO Setup FIS rx'd */
	PORT_IRQ_D2H_REG_FIS = (1 << 0), /* D2H Register FIS rx'd */

	/* errors that abort the current command and reset the port */
	PORT_IRQ_FATAL = PORT_IRQ_TF_ERR |
	PORT_IRQ_HBUS_ERR |
	PORT_IRQ_HBUS_DATA_ERR |
	PORT_IRQ_IF_ERR,
	/* default per-port interrupt mask programmed in ahci_host_init() */
	DEF_PORT_IRQ = PORT_IRQ_FATAL | PORT_IRQ_PHYRDY |
	PORT_IRQ_CONNECT | PORT_IRQ_SG_DONE |
	PORT_IRQ_UNK_FIS | PORT_IRQ_SDB_FIS |
	PORT_IRQ_DMAS_FIS | PORT_IRQ_PIOS_FIS |
	PORT_IRQ_D2H_REG_FIS,

	/* PORT_CMD bits */
	PORT_CMD_LIST_ON = (1 << 15), /* cmd list DMA engine running */
	PORT_CMD_FIS_ON = (1 << 14), /* FIS DMA engine running */
	PORT_CMD_FIS_RX = (1 << 4), /* Enable FIS receive DMA engine */
	PORT_CMD_POWER_ON = (1 << 2), /* Power up device */
	PORT_CMD_SPIN_UP = (1 << 1), /* Spin up device */
	PORT_CMD_START = (1 << 0), /* Enable port DMA engine */

	PORT_CMD_ICC_ACTIVE = (0x1 << 28), /* Put i/f in active state */
	PORT_CMD_ICC_PARTIAL = (0x2 << 28), /* Put i/f in partial state */
	PORT_CMD_ICC_SLUMBER = (0x6 << 28), /* Put i/f in slumber state */
};
+
/*
 * One 32-byte command-list slot entry.  Hardware-defined layout
 * (little-endian dwords) -- do not reorder or repack fields.
 */
struct ahci_cmd_hdr {
	u32 opts;		/* flags + PRD count + command FIS length */
	u32 status;		/* presumably written back by the HBA on
				 * completion -- verify against AHCI spec */
	u32 tbl_addr;		/* command table DMA address, low 32 bits */
	u32 tbl_addr_hi;	/* command table DMA address, high 32 bits */
	u32 reserved[4];
};
+
/*
 * One scatter/gather (PRD) entry in the command table.
 * Hardware-defined layout; filled in by ahci_fill_sg().
 */
struct ahci_sg {
	u32 addr;	/* data buffer DMA address, low 32 bits */
	u32 addr_hi;	/* data buffer DMA address, high 32 bits */
	u32 reserved;
	u32 flags_size;	/* byte count minus one (see ahci_fill_sg) */
};
+
/* Controller-wide private data, allocated in ahci_init_one() and
 * freed in ahci_host_stop(). */
struct ahci_host_priv {
	unsigned long flags;
	u32 cap; /* cache of HOST_CAP register */
	u32 port_map; /* cache of HOST_PORTS_IMPL reg */
};
+
/*
 * Per-port private data.  All three regions live in one DMA-coherent
 * allocation of AHCI_PORT_PRIV_DMA_SZ bytes; see ahci_port_start()
 * for the layout (command slots, then RX FIS, then command table).
 */
struct ahci_port_priv {
	struct ahci_cmd_hdr *cmd_slot;	/* 32-entry command list */
	dma_addr_t cmd_slot_dma;
	void *cmd_tbl;			/* single command table (FIS + PRDs) */
	dma_addr_t cmd_tbl_dma;
	struct ahci_sg *cmd_tbl_sg;	/* PRD array inside cmd_tbl */
	void *rx_fis;			/* received-FIS area */
	dma_addr_t rx_fis_dma;
};
+
+static u32 ahci_scr_read (struct ata_port *ap, unsigned int sc_reg);
+static void ahci_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
+static int ahci_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
+static int ahci_qc_issue(struct ata_queued_cmd *qc);
+static irqreturn_t ahci_interrupt (int irq, void *dev_instance, struct pt_regs *regs);
+static void ahci_phy_reset(struct ata_port *ap);
+static void ahci_irq_clear(struct ata_port *ap);
+static void ahci_eng_timeout(struct ata_port *ap);
+static int ahci_port_start(struct ata_port *ap);
+static void ahci_port_stop(struct ata_port *ap);
+static void ahci_host_stop(struct ata_host_set *host_set);
+static void ahci_qc_prep(struct ata_queued_cmd *qc);
+static u8 ahci_check_status(struct ata_port *ap);
+static inline int ahci_host_intr(struct ata_port *ap, struct ata_queued_cmd *qc);
+
/* SCSI host template: mostly libata's generic entry points, with
 * AHCI-specific S/G and DMA limits from the enums above. */
static Scsi_Host_Template ahci_sht = {
	.module = THIS_MODULE,
	.name = DRV_NAME,
	.ioctl = ata_scsi_ioctl,
	.queuecommand = ata_scsi_queuecmd,
	.eh_strategy_handler = ata_scsi_error,
	.can_queue = ATA_DEF_QUEUE,
	.this_id = ATA_SHT_THIS_ID,
	.sg_tablesize = AHCI_MAX_SG,
	.max_sectors = ATA_MAX_SECTORS,
	.cmd_per_lun = ATA_SHT_CMD_PER_LUN,
	.emulated = ATA_SHT_EMULATED,
	.use_clustering = AHCI_USE_CLUSTERING,
	.proc_name = DRV_NAME,
	.dma_boundary = AHCI_DMA_BOUNDARY,
	.slave_configure = ata_scsi_slave_config,
	.bios_param = ata_std_bios_param,
};
+
/* libata low-level operations implemented by this driver. */
static struct ata_port_operations ahci_ops = {
	.port_disable = ata_port_disable,

	.check_status = ahci_check_status,
	.dev_select = ata_noop_dev_select,	/* no device selection in AHCI */

	.phy_reset = ahci_phy_reset,

	.qc_prep = ahci_qc_prep,
	.qc_issue = ahci_qc_issue,

	.eng_timeout = ahci_eng_timeout,

	.irq_handler = ahci_interrupt,
	.irq_clear = ahci_irq_clear,

	.scr_read = ahci_scr_read,
	.scr_write = ahci_scr_write,

	.port_start = ahci_port_start,
	.port_stop = ahci_port_stop,
	.host_stop = ahci_host_stop,
};
+
/* Board-type table, indexed by the board_* enum (driver_data in
 * ahci_pci_tbl). */
static struct ata_port_info ahci_port_info[] = {
	/* board_ahci */
	{
		.sht = &ahci_sht,
		.host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
			      ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO |
			      ATA_FLAG_PIO_DMA,
		.pio_mask = 0x03, /* pio3-4 */
		.udma_mask = 0x7f, /* udma0-6 ; FIXME */
		.port_ops = &ahci_ops,
	},
};
+
/* Supported PCI IDs (Intel ICH6 AHCI variants); driver_data selects
 * the ahci_port_info[] entry. */
static struct pci_device_id ahci_pci_tbl[] = {
	{ PCI_VENDOR_ID_INTEL, 0x2652, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
	  board_ahci },
	{ PCI_VENDOR_ID_INTEL, 0x2653, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
	  board_ahci },
	{ } /* terminate list */
};
+
+
/* PCI driver glue; removal is handled generically by libata. */
static struct pci_driver ahci_pci_driver = {
	.name = DRV_NAME,
	.id_table = ahci_pci_tbl,
	.probe = ahci_init_one,
	.remove = ata_pci_remove_one,
};
+
+
static inline unsigned long ahci_port_base_ul (unsigned long base, unsigned int port)
{
	/* Per-port register banks start at offset 0x100 above the HBA
	 * base and are 0x80 bytes apart: 0x100, 0x180, 0x200, ... */
	const unsigned long first_bank = 0x100;
	const unsigned long bank_stride = 0x80;

	return base + first_bank + (unsigned long)port * bank_stride;
}
+
static inline void *ahci_port_base (void *base, unsigned int port)
{
	/* Pointer-typed convenience wrapper around ahci_port_base_ul(). */
	unsigned long addr = ahci_port_base_ul((unsigned long) base, port);

	return (void *) addr;
}
+
+static void ahci_host_stop(struct ata_host_set *host_set)
+{
+ struct ahci_host_priv *hpriv = host_set->private_data;
+ kfree(hpriv);
+}
+
/*
 * Per-port init: allocate the port private data plus one DMA-coherent
 * region holding the command list, received-FIS area and a single
 * command table, program the port's base-address registers, then power
 * up, spin up and start the port's DMA engines.
 * Returns 0 or a negative errno; on failure everything acquired here
 * (including ata_port_start()'s allocations) is released again.
 */
static int ahci_port_start(struct ata_port *ap)
{
	struct pci_dev *pdev = ap->host_set->pdev;
	struct ahci_host_priv *hpriv = ap->host_set->private_data;
	struct ahci_port_priv *pp;
	int rc;
	void *mem, *mmio = ap->host_set->mmio_base;
	void *port_mmio = ahci_port_base(mmio, ap->port_no);
	dma_addr_t mem_dma;

	rc = ata_port_start(ap);
	if (rc)
		return rc;

	pp = kmalloc(sizeof(*pp), GFP_KERNEL);
	if (!pp) {
		rc = -ENOMEM;
		goto err_out;
	}
	memset(pp, 0, sizeof(*pp));

	mem = pci_alloc_consistent(pdev, AHCI_PORT_PRIV_DMA_SZ, &mem_dma);
	if (!mem) {
		rc = -ENOMEM;
		goto err_out_kfree;
	}
	memset(mem, 0, AHCI_PORT_PRIV_DMA_SZ);

	/*
	 * First item in chunk of DMA memory: 32-slot command table,
	 * 32 bytes each in size
	 */
	pp->cmd_slot = mem;
	pp->cmd_slot_dma = mem_dma;

	mem += AHCI_CMD_SLOT_SZ;
	mem_dma += AHCI_CMD_SLOT_SZ;

	/*
	 * Second item: Received-FIS area
	 */
	pp->rx_fis = mem;
	pp->rx_fis_dma = mem_dma;

	mem += AHCI_RX_FIS_SZ;
	mem_dma += AHCI_RX_FIS_SZ;

	/*
	 * Third item: data area for storing a single command
	 * and its scatter-gather table
	 */
	pp->cmd_tbl = mem;
	pp->cmd_tbl_dma = mem_dma;

	pp->cmd_tbl_sg = mem + AHCI_CMD_TBL_HDR;

	ap->private_data = pp;

	/* Program command-list / FIS base addresses.  The high dword is
	 * written only when the HBA supports 64-bit addressing; the
	 * (x >> 16) >> 16 form avoids an over-wide shift when
	 * dma_addr_t is only 32 bits. */
	if (hpriv->cap & HOST_CAP_64)
		writel((pp->cmd_slot_dma >> 16) >> 16, port_mmio + PORT_LST_ADDR_HI);
	writel(pp->cmd_slot_dma & 0xffffffff, port_mmio + PORT_LST_ADDR);
	readl(port_mmio + PORT_LST_ADDR); /* flush */

	if (hpriv->cap & HOST_CAP_64)
		writel((pp->rx_fis_dma >> 16) >> 16, port_mmio + PORT_FIS_ADDR_HI);
	writel(pp->rx_fis_dma & 0xffffffff, port_mmio + PORT_FIS_ADDR);
	readl(port_mmio + PORT_FIS_ADDR); /* flush */

	/* power on + spin up the device, enable FIS reception and the
	 * command-list engine, and put the interface in ACTIVE state */
	writel(PORT_CMD_ICC_ACTIVE | PORT_CMD_FIS_RX |
	       PORT_CMD_POWER_ON | PORT_CMD_SPIN_UP |
	       PORT_CMD_START, port_mmio + PORT_CMD);
	readl(port_mmio + PORT_CMD); /* flush */

	return 0;

err_out_kfree:
	kfree(pp);
err_out:
	ata_port_stop(ap);
	return rc;
}
+
+
/*
 * Per-port teardown: stop the command-list and FIS-receive DMA
 * engines, then free the DMA region and private data allocated by
 * ahci_port_start().
 */
static void ahci_port_stop(struct ata_port *ap)
{
	struct pci_dev *pdev = ap->host_set->pdev;
	struct ahci_port_priv *pp = ap->private_data;
	void *mmio = ap->host_set->mmio_base;
	void *port_mmio = ahci_port_base(mmio, ap->port_no);
	u32 tmp;

	/* clear the engine-enable bits and wait for them to shut down */
	tmp = readl(port_mmio + PORT_CMD);
	tmp &= ~(PORT_CMD_START | PORT_CMD_FIS_RX);
	writel(tmp, port_mmio + PORT_CMD);
	readl(port_mmio + PORT_CMD); /* flush */

	/* spec says 500 msecs for each PORT_CMD_{START,FIS_RX} bit, so
	 * this is slightly incorrect.
	 */
	msleep(500);

	ap->private_data = NULL;
	pci_free_consistent(pdev, AHCI_PORT_PRIV_DMA_SZ,
			    pp->cmd_slot, pp->cmd_slot_dma);
	kfree(pp);
	ata_port_stop(ap);
}
+
+static u32 ahci_scr_read (struct ata_port *ap, unsigned int sc_reg_in)
+{
+ unsigned int sc_reg;
+
+ switch (sc_reg_in) {
+ case SCR_STATUS: sc_reg = 0; break;
+ case SCR_CONTROL: sc_reg = 1; break;
+ case SCR_ERROR: sc_reg = 2; break;
+ case SCR_ACTIVE: sc_reg = 3; break;
+ default:
+ return 0xffffffffU;
+ }
+
+ return readl((void *) ap->ioaddr.scr_addr + (sc_reg * 4));
+}
+
+
+static void ahci_scr_write (struct ata_port *ap, unsigned int sc_reg_in,
+ u32 val)
+{
+ unsigned int sc_reg;
+
+ switch (sc_reg_in) {
+ case SCR_STATUS: sc_reg = 0; break;
+ case SCR_CONTROL: sc_reg = 1; break;
+ case SCR_ERROR: sc_reg = 2; break;
+ case SCR_ACTIVE: sc_reg = 3; break;
+ default:
+ return;
+ }
+
+ writel(val, (void *) ap->ioaddr.scr_addr + (sc_reg * 4));
+}
+
/*
 * Reset the SATA phy, then classify the attached device from the
 * signature latched in PORT_SIG (the initial D2H register FIS),
 * disabling the port when nothing usable is found.
 */
static void ahci_phy_reset(struct ata_port *ap)
{
	void __iomem *port_mmio = (void __iomem *) ap->ioaddr.cmd_addr;
	struct ata_taskfile tf;
	struct ata_device *dev = &ap->device[0];
	u32 tmp;

	__sata_phy_reset(ap);

	/* phy reset may have disabled the port (no link) */
	if (ap->flags & ATA_FLAG_PORT_DISABLED)
		return;

	/* unpack the signature dword into taskfile registers for
	 * ata_dev_classify() */
	tmp = readl(port_mmio + PORT_SIG);
	tf.lbah = (tmp >> 24) & 0xff;
	tf.lbam = (tmp >> 16) & 0xff;
	tf.lbal = (tmp >> 8) & 0xff;
	tf.nsect = (tmp) & 0xff;

	dev->class = ata_dev_classify(&tf);
	if (!ata_dev_present(dev))
		ata_port_disable(ap);
}
+
+static u8 ahci_check_status(struct ata_port *ap)
+{
+ void *mmio = (void *) ap->ioaddr.cmd_addr;
+
+ return readl(mmio + PORT_TFDATA) & 0xFF;
+}
+
/*
 * Populate the command table's PRD (scatter/gather) entries from the
 * already-DMA-mapped sg list of @qc.
 */
static void ahci_fill_sg(struct ata_queued_cmd *qc)
{
	struct ahci_port_priv *pp = qc->ap->private_data;
	unsigned int i;

	VPRINTK("ENTER\n");

	/*
	 * Next, the S/G list.
	 */
	for (i = 0; i < qc->n_elem; i++) {
		u32 sg_len;
		dma_addr_t addr;

		addr = sg_dma_address(&qc->sg[i]);
		sg_len = sg_dma_len(&qc->sg[i]);

		/* (addr >> 16) >> 16 avoids an over-wide shift when
		 * dma_addr_t is 32 bits */
		pp->cmd_tbl_sg[i].addr = cpu_to_le32(addr & 0xffffffff);
		pp->cmd_tbl_sg[i].addr_hi = cpu_to_le32((addr >> 16) >> 16);
		/* hardware uses a length-minus-one encoding here --
		 * matches the AHCI PRDT byte-count field */
		pp->cmd_tbl_sg[i].flags_size = cpu_to_le32(sg_len - 1);
	}
}
+
/*
 * Build the slot-0 command header and command table for @qc: the
 * options word encodes S/G entry count (high half), direction and
 * ATAPI flags, and the command FIS length in dwords; the command
 * table begins with the H2D register FIS followed by the PRD list.
 */
static void ahci_qc_prep(struct ata_queued_cmd *qc)
{
	struct ahci_port_priv *pp = qc->ap->private_data;
	u32 opts;
	const u32 cmd_fis_len = 5; /* five dwords */

	/*
	 * Fill in command slot information (only slot 0 is used,
	 * since we don't do command queueing)
	 */

	opts = (qc->n_elem << 16) | cmd_fis_len;
	if (qc->tf.flags & ATA_TFLAG_WRITE)
		opts |= AHCI_CMD_WRITE;

	switch (qc->tf.protocol) {
	case ATA_PROT_ATAPI:
	case ATA_PROT_ATAPI_NODATA:
	case ATA_PROT_ATAPI_DMA:
		opts |= AHCI_CMD_ATAPI;
		break;

	default:
		/* do nothing */
		break;
	}

	pp->cmd_slot[0].opts = cpu_to_le32(opts);
	pp->cmd_slot[0].status = 0;
	pp->cmd_slot[0].tbl_addr = cpu_to_le32(pp->cmd_tbl_dma & 0xffffffff);
	pp->cmd_slot[0].tbl_addr_hi = cpu_to_le32((pp->cmd_tbl_dma >> 16) >> 16);

	/*
	 * Fill in command table information. First, the header,
	 * a SATA Register - Host to Device command FIS.
	 */
	ata_tf_to_fis(&qc->tf, pp->cmd_tbl, 0);

	/* non-data commands carry no S/G list */
	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;

	ahci_fill_sg(qc);
}
+
/*
 * Complete the port's active command with success or ATA_ERR.
 * NOTE(review): the @qc argument is ignored; the qc is re-derived
 * from ap->active_tag -- confirm this is intentional.
 */
static inline void ahci_dma_complete (struct ata_port *ap,
				      struct ata_queued_cmd *qc,
				      int have_err)
{
	/* get drive status; clear intr; complete txn */
	ata_qc_complete(ata_qc_from_tag(ap, ap->active_tag),
			have_err ? ATA_ERR : 0);
}
+
+static void ahci_intr_error(struct ata_port *ap, u32 irq_stat)
+{
+ void *mmio = ap->host_set->mmio_base;
+ void *port_mmio = ahci_port_base(mmio, ap->port_no);
+ u32 tmp;
+ int work;
+
+ /* stop DMA */
+ tmp = readl(port_mmio + PORT_CMD);
+ tmp &= PORT_CMD_START | PORT_CMD_FIS_RX;
+ writel(tmp, port_mmio + PORT_CMD);
+
+ /* wait for engine to stop. TODO: this could be
+ * as long as 500 msec
+ */
+ work = 1000;
+ while (work-- > 0) {
+ tmp = readl(port_mmio + PORT_CMD);
+ if ((tmp & PORT_CMD_LIST_ON) == 0)
+ break;
+ udelay(10);
+ }
+
+ /* clear SATA phy error, if any */
+ tmp = readl(port_mmio + PORT_SCR_ERR);
+ writel(tmp, port_mmio + PORT_SCR_ERR);
+
+ /* if DRQ/BSY is set, device needs to be reset.
+ * if so, issue COMRESET
+ */
+ tmp = readl(port_mmio + PORT_TFDATA);
+ if (tmp & (ATA_BUSY | ATA_DRQ)) {
+ writel(0x301, port_mmio + PORT_SCR_CTL);
+ readl(port_mmio + PORT_SCR_CTL); /* flush */
+ udelay(10);
+ writel(0x300, port_mmio + PORT_SCR_CTL);
+ readl(port_mmio + PORT_SCR_CTL); /* flush */
+ }
+
+ /* re-start DMA */
+ tmp = readl(port_mmio + PORT_CMD);
+ tmp |= PORT_CMD_START | PORT_CMD_FIS_RX;
+ writel(tmp, port_mmio + PORT_CMD);
+ readl(port_mmio + PORT_CMD); /* flush */
+
+ printk(KERN_WARNING "ata%u: error occurred, port reset\n", ap->port_no);
+}
+
/*
 * Command-timeout handler (runs from the SCSI error-handler thread):
 * reset the port via ahci_intr_error(), then fail the active command.
 */
static void ahci_eng_timeout(struct ata_port *ap)
{
	void *mmio = ap->host_set->mmio_base;
	void *port_mmio = ahci_port_base(mmio, ap->port_no);
	struct ata_queued_cmd *qc;

	DPRINTK("ENTER\n");

	ahci_intr_error(ap, readl(port_mmio + PORT_IRQ_STAT));

	qc = ata_qc_from_tag(ap, ap->active_tag);
	if (!qc) {
		printk(KERN_ERR "ata%u: BUG: timeout without command\n",
		       ap->id);
	} else {
		/* hack alert! We cannot use the supplied completion
		 * function from inside the ->eh_strategy_handler() thread.
		 * libata is the only user of ->eh_strategy_handler() in
		 * any kernel, so the default scsi_done() assumes it is
		 * not being called from the SCSI EH.
		 */
		qc->scsidone = scsi_finish_command;
		ata_qc_complete(qc, ATA_ERR);
	}

}
+
/*
 * Per-port interrupt work: ack SError and the port IRQ status
 * (both write-1-to-clear), complete the active command when the
 * hardware has retired slot 0 from PORT_CMD_ISSUE, and run error
 * recovery on fatal status bits.  Always reports the port as handled
 * (returns 1).
 */
static inline int ahci_host_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
{
	void *mmio = ap->host_set->mmio_base;
	void *port_mmio = ahci_port_base(mmio, ap->port_no);
	u32 status, serr, ci;

	serr = readl(port_mmio + PORT_SCR_ERR);
	writel(serr, port_mmio + PORT_SCR_ERR);

	status = readl(port_mmio + PORT_IRQ_STAT);
	writel(status, port_mmio + PORT_IRQ_STAT);

	ci = readl(port_mmio + PORT_CMD_ISSUE);
	if (likely((ci & 0x1) == 0)) {
		if (qc) {
			ata_qc_complete(qc, 0);
			/* NULLed so the fatal path below cannot
			 * complete the same qc twice */
			qc = NULL;
		}
	}

	if (status & PORT_IRQ_FATAL) {
		ahci_intr_error(ap, status);
		if (qc)
			ata_qc_complete(qc, ATA_ERR);
	}

	return 1;
}
+
/* Intentionally empty: port IRQ status is acked in ahci_host_intr()
 * and the global status in ahci_interrupt(). */
static void ahci_irq_clear(struct ata_port *ap)
{
	/* TODO */
}
+
/*
 * Top-level (shared) interrupt handler: read the global per-port
 * pending bitmap, service each implemented port that has a bit set,
 * then ack the serviced bits in HOST_IRQ_STAT.
 */
static irqreturn_t ahci_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
{
	struct ata_host_set *host_set = dev_instance;
	struct ahci_host_priv *hpriv;
	unsigned int i, handled = 0;
	void *mmio;
	u32 irq_stat, irq_ack = 0;

	VPRINTK("ENTER\n");

	hpriv = host_set->private_data;
	mmio = host_set->mmio_base;

	/* sigh. 0xffffffff is a valid return from h/w */
	irq_stat = readl(mmio + HOST_IRQ_STAT);
	irq_stat &= hpriv->port_map;	/* ignore unimplemented ports */
	if (!irq_stat)
		return IRQ_NONE;	/* not ours (shared IRQ line) */

	spin_lock(&host_set->lock);

	for (i = 0; i < host_set->n_ports; i++) {
		struct ata_port *ap;
		u32 tmp;

		VPRINTK("port %u\n", i);
		ap = host_set->ports[i];
		tmp = irq_stat & (1 << i);
		if (tmp && ap) {
			struct ata_queued_cmd *qc;
			qc = ata_qc_from_tag(ap, ap->active_tag);
			if (ahci_host_intr(ap, qc))
				irq_ack |= (1 << i);
		}
	}

	if (irq_ack) {
		/* write-1-to-clear the serviced port bits */
		writel(irq_ack, mmio + HOST_IRQ_STAT);
		handled = 1;
	}

	spin_unlock(&host_set->lock);

	VPRINTK("EXIT\n");

	return IRQ_RETVAL(handled);
}
+
/*
 * Issue the command prepared in slot 0 by setting bit 0 of
 * PORT_CMD_ISSUE.  Always returns 0 (issue accepted).
 * NOTE(review): SActive bit 0 is also set here even for non-NCQ
 * commands -- confirm this is required for this hardware.
 */
static int ahci_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void *port_mmio = (void *) ap->ioaddr.cmd_addr;

	writel(1, port_mmio + PORT_SCR_ACT);
	readl(port_mmio + PORT_SCR_ACT); /* flush */

	writel(1, port_mmio + PORT_CMD_ISSUE);
	readl(port_mmio + PORT_CMD_ISSUE); /* flush */

	return 0;
}
+
+static void ahci_setup_port(struct ata_ioports *port, unsigned long base,
+ unsigned int port_idx)
+{
+ VPRINTK("ENTER, base==0x%lx, port_idx %u\n", base, port_idx);
+ base = ahci_port_base_ul(base, port_idx);
+ VPRINTK("base now==0x%lx\n", base);
+
+ port->cmd_addr = base;
+ port->scr_addr = base + PORT_SCR;
+
+ VPRINTK("EXIT\n");
+}
+
/*
 * One-time controller bring-up: reset the HBA, enable AHCI mode,
 * configure the DMA masks, then for every port quiesce any running
 * engines, spin up the device, clear stale error/interrupt state and
 * unmask the per-port and global interrupts.
 * Returns 0 or a negative errno.
 */
static int ahci_host_init(struct ata_probe_ent *probe_ent)
{
	struct ahci_host_priv *hpriv = probe_ent->private_data;
	struct pci_dev *pdev = probe_ent->pdev;
	void __iomem *mmio = probe_ent->mmio_base;
	u32 tmp, cap_save;
	u16 tmp16;
	unsigned int i, j, using_dac;
	int rc;
	void __iomem *port_mmio;

	/* preserve HOST_CAP bits 28 and 17 across the reset and force
	 * bit 27 on.  NOTE(review): bit meanings not named here --
	 * verify against the AHCI spec's HOST_CAP definition. */
	cap_save = readl(mmio + HOST_CAP);
	cap_save &= ( (1<<28) | (1<<17) );
	cap_save |= (1 << 27);

	/* global controller reset */
	tmp = readl(mmio + HOST_CTL);
	if ((tmp & HOST_RESET) == 0) {
		writel(tmp | HOST_RESET, mmio + HOST_CTL);
		readl(mmio + HOST_CTL); /* flush */
	}

	/* reset must complete within 1 second, or
	 * the hardware should be considered fried.
	 */
	ssleep(1);

	tmp = readl(mmio + HOST_CTL);
	if (tmp & HOST_RESET) {
		printk(KERN_ERR DRV_NAME "(%s): controller reset failed (0x%x)\n",
		       pci_name(pdev), tmp);
		return -EIO;
	}

	writel(HOST_AHCI_EN, mmio + HOST_CTL);
	(void) readl(mmio + HOST_CTL); /* flush */
	writel(cap_save, mmio + HOST_CAP);
	writel(0xf, mmio + HOST_PORTS_IMPL);
	(void) readl(mmio + HOST_PORTS_IMPL); /* flush */

	/* NOTE(review): chipset-specific PCI config write at offset
	 * 0x92 -- presumably the Intel ICH port-enable register;
	 * confirm against the chipset datasheet. */
	pci_read_config_word(pdev, 0x92, &tmp16);
	tmp16 |= 0xf;
	pci_write_config_word(pdev, 0x92, tmp16);

	hpriv->cap = readl(mmio + HOST_CAP);
	hpriv->port_map = readl(mmio + HOST_PORTS_IMPL);
	probe_ent->n_ports = (hpriv->cap & 0x1f) + 1;

	VPRINTK("cap 0x%x port_map 0x%x n_ports %d\n",
		hpriv->cap, hpriv->port_map, probe_ent->n_ports);

	/* prefer 64-bit DMA when the HBA supports it, else fall back
	 * to 32-bit masks */
	using_dac = hpriv->cap & HOST_CAP_64;
	if (using_dac &&
	    !pci_set_dma_mask(pdev, 0xffffffffffffffffULL)) {
		rc = pci_set_consistent_dma_mask(pdev, 0xffffffffffffffffULL);
		if (rc) {
			rc = pci_set_consistent_dma_mask(pdev, 0xffffffffULL);
			if (rc) {
				printk(KERN_ERR DRV_NAME "(%s): 64-bit DMA enable failed\n",
				       pci_name(pdev));
				return rc;
			}
		}

		/* NOTE(review): reuses the HOST_CAP_64 register bit
		 * value as a host flag */
		hpriv->flags |= HOST_CAP_64;
	} else {
		rc = pci_set_dma_mask(pdev, 0xffffffffULL);
		if (rc) {
			printk(KERN_ERR DRV_NAME "(%s): 32-bit DMA enable failed\n",
			       pci_name(pdev));
			return rc;
		}
		rc = pci_set_consistent_dma_mask(pdev, 0xffffffffULL);
		if (rc) {
			printk(KERN_ERR DRV_NAME "(%s): 32-bit consistent DMA enable failed\n",
			       pci_name(pdev));
			return rc;
		}
	}

	for (i = 0; i < probe_ent->n_ports; i++) {
#if 0 /* BIOSen initialize this incorrectly */
		if (!(hpriv->port_map & (1 << i)))
			continue;
#endif

		port_mmio = ahci_port_base(mmio, i);
		VPRINTK("mmio %p port_mmio %p\n", mmio, port_mmio);

		ahci_setup_port(&probe_ent->port[i],
				(unsigned long) mmio, i);

		/* make sure port is not active */
		tmp = readl(port_mmio + PORT_CMD);
		VPRINTK("PORT_CMD 0x%x\n", tmp);
		if (tmp & (PORT_CMD_LIST_ON | PORT_CMD_FIS_ON |
			   PORT_CMD_FIS_RX | PORT_CMD_START)) {
			tmp &= ~(PORT_CMD_LIST_ON | PORT_CMD_FIS_ON |
				 PORT_CMD_FIS_RX | PORT_CMD_START);
			writel(tmp, port_mmio + PORT_CMD);
			readl(port_mmio + PORT_CMD); /* flush */

			/* spec says 500 msecs for each bit, so
			 * this is slightly incorrect.
			 */
			msleep(500);
		}

		writel(PORT_CMD_SPIN_UP, port_mmio + PORT_CMD);

		/* poll SStatus for an established device connection
		 * (DET == 3), up to ~1 second */
		j = 0;
		while (j < 100) {
			msleep(10);
			tmp = readl(port_mmio + PORT_SCR_STAT);
			if ((tmp & 0xf) == 0x3)
				break;
			j++;
		}

		/* clear stale SError (write-1-to-clear) */
		tmp = readl(port_mmio + PORT_SCR_ERR);
		VPRINTK("PORT_SCR_ERR 0x%x\n", tmp);
		writel(tmp, port_mmio + PORT_SCR_ERR);

		/* ack any pending irq events for this port */
		tmp = readl(port_mmio + PORT_IRQ_STAT);
		VPRINTK("PORT_IRQ_STAT 0x%x\n", tmp);
		if (tmp)
			writel(tmp, port_mmio + PORT_IRQ_STAT);

		writel(1 << i, mmio + HOST_IRQ_STAT);

		/* set irq mask (enables interrupts) */
		writel(DEF_PORT_IRQ, port_mmio + PORT_IRQ_MASK);
	}

	/* finally, turn on the controller's global interrupt enable */
	tmp = readl(mmio + HOST_CTL);
	VPRINTK("HOST_CTL 0x%x\n", tmp);
	writel(tmp | HOST_IRQ_EN, mmio + HOST_CTL);
	tmp = readl(mmio + HOST_CTL);
	VPRINTK("HOST_CTL 0x%x\n", tmp);

	pci_set_master(pdev);

	return 0;
}
+
+/* move to PCI layer, integrate w/ MSI stuff */
+static void pci_enable_intx(struct pci_dev *pdev)
+{
+ u16 pci_command;
+
+ pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
+ if (pci_command & PCI_COMMAND_INTX_DISABLE) {
+ pci_command &= ~PCI_COMMAND_INTX_DISABLE;
+ pci_write_config_word(pdev, PCI_COMMAND, pci_command);
+ }
+}
+
/*
 * Log a one-time summary of the controller at probe: AHCI version,
 * slot/port counts, link speed, PCI class mode, and decoded HOST_CAP
 * capability flags.
 */
static void ahci_print_info(struct ata_probe_ent *probe_ent)
{
	struct ahci_host_priv *hpriv = probe_ent->private_data;
	struct pci_dev *pdev = probe_ent->pdev;
	void *mmio = probe_ent->mmio_base;
	u32 vers, cap, impl, speed;
	const char *speed_s;
	u16 cc;
	const char *scc_s;

	vers = readl(mmio + HOST_VERSION);
	cap = hpriv->cap;
	impl = hpriv->port_map;

	/* interface speed support field of HOST_CAP */
	speed = (cap >> 20) & 0xf;
	if (speed == 1)
		speed_s = "1.5";
	else if (speed == 2)
		speed_s = "3";
	else
		speed_s = "?";

	/* PCI sub-class code tells us which mode the BIOS selected */
	pci_read_config_word(pdev, 0x0a, &cc);
	if (cc == 0x0101)
		scc_s = "IDE";
	else if (cc == 0x0106)
		scc_s = "SATA";
	else if (cc == 0x0104)
		scc_s = "RAID";
	else
		scc_s = "unknown";

	printk(KERN_INFO DRV_NAME "(%s) AHCI %02x%02x.%02x%02x "
	       "%u slots %u ports %s Gbps 0x%x impl %s mode\n"
	       ,
	       pci_name(pdev),

	       (vers >> 24) & 0xff,
	       (vers >> 16) & 0xff,
	       (vers >> 8) & 0xff,
	       vers & 0xff,

	       ((cap >> 8) & 0x1f) + 1,
	       (cap & 0x1f) + 1,
	       speed_s,
	       impl,
	       scc_s);

	printk(KERN_INFO DRV_NAME "(%s) flags: "
	       "%s%s%s%s%s%s"
	       "%s%s%s%s%s%s%s\n"
	       ,
	       pci_name(pdev),

	       cap & (1 << 31) ? "64bit " : "",
	       cap & (1 << 30) ? "ncq " : "",
	       cap & (1 << 28) ? "ilck " : "",
	       cap & (1 << 27) ? "stag " : "",
	       cap & (1 << 26) ? "pm " : "",
	       cap & (1 << 25) ? "led " : "",

	       cap & (1 << 24) ? "clo " : "",
	       cap & (1 << 19) ? "nz " : "",
	       cap & (1 << 18) ? "only " : "",
	       cap & (1 << 17) ? "pmp " : "",
	       cap & (1 << 15) ? "pio " : "",
	       cap & (1 << 14) ? "slum " : "",
	       cap & (1 << 13) ? "part " : ""
	       );
}
+
/*
 * PCI probe: enable the device, map the AHCI BAR, allocate and fill
 * the probe entry and host-private data, initialize the controller
 * via ahci_host_init(), then register the ports with libata.
 * Resources are unwound in reverse order via the goto chain on error.
 */
static int ahci_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int printed_version;
	struct ata_probe_ent *probe_ent = NULL;
	struct ahci_host_priv *hpriv;
	unsigned long base;
	void *mmio_base;
	unsigned int board_idx = (unsigned int) ent->driver_data;
	int rc;

	VPRINTK("ENTER\n");

	if (!printed_version++)
		printk(KERN_DEBUG DRV_NAME " version " DRV_VERSION "\n");

	/*
	 * If this driver happens to only be useful on Apple's K2, then
	 * we should check that here as it has a normal Serverworks ID
	 */
	/* NOTE(review): the comment above looks copied from another
	 * SATA driver -- it does not apply to this Intel AHCI device
	 * table; confirm and remove. */
	rc = pci_enable_device(pdev);
	if (rc)
		return rc;

	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc)
		goto err_out;

	pci_enable_intx(pdev);

	probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL);
	if (probe_ent == NULL) {
		rc = -ENOMEM;
		goto err_out_regions;
	}

	memset(probe_ent, 0, sizeof(*probe_ent));
	probe_ent->pdev = pdev;
	INIT_LIST_HEAD(&probe_ent->node);

	mmio_base = ioremap(pci_resource_start(pdev, AHCI_PCI_BAR),
			    pci_resource_len(pdev, AHCI_PCI_BAR));
	if (mmio_base == NULL) {
		rc = -ENOMEM;
		goto err_out_free_ent;
	}
	base = (unsigned long) mmio_base;

	hpriv = kmalloc(sizeof(*hpriv), GFP_KERNEL);
	if (!hpriv) {
		rc = -ENOMEM;
		goto err_out_iounmap;
	}
	memset(hpriv, 0, sizeof(*hpriv));

	/* copy the board's template settings into the probe entry */
	probe_ent->sht = ahci_port_info[board_idx].sht;
	probe_ent->host_flags = ahci_port_info[board_idx].host_flags;
	probe_ent->pio_mask = ahci_port_info[board_idx].pio_mask;
	probe_ent->udma_mask = ahci_port_info[board_idx].udma_mask;
	probe_ent->port_ops = ahci_port_info[board_idx].port_ops;

	probe_ent->irq = pdev->irq;
	probe_ent->irq_flags = SA_SHIRQ;
	probe_ent->mmio_base = mmio_base;
	probe_ent->private_data = hpriv;

	/* initialize adapter */
	rc = ahci_host_init(probe_ent);
	if (rc)
		goto err_out_hpriv;

	ahci_print_info(probe_ent);

	/* FIXME: check ata_device_add return value */
	ata_device_add(probe_ent);
	/* probe_ent is only needed during registration */
	kfree(probe_ent);

	return 0;

err_out_hpriv:
	kfree(hpriv);
err_out_iounmap:
	iounmap(mmio_base);
err_out_free_ent:
	kfree(probe_ent);
err_out_regions:
	pci_release_regions(pdev);
err_out:
	pci_disable_device(pdev);
	return rc;
}
+
+
/* Module entry point: register the PCI driver. */
static int __init ahci_init(void)
{
	return pci_module_init(&ahci_pci_driver);
}
+
+
/* Module exit point: unregister the PCI driver. */
static void __exit ahci_exit(void)
{
	pci_unregister_driver(&ahci_pci_driver);
}
+
+
+MODULE_AUTHOR("Jeff Garzik");
+MODULE_DESCRIPTION("AHCI SATA low-level driver");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, ahci_pci_tbl);
+MODULE_VERSION(DRV_VERSION);
+
+module_init(ahci_init);
+module_exit(ahci_exit);
--- /dev/null
# Driver for Emulex LightPulse fibre channel host bus adapters.
# NOTE(review): -DFC_TRANS_VER1 presumably selects the version-1 FC
# transport interface -- confirm against the lpfc sources.
EXTRA_CFLAGS += -DFC_TRANS_VER1
obj-$(CONFIG_SCSI_LPFC) := lpfc.o

# Object files combined into the single lpfc.o module.
lpfc-objs := lpfc_mem.o lpfc_sli.o lpfc_ct.o lpfc_els.o \
lpfc_hbadisc.o lpfc_init.o lpfc_mbox.o lpfc_nportdisc.o lpfc_scsiport.o \
lpfc_fcp.o
--- /dev/null
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for *
+ * Enterprise Fibre Channel Host Bus Adapters. *
+ * Refer to the README file included with this package for *
+ * driver version and adapter support. *
+ * Copyright (C) 2004 Emulex Corporation. *
+ * www.emulex.com *
+ * *
+ * This program is free software; you can redistribute it and/or *
+ * modify it under the terms of the GNU General Public License *
+ * as published by the Free Software Foundation; either version 2 *
+ * of the License, or (at your option) any later version. *
+ * *
+ * This program is distributed in the hope that it will be useful, *
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of *
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
+ * GNU General Public License for more details, a copy of which *
+ * can be found in the file COPYING included with this package. *
+ *******************************************************************/
+
+/*
+ * $Id: lpfc.h 1.134 2004/11/18 14:53:45EST sf_support Exp $
+ */
+
+#ifndef _H_LPFC
+#define _H_LPFC
+
+struct lpfc_sli2_slim;
+
+/* Driver-wide limits and tunables. */
+#define LPFC_MAX_TARGET 256 /* max number of targets
+ supported */
+#define LPFC_MAX_DISC_THREADS 64 /* max outstanding discovery els
+ requests */
+#define LPFC_MAX_NS_RETRY 3 /* Try to get to the NameServer
+ 3 times and then give up. */
+#define LPFC_DFT_HBA_Q_DEPTH 2048 /* max cmds per hba */
+#define LPFC_LC_HBA_Q_DEPTH 1024 /* max cmds per low cost hba */
+#define LPFC_LP101_HBA_Q_DEPTH 128 /* max cmds per low cost hba */
+
+/* Define the SLIM2 page size. */
+#define LPFC_SLIM2_PAGE_AREA 8192
+
+/* Define macros for 64 bit support: split a DMA address into the two
+ * 32-bit halves the hardware wants, and rebuild one from them. */
+#define putPaddrLow(addr) ((uint32_t) (0xffffffff & (u64)(addr)))
+#define putPaddrHigh(addr) ((uint32_t) (0xffffffff & (((u64)(addr))>>32)))
+/* NOTE(review): the double 16-bit shift (instead of a single <<32)
+ * presumably avoids compiler warnings when dma_addr_t is 32 bits --
+ * confirm before changing. */
+#define getPaddr(high, low) ((dma_addr_t)( \
+ (( (u64)(high)<<16 ) << 16)|( (u64)(low))))
+/* Provide maximum configuration definitions. */
+#define LPFC_DRVR_TIMEOUT 16 /* driver iocb timeout value in sec */
+#define MAX_FCP_TARGET 256 /* max num of FCP targets supported */
+#define FC_MAX_ADPTMSG 64
+
+#define MAX_HBAEVT 32
+
+#if __LITTLE_ENDIAN
+
+#define putLunLow(lunlow, lun) \
+ { \
+ lunlow = 0; \
+ }
+
+#define putLunHigh(lunhigh, lun) \
+ { \
+ lunhigh = swab16(lun); \
+ }
+
+#else /* BIG_ENDIAN_HOST */
+
+#define putLunLow(lunlow, lun) \
+ { \
+ lunlow = 0; \
+ }
+
+#define putLunHigh(lunhigh, lun) \
+ { \
+ lunhigh = (uint32_t)(lun << 16); \
+ }
+#endif
+
+/****************************************************************************/
+/* Device VPD save area */
+/****************************************************************************/
+/* Vital Product Data read back from the adapter: status of the VPD
+ * query plus firmware / hardware revision information. */
+typedef struct lpfc_vpd {
+ uint32_t status; /* vpd status value */
+ uint32_t length; /* number of bytes actually returned */
+ struct {
+ uint32_t rsvd1; /* Revision numbers */
+ uint32_t biuRev;
+ uint32_t smRev;
+ uint32_t smFwRev;
+ uint32_t endecRev;
+ uint16_t rBit;
+ uint8_t fcphHigh;
+ uint8_t fcphLow;
+ uint8_t feaLevelHigh;
+ uint8_t feaLevelLow;
+ uint32_t postKernRev;
+ uint32_t opFwRev;
+ uint8_t opFwName[16];
+ uint32_t sli1FwRev;
+ uint8_t sli1FwName[16];
+ uint32_t sli2FwRev;
+ uint8_t sli2FwName[16];
+ } rev;
+} lpfc_vpd_t;
+
+struct lpfc_scsi_buf;
+
+/* One entry in the fixed-size HBA event ring (hbaevt[MAX_HBAEVT] in
+ * struct lpfc_hba), indexed by hba_event_put/hba_event_get. */
+struct lpfc_hba_event {
+ uint32_t fc_eventcode;
+ uint32_t fc_evdata1;
+ uint32_t fc_evdata2;
+ uint32_t fc_evdata3;
+ uint32_t fc_evdata4;
+};
+
+/*
+ * lpfc stat counters
+ */
+struct lpfc_stats {
+ /* Statistics for ELS commands */
+ uint32_t elsLogiCol;
+ uint32_t elsRetryExceeded;
+ uint32_t elsXmitRetry;
+ uint32_t elsDelayRetry;
+ uint32_t elsRcvDrop;
+ uint32_t elsRcvFrame;
+ uint32_t elsRcvRSCN;
+ uint32_t elsRcvRNID;
+ uint32_t elsRcvFARP;
+ uint32_t elsRcvFARPR;
+ uint32_t elsRcvFLOGI;
+ uint32_t elsRcvPLOGI;
+ uint32_t elsRcvADISC;
+ uint32_t elsRcvPDISC;
+ uint32_t elsRcvFAN;
+ uint32_t elsRcvLOGO;
+ uint32_t elsRcvPRLO;
+ uint32_t elsRcvPRLI;
+ uint32_t elsRcvRRQ;
+ uint32_t elsXmitFLOGI;
+ uint32_t elsXmitPLOGI;
+ uint32_t elsXmitPRLI;
+ uint32_t elsXmitADISC;
+ uint32_t elsXmitLOGO;
+ uint32_t elsXmitSCR;
+ uint32_t elsXmitRNID;
+ uint32_t elsXmitFARP;
+ uint32_t elsXmitFARPR;
+ uint32_t elsXmitACC;
+ uint32_t elsXmitLSRJT;
+
+ /* Frame, link and FCP I/O statistics */
+ uint32_t frameRcvBcast;
+ uint32_t frameRcvMulti;
+ uint32_t strayXmitCmpl;
+ uint32_t frameXmitDelay;
+ uint32_t xriCmdCmpl;
+ uint32_t xriStatErr;
+ uint32_t LinkUp;
+ uint32_t LinkDown;
+ uint32_t LinkMultiEvent;
+ uint32_t NoRcvBuf;
+ uint32_t fcpCmd;
+ uint32_t fcpCmpl;
+ uint32_t fcpRspErr;
+ uint32_t fcpRemoteStop;
+ uint32_t fcpPortRjt;
+ uint32_t fcpPortBusy;
+ uint32_t fcpError;
+ uint32_t fcpLocalErr;
+};
+
+/* State of the sysfs mailbox interface (struct lpfc_sysfs_mbox). */
+enum sysfs_mbox_state {
+ SMBOX_IDLE,
+ SMBOX_WRITING,
+ SMBOX_READING
+};
+
+/* Tracks an in-progress mailbox command driven through sysfs:
+ * current direction/state, byte offset into the mailbox, and the
+ * mailbox command buffer itself. */
+struct lpfc_sysfs_mbox {
+ enum sysfs_mbox_state state;
+ size_t offset;
+ struct lpfcMboxq * mbox;
+};
+
+/*
+ * Per-adapter driver state: one instance per HBA / Scsi_Host.
+ * (Fix: removed a stray ';' that sat alone inside the struct after the
+ * sysfs_mbox member -- an empty declaration, invalid in strict C.)
+ */
+struct lpfc_hba {
+ uint32_t intr_inited; /* flag for interrupt registration */
+ struct list_head hba_list; /* List of hbas/ports */
+ struct lpfc_sli sli;
+ struct lpfc_sli2_slim *slim2p;
+ dma_addr_t slim2p_mapping;
+
+ uint32_t hba_state;
+
+#define LPFC_INIT_START 1 /* Initial state after board reset */
+#define LPFC_INIT_MBX_CMDS 2 /* Initialize HBA with mbox commands */
+#define LPFC_LINK_DOWN 3 /* HBA initialized, link is down */
+#define LPFC_LINK_UP 4 /* Link is up - issue READ_LA */
+#define LPFC_LOCAL_CFG_LINK 5 /* local NPORT Id configured */
+#define LPFC_FLOGI 6 /* FLOGI sent to Fabric */
+#define LPFC_FABRIC_CFG_LINK 7 /* Fabric assigned NPORT Id
+ configured */
+#define LPFC_NS_REG 8 /* Register with NameServer */
+#define LPFC_NS_QRY 9 /* Query NameServer for NPort ID list */
+#define LPFC_BUILD_DISC_LIST 10 /* Build ADISC and PLOGI lists for
+ * device authentication / discovery */
+#define LPFC_DISC_AUTH 11 /* Processing ADISC list */
+#define LPFC_CLEAR_LA 12 /* authentication cmplt - issue
+ CLEAR_LA */
+#define LPFC_HBA_READY 32
+#define LPFC_HBA_ERROR 0xff
+
+ uint8_t fc_linkspeed; /* Link speed after last READ_LA */
+
+ uint32_t fc_eventTag; /* event tag for link attention */
+ uint32_t fc_prli_sent; /* cntr for outstanding PRLIs */
+
+ uint32_t num_disc_nodes; /*in addition to hba_state */
+
+ uint8_t fcp_mapping; /* Map FCP devices based on WWNN WWPN or DID */
+#define FCP_SEED_WWNN 0x1
+#define FCP_SEED_WWPN 0x2
+#define FCP_SEED_DID 0x4
+#define FCP_SEED_MASK 0x7
+#define FCP_SEED_AUTO 0x8 /* binding was created by auto mapping */
+
+ struct timer_list fc_estabtmo; /* link establishment timer */
+ struct timer_list fc_disctmo; /* Discovery rescue timer */
+ struct timer_list fc_fdmitmo; /* fdmi timer */
+ struct timer_list fc_scantmo; /* scsi scan host timer */
+
+
+ void *fc_evt_head; /* waiting for event queue */
+ void *fc_evt_tail; /* waiting for event queue */
+
+ uint16_t hba_event_put; /* hbaevent event put word anchor */
+ uint16_t hba_event_get; /* hbaevent event get word anchor */
+ uint32_t hba_event_missed; /* hbaevent missed event word anchor */
+ uint32_t sid_cnt; /* SCSI ID counter */
+
+ struct lpfc_hba_event hbaevt[MAX_HBAEVT];
+
+ /* These fields used to be binfo */
+ struct lpfc_name fc_nodename; /* fc nodename */
+ struct lpfc_name fc_portname; /* fc portname */
+ uint32_t fc_pref_DID; /* preferred D_ID */
+ uint8_t fc_pref_ALPA; /* preferred AL_PA */
+ uint32_t fc_edtov; /* E_D_TOV timer value */
+ uint32_t fc_arbtov; /* ARB_TOV timer value */
+ uint32_t fc_ratov; /* R_A_TOV timer value */
+ uint32_t fc_rttov; /* R_T_TOV timer value */
+ uint32_t fc_altov; /* AL_TOV timer value */
+ uint32_t fc_crtov; /* C_R_TOV timer value */
+ uint32_t fc_citov; /* C_I_TOV timer value */
+ uint32_t fc_myDID; /* fibre channel S_ID */
+ uint32_t fc_prevDID; /* previous fibre channel S_ID */
+
+ struct serv_parm fc_sparam; /* buffer for our service parameters */
+ struct serv_parm fc_fabparam; /* fabric service parameters buffer */
+ uint8_t alpa_map[128]; /* AL_PA map from READ_LA */
+
+ uint8_t fc_ns_retry; /* retries for fabric nameserver */
+ uint32_t fc_nlp_cnt; /* outstanding NODELIST requests */
+ uint32_t fc_rscn_id_cnt; /* count of RSCNs payloads in list */
+ struct lpfc_dmabuf *fc_rscn_id_list[FC_MAX_HOLD_RSCN];
+ uint32_t lmt;
+ uint32_t fc_flag; /* FC flags */
+#define FC_PT2PT 0x1 /* pt2pt with no fabric */
+#define FC_PT2PT_PLOGI 0x2 /* pt2pt initiate PLOGI */
+#define FC_DISC_TMO 0x4 /* Discovery timer running */
+#define FC_PUBLIC_LOOP 0x8 /* Public loop */
+#define FC_LBIT 0x10 /* LOGIN bit in loopinit set */
+#define FC_RSCN_MODE 0x20 /* RSCN cmd rcv'ed */
+#define FC_NLP_MORE 0x40 /* More node to process in node tbl */
+#define FC_OFFLINE_MODE 0x80 /* Interface is offline for diag */
+#define FC_FABRIC 0x100 /* We are fabric attached */
+#define FC_ESTABLISH_LINK 0x200 /* Reestablish Link */
+#define FC_RSCN_DISCOVERY 0x400 /* Authenticate all devices after RSCN*/
+#define FC_LOADING 0x1000 /* HBA in process of loading drvr */
+#define FC_UNLOADING 0x2000 /* HBA in process of unloading drvr */
+#define FC_SCSI_SCAN_TMO 0x4000 /* scsi scan timer running */
+#define FC_ABORT_DISCOVERY 0x8000 /* we want to abort discovery */
+
+ uint32_t fc_topology; /* link topology, from LINK INIT */
+
+ struct lpfc_stats fc_stat;
+
+ /* These are the head/tail pointers for the bind, plogi, adisc, unmap,
+ * and map lists. Their counters are immediately following.
+ */
+ struct list_head fc_nlpbind_list;
+ struct list_head fc_plogi_list;
+ struct list_head fc_adisc_list;
+ struct list_head fc_reglogin_list;
+ struct list_head fc_prli_list;
+ struct list_head fc_nlpunmap_list;
+ struct list_head fc_nlpmap_list;
+ struct list_head fc_npr_list;
+ struct list_head fc_unused_list;
+
+ /* Keep counters for the number of entries in each list. */
+ uint16_t fc_bind_cnt;
+ uint16_t fc_plogi_cnt;
+ uint16_t fc_adisc_cnt;
+ uint16_t fc_reglogin_cnt;
+ uint16_t fc_prli_cnt;
+ uint16_t fc_unmap_cnt;
+ uint16_t fc_map_cnt;
+ uint16_t fc_npr_cnt;
+ uint16_t fc_unused_cnt;
+ struct lpfc_nodelist fc_fcpnodev; /* nodelist entry for no device */
+ uint32_t nport_event_cnt; /* timestamp for nlplist entry */
+
+ struct lpfc_target *device_queue_hash[MAX_FCP_TARGET];
+#define LPFC_RPI_HASH_SIZE 64
+#define LPFC_RPI_HASH_FUNC(x) ((x) & (0x3f))
+ /* ptr to active D_ID / RPIs */
+ struct lpfc_nodelist *fc_nlplookup[LPFC_RPI_HASH_SIZE];
+ uint32_t wwnn[2];
+ uint32_t RandomData[7];
+
+ /* Configuration parameters (module/sysfs tunables). */
+ uint32_t cfg_log_verbose;
+ uint32_t cfg_lun_queue_depth;
+ uint32_t cfg_nodev_tmo;
+ uint32_t cfg_hba_queue_depth;
+ uint32_t cfg_automap;
+ uint32_t cfg_fcp_class;
+ uint32_t cfg_use_adisc;
+ uint32_t cfg_ack0;
+ uint32_t cfg_topology;
+ uint32_t cfg_scan_down;
+ uint32_t cfg_link_speed;
+ uint32_t cfg_cr_delay;
+ uint32_t cfg_cr_count;
+ uint32_t cfg_fdmi_on;
+ uint32_t cfg_fcp_bind_method;
+ uint32_t cfg_discovery_threads;
+ uint32_t cfg_max_luns;
+ uint32_t cfg_scsi_hotplug;
+
+ lpfc_vpd_t vpd; /* vital product data */
+
+#if defined(FC_TRANS_265_BLKPATCH)
+ /*
+ * Provide a per-HBA timer for 2.6.5 kernels patched with the
+ * block/unblock FC transport patch.
+ */
+ struct timer_list dev_loss_timer;
+#endif
+
+ struct Scsi_Host *host;
+ struct pci_dev *pcidev;
+ struct list_head dpc_disc;
+
+ pid_t dpc_pid;
+ int dpc_kill;
+ struct completion dpc_startup;
+ struct completion dpc_exiting;
+ struct semaphore *dpc_wait;
+
+ unsigned long pci_bar0_map; /* Physical address for PCI BAR0 */
+ unsigned long pci_bar2_map; /* Physical address for PCI BAR2 */
+ void *slim_memmap_p; /* Kernel memory mapped address for PCI
+ BAR0 */
+ void *ctrl_regs_memmap_p; /* Kernel memory mapped address for PCI
+ BAR2 */
+
+ void *MBslimaddr; /* virtual address for mbox cmds */
+ void *HAregaddr; /* virtual address for host attn reg */
+ void *CAregaddr; /* virtual address for chip attn reg */
+ void *HSregaddr; /* virtual address for host status reg */
+ void *HCregaddr; /* virtual address for host ctl reg */
+ wait_queue_head_t linkevtwq;
+ wait_queue_head_t rscnevtwq;
+ wait_queue_head_t ctevtwq;
+
+ uint8_t brd_no; /* FC board number */
+
+ char SerialNumber[32]; /* adapter Serial Number */
+ char OptionROMVersion[32]; /* adapter BIOS / Fcode version */
+
+ struct timer_list els_tmofunc;
+
+ void *link_stats;
+
+ /*
+ * stat counters
+ */
+ uint64_t fc4InputRequests;
+ uint64_t fc4OutputRequests;
+ uint64_t fc4ControlRequests;
+
+ struct lpfc_sysfs_mbox sysfs_mbox;
+
+ /* pci_mem_pools */
+ struct pci_pool *lpfc_scsi_dma_ext_pool;
+ struct pci_pool *lpfc_mbuf_pool;
+ struct lpfc_dma_pool lpfc_mbuf_safety_pool;
+ mempool_t *scsibuf_mem_pool;
+
+ mempool_t *iocb_mem_pool;
+ mempool_t *mbox_mem_pool;
+ mempool_t *nlp_mem_pool;
+ mempool_t *bind_mem_pool;
+ struct list_head freebufList;
+ struct list_head ctrspbuflist;
+ struct list_head rnidrspbuflist;
+};
+
+/* event mask definitions -- registration masks consumed by
+ * lpfc_put_event() listeners (e.g. FC_REG_CT_EVENT in lpfc_ct.c) */
+#define FC_REG_LINK_EVENT 0x1 /* Register for link up / down events */
+#define FC_REG_RSCN_EVENT 0x2 /* Register for RSCN events */
+#define FC_REG_CT_EVENT 0x4 /* Register for CT request events */
+
+#define FC_FSTYPE_ALL 0xffff /* match on all fsTypes */
+
+typedef struct fcEVT { /* Kernel level Event structure */
+ uint32_t evt_handle;
+ uint32_t evt_mask;
+ uint32_t evt_data0;
+ uint16_t evt_sleep;
+ uint16_t evt_flags;
+ void *evt_type;
+ void *evt_next; /* NOTE(review): presumably links to the next
+ fcEVT in a chain (cf. e_head/e_tail in
+ fcEVTHDR) -- confirm against users */
+ void *evt_data1;
+ uint32_t evt_data2;
+} fcEVT_t;
+
+typedef struct fcEVTHDR { /* Kernel level Event Header */
+ uint32_t e_handle;
+ uint32_t e_mask;
+ uint16_t e_mode;
+#define E_SLEEPING_MODE 0x0001
+ uint16_t e_refcnt;
+ uint16_t e_flag;
+#define E_GET_EVENT_ACTIVE 0x0001
+ fcEVT_t *e_head; /* first queued event */
+ fcEVT_t *e_tail; /* last queued event */
+ void *e_next_header;
+ void *e_type;
+} fcEVTHDR_t;
+
+/* Holds one RNID response buffer on the per-HBA rnidrspbuflist. */
+struct rnidrsp {
+ void *buf;
+ uint32_t uniqueid;
+ struct list_head list;
+ uint32_t data;
+};
+
+#endif /* _H_LPFC */
--- /dev/null
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for *
+ * Enterprise Fibre Channel Host Bus Adapters. *
+ * Refer to the README file included with this package for *
+ * driver version and adapter support. *
+ * Copyright (C) 2004 Emulex Corporation. *
+ * www.emulex.com *
+ * *
+ * This program is free software; you can redistribute it and/or *
+ * modify it under the terms of the GNU General Public License *
+ * as published by the Free Software Foundation; either version 2 *
+ * of the License, or (at your option) any later version. *
+ * *
+ * This program is distributed in the hope that it will be useful, *
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of *
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
+ * GNU General Public License for more details, a copy of which *
+ * can be found in the file COPYING included with this package. *
+ *******************************************************************/
+
+/*
+ * $Id: lpfc_compat.h 1.28 2004/11/09 14:49:24EST sf_support Exp $
+ *
+ * This file provides macros to aid compilation in the Linux 2.4 kernel
+ * over various platform architectures.
+ */
+
+#ifndef _H_LPFC_COMPAT
+#define _H_LPFC_COMPAT
+
+
+/*******************************************************************
+Note: HBA's SLI memory contains little-endian LW.
+Thus to access it from a little-endian host,
+memcpy_toio() and memcpy_fromio() can be used.
+However on a big-endian host, copy 4 bytes at a time,
+using writel() and readl().
+ *******************************************************************/
+
+#if __BIG_ENDIAN
+
+/*
+ * Copy a buffer into HBA SLIM memory on a big-endian host.  The SLIM
+ * holds little-endian 32-bit words, so each word must go through
+ * writel() rather than a plain memcpy_toio(); every write is followed
+ * by a readl() of the same location to flush it.  Only bytes/4 whole
+ * words are copied -- any trailing 1-3 bytes are not transferred.
+ */
+static inline void
+lpfc_memcpy_to_slim( void *dest, void *src, unsigned int bytes)
+{
+ uint32_t *d = (uint32_t *) dest;
+ uint32_t *s = (uint32_t *) src;
+ unsigned int words = bytes / 4;
+
+ while (words--) {
+ writel(*s++, d);
+ readl(d); /* flush */
+ d++;
+ }
+}
+
+/*
+ * Copy a buffer out of HBA SLIM memory on a big-endian host, one
+ * 32-bit word at a time via readl().  Only bytes/4 whole words are
+ * copied -- any trailing 1-3 bytes are not transferred.
+ */
+static inline void
+lpfc_memcpy_from_slim( void *dest, void *src, unsigned int bytes)
+{
+ uint32_t *d = (uint32_t *) dest;
+ uint32_t *s = (uint32_t *) src;
+ unsigned int words = bytes / 4;
+
+ while (words--)
+ *d++ = readl(s++);
+}
+
+#else
+
+/* Little-endian host: SLIM words are already in host byte order, so a
+ * plain byte copy to I/O memory suffices.  (The old "actually returns
+ * 1 byte past dest" comment was stale -- this function returns void.) */
+static inline void
+lpfc_memcpy_to_slim( void *dest, void *src, unsigned int bytes)
+{
+ memcpy_toio( dest, src, bytes);
+}
+
+/* Little-endian host: read SLIM with a plain byte copy from I/O memory.
+ * (The old "actually returns 1 byte past dest" comment was stale --
+ * this function returns void.) */
+static inline void
+lpfc_memcpy_from_slim( void *dest, void *src, unsigned int bytes)
+{
+ memcpy_fromio( dest, src, bytes);
+}
+
+#endif /* __BIG_ENDIAN */
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,6)
+/*
+ * msleep() fallback for pre-2.6.6 kernels.  No semicolon after
+ * "while (0)": the caller supplies it, and the stray one previously
+ * present expanded to an extra empty statement, breaking constructs
+ * such as "if (x) msleep(1); else ...".
+ */
+#define msleep(x) do { \
+ set_current_state(TASK_UNINTERRUPTIBLE); \
+ schedule_timeout((x)); \
+ } while (0)
+/* Provide local msecs_to_jiffies call for earlier kernels */
+static inline unsigned long msecs_to_jiffies(const unsigned int m)
+{
+#if HZ <= 1000 && !(1000 % HZ)
+ return (m + (1000 / HZ) - 1) / (1000 / HZ);
+#elif HZ > 1000 && !(HZ % 1000)
+ return m * (HZ / 1000);
+#else
+ return (m * HZ + 999) / 1000;
+#endif
+}
+#endif
+#endif /* _H_LPFC_COMPAT */
--- /dev/null
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for *
+ * Enterprise Fibre Channel Host Bus Adapters. *
+ * Refer to the README file included with this package for *
+ * driver version and adapter support. *
+ * Copyright (C) 2004 Emulex Corporation. *
+ * www.emulex.com *
+ * *
+ * This program is free software; you can redistribute it and/or *
+ * modify it under the terms of the GNU General Public License *
+ * as published by the Free Software Foundation; either version 2 *
+ * of the License, or (at your option) any later version. *
+ * *
+ * This program is distributed in the hope that it will be useful, *
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of *
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
+ * GNU General Public License for more details, a copy of which *
+ * can be found in the file COPYING included with this package. *
+ *******************************************************************/
+
+/*
+ * $Id: lpfc_crtn.h 1.140 2004/11/17 19:00:57EST sf_support Exp $
+ */
+
+#ifndef _H_LPFC_CRTN
+#define _H_LPFC_CRTN
+
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <asm/uaccess.h>
+
+#include "lpfc_disc.h"
+#include "lpfc_logmsg.h"
+#include "lpfc_scsi.h"
+#include "lpfc_sli.h"
+
+
+void lpfc_dump_mem(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_read_nv(struct lpfc_hba *, LPFC_MBOXQ_t *);
+int lpfc_read_la(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_clear_la(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_config_link(struct lpfc_hba *, LPFC_MBOXQ_t *);
+int lpfc_read_sparam(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_read_config(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_set_slim(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t, uint32_t);
+int lpfc_reg_login(struct lpfc_hba *, uint32_t, uint8_t *, LPFC_MBOXQ_t *,
+ uint32_t);
+void lpfc_unreg_login(struct lpfc_hba *, uint32_t, LPFC_MBOXQ_t *);
+void lpfc_unreg_did(struct lpfc_hba *, uint32_t, LPFC_MBOXQ_t *);
+void lpfc_init_link(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t, uint32_t);
+
+
+int lpfc_linkdown(struct lpfc_hba *);
+void lpfc_mbx_cmpl_read_la(struct lpfc_hba *, LPFC_MBOXQ_t *);
+
+void lpfc_mbx_cmpl_clear_la(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_mbx_cmpl_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
+int lpfc_consistent_bind_save(struct lpfc_hba *, struct lpfc_bindlist *);
+int lpfc_nlp_plogi(struct lpfc_hba *, struct lpfc_nodelist *);
+int lpfc_nlp_adisc(struct lpfc_hba *, struct lpfc_nodelist *);
+int lpfc_nlp_unmapped(struct lpfc_hba *, struct lpfc_nodelist *);
+int lpfc_nlp_mapped(struct lpfc_hba *, struct lpfc_nodelist *,
+ struct lpfc_bindlist *);
+int lpfc_nlp_list(struct lpfc_hba *, struct lpfc_nodelist *, int);
+void lpfc_set_disctmo(struct lpfc_hba *);
+int lpfc_can_disctmo(struct lpfc_hba *);
+int lpfc_unreg_rpi(struct lpfc_hba *, struct lpfc_nodelist *);
+int lpfc_check_sli_ndlp(struct lpfc_hba *, struct lpfc_sli_ring *,
+ struct lpfc_iocbq *, struct lpfc_nodelist *);
+int lpfc_nlp_remove(struct lpfc_hba *, struct lpfc_nodelist *);
+void lpfc_nlp_init(struct lpfc_hba *, struct lpfc_nodelist *, uint32_t);
+struct lpfc_nodelist *lpfc_setup_disc_node(struct lpfc_hba *, uint32_t);
+struct lpfc_nodelist *lpfc_setup_rscn_node(struct lpfc_hba *, uint32_t);
+void lpfc_disc_list_loopmap(struct lpfc_hba *);
+void lpfc_disc_start(struct lpfc_hba *);
+void lpfc_disc_flush_list(struct lpfc_hba *);
+void lpfc_establish_link_tmo(unsigned long);
+void lpfc_disc_timeout(unsigned long);
+void lpfc_scan_timeout(unsigned long);
+struct lpfc_target *lpfc_find_target(struct lpfc_hba *, uint32_t,
+ struct lpfc_nodelist *);
+void lpfc_set_failmask(struct lpfc_hba *, struct lpfc_nodelist *, uint32_t,
+ uint32_t);
+void lpfc_process_nodev_timeout(struct lpfc_hba *, struct lpfc_nodelist *);
+
+struct lpfc_nodelist *lpfc_findnode_rpi(struct lpfc_hba * phba, uint16_t rpi);
+struct lpfc_nodelist *lpfc_findnode_remove_rpi(struct lpfc_hba * phba,
+ uint16_t rpi);
+void lpfc_addnode_rpi(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
+ uint16_t rpi);
+
+int lpfc_discq_post_event(struct lpfc_hba *, void *, void *, uint32_t);
+int lpfc_do_dpc(void *);
+int lpfc_disc_state_machine(struct lpfc_hba *, struct lpfc_nodelist *, void *,
+ uint32_t);
+
+uint32_t lpfc_cmpl_prli_reglogin_issue(struct lpfc_hba *,
+ struct lpfc_nodelist *, void *,
+ uint32_t);
+uint32_t lpfc_cmpl_plogi_prli_issue(struct lpfc_hba *, struct lpfc_nodelist *,
+ void *, uint32_t);
+
+int lpfc_check_sparm(struct lpfc_hba *, struct lpfc_nodelist *,
+ struct serv_parm *, uint32_t);
+int lpfc_els_abort_flogi(struct lpfc_hba *);
+int lpfc_initial_flogi(struct lpfc_hba *);
+int lpfc_issue_els_plogi(struct lpfc_hba *, struct lpfc_nodelist *, uint8_t);
+int lpfc_issue_els_prli(struct lpfc_hba *, struct lpfc_nodelist *, uint8_t);
+int lpfc_issue_els_adisc(struct lpfc_hba *, struct lpfc_nodelist *, uint8_t);
+int lpfc_issue_els_logo(struct lpfc_hba *, struct lpfc_nodelist *, uint8_t);
+int lpfc_issue_els_scr(struct lpfc_hba *, uint32_t, uint8_t);
+int lpfc_els_free_iocb(struct lpfc_hba *, struct lpfc_iocbq *);
+int lpfc_els_rsp_acc(struct lpfc_hba *, uint32_t, struct lpfc_iocbq *,
+ struct lpfc_nodelist *, LPFC_MBOXQ_t *, uint8_t);
+int lpfc_els_rsp_reject(struct lpfc_hba *, uint32_t, struct lpfc_iocbq *,
+ struct lpfc_nodelist *);
+int lpfc_els_rsp_adisc_acc(struct lpfc_hba *, struct lpfc_iocbq *,
+ struct lpfc_nodelist *);
+int lpfc_els_rsp_prli_acc(struct lpfc_hba *, struct lpfc_iocbq *,
+ struct lpfc_nodelist *);
+void lpfc_els_retry_delay(unsigned long);
+void lpfc_els_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *,
+ struct lpfc_iocbq *);
+int lpfc_els_handle_rscn(struct lpfc_hba *);
+int lpfc_els_flush_rscn(struct lpfc_hba *);
+int lpfc_rscn_payload_check(struct lpfc_hba *, uint32_t);
+void lpfc_els_flush_cmd(struct lpfc_hba *);
+int lpfc_els_disc_adisc(struct lpfc_hba *);
+int lpfc_els_disc_plogi(struct lpfc_hba *);
+void lpfc_els_timeout_handler(unsigned long ptr);
+
+void lpfc_ct_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *,
+ struct lpfc_iocbq *);
+int lpfc_ns_cmd(struct lpfc_hba *, struct lpfc_nodelist *, int);
+int lpfc_fdmi_cmd(struct lpfc_hba *, struct lpfc_nodelist *, int);
+void lpfc_fdmi_tmo(unsigned long);
+
+int lpfc_config_port_prep(struct lpfc_hba *);
+int lpfc_config_port_post(struct lpfc_hba *);
+int lpfc_hba_down_prep(struct lpfc_hba *);
+void lpfc_handle_eratt(struct lpfc_hba *, uint32_t);
+void lpfc_handle_latt(struct lpfc_hba *);
+void lpfc_hba_init(struct lpfc_hba *, uint32_t *);
+int lpfc_post_buffer(struct lpfc_hba *, struct lpfc_sli_ring *, int, int);
+void lpfc_cleanup(struct lpfc_hba *, uint32_t);
+int lpfc_scsi_free(struct lpfc_hba *);
+void lpfc_decode_firmware_rev(struct lpfc_hba *, char *, int);
+uint8_t *lpfc_get_lpfchba_info(struct lpfc_hba *, uint8_t *);
+int lpfc_fcp_abort(struct lpfc_hba *, int, int, int);
+int lpfc_put_event(struct lpfc_hba *, uint32_t, uint32_t, void *,
+ uint32_t, uint32_t);
+void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
+int lpfc_online(struct lpfc_hba *);
+int lpfc_offline(struct lpfc_hba *);
+
+
+
+int lpfc_sli_queue_setup(struct lpfc_hba *);
+void lpfc_slim_access(struct lpfc_hba *);
+
+/* lpfc_handle_eratt()/lpfc_handle_latt() are already declared above --
+ * duplicate re-declarations removed. */
+irqreturn_t lpfc_intr_handler(int, void *, struct pt_regs *);
+
+void lpfc_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_config_ring(struct lpfc_hba *, int, LPFC_MBOXQ_t *);
+void lpfc_config_port(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_mbox_put(struct lpfc_hba *, LPFC_MBOXQ_t *);
+LPFC_MBOXQ_t *lpfc_mbox_get(struct lpfc_hba *);
+
+int lpfc_mem_alloc(struct lpfc_hba *);
+void lpfc_mem_free(struct lpfc_hba *);
+
+struct lpfc_iocbq *
+lpfc_prep_els_iocb(struct lpfc_hba * phba,
+ uint8_t expectRsp,
+ uint16_t cmdSize,
+ uint8_t retry, struct lpfc_nodelist * ndlp, uint32_t elscmd);
+
+int lpfc_sli_hba_setup(struct lpfc_hba *);
+int lpfc_sli_hba_down(struct lpfc_hba *);
+int lpfc_sli_intr(struct lpfc_hba *);
+int lpfc_sli_issue_mbox(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t);
+int lpfc_sli_issue_iocb(struct lpfc_hba *, struct lpfc_sli_ring *,
+ struct lpfc_iocbq *, uint32_t);
+void lpfc_sli_pcimem_bcopy(uint32_t *, uint32_t *, uint32_t);
+int lpfc_sli_ringpostbuf_put(struct lpfc_hba *, struct lpfc_sli_ring *,
+ struct lpfc_dmabuf *);
+struct lpfc_dmabuf *lpfc_sli_ringpostbuf_get(struct lpfc_hba *,
+ struct lpfc_sli_ring *,
+ dma_addr_t);
+uint32_t lpfc_sli_next_iotag(struct lpfc_hba *, struct lpfc_sli_ring *);
+int lpfc_sli_issue_abort_iotag32(struct lpfc_hba *, struct lpfc_sli_ring *,
+ struct lpfc_iocbq *);
+int lpfc_sli_abort_iocb_ctx(struct lpfc_hba *, struct lpfc_sli_ring *,
+ uint32_t);
+int lpfc_sli_sum_iocb_host(struct lpfc_hba *, struct lpfc_sli_ring *);
+int lpfc_sli_abort_iocb_host(struct lpfc_hba *, struct lpfc_sli_ring *, int);
+int lpfc_sli_sum_iocb_lun(struct lpfc_hba *, struct lpfc_sli_ring *, uint16_t,
+ uint64_t);
+int lpfc_sli_abort_iocb_lun(struct lpfc_hba *, struct lpfc_sli_ring *, uint16_t,
+ uint64_t, int);
+int lpfc_sli_abort_iocb_tgt(struct lpfc_hba *, struct lpfc_sli_ring *,
+ uint16_t, int);
+void lpfc_mbox_timeout(unsigned long);
+
+void lpfc_map_fcp_cmnd_to_bpl(struct lpfc_hba *, struct lpfc_scsi_buf *);
+void lpfc_free_scsi_cmd(struct lpfc_scsi_buf *);
+uint32_t lpfc_os_timeout_transform(struct lpfc_hba *, uint32_t);
+
+struct lpfc_nodelist *
+lpfc_findnode_wwpn(struct lpfc_hba * phba, uint32_t order,
+ struct lpfc_name * wwpn);
+struct lpfc_nodelist *
+lpfc_findnode_wwnn(struct lpfc_hba * phba, uint32_t order,
+ struct lpfc_name * wwnn);
+struct lpfc_nodelist *lpfc_findnode_did(struct lpfc_hba * phba, uint32_t order,
+ uint32_t did);
+
+void lpfc_get_hba_sym_node_name(struct lpfc_hba * phba, uint8_t * symbp);
+
+int lpfc_sli_issue_mbox_wait(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq,
+ uint32_t timeout);
+
+int
+lpfc_sli_issue_iocb_wait(struct lpfc_hba * phba,
+ struct lpfc_sli_ring * pring,
+ struct lpfc_iocbq * piocb,
+ struct lpfc_iocbq * prspiocbq, uint32_t timeout);
+int lpfc_sli_issue_iocb_wait_high_priority(struct lpfc_hba * phba,
+ struct lpfc_sli_ring * pring,
+ struct lpfc_iocbq * piocb,
+ uint32_t flag,
+ struct lpfc_iocbq * prspiocbq,
+ uint32_t timeout);
+void lpfc_sli_wake_iocb_high_priority(struct lpfc_hba * phba,
+ struct lpfc_iocbq * queue1,
+ struct lpfc_iocbq * queue2);
+void *lpfc_mbuf_alloc(struct lpfc_hba *, int, dma_addr_t *);
+void lpfc_mbuf_free(struct lpfc_hba *, void *, dma_addr_t);
+
+int lpfc_stop_timer(struct lpfc_hba *);
+
+
+/* Function prototypes. */
+int lpfc_queuecommand(struct scsi_cmnd *, void (*done) (struct scsi_cmnd *));
+int lpfc_abort_handler(struct scsi_cmnd *);
+int lpfc_reset_bus_handler(struct scsi_cmnd *);
+int lpfc_reset_lun_handler(struct scsi_cmnd *);
+
+#if defined(FC_TRANS_VER1) || defined(FC_TRANS_265_BLKPATCH)
+void lpfc_target_unblock(struct lpfc_hba *, struct lpfc_target *);
+void lpfc_target_block(struct lpfc_hba *, struct lpfc_target *);
+int lpfc_target_remove(struct lpfc_hba *, struct lpfc_target *);
+int lpfc_target_add(struct lpfc_hba *, struct lpfc_target *);
+#endif
+
+#define ScsiResult(host_code, scsi_code) (((host_code) << 16) | scsi_code)
+#define HBA_EVENT_RSCN 5
+#define HBA_EVENT_LINK_UP 2
+#define HBA_EVENT_LINK_DOWN 3
+#endif /* _H_LPFC_CRTN */
--- /dev/null
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for *
+ * Enterprise Fibre Channel Host Bus Adapters. *
+ * Refer to the README file included with this package for *
+ * driver version and adapter support. *
+ * Copyright (C) 2004 Emulex Corporation. *
+ * www.emulex.com *
+ * *
+ * This program is free software; you can redistribute it and/or *
+ * modify it under the terms of the GNU General Public License *
+ * as published by the Free Software Foundation; either version 2 *
+ * of the License, or (at your option) any later version. *
+ * *
+ * This program is distributed in the hope that it will be useful, *
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of *
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
+ * GNU General Public License for more details, a copy of which *
+ * can be found in the file COPYING included with this package. *
+ *******************************************************************/
+
+/*
+ * $Id: lpfc_ct.c 1.143 2004/11/17 14:50:38EST sf_support Exp $
+ *
+ * Fibre Channel SCSI LAN Device Driver CT support
+ */
+
+#include <linux/version.h>
+#include <linux/blkdev.h>
+#include <linux/dma-mapping.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/utsname.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include "lpfc_sli.h"
+#include "lpfc_disc.h"
+#include "lpfc_scsi.h"
+#include "lpfc.h"
+#include "lpfc_crtn.h"
+#include "lpfc_hw.h"
+#include "lpfc_logmsg.h"
+#include "lpfc_mem.h"
+#include "lpfc_version.h"
+
+
+#define HBA_PORTSPEED_UNKNOWN 0 /* Unknown - transceiver
+ * incapable of reporting */
+#define HBA_PORTSPEED_1GBIT 1 /* 1 GBit/sec */
+#define HBA_PORTSPEED_2GBIT 2 /* 2 GBit/sec */
+#define HBA_PORTSPEED_4GBIT 8 /* 4 GBit/sec */
+#define HBA_PORTSPEED_8GBIT 16 /* 8 GBit/sec */
+#define HBA_PORTSPEED_10GBIT 4 /* 10 GBit/sec */
+#define HBA_PORTSPEED_NOT_NEGOTIATED 5 /* Speed not established */
+
+#define FOURBYTES 4
+
+
+static char *lpfc_release_version = LPFC_DRIVER_VERSION;
+
+/*
+ * lpfc_ct_unsol_event
+ */
+/*
+ * Handle an unsolicited CT (Common Transport) receive sequence from the
+ * SLI layer.  Walks the chain of receive IOCBs, reclaims the posted DMA
+ * buffers that hold the CT payload, links them into a single list
+ * (pmbuf), reposts fresh receive buffers to the ring, then delivers the
+ * payload to any FC_REG_CT_EVENT listener via lpfc_put_event().  On
+ * successful delivery the listener takes ownership of the buffers;
+ * otherwise (or on any error path) they are freed here.
+ */
+void
+lpfc_ct_unsol_event(struct lpfc_hba * phba,
+ struct lpfc_sli_ring * pring, struct lpfc_iocbq * piocbq)
+{
+
+ struct lpfc_iocbq *next_piocbq;
+ struct lpfc_dmabuf *pmbuf = NULL; /* head of collected payload list */
+ struct lpfc_dmabuf *matp, *next_matp;
+ uint32_t ctx = 0, count = 0; /* CT context tag / total payload bytes */
+ IOCB_t *icmd = &piocbq->iocb;
+ int i, status, go_exit = 0;
+ struct list_head head;
+
+ if (icmd->ulpStatus)
+ return;
+
+ /* Temporarily splice a local head into the IOCB chain so the whole
+ * sequence can be traversed with list_for_each_entry_safe(). */
+ list_add_tail(&head, &piocbq->list);
+ list_for_each_entry_safe(piocbq, next_piocbq, &head, list) {
+ icmd = &piocbq->iocb;
+ if (ctx == 0)
+ ctx = (uint32_t) (icmd->ulpContext);
+ if (icmd->ulpStatus) {
+ /* Out of receive buffers: bump the stat and repost one */
+ if ((icmd->ulpStatus == IOSTAT_LOCAL_REJECT) &&
+ ((icmd->un.ulpWord[4] & 0xff)
+ == IOERR_RCV_BUFFER_WAITING)) {
+ phba->fc_stat.NoRcvBuf++;
+ lpfc_post_buffer(phba, pring, 0, 1);
+ }
+ go_exit = 1;
+ goto ct_unsol_event_exit_piocbq;
+ }
+
+ if (icmd->ulpBdeCount == 0)
+ continue;
+
+ /* Match each BDE's DMA address back to the posted buffer */
+ for (i = 0; i < icmd->ulpBdeCount; i++) {
+ matp = lpfc_sli_ringpostbuf_get(phba, pring,
+ getPaddr(icmd->un.
+ cont64[i].
+ addrHigh,
+ icmd->un.
+ cont64[i].
+ addrLow));
+ if (!matp) {
+ /* Insert lpfc log message here */
+ go_exit = 1;
+ goto ct_unsol_event_exit_piocbq;
+ }
+
+ /* Typically for Unsolicited CT requests */
+ if (!pmbuf) {
+ pmbuf = matp;
+ INIT_LIST_HEAD(&pmbuf->list);
+ } else
+ list_add_tail(&matp->list, &pmbuf->list);
+
+ count += icmd->un.cont64[i].tus.f.bdeSize;
+ }
+
+ /* Repost as many buffers as this IOCB consumed */
+ lpfc_post_buffer(phba, pring, i, 1);
+ icmd->ulpBdeCount = 0;
+ }
+ct_unsol_event_exit_piocbq:
+ list_del(&head);
+ /*
+ * if not early-exiting and there is pmbuf,
+ * then do FC_REG_CT_EVENT for libdfc
+ */
+ if (!go_exit && pmbuf) {
+ status = lpfc_put_event(phba, FC_REG_CT_EVENT, ctx,
+ (void *)pmbuf, count, 0);
+ if (status)
+ /* Delivered: listener now owns pmbuf and its chain.
+ * Need to free IOCB buffer ? */
+ return;
+ }
+ /* Not delivered: free the payload chain and its head here */
+ if (pmbuf) {
+ list_for_each_entry_safe(matp, next_matp, &pmbuf->list, list) {
+ lpfc_mbuf_free(phba, matp->virt, matp->phys);
+ list_del(&matp->list);
+ kfree(matp);
+ }
+ lpfc_mbuf_free(phba, pmbuf->virt, pmbuf->phys);
+ kfree(pmbuf);
+ }
+ return;
+}
+
+static void
+lpfc_free_ct_rsp(struct lpfc_hba * phba, struct lpfc_dmabuf * mlist)
+{
+ struct lpfc_dmabuf *mlast, *next_mlast;
+
+ list_for_each_entry_safe(mlast, next_mlast, &mlist->list, list) {
+ lpfc_mbuf_free(phba, mlast->virt, mlast->phys);
+ list_del(&mlast->list);
+ kfree(mlast);
+ }
+ lpfc_mbuf_free(phba, mlist->virt, mlist->phys);
+ kfree(mlist);
+ return;
+}
+
+static struct lpfc_dmabuf *
+lpfc_alloc_ct_rsp(struct lpfc_hba * phba, int cmdcode, struct ulp_bde64 * bpl,
+ uint32_t size, int *entries)
+{
+ struct lpfc_dmabuf *mlist = NULL;
+ struct lpfc_dmabuf *mp;
+ int cnt, i = 0;
+
+ /* We get chucks of FCELSSIZE */
+ cnt = size > FCELSSIZE ? FCELSSIZE: size;
+
+ while (size) {
+ /* Allocate buffer for rsp payload */
+ mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_ATOMIC);
+ if (!mp) {
+ if (mlist)
+ lpfc_free_ct_rsp(phba, mlist);
+ return NULL;
+ }
+
+ INIT_LIST_HEAD(&mp->list);
+
+ if (cmdcode == be16_to_cpu(SLI_CTNS_GID_FT))
+ mp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &(mp->phys));
+ else
+ mp->virt = lpfc_mbuf_alloc(phba, 0, &(mp->phys));
+
+ if (!mp->virt) {
+ kfree(mp);
+ lpfc_free_ct_rsp(phba, mlist);
+ return NULL;
+ }
+
+ /* Queue it to a linked list */
+ if (!mlist)
+ mlist = mp;
+ else
+ list_add_tail(&mp->list, &mlist->list);
+
+ bpl->tus.f.bdeFlags = BUFF_USE_RCV;
+ /* build buffer ptr list for IOCB */
+ bpl->addrLow = le32_to_cpu( putPaddrLow(mp->phys) );
+ bpl->addrHigh = le32_to_cpu( putPaddrHigh(mp->phys) );
+ bpl->tus.f.bdeSize = (uint16_t) cnt;
+ bpl->tus.w = le32_to_cpu(bpl->tus.w);
+ bpl++;
+
+ i++;
+ size -= cnt;
+ }
+
+ *entries = i;
+ return mlist;
+}
+
/*
 * lpfc_gen_req - build and issue a GEN_REQUEST64 iocb on the ELS ring.
 * @phba:      HBA context
 * @bmp:       buffer pointer list (request BDE followed by response BDEs)
 * @inp:       DMA buffer holding the CT request payload
 * @outp:      DMA buffer chain for the CT response payload
 * @cmpl:      completion handler stored in the iocb
 * @ndlp:      destination node; its RPI addresses the remote port
 * @usr_flg:   nonzero means the caller keeps ownership of @bmp
 * @num_entry: number of BDEs described by @bmp
 * @tmo:       timeout in seconds; 0 selects (2 * fc_ratov) + 1
 *
 * The in/out/bpl buffers are attached to the iocb context fields so the
 * completion handler can release them.  Returns 0 if the iocb was
 * queued, 1 on allocation or ring-submission failure (in which case the
 * caller keeps ownership of all buffers).
 */
static int
lpfc_gen_req(struct lpfc_hba *phba, struct lpfc_dmabuf *bmp,
	     struct lpfc_dmabuf *inp, struct lpfc_dmabuf *outp,
	     void (*cmpl) (struct lpfc_hba *, struct lpfc_iocbq *,
			   struct lpfc_iocbq *),
	     struct lpfc_nodelist *ndlp, uint32_t usr_flg, uint32_t num_entry,
	     uint32_t tmo)
{

	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
	IOCB_t *icmd;
	struct lpfc_iocbq *geniocb;

	/* Allocate buffer for command iocb */
	geniocb = mempool_alloc(phba->iocb_mem_pool, GFP_ATOMIC);
	if (!geniocb) {
		return 1;
	}
	memset(geniocb, 0, sizeof (struct lpfc_iocbq));
	icmd = &geniocb->iocb;

	/* BDL points at the buffer pointer list built by the caller */
	icmd->un.genreq64.bdl.ulpIoTag32 = 0;
	icmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
	icmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys);
	icmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BDL;
	icmd->un.genreq64.bdl.bdeSize = (num_entry * sizeof (struct ulp_bde64));

	/* When usr_flg is set the caller retains bmp, so leave context3
	 * NULL and the completion handler will not attempt to free it */
	if (usr_flg)
		geniocb->context3 = NULL;
	else
		geniocb->context3 = (uint8_t *) bmp;

	/* Save for completion so we can release these resources */
	geniocb->context1 = (uint8_t *) inp;
	geniocb->context2 = (uint8_t *) outp;

	/* Fill in payload, bp points to frame payload */
	icmd->ulpCommand = CMD_GEN_REQUEST64_CR;

	/* Flush the BPL to the device before issuing the request */
	pci_dma_sync_single_for_device(phba->pcidev, bmp->phys,
				       LPFC_BPL_SIZE, PCI_DMA_TODEVICE);

	icmd->ulpIoTag = lpfc_sli_next_iotag(phba, pring);

	/* Fill in rest of iocb */
	icmd->un.genreq64.w5.hcsw.Fctl = (SI | LA);
	icmd->un.genreq64.w5.hcsw.Dfctl = 0;
	icmd->un.genreq64.w5.hcsw.Rctl = FC_UNSOL_CTL;
	icmd->un.genreq64.w5.hcsw.Type = FC_COMMON_TRANSPORT_ULP;

	/* Default timeout: twice the resource allocation timeout plus one */
	if (!tmo)
		tmo = (2 * phba->fc_ratov) + 1;
	icmd->ulpTimeout = tmo;
	icmd->ulpBdeCount = 1;
	icmd->ulpLe = 1;
	icmd->ulpClass = CLASS3;
	icmd->ulpContext = ndlp->nlp_rpi;

	/* Issue GEN REQ IOCB for NPORT <did> */
	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
			"%d:0119 Issue GEN REQ IOCB for NPORT x%x "
			"Data: x%x x%x\n", phba->brd_no, icmd->un.ulpWord[5],
			icmd->ulpIoTag, phba->hba_state);
	geniocb->iocb_cmpl = cmpl;
	geniocb->drvrTimeout = icmd->ulpTimeout + LPFC_DRVR_TIMEOUT;
	if (lpfc_sli_issue_iocb(phba, pring, geniocb, 0) == IOCB_ERROR) {
		mempool_free( geniocb, phba->iocb_mem_pool);
		return 1;
	}

	return 0;
}
+
+static int
+lpfc_ct_cmd(struct lpfc_hba *phba, struct lpfc_dmabuf *inmp,
+ struct lpfc_dmabuf *bmp, struct lpfc_nodelist *ndlp,
+ void (*cmpl) (struct lpfc_hba *, struct lpfc_iocbq *,
+ struct lpfc_iocbq *),
+ uint32_t rsp_size)
+{
+ struct ulp_bde64 *bpl = (struct ulp_bde64 *) bmp->virt;
+ struct lpfc_dmabuf *outmp;
+ int cnt = 0, status;
+ int cmdcode = ((struct lpfc_sli_ct_request *) inmp->virt)->
+ CommandResponse.bits.CmdRsp;
+
+ bpl++; /* Skip past ct request */
+
+ /* Put buffer(s) for ct rsp in bpl */
+ outmp = lpfc_alloc_ct_rsp(phba, cmdcode, bpl, rsp_size, &cnt);
+ if (!outmp)
+ return -ENOMEM;
+
+ status = lpfc_gen_req(phba, bmp, inmp, outmp, cmpl, ndlp, 0,
+ cnt+1, 0);
+ if (status) {
+ lpfc_free_ct_rsp(phba, outmp);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
/*
 * lpfc_ns_rsp - parse a GID_FT accept payload from the name server.
 *
 * Walks the chained response buffers in @mp, extracting one 4-byte port
 * entry at a time.  Each DID (other than our own) is passed to
 * lpfc_setup_disc_node() so discovery/RSCN processing can mark the node.
 * Parsing stops at the entry carrying the SLI_CT_LAST_ENTRY flag.
 *
 * NOTE(review): Cnt is derived once from the total Size before the
 * buffer loop and only reduced by the CT header for continuation
 * buffers - confirm this matches how the payload is split across
 * FCELSSIZE-sized buffers.
 *
 * Always returns 0.
 */
static int
lpfc_ns_rsp(struct lpfc_hba * phba, struct lpfc_dmabuf * mp, uint32_t Size)
{
	struct lpfc_sli_ct_request *Response =
	    (struct lpfc_sli_ct_request *) mp->virt;
	struct lpfc_nodelist *ndlp = NULL;
	struct lpfc_dmabuf *mlast, *next_mp;
	uint32_t *ctptr = (uint32_t *) & Response->un.gid.PortType;
	uint32_t Did;
	uint32_t CTentry;
	int Cnt;
	struct list_head head;

	/* Restart the discovery timeout while we process the response */
	lpfc_set_disctmo(phba);

	Cnt = Size > FCELSSIZE ? FCELSSIZE : Size;

	list_add_tail(&head, &mp->list);
	list_for_each_entry_safe(mp, next_mp, &head, list) {
		mlast = mp;
		/* Make the device-written buffer visible to the CPU */
		pci_dma_sync_single_for_cpu(phba->pcidev, mp->phys,
					    LPFC_BPL_SIZE, PCI_DMA_FROMDEVICE);

		Size -= Cnt;

		/* First buffer starts past the CT header (ctptr preset);
		 * continuation buffers start at their beginning */
		if (!ctptr)
			ctptr = (uint32_t *) mlast->virt;
		else
			Cnt -= 16;	/* subtract length of CT header */

		/* Loop through entire NameServer list of DIDs */
		while (Cnt) {

			/* Get next DID from NameServer List */
			CTentry = *ctptr++;
			Did = ((be32_to_cpu(CTentry)) & Mask_DID);

			ndlp = NULL;
			if (Did != phba->fc_myDID) {
				/* Check for rscn processing or not */
				ndlp = lpfc_setup_disc_node(phba, Did);
			}
			/* Mark all node table entries that are in the
			   Nameserver */
			if (ndlp) {
				/* NameServer Rsp */
				lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
						"%d:0238 Process x%x NameServer"
						" Rsp Data: x%x x%x x%x\n",
						phba->brd_no,
						Did, ndlp->nlp_flag,
						phba->fc_flag,
						phba->fc_rscn_id_cnt);
			} else {
				/* NameServer Rsp */
				lpfc_printf_log(phba,
						KERN_INFO,
						LOG_DISCOVERY,
						"%d:0239 Skip x%x NameServer "
						"Rsp Data: x%x x%x x%x\n",
						phba->brd_no,
						Did, Size, phba->fc_flag,
						phba->fc_rscn_id_cnt);
			}

			/* Last entry in the list ends the scan */
			if (CTentry & (be32_to_cpu(SLI_CT_LAST_ENTRY)))
				goto nsout1;
			Cnt -= sizeof (uint32_t);
		}
		ctptr = NULL;

	}

nsout1:
	list_del(&head);

	/* Here we are finished in the case RSCN */
	if (phba->hba_state == LPFC_HBA_READY) {
		lpfc_els_flush_rscn(phba);
		phba->fc_flag |= FC_RSCN_MODE;	/* we are still in RSCN mode */
	}
	return 0;
}
+
+
+
+
/*
 * lpfc_cmpl_ct_cmd_gid_ft - completion handler for a GID_FT query.
 *
 * On error: SLI-down/aborted completions just clean up; other errors
 * retry the query up to LPFC_MAX_NS_RETRY times (a successful re-issue
 * jumps to cleanup, leaving this command's buffers freed below while the
 * retry carries its own).  On success: an FS_ACC payload is parsed via
 * lpfc_ns_rsp(); rejects are only logged.  Except on the SLI-down path,
 * discovery is (re)started before releasing all command resources.
 */
static void
lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
			struct lpfc_iocbq * rspiocb)
{
	IOCB_t *irsp;
	struct lpfc_sli *psli;
	struct lpfc_dmabuf *bmp;
	struct lpfc_dmabuf *inp;
	struct lpfc_dmabuf *outp;
	struct lpfc_nodelist *ndlp;
	struct lpfc_sli_ct_request *CTrsp;

	psli = &phba->sli;
	/* we pass cmdiocb to state machine which needs rspiocb as well */
	cmdiocb->context_un.rsp_iocb = rspiocb;

	inp = (struct lpfc_dmabuf *) cmdiocb->context1;
	outp = (struct lpfc_dmabuf *) cmdiocb->context2;
	bmp = (struct lpfc_dmabuf *) cmdiocb->context3;

	irsp = &rspiocb->iocb;
	if (irsp->ulpStatus) {
		/* Adapter/link going away: no retry, no discovery restart */
		if((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
		   ((irsp->un.ulpWord[4] == IOERR_SLI_DOWN) ||
		    (irsp->un.ulpWord[4] == IOERR_SLI_ABORTED))) {
			goto out;
		}

		/* Check for retry */
		if (phba->fc_ns_retry < LPFC_MAX_NS_RETRY) {
			phba->fc_ns_retry++;
			/* CT command is being retried */
			ndlp =
			    lpfc_findnode_did(phba, NLP_SEARCH_UNMAPPED,
					      NameServer_DID);
			if (ndlp) {
				if (lpfc_ns_cmd(phba, ndlp, SLI_CTNS_GID_FT) ==
				    0) {
					goto out;
				}
			}
		}
	} else {
		/* Good status, continue checking */
		CTrsp = (struct lpfc_sli_ct_request *) outp->virt;
		if (CTrsp->CommandResponse.bits.CmdRsp ==
		    be16_to_cpu(SLI_CT_RESPONSE_FS_ACC)) {
			/* Accept: hand the DID list to discovery */
			lpfc_ns_rsp(phba, outp,
				    (uint32_t) (irsp->un.genreq64.bdl.bdeSize));
		} else if (CTrsp->CommandResponse.bits.CmdRsp ==
			   be16_to_cpu(SLI_CT_RESPONSE_FS_RJT)) {
			/* NameServer Rsp Error */
			lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
					"%d:0240 NameServer Rsp Error "
					"Data: x%x x%x x%x x%x\n",
					phba->brd_no,
					CTrsp->CommandResponse.bits.CmdRsp,
					(uint32_t) CTrsp->ReasonCode,
					(uint32_t) CTrsp->Explanation,
					phba->fc_flag);
		} else {
			/* NameServer Rsp Error */
			lpfc_printf_log(phba,
					KERN_INFO,
					LOG_DISCOVERY,
					"%d:0241 NameServer Rsp Error "
					"Data: x%x x%x x%x x%x\n",
					phba->brd_no,
					CTrsp->CommandResponse.bits.CmdRsp,
					(uint32_t) CTrsp->ReasonCode,
					(uint32_t) CTrsp->Explanation,
					phba->fc_flag);
		}
	}
	/* Link up / RSCN discovery */
	lpfc_disc_start(phba);
out:
	lpfc_free_ct_rsp(phba, outp);
	lpfc_mbuf_free(phba, inp->virt, inp->phys);
	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
	kfree(inp);
	kfree(bmp);
	mempool_free( cmdiocb, phba->iocb_mem_pool);
	return;
}
+
+static void
+lpfc_cmpl_ct_cmd_rft_id(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
+ struct lpfc_iocbq * rspiocb)
+{
+ struct lpfc_sli *psli;
+ struct lpfc_dmabuf *bmp;
+ struct lpfc_dmabuf *inp;
+ struct lpfc_dmabuf *outp;
+ IOCB_t *irsp;
+ struct lpfc_sli_ct_request *CTrsp;
+
+ psli = &phba->sli;
+ /* we pass cmdiocb to state machine which needs rspiocb as well */
+ cmdiocb->context_un.rsp_iocb = rspiocb;
+
+ inp = (struct lpfc_dmabuf *) cmdiocb->context1;
+ outp = (struct lpfc_dmabuf *) cmdiocb->context2;
+ bmp = (struct lpfc_dmabuf *) cmdiocb->context3;
+ irsp = &rspiocb->iocb;
+
+ CTrsp = (struct lpfc_sli_ct_request *) outp->virt;
+
+ /* RFT request completes status <ulpStatus> CmdRsp <CmdRsp> */
+ lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
+ "%d:0209 RFT request completes ulpStatus x%x "
+ "CmdRsp x%x\n", phba->brd_no, irsp->ulpStatus,
+ CTrsp->CommandResponse.bits.CmdRsp);
+
+ lpfc_free_ct_rsp(phba, outp);
+ lpfc_mbuf_free(phba, inp->virt, inp->phys);
+ lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
+ kfree(inp);
+ kfree(bmp);
+ mempool_free( cmdiocb, phba->iocb_mem_pool);
+ return;
+}
+
/* RNN_ID completions need exactly the same log-and-release handling */
static void
lpfc_cmpl_ct_cmd_rnn_id(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
			struct lpfc_iocbq * rspiocb)
{
	lpfc_cmpl_ct_cmd_rft_id(phba, cmdiocb, rspiocb);
}
+
/* RSNN_NN completions need exactly the same log-and-release handling */
static void
lpfc_cmpl_ct_cmd_rsnn_nn(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
			 struct lpfc_iocbq * rspiocb)
{
	lpfc_cmpl_ct_cmd_rft_id(phba, cmdiocb, rspiocb);
}
+
+void
+lpfc_get_hba_sym_node_name(struct lpfc_hba * phba, uint8_t * symbp)
+{
+ uint8_t buf[16];
+ char fwrev[16];
+
+ lpfc_decode_firmware_rev(phba, fwrev, 0);
+ lpfc_get_hba_model_desc(phba, buf, NULL);
+ sprintf(symbp, "Emulex %s FV%s DV%s", buf, fwrev, lpfc_release_version);
+}
+
+/*
+ * lpfc_ns_cmd
+ * Description:
+ * Issue Cmd to NameServer
+ * SLI_CTNS_GID_FT
+ * LI_CTNS_RFT_ID
+ */
+int
+lpfc_ns_cmd(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, int cmdcode)
+{
+ struct lpfc_dmabuf *mp, *bmp;
+ struct lpfc_sli_ct_request *CtReq;
+ struct ulp_bde64 *bpl;
+ void (*cmpl) (struct lpfc_hba *, struct lpfc_iocbq *,
+ struct lpfc_iocbq *) = NULL;
+ uint32_t rsp_size = 1024;
+
+ /* fill in BDEs for command */
+ /* Allocate buffer for command payload */
+ mp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_ATOMIC);
+ if (!mp)
+ goto ns_cmd_exit;
+
+ INIT_LIST_HEAD(&mp->list);
+ mp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &(mp->phys));
+ if (!mp->virt)
+ goto ns_cmd_free_mp;
+
+ /* Allocate buffer for Buffer ptr list */
+ bmp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_ATOMIC);
+ if (!bmp)
+ goto ns_cmd_free_mpvirt;
+
+ INIT_LIST_HEAD(&bmp->list);
+ bmp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &(bmp->phys));
+ if (!bmp->virt)
+ goto ns_cmd_free_bmp;
+
+ /* NameServer Req */
+ lpfc_printf_log(phba,
+ KERN_INFO,
+ LOG_DISCOVERY,
+ "%d:0236 NameServer Req Data: x%x x%x x%x\n",
+ phba->brd_no, cmdcode, phba->fc_flag,
+ phba->fc_rscn_id_cnt);
+
+ bpl = (struct ulp_bde64 *) bmp->virt;
+ memset(bpl, 0, sizeof(struct ulp_bde64));
+ bpl->addrHigh = le32_to_cpu( putPaddrHigh(mp->phys) );
+ bpl->addrLow = le32_to_cpu( putPaddrLow(mp->phys) );
+ bpl->tus.f.bdeFlags = 0;
+ if (cmdcode == SLI_CTNS_GID_FT)
+ bpl->tus.f.bdeSize = GID_REQUEST_SZ;
+ else if (cmdcode == SLI_CTNS_RFT_ID)
+ bpl->tus.f.bdeSize = RFT_REQUEST_SZ;
+ else if (cmdcode == SLI_CTNS_RNN_ID)
+ bpl->tus.f.bdeSize = RNN_REQUEST_SZ;
+ else if (cmdcode == SLI_CTNS_RSNN_NN)
+ bpl->tus.f.bdeSize = RSNN_REQUEST_SZ;
+ else
+ bpl->tus.f.bdeSize = 0;
+ bpl->tus.w = le32_to_cpu(bpl->tus.w);
+
+ CtReq = (struct lpfc_sli_ct_request *) mp->virt;
+ memset(CtReq, 0, sizeof (struct lpfc_sli_ct_request));
+ CtReq->RevisionId.bits.Revision = SLI_CT_REVISION;
+ CtReq->RevisionId.bits.InId = 0;
+ CtReq->FsType = SLI_CT_DIRECTORY_SERVICE;
+ CtReq->FsSubType = SLI_CT_DIRECTORY_NAME_SERVER;
+ CtReq->CommandResponse.bits.Size = 0;
+ switch (cmdcode) {
+ case SLI_CTNS_GID_FT:
+ CtReq->CommandResponse.bits.CmdRsp =
+ be16_to_cpu(SLI_CTNS_GID_FT);
+ CtReq->un.gid.Fc4Type = SLI_CTPT_FCP;
+ if (phba->hba_state < LPFC_HBA_READY)
+ phba->hba_state = LPFC_NS_QRY;
+ lpfc_set_disctmo(phba);
+ cmpl = lpfc_cmpl_ct_cmd_gid_ft;
+ rsp_size = FC_MAX_NS_RSP;
+ break;
+
+ case SLI_CTNS_RFT_ID:
+ CtReq->CommandResponse.bits.CmdRsp =
+ be16_to_cpu(SLI_CTNS_RFT_ID);
+ CtReq->un.rft.PortId = be32_to_cpu(phba->fc_myDID);
+ CtReq->un.rft.fcpReg = 1;
+ cmpl = lpfc_cmpl_ct_cmd_rft_id;
+ break;
+
+ case SLI_CTNS_RNN_ID:
+ CtReq->CommandResponse.bits.CmdRsp =
+ be16_to_cpu(SLI_CTNS_RNN_ID);
+ CtReq->un.rnn.PortId = be32_to_cpu(phba->fc_myDID);
+ memcpy(CtReq->un.rnn.wwnn, &phba->fc_nodename,
+ sizeof (struct lpfc_name));
+ cmpl = lpfc_cmpl_ct_cmd_rnn_id;
+ break;
+
+ case SLI_CTNS_RSNN_NN:
+ CtReq->CommandResponse.bits.CmdRsp =
+ be16_to_cpu(SLI_CTNS_RSNN_NN);
+ memcpy(CtReq->un.rsnn.wwnn, &phba->fc_nodename,
+ sizeof (struct lpfc_name));
+ lpfc_get_hba_sym_node_name(phba, CtReq->un.rsnn.symbname);
+ CtReq->un.rsnn.len = strlen(CtReq->un.rsnn.symbname);
+ cmpl = lpfc_cmpl_ct_cmd_rsnn_nn;
+ break;
+ }
+
+ if (!lpfc_ct_cmd(phba, mp, bmp, ndlp, cmpl, rsp_size))
+ /* On success, The cmpl function will free the buffers */
+ return 0;
+
+ lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
+ns_cmd_free_bmp:
+ kfree(bmp);
+ns_cmd_free_mpvirt:
+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ns_cmd_free_mp:
+ kfree(mp);
+ns_cmd_exit:
+ return 1;
+}
+
+static void
+lpfc_cmpl_ct_cmd_fdmi(struct lpfc_hba * phba,
+ struct lpfc_iocbq * cmdiocb, struct lpfc_iocbq * rspiocb)
+{
+ struct lpfc_dmabuf *bmp = cmdiocb->context3;
+ struct lpfc_dmabuf *inp = cmdiocb->context1;
+ struct lpfc_dmabuf *outp = cmdiocb->context2;
+ struct lpfc_sli_ct_request *CTrsp = outp->virt;
+ struct lpfc_sli_ct_request *CTcmd = inp->virt;
+ struct lpfc_nodelist *ndlp;
+ uint16_t fdmi_cmd = CTcmd->CommandResponse.bits.CmdRsp;
+ uint16_t fdmi_rsp = CTrsp->CommandResponse.bits.CmdRsp;
+
+ ndlp = lpfc_findnode_did(phba, NLP_SEARCH_ALL, FDMI_DID);
+ if (fdmi_rsp == be16_to_cpu(SLI_CT_RESPONSE_FS_RJT)) {
+ /* FDMI rsp failed */
+ lpfc_printf_log(phba,
+ KERN_INFO,
+ LOG_DISCOVERY,
+ "%d:0220 FDMI rsp failed Data: x%x\n",
+ phba->brd_no,
+ be16_to_cpu(fdmi_cmd));
+ }
+
+ switch (be16_to_cpu(fdmi_cmd)) {
+ case SLI_MGMT_RHBA:
+ lpfc_fdmi_cmd(phba, ndlp, SLI_MGMT_RPA);
+ break;
+
+ case SLI_MGMT_RPA:
+ break;
+
+ case SLI_MGMT_DHBA:
+ lpfc_fdmi_cmd(phba, ndlp, SLI_MGMT_DPRT);
+ break;
+
+ case SLI_MGMT_DPRT:
+ lpfc_fdmi_cmd(phba, ndlp, SLI_MGMT_RHBA);
+ break;
+ }
+
+ lpfc_free_ct_rsp(phba, outp);
+ lpfc_mbuf_free(phba, inp->virt, inp->phys);
+ lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
+ kfree(inp);
+ kfree(bmp);
+ mempool_free(cmdiocb, phba->iocb_mem_pool);
+ return;
+}
/*
 * lpfc_fdmi_cmd - build and issue an FDMI management-server CT request.
 * @phba:    HBA context
 * @ndlp:    management server node (FDMI_DID); must be valid
 * @cmdcode: SLI_MGMT_RHBA / RPA / DHBA / DPRT
 *
 * RHBA registers the HBA with 11 attributes (node name, manufacturer,
 * serial number, model, model description, hardware/driver/option-ROM/
 * firmware versions, OS name, max CT payload); RPA registers per-port
 * attributes; DHBA/DPRT deregister.  The attribute payload is packed
 * in place with byte-offset arithmetic ('size' tracks the running
 * offset, 4-byte aligned via the 'len' rounding).  On success the
 * completion handler frees all buffers; on failure they are released
 * here.  Returns 0 if issued, 1 on failure.
 */
int
lpfc_fdmi_cmd(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, int cmdcode)
{
	struct lpfc_dmabuf *mp, *bmp;
	struct lpfc_sli_ct_request *CtReq;
	struct ulp_bde64 *bpl;
	uint32_t size;
	REG_HBA *rh;
	PORT_ENTRY *pe;
	REG_PORT_ATTRIBUTE *pab;
	ATTRIBUTE_BLOCK *ab;
	ATTRIBUTE_ENTRY *ae;
	void (*cmpl) (struct lpfc_hba *, struct lpfc_iocbq *,
		      struct lpfc_iocbq *);


	/* fill in BDEs for command */
	/* Allocate buffer for command payload */
	mp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_ATOMIC);
	if (!mp)
		goto fdmi_cmd_exit;

	mp->virt = lpfc_mbuf_alloc(phba, 0, &(mp->phys));
	if (!mp->virt)
		goto fdmi_cmd_free_mp;

	/* Allocate buffer for Buffer ptr list */
	bmp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_ATOMIC);
	if (!bmp)
		goto fdmi_cmd_free_mpvirt;

	bmp->virt = lpfc_mbuf_alloc(phba, 0, &(bmp->phys));
	if (!bmp->virt)
		goto fdmi_cmd_free_bmp;

	INIT_LIST_HEAD(&mp->list);
	INIT_LIST_HEAD(&bmp->list);

	/* FDMI request */
	lpfc_printf_log(phba,
			KERN_INFO,
			LOG_DISCOVERY,
			"%d:0218 FDMI Request Data: x%x x%x x%x\n",
			phba->brd_no,
			phba->fc_flag, phba->hba_state, cmdcode);

	CtReq = (struct lpfc_sli_ct_request *) mp->virt;

	/* Common CT header for the management service */
	memset(CtReq, 0, sizeof(struct lpfc_sli_ct_request));
	CtReq->RevisionId.bits.Revision = SLI_CT_REVISION;
	CtReq->RevisionId.bits.InId = 0;

	CtReq->FsType = SLI_CT_MANAGEMENT_SERVICE;
	CtReq->FsSubType = SLI_CT_FDMI_Subtypes;
	size = 0;

	switch (cmdcode) {
	case SLI_MGMT_RHBA:
		{
			lpfc_vpd_t *vp = &phba->vpd;
			uint32_t i, j, incr;
			int len;

			CtReq->CommandResponse.bits.CmdRsp =
			    be16_to_cpu(SLI_MGMT_RHBA);
			CtReq->CommandResponse.bits.Size = 0;
			/* The REG_HBA payload overlays the CT request body */
			rh = (REG_HBA *) & CtReq->un.PortID;
			memcpy(&rh->hi.PortName, &phba->fc_sparam.portName,
			       sizeof (struct lpfc_name));
			/* One entry (port) per adapter */
			rh->rpl.EntryCnt = be32_to_cpu(1);
			memcpy(&rh->rpl.pe, &phba->fc_sparam.portName,
			       sizeof (struct lpfc_name));

			/* point to the HBA attribute block */
			size = 2 * sizeof (struct lpfc_name) + FOURBYTES;
			ab = (ATTRIBUTE_BLOCK *) ((uint8_t *) rh + size);
			ab->EntryCnt = 0;

			/* Point to the beginning of the first HBA attribute
			   entry */
			/* #1 HBA attribute entry */
			size += FOURBYTES;
			ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) rh + size);
			ae->ad.bits.AttrType = be16_to_cpu(NODE_NAME);
			ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES
					      + sizeof (struct lpfc_name));
			memcpy(&ae->un.NodeName, &phba->fc_sparam.nodeName,
			       sizeof (struct lpfc_name));
			ab->EntryCnt++;
			size += FOURBYTES + sizeof (struct lpfc_name);

			/* #2 HBA attribute entry */
			ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) rh + size);
			ae->ad.bits.AttrType = be16_to_cpu(MANUFACTURER);
			strcpy(ae->un.Manufacturer, "Emulex Corporation");
			len = strlen(ae->un.Manufacturer);
			/* round the string length up to a 4-byte boundary */
			len += (len & 3) ? (4 - (len & 3)) : 4;
			ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + len);
			ab->EntryCnt++;
			size += FOURBYTES + len;

			/* #3 HBA attribute entry */
			ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) rh + size);
			ae->ad.bits.AttrType = be16_to_cpu(SERIAL_NUMBER);
			strcpy(ae->un.SerialNumber, phba->SerialNumber);
			len = strlen(ae->un.SerialNumber);
			len += (len & 3) ? (4 - (len & 3)) : 4;
			ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + len);
			ab->EntryCnt++;
			size += FOURBYTES + len;

			/* #4 HBA attribute entry */
			ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) rh + size);
			ae->ad.bits.AttrType = be16_to_cpu(MODEL);
			lpfc_get_hba_model_desc(phba, ae->un.Model, NULL);
			len = strlen(ae->un.Model);
			len += (len & 3) ? (4 - (len & 3)) : 4;
			ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + len);
			ab->EntryCnt++;
			size += FOURBYTES + len;

			/* #5 HBA attribute entry */
			ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) rh + size);
			ae->ad.bits.AttrType = be16_to_cpu(MODEL_DESCRIPTION);
			lpfc_get_hba_model_desc(phba, NULL,
						ae->un.ModelDescription);
			len = strlen(ae->un.ModelDescription);
			len += (len & 3) ? (4 - (len & 3)) : 4;
			ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + len);
			ab->EntryCnt++;
			size += FOURBYTES + len;

			/* #6 HBA attribute entry */
			ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) rh + size);
			ae->ad.bits.AttrType = be16_to_cpu(HARDWARE_VERSION);
			ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + 8);
			/* Convert JEDEC ID to ascii for hardware version */
			incr = vp->rev.biuRev;
			for (i = 0; i < 8; i++) {
				j = (incr & 0xf);
				if (j <= 9)
					ae->un.HardwareVersion[7 - i] =
					    (char)((uint8_t) 0x30 +
						   (uint8_t) j);
				else
					ae->un.HardwareVersion[7 - i] =
					    (char)((uint8_t) 0x61 +
						   (uint8_t) (j - 10));
				incr = (incr >> 4);
			}
			ab->EntryCnt++;
			size += FOURBYTES + 8;

			/* #7 HBA attribute entry */
			ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) rh + size);
			ae->ad.bits.AttrType = be16_to_cpu(DRIVER_VERSION);
			strcpy(ae->un.DriverVersion, lpfc_release_version);
			len = strlen(ae->un.DriverVersion);
			len += (len & 3) ? (4 - (len & 3)) : 4;
			ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + len);
			ab->EntryCnt++;
			size += FOURBYTES + len;

			/* #8 HBA attribute entry */
			ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) rh + size);
			ae->ad.bits.AttrType = be16_to_cpu(OPTION_ROM_VERSION);
			strcpy(ae->un.OptionROMVersion, phba->OptionROMVersion);
			len = strlen(ae->un.OptionROMVersion);
			len += (len & 3) ? (4 - (len & 3)) : 4;
			ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + len);
			ab->EntryCnt++;
			size += FOURBYTES + len;

			/* #9 HBA attribute entry */
			ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) rh + size);
			ae->ad.bits.AttrType = be16_to_cpu(FIRMWARE_VERSION);
			lpfc_decode_firmware_rev(phba, ae->un.FirmwareVersion,
						 1);
			len = strlen(ae->un.FirmwareVersion);
			len += (len & 3) ? (4 - (len & 3)) : 4;
			ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + len);
			ab->EntryCnt++;
			size += FOURBYTES + len;

			/* #10 HBA attribute entry */
			ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) rh + size);
			ae->ad.bits.AttrType = be16_to_cpu(OS_NAME_VERSION);
			sprintf(ae->un.OsNameVersion, "%s %s %s",
				system_utsname.sysname, system_utsname.release,
				system_utsname.version);
			len = strlen(ae->un.OsNameVersion);
			len += (len & 3) ? (4 - (len & 3)) : 4;
			ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + len);
			ab->EntryCnt++;
			size += FOURBYTES + len;

			/* #11 HBA attribute entry */
			ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) rh + size);
			ae->ad.bits.AttrType = be16_to_cpu(MAX_CT_PAYLOAD_LEN);
			ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + 4);
			ae->un.MaxCTPayloadLen = (65 * 4096);
			ab->EntryCnt++;
			size += FOURBYTES + 4;

			ab->EntryCnt = be32_to_cpu(ab->EntryCnt);
			/* Total size */
			size = GID_REQUEST_SZ - 4 + size;
		}
		break;

	case SLI_MGMT_RPA:
		{
			lpfc_vpd_t *vp;
			struct serv_parm *hsp;
			int len;

			vp = &phba->vpd;

			CtReq->CommandResponse.bits.CmdRsp =
			    be16_to_cpu(SLI_MGMT_RPA);
			CtReq->CommandResponse.bits.Size = 0;
			pab = (REG_PORT_ATTRIBUTE *) & CtReq->un.PortID;
			size = sizeof (struct lpfc_name) + FOURBYTES;
			memcpy((uint8_t *) & pab->PortName,
			       (uint8_t *) & phba->fc_sparam.portName,
			       sizeof (struct lpfc_name));
			pab->ab.EntryCnt = 0;

			/* #1 Port attribute entry */
			ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) pab + size);
			ae->ad.bits.AttrType = be16_to_cpu(SUPPORTED_FC4_TYPES);
			ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + 32);
			/* bits for FCP and the CT protocol types */
			ae->un.SupportFC4Types[2] = 1;
			ae->un.SupportFC4Types[7] = 1;
			pab->ab.EntryCnt++;
			size += FOURBYTES + 32;

			/* #2 Port attribute entry */
			ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) pab + size);
			ae->ad.bits.AttrType = be16_to_cpu(SUPPORTED_SPEED);
			ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + 4);
			/* Supported speed is derived from the ASIC family */
			if (FC_JEDEC_ID(vp->rev.biuRev) == VIPER_JEDEC_ID)
				ae->un.SupportSpeed = HBA_PORTSPEED_10GBIT;
			else if (FC_JEDEC_ID(vp->rev.biuRev) == HELIOS_JEDEC_ID)
				ae->un.SupportSpeed = HBA_PORTSPEED_4GBIT;
			else if ((FC_JEDEC_ID(vp->rev.biuRev) ==
				  CENTAUR_2G_JEDEC_ID)
				 || (FC_JEDEC_ID(vp->rev.biuRev) ==
				     PEGASUS_JEDEC_ID)
				 || (FC_JEDEC_ID(vp->rev.biuRev) ==
				     THOR_JEDEC_ID))
				ae->un.SupportSpeed = HBA_PORTSPEED_2GBIT;
			else
				ae->un.SupportSpeed = HBA_PORTSPEED_1GBIT;
			pab->ab.EntryCnt++;
			size += FOURBYTES + 4;

			/* #3 Port attribute entry */
			ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) pab + size);
			ae->ad.bits.AttrType = be16_to_cpu(PORT_SPEED);
			ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + 4);
			switch(phba->fc_linkspeed) {
				case LA_1GHZ_LINK:
					ae->un.PortSpeed = HBA_PORTSPEED_1GBIT;
				break;
				case LA_2GHZ_LINK:
					ae->un.PortSpeed = HBA_PORTSPEED_2GBIT;
				break;
				case LA_4GHZ_LINK:
					ae->un.PortSpeed = HBA_PORTSPEED_4GBIT;
				break;
				default:
					ae->un.PortSpeed =
						HBA_PORTSPEED_UNKNOWN;
				break;
			}
			pab->ab.EntryCnt++;
			size += FOURBYTES + 4;

			/* #4 Port attribute entry */
			ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) pab + size);
			ae->ad.bits.AttrType = be16_to_cpu(MAX_FRAME_SIZE);
			ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + 4);
			/* Frame size comes from our login service params */
			hsp = (struct serv_parm *) & phba->fc_sparam;
			ae->un.MaxFrameSize =
			    (((uint32_t) hsp->cmn.
			      bbRcvSizeMsb) << 8) | (uint32_t) hsp->cmn.
			    bbRcvSizeLsb;
			pab->ab.EntryCnt++;
			size += FOURBYTES + 4;

			/* #5 Port attribute entry */
			ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) pab + size);
			ae->ad.bits.AttrType = be16_to_cpu(OS_DEVICE_NAME);
			strcpy((char *)ae->un.OsDeviceName, LPFC_DRIVER_NAME);
			len = strlen((char *)ae->un.OsDeviceName);
			len += (len & 3) ? (4 - (len & 3)) : 4;
			ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + len);
			pab->ab.EntryCnt++;
			size += FOURBYTES + len;

			/* Host name is only registered at fdmi_on level 2 */
			if (phba->cfg_fdmi_on == 2) {
				/* #6 Port attribute entry */
				ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) pab +
							  size);
				ae->ad.bits.AttrType = be16_to_cpu(HOST_NAME);
				sprintf(ae->un.HostName, "%s",
					system_utsname.nodename);
				len = strlen(ae->un.HostName);
				len += (len & 3) ? (4 - (len & 3)) : 4;
				ae->ad.bits.AttrLen =
				    be16_to_cpu(FOURBYTES + len);
				pab->ab.EntryCnt++;
				size += FOURBYTES + len;
			}

			pab->ab.EntryCnt = be32_to_cpu(pab->ab.EntryCnt);
			/* Total size */
			size = GID_REQUEST_SZ - 4 + size;
		}
		break;

	case SLI_MGMT_DHBA:
		CtReq->CommandResponse.bits.CmdRsp = be16_to_cpu(SLI_MGMT_DHBA);
		CtReq->CommandResponse.bits.Size = 0;
		pe = (PORT_ENTRY *) & CtReq->un.PortID;
		memcpy((uint8_t *) & pe->PortName,
		       (uint8_t *) & phba->fc_sparam.portName,
		       sizeof (struct lpfc_name));
		size = GID_REQUEST_SZ - 4 + sizeof (struct lpfc_name);
		break;

	case SLI_MGMT_DPRT:
		CtReq->CommandResponse.bits.CmdRsp = be16_to_cpu(SLI_MGMT_DPRT);
		CtReq->CommandResponse.bits.Size = 0;
		pe = (PORT_ENTRY *) & CtReq->un.PortID;
		memcpy((uint8_t *) & pe->PortName,
		       (uint8_t *) & phba->fc_sparam.portName,
		       sizeof (struct lpfc_name));
		size = GID_REQUEST_SZ - 4 + sizeof (struct lpfc_name);
		break;
	}

	/* Single BDE describing the request payload built above */
	bpl = (struct ulp_bde64 *) bmp->virt;
	bpl->addrHigh = le32_to_cpu( putPaddrHigh(mp->phys) );
	bpl->addrLow = le32_to_cpu( putPaddrLow(mp->phys) );
	bpl->tus.f.bdeFlags = 0;
	bpl->tus.f.bdeSize = size;
	bpl->tus.w = le32_to_cpu(bpl->tus.w);

	cmpl = lpfc_cmpl_ct_cmd_fdmi;

	if (!lpfc_ct_cmd(phba, mp, bmp, ndlp, cmpl, FC_MAX_NS_RSP))
		return 0;

	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
fdmi_cmd_free_bmp:
	kfree(bmp);
fdmi_cmd_free_mpvirt:
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
fdmi_cmd_free_mp:
	kfree(mp);
fdmi_cmd_exit:
	/* Issue FDMI request failed */
	lpfc_printf_log(phba,
			KERN_INFO,
			LOG_DISCOVERY,
			"%d:0244 Issue FDMI request failed Data: x%x\n",
			phba->brd_no,
			cmdcode);
	return 1;
}
+
+
+void
+lpfc_fdmi_tmo(unsigned long ptr)
+{
+ struct lpfc_hba *phba = (struct lpfc_hba*)ptr;
+ struct lpfc_nodelist *ndlp;
+ unsigned long iflag;
+
+ spin_lock_irqsave(phba->host->host_lock, iflag);
+ ndlp = lpfc_findnode_did(phba, NLP_SEARCH_ALL, FDMI_DID);
+ if (ndlp) {
+ if (system_utsname.nodename[0] != '\0') {
+ lpfc_fdmi_cmd(phba, ndlp, SLI_MGMT_DHBA);
+ } else {
+ mod_timer(&phba->fc_fdmitmo, jiffies + HZ * 60);
+ }
+ }
+ spin_unlock_irqrestore(phba->host->host_lock, iflag);
+ return;
+}
+
+
+void
+lpfc_decode_firmware_rev(struct lpfc_hba * phba, char *fwrevision, int flag)
+{
+ struct lpfc_sli *psli = &phba->sli;
+ lpfc_vpd_t *vp = &phba->vpd;
+ uint32_t b1, b2, b3, b4, i, rev;
+ char c;
+ uint32_t *ptr, str[4];
+ uint8_t *fwname;
+
+ if (vp->rev.rBit) {
+ if (psli->sliinit.sli_flag & LPFC_SLI2_ACTIVE)
+ rev = vp->rev.sli2FwRev;
+ else
+ rev = vp->rev.sli1FwRev;
+
+ b1 = (rev & 0x0000f000) >> 12;
+ b2 = (rev & 0x00000f00) >> 8;
+ b3 = (rev & 0x000000c0) >> 6;
+ b4 = (rev & 0x00000030) >> 4;
+
+ switch (b4) {
+ case 0:
+ c = 'N';
+ break;
+ case 1:
+ c = 'A';
+ break;
+ case 2:
+ c = 'B';
+ break;
+ default:
+ c = 0;
+ break;
+ }
+ b4 = (rev & 0x0000000f);
+
+ if (psli->sliinit.sli_flag & LPFC_SLI2_ACTIVE)
+ fwname = vp->rev.sli2FwName;
+ else
+ fwname = vp->rev.sli1FwName;
+
+ for (i = 0; i < 16; i++)
+ if(fwname[i] == 0x20)
+ fwname[i] = 0;
+
+ ptr = (uint32_t*)fwname;
+
+ for (i = 0; i < 3; i++)
+ str[i] = be32_to_cpu(*ptr++);
+
+ if (c == 0) {
+ if (flag)
+ sprintf(fwrevision, "%d.%d%d (%s)",
+ b1, b2, b3, (char *)str);
+ else
+ sprintf(fwrevision, "%d.%d%d", b1,
+ b2, b3);
+ } else {
+ if (flag)
+ sprintf(fwrevision, "%d.%d%d%c%d (%s)",
+ b1, b2, b3, c,
+ b4, (char *)str);
+ else
+ sprintf(fwrevision, "%d.%d%d%c%d",
+ b1, b2, b3, c, b4);
+ }
+ } else {
+ rev = vp->rev.smFwRev;
+
+ b1 = (rev & 0xff000000) >> 24;
+ b2 = (rev & 0x00f00000) >> 20;
+ b3 = (rev & 0x000f0000) >> 16;
+ c = (rev & 0x0000ff00) >> 8;
+ b4 = (rev & 0x000000ff);
+
+ if (flag)
+ sprintf(fwrevision, "%d.%d%d%c%d ", b1,
+ b2, b3, c, b4);
+ else
+ sprintf(fwrevision, "%d.%d%d%c%d ", b1,
+ b2, b3, c, b4);
+ }
+ return;
+}
+
+void
+lpfc_get_hba_model_desc(struct lpfc_hba * phba, uint8_t * mdp, uint8_t * descp)
+{
+ lpfc_vpd_t *vp;
+ uint32_t id;
+ char str[16];
+
+ vp = &phba->vpd;
+ pci_read_config_dword(phba->pcidev, PCI_VENDOR_ID, &id);
+
+ switch ((id >> 16) & 0xffff) {
+ case PCI_DEVICE_ID_SUPERFLY:
+ if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3)
+ strcpy(str, "LP7000 1");
+ else
+ strcpy(str, "LP7000E 1");
+ break;
+ case PCI_DEVICE_ID_DRAGONFLY:
+ strcpy(str, "LP8000 1");
+ break;
+ case PCI_DEVICE_ID_CENTAUR:
+ if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID)
+ strcpy(str, "LP9002 2");
+ else
+ strcpy(str, "LP9000 1");
+ break;
+ case PCI_DEVICE_ID_RFLY:
+ strcpy(str, "LP952 2");
+ break;
+ case PCI_DEVICE_ID_PEGASUS:
+ strcpy(str, "LP9802 2");
+ break;
+ case PCI_DEVICE_ID_THOR:
+ strcpy(str, "LP10000 2");
+ break;
+ case PCI_DEVICE_ID_VIPER:
+ strcpy(str, "LPX1000 10");
+ break;
+ case PCI_DEVICE_ID_PFLY:
+ strcpy(str, "LP982 2");
+ break;
+ case PCI_DEVICE_ID_TFLY:
+ strcpy(str, "LP1050 2");
+ break;
+ case PCI_DEVICE_ID_HELIOS:
+ strcpy(str, "LP11000 4");
+ break;
+ case PCI_DEVICE_ID_BMID:
+ strcpy(str, "LP1150 4");
+ break;
+ case PCI_DEVICE_ID_BSMB:
+ strcpy(str, "LP111 4");
+ break;
+ case PCI_DEVICE_ID_ZEPHYR:
+ strcpy(str, "LP11000e 4");
+ break;
+ case PCI_DEVICE_ID_ZMID:
+ strcpy(str, "LP1150e 4");
+ break;
+ case PCI_DEVICE_ID_ZSMB:
+ strcpy(str, "LP111e 4");
+ break;
+ case PCI_DEVICE_ID_LP101:
+ strcpy(str, "LP101 2");
+ break;
+ }
+ if (mdp)
+ sscanf(str, "%s", mdp);
+ if (descp)
+ sprintf(descp, "Emulex LightPulse %s Gigabit PCI Fibre "
+ "Channel Adapter", str);
+}
+
--- /dev/null
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for *
+ * Enterprise Fibre Channel Host Bus Adapters. *
+ * Refer to the README file included with this package for *
+ * driver version and adapter support. *
+ * Copyright (C) 2004 Emulex Corporation. *
+ * www.emulex.com *
+ * *
+ * This program is free software; you can redistribute it and/or *
+ * modify it under the terms of the GNU General Public License *
+ * as published by the Free Software Foundation; either version 2 *
+ * of the License, or (at your option) any later version. *
+ * *
+ * This program is distributed in the hope that it will be useful, *
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of *
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
+ * GNU General Public License for more details, a copy of which *
+ * can be found in the file COPYING included with this package. *
+ *******************************************************************/
+
+/*
+ * $Id: lpfc_disc.h 1.45 2004/11/10 11:40:40EST sf_support Exp $
+ */
+
+#ifndef _H_LPFC_DISC
+#define _H_LPFC_DISC
+
+#include "lpfc_hw.h"
+
+struct lpfc_target;
+
+#define FC_MAX_HOLD_RSCN 32 /* max number of deferred RSCNs */
+#define FC_MAX_NS_RSP 65536 /* max size NameServer rsp */
+#define FC_MAXLOOP 126 /* max devices supported on a fc loop */
+#define LPFC_DISC_FLOGI_TMO 10 /* Discovery FLOGI ratov */
+
+/* Defines for failMask bitmask
+ * These are reasons that the device is not currently available
+ * for I/O to be sent.
+ */
+#define LPFC_DEV_LINK_DOWN 0x1 /* Link is down */
+#define LPFC_DEV_DISAPPEARED 0x2 /* Device disappeared from mapped
+ list */
+#define LPFC_DEV_DISCOVERY_INP 0x4 /* Device to go through discovery */
+#define LPFC_DEV_DISCONNECTED 0x8 /* noactive connection to remote dev */
+
+/* These defines are used for set failMask routines */
+#define LPFC_SET_BITMASK 1
+#define LPFC_CLR_BITMASK 2
+
+/* Provide an enumeration for the Types of addresses a FARP can resolve. */
typedef enum lpfc_farp_addr_type {
	LPFC_FARP_BY_IEEE,	/* resolve by IEEE (MAC-format) address */
	LPFC_FARP_BY_WWPN,	/* resolve by World Wide Port Name */
	LPFC_FARP_BY_WWNN,	/* resolve by World Wide Node Name */
} LPFC_FARP_ADDR_TYPE;
+
+/* This is the protocol dependent definition for a Node List Entry.
+ * This is used by Fibre Channel protocol to support FCP.
+ */
+
/* A saved (persistent) binding entry: associates a remote port's names /
 * DID with a SCSI id.  Consulted when a node moves from the unmapped to
 * the mapped list (see the list overview comment further below).
 */
struct lpfc_bindlist {
	struct list_head nlp_listp;	/* linkage on the bind list */
	struct lpfc_target *nlp_Target;	/* ptr to the tgt structure */
	struct lpfc_name nlp_portname;	/* port name */
	struct lpfc_name nlp_nodename;	/* node name */
	uint16_t nlp_bind_type;		/* LPFC_BIND_WW_NN_PN / LPFC_BIND_DID */
	uint16_t nlp_sid;		/* scsi id */
	uint32_t nlp_DID;		/* FibreChannel D_ID of entry */
};
+
/* Per-remote-port node context.  One entry tracks each remote NPort
 * through the discovery state machine; see the NLP_STE_* / nlp_flag
 * defines below for the states and flags stored here.
 */
struct lpfc_nodelist {
	struct list_head nlp_listp;	/* linkage on one of the nlp lists */
	struct lpfc_name nlp_portname;	/* port name */
	struct lpfc_name nlp_nodename;	/* node name */
	uint32_t nlp_failMask;		/* failure mask for device */
	uint32_t nlp_flag;		/* entry flags */
	uint32_t nlp_DID;		/* FC D_ID of entry */
	uint32_t nlp_last_elscmd;	/* Last ELS cmd sent */
	uint16_t nlp_type;		/* node type; NLP_* bits below */
#define NLP_FC_NODE        0x1	/* entry is an FC node */
#define NLP_FABRIC         0x4	/* entry rep a Fabric entity */
#define NLP_FCP_TARGET     0x8	/* entry is an FCP target */

	uint16_t nlp_rpi;	/* NOTE(review): presumably the remote port
				   index assigned at REG_LOGIN -- confirm */
	uint16_t nlp_state;		/* state transition indicator */
	uint16_t nlp_xri;		/* output exchange id for RPI */
	uint16_t nlp_sid;		/* scsi id */
	uint8_t nlp_retry;		/* used for ELS retries */
	uint8_t nlp_disc_refcnt;	/* used for DSM */
	uint8_t nlp_fcp_info;		/* class info, bits 0-3 */
#define NLP_FCP_2_DEVICE   0x10	/* FCP-2 device */

	struct timer_list nlp_delayfunc;	/* Used for delayed ELS cmds */
	struct timer_list nlp_tmofunc;	/* Used for nodev tmo */
	struct lpfc_target *nlp_Target;	/* Pointer to the target
					   structure */

	struct lpfc_bindlist *nlp_listp_bind;	/* Linked list bounded remote
						   ports */
	struct lpfc_nodelist *nlp_rpi_hash_next; /* rpi hash-chain linkage */
	struct lpfc_hba *nlp_phba;	/* back-pointer to the owning HBA */
};
+
+/*++
+ * lpfc_node_farp_list:
+ * This data structure defines the attributes associated with
+ * an outstanding FARP REQ to a remote node.
+ *
+ * listentry - head of this list of pending farp requests.
+ * rnode_addr - The address of the remote node. Either the IEEE, WWPN, or
+ * WWNN. Used in the FARP request.
+ *
+ --*/
struct lpfc_node_farp_pend {
	struct list_head listentry;	/* linkage on the pending-FARP list */
	struct lpfc_name rnode_addr;	/* remote node address used in the
					   FARP request (IEEE/WWPN/WWNN) */
};
+
+/* Defines for nlp_flag (uint32) */
+#define NLP_NO_LIST 0x0 /* Indicates immediately free node */
+#define NLP_UNUSED_LIST 0x1 /* Flg to indicate node will be freed */
+#define NLP_PLOGI_LIST 0x2 /* Flg to indicate sent PLOGI */
+#define NLP_ADISC_LIST 0x3 /* Flg to indicate sent ADISC */
+#define NLP_REGLOGIN_LIST 0x4 /* Flg to indicate sent REG_LOGIN */
+#define NLP_PRLI_LIST 0x5 /* Flg to indicate sent PRLI */
+#define NLP_UNMAPPED_LIST 0x6 /* Node is now unmapped */
+#define NLP_MAPPED_LIST 0x7 /* Node is now mapped */
+#define NLP_NPR_LIST 0x8 /* Node is in NPort Recovery state */
+#define NLP_JUST_DQ 0x9 /* just deque ndlp in lpfc_nlp_list */
+#define NLP_LIST_MASK 0xf /* mask to see what list node is on */
+#define NLP_PLOGI_SND 0x20 /* sent PLOGI request for this entry */
+#define NLP_PRLI_SND 0x40 /* sent PRLI request for this entry */
+#define NLP_ADISC_SND 0x80 /* sent ADISC request for this entry */
+#define NLP_LOGO_SND 0x100 /* sent LOGO request for this entry */
+#define NLP_RNID_SND 0x400 /* sent RNID request for this entry */
+#define NLP_ELS_SND_MASK 0x7e0 /* sent ELS request for this entry */
+#define NLP_AUTOMAP 0x800 /* Entry was automap'ed */
+#define NLP_SEED_WWPN 0x1000 /* Entry scsi id is seeded for WWPN */
+#define NLP_SEED_WWNN 0x2000 /* Entry scsi id is seeded for WWNN */
+#define NLP_SEED_DID 0x4000 /* Entry scsi id is seeded for DID */
+#define NLP_SEED_MASK 0x807000 /* mask for seeded flags */
+#define NLP_NS_NODE 0x8000 /* Authenticated entry by NameServer */
+#define NLP_NODEV_TMO 0x10000 /* nodev timeout is running for node */
+#define NLP_DELAY_TMO 0x20000 /* delay timeout is running for node */
+#define NLP_NPR_2B_DISC 0x40000 /* node is included in num_disc_nodes */
+#define NLP_RCV_PLOGI 0x80000 /* Rcv'ed PLOGI from remote system */
+#define NLP_LOGO_ACC 0x100000 /* Process LOGO after ACC completes */
+#define NLP_TGT_NO_SCSIID 0x200000 /* good PRLI but no binding for scsid */
+#define NLP_SEED_ALPA 0x800000 /* SCSI id is derived from alpa array */
+#define NLP_ACC_REGLOGIN 0x1000000 /* Issue Reg Login after successful
+ ACC */
+#define NLP_NPR_ADISC 0x2000000 /* Issue ADISC when dq'ed from
+ NPR list */
+#define NLP_DELAY_REMOVE 0x4000000 /* Defer removal till end of DSM */
+
/* Defines for list searches */
+#define NLP_SEARCH_MAPPED 0x1 /* search mapped */
+#define NLP_SEARCH_UNMAPPED 0x2 /* search unmapped */
+#define NLP_SEARCH_PLOGI 0x4 /* search plogi */
+#define NLP_SEARCH_ADISC 0x8 /* search adisc */
+#define NLP_SEARCH_REGLOGIN 0x10 /* search reglogin */
+#define NLP_SEARCH_PRLI 0x20 /* search prli */
+#define NLP_SEARCH_NPR 0x40 /* search npr */
+#define NLP_SEARCH_UNUSED 0x80 /* search mapped */
+#define NLP_SEARCH_ALL 0xff /* search all lists */
+
+/* There are 4 different double linked lists nodelist entries can reside on.
+ * The Port Login (PLOGI) list and Address Discovery (ADISC) list are used
+ * when Link Up discovery or Registered State Change Notification (RSCN)
+ * processing is needed. Each list holds the nodes that require a PLOGI or
+ * ADISC Extended Link Service (ELS) request. These lists keep track of the
 * nodes affected by an RSCN, or a Link Up (Typically, all nodes are affected
 * by Link Up) event. The unmapped_list contains all nodes that have
+ * successfully logged into at the Fibre Channel level. The
+ * mapped_list will contain all nodes that are mapped FCP targets.
+ *
+ * The bind list is a list of undiscovered (potentially non-existent) nodes
+ * that we have saved binding information on. This information is used when
+ * nodes transition from the unmapped to the mapped list.
+ */
+
+/* Defines for nlp_state */
+#define NLP_STE_UNUSED_NODE 0x0 /* node is just allocated */
+#define NLP_STE_PLOGI_ISSUE 0x1 /* PLOGI was sent to NL_PORT */
+#define NLP_STE_ADISC_ISSUE 0x2 /* ADISC was sent to NL_PORT */
+#define NLP_STE_REG_LOGIN_ISSUE 0x3 /* REG_LOGIN was issued for NL_PORT */
+#define NLP_STE_PRLI_ISSUE 0x4 /* PRLI was sent to NL_PORT */
+#define NLP_STE_UNMAPPED_NODE 0x5 /* PRLI completed from NL_PORT */
+#define NLP_STE_MAPPED_NODE 0x6 /* Identified as a FCP Target */
+#define NLP_STE_NPR_NODE 0x7 /* NPort disappeared */
+#define NLP_STE_MAX_STATE 0x8
+#define NLP_STE_FREED_NODE 0xff /* node entry was freed to MEM_NLP */
+
+/* For UNUSED_NODE state, the node has just been allocated.
+ * For PLOGI_ISSUE and REG_LOGIN_ISSUE, the node is on
+ * the PLOGI list. For REG_LOGIN_COMPL, the node is taken off the PLOGI list
+ * and put on the unmapped list. For ADISC processing, the node is taken off
+ * the ADISC list and placed on either the mapped or unmapped list (depending
+ * on its previous state). Once on the unmapped list, a PRLI is issued and the
+ * state changed to PRLI_ISSUE. When the PRLI completion occurs, the state is
+ * changed to PRLI_COMPL. If the completion indicates a mapped
+ * node, the node is taken off the unmapped list. The binding list is checked
+ * for a valid binding, or a binding is automatically assigned. If binding
+ * assignment is unsuccessful, the node is left on the unmapped list. If
+ * binding assignment is successful, the associated binding list entry (if
+ * any) is removed, and the node is placed on the mapped list.
+ */
+/*
+ * For a Link Down, all nodes on the ADISC, PLOGI, unmapped or mapped
+ * lists will receive a DEVICE_RECOVERY event. If the linkdown or nodev timers
 * expire, all affected nodes will receive a DEVICE_RM event.
+ */
+/*
+ * For a Link Up or RSCN, all nodes will move from the mapped / unmapped lists
+ * to either the ADISC or PLOGI list. After a Nameserver query or ALPA loopmap
+ * check, additional nodes may be added (DEVICE_ADD) or removed (DEVICE_RM) to /
+ * from the PLOGI or ADISC lists. Once the PLOGI and ADISC lists are populated,
+ * we will first process the ADISC list. 32 entries are processed initially and
 * ADISC is initiated for each one. Completions / Events for each node are
+ * funnelled thru the state machine. As each node finishes ADISC processing, it
+ * starts ADISC for any nodes waiting for ADISC processing. If no nodes are
+ * waiting, and the ADISC list count is identically 0, then we are done. For
+ * Link Up discovery, since all nodes on the PLOGI list are UNREG_LOGIN'ed, we
+ * can issue a CLEAR_LA and reenable Link Events. Next we will process the PLOGI
 * list. 32 entries are processed initially and PLOGI is initiated for each one.
+ * Completions / Events for each node are funnelled thru the state machine. As
+ * each node finishes PLOGI processing, it starts PLOGI for any nodes waiting
+ * for PLOGI processing. If no nodes are waiting, and the PLOGI list count is
+ * identically 0, then we are done. We have now completed discovery / RSCN
+ * handling. Upon completion, ALL nodes should be on either the mapped or
+ * unmapped lists.
+ */
+
+/* Defines for Node List Entry Events that could happen */
+#define NLP_EVT_RCV_PLOGI 0x0 /* Rcv'd an ELS PLOGI command */
+#define NLP_EVT_RCV_PRLI 0x1 /* Rcv'd an ELS PRLI command */
+#define NLP_EVT_RCV_LOGO 0x2 /* Rcv'd an ELS LOGO command */
+#define NLP_EVT_RCV_ADISC 0x3 /* Rcv'd an ELS ADISC command */
+#define NLP_EVT_RCV_PDISC 0x4 /* Rcv'd an ELS PDISC command */
+#define NLP_EVT_RCV_PRLO 0x5 /* Rcv'd an ELS PRLO command */
+#define NLP_EVT_CMPL_PLOGI 0x6 /* Sent an ELS PLOGI command */
+#define NLP_EVT_CMPL_PRLI 0x7 /* Sent an ELS PRLI command */
+#define NLP_EVT_CMPL_LOGO 0x8 /* Sent an ELS LOGO command */
+#define NLP_EVT_CMPL_ADISC 0x9 /* Sent an ELS ADISC command */
+#define NLP_EVT_CMPL_REG_LOGIN 0xa /* REG_LOGIN mbox cmd completed */
+#define NLP_EVT_DEVICE_RM 0xb /* Device not found in NS / ALPAmap */
+#define NLP_EVT_DEVICE_RECOVERY 0xc /* Device existence unknown */
+#define NLP_EVT_MAX_EVENT 0xd
+
+/* structure used to queue event to the discovery tasklet */
/* structure used to queue event to the discovery tasklet */
struct lpfc_disc_evt {
	struct list_head evt_listp;	/* linkage on the event queue */
	void * evt_arg1;		/* event-specific argument #1 */
	void * evt_arg2;		/* event-specific argument #2 */
	uint32_t evt;			/* event code; LPFC_EVT_* below */
};
/* Legacy typedef retained for existing users of the _t name */
typedef struct lpfc_disc_evt LPFC_DISC_EVT_t;
+
+#define LPFC_EVT_MBOX 0x1
+#define LPFC_EVT_SOL_IOCB 0x2
+#define LPFC_EVT_UNSOL_IOCB 0x3
+#define LPFC_EVT_NODEV_TMO 0x4
+#define LPFC_EVT_SCAN 0x5
+
+/* Definitions for Binding Entry Type for lpfc_parse_binding_entry() */
+#define LPFC_BIND_WW_NN_PN 0
+#define LPFC_BIND_DID 1
+
+#endif /* _H_LPFC_DISC */
--- /dev/null
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for *
+ * Enterprise Fibre Channel Host Bus Adapters. *
+ * Refer to the README file included with this package for *
+ * driver version and adapter support. *
+ * Copyright (C) 2004 Emulex Corporation. *
+ * www.emulex.com *
+ * *
+ * This program is free software; you can redistribute it and/or *
+ * modify it under the terms of the GNU General Public License *
+ * as published by the Free Software Foundation; either version 2 *
+ * of the License, or (at your option) any later version. *
+ * *
+ * This program is distributed in the hope that it will be useful, *
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of *
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
+ * GNU General Public License for more details, a copy of which *
+ * can be found in the file COPYING included with this package. *
+ *******************************************************************/
+
+/*
+ * $Id: lpfc_els.c 1.152 2004/11/18 18:27:53EST sf_support Exp $
+ */
+#include <linux/version.h>
+#include <linux/blkdev.h>
+#include <linux/dma-mapping.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include "lpfc_sli.h"
+#include "lpfc_disc.h"
+#include "lpfc_scsi.h"
+#include "lpfc.h"
+#include "lpfc_crtn.h"
+#include "lpfc_hw.h"
+#include "lpfc_logmsg.h"
+#include "lpfc_mem.h"
+
+
/* Forward declaration: decides whether a failed ELS command is retried */
static int lpfc_els_retry(struct lpfc_hba *, struct lpfc_iocbq *,
			  struct lpfc_iocbq *);
/* Upper bound on retries for a single ELS command */
static int lpfc_max_els_tries = 3;
+
+static int
+lpfc_els_chk_latt(struct lpfc_hba * phba)
+{
+ struct lpfc_sli *psli;
+ LPFC_MBOXQ_t *mbox;
+ uint32_t ha_copy;
+
+ psli = &phba->sli;
+
+ if ((phba->hba_state < LPFC_HBA_READY) &&
+ (phba->hba_state != LPFC_LINK_DOWN)) {
+
+ /* Read the HBA Host Attention Register */
+ ha_copy = readl(phba->HAregaddr);
+
+ if (ha_copy & HA_LATT) { /* Link Attention interrupt */
+
+ /* Pending Link Event during Discovery */
+ lpfc_printf_log(phba, KERN_WARNING, LOG_DISCOVERY,
+ "%d:0237 Pending Link Event during "
+ "Discovery: State x%x\n",
+ phba->brd_no, phba->hba_state);
+
+ /* CLEAR_LA should re-enable link attention events and
+ * we should then imediately take a LATT event. The
+ * LATT processing should call lpfc_linkdown() which
+ * will cleanup any left over in-progress discovery
+ * events.
+ */
+ phba->fc_flag |= FC_ABORT_DISCOVERY;
+
+ if (phba->hba_state != LPFC_CLEAR_LA) {
+ if ((mbox = mempool_alloc(phba->mbox_mem_pool,
+ GFP_ATOMIC))) {
+ phba->hba_state = LPFC_CLEAR_LA;
+ lpfc_clear_la(phba, mbox);
+ mbox->mbox_cmpl =
+ lpfc_mbx_cmpl_clear_la;
+ if (lpfc_sli_issue_mbox
+ (phba, mbox,
+ (MBX_NOWAIT | MBX_STOP_IOCB))
+ == MBX_NOT_FINISHED) {
+ mempool_free(mbox,
+ phba->mbox_mem_pool);
+ phba->hba_state =
+ LPFC_HBA_ERROR;
+ }
+ }
+ }
+ return (1);
+ }
+ }
+
+ return (0);
+}
+
+struct lpfc_iocbq *
+lpfc_prep_els_iocb(struct lpfc_hba * phba,
+ uint8_t expectRsp,
+ uint16_t cmdSize,
+ uint8_t retry, struct lpfc_nodelist * ndlp, uint32_t elscmd)
+{
+ struct lpfc_sli *psli;
+ struct lpfc_sli_ring *pring;
+ struct lpfc_iocbq *elsiocb;
+ struct lpfc_dmabuf *pcmd, *prsp, *pbuflist;
+ struct ulp_bde64 *bpl;
+ IOCB_t *icmd;
+ uint32_t tag;
+
+ psli = &phba->sli;
+ pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
+
+ if (phba->hba_state < LPFC_LINK_UP)
+ return NULL;
+
+
+ /* Allocate buffer for command iocb */
+ elsiocb = mempool_alloc(phba->iocb_mem_pool, GFP_ATOMIC);
+ if (!elsiocb)
+ return NULL;
+
+ memset(elsiocb, 0, sizeof (struct lpfc_iocbq));
+ icmd = &elsiocb->iocb;
+
+ /* fill in BDEs for command */
+ /* Allocate buffer for command payload */
+ if (((pcmd = kmalloc(sizeof (struct lpfc_dmabuf), GFP_ATOMIC)) == 0) ||
+ ((pcmd->virt = lpfc_mbuf_alloc(phba,
+ MEM_PRI, &(pcmd->phys))) == 0)) {
+ if (pcmd)
+ kfree(pcmd);
+ mempool_free( elsiocb, phba->iocb_mem_pool);
+ return NULL;
+ }
+
+ INIT_LIST_HEAD(&pcmd->list);
+
+ /* Allocate buffer for response payload */
+ if (expectRsp) {
+ prsp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_ATOMIC);
+ if (prsp)
+ prsp->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
+ &prsp->phys);
+ if (prsp == 0 || prsp->virt == 0) {
+ if (prsp)
+ kfree(prsp);
+ lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
+ kfree(pcmd);
+ mempool_free( elsiocb, phba->iocb_mem_pool);
+ return NULL;
+ }
+ INIT_LIST_HEAD(&prsp->list);
+ } else {
+ prsp = NULL;
+ }
+
+ /* Allocate buffer for Buffer ptr list */
+ pbuflist = kmalloc(sizeof (struct lpfc_dmabuf), GFP_ATOMIC);
+ if (pbuflist)
+ pbuflist->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
+ &pbuflist->phys);
+ if (pbuflist == 0 || pbuflist->virt == 0) {
+ mempool_free( elsiocb, phba->iocb_mem_pool);
+ lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
+ lpfc_mbuf_free(phba, prsp->virt, prsp->phys);
+ kfree(pcmd);
+ kfree(prsp);
+ if (pbuflist)
+ kfree(pbuflist);
+ return NULL;
+ }
+
+ INIT_LIST_HEAD(&pbuflist->list);
+
+ icmd->un.elsreq64.bdl.addrHigh = putPaddrHigh(pbuflist->phys);
+ icmd->un.elsreq64.bdl.addrLow = putPaddrLow(pbuflist->phys);
+ icmd->un.elsreq64.bdl.bdeFlags = BUFF_TYPE_BDL;
+ if (expectRsp) {
+ icmd->un.elsreq64.bdl.bdeSize = (2 * sizeof (struct ulp_bde64));
+ icmd->un.elsreq64.remoteID = ndlp->nlp_DID; /* DID */
+ icmd->ulpCommand = CMD_ELS_REQUEST64_CR;
+ } else {
+ icmd->un.elsreq64.bdl.bdeSize = sizeof (struct ulp_bde64);
+ icmd->ulpCommand = CMD_XMIT_ELS_RSP64_CX;
+ }
+
+ /* NOTE: we don't use ulpIoTag0 because it is a t2 structure */
+ tag = lpfc_sli_next_iotag(phba, pring);
+ icmd->ulpIoTag = (uint16_t)(tag & 0xffff);
+ icmd->un.elsreq64.bdl.ulpIoTag32 = tag;
+ icmd->ulpBdeCount = 1;
+ icmd->ulpLe = 1;
+ icmd->ulpClass = CLASS3;
+
+ bpl = (struct ulp_bde64 *) pbuflist->virt;
+ bpl->addrLow = le32_to_cpu(putPaddrLow(pcmd->phys));
+ bpl->addrHigh = le32_to_cpu(putPaddrHigh(pcmd->phys));
+ bpl->tus.f.bdeSize = cmdSize;
+ bpl->tus.f.bdeFlags = 0;
+ bpl->tus.w = le32_to_cpu(bpl->tus.w);
+
+ if (expectRsp) {
+ bpl++;
+ bpl->addrLow = le32_to_cpu(putPaddrLow(prsp->phys));
+ bpl->addrHigh = le32_to_cpu(putPaddrHigh(prsp->phys));
+ bpl->tus.f.bdeSize = FCELSSIZE;
+ bpl->tus.f.bdeFlags = BUFF_USE_RCV;
+ bpl->tus.w = le32_to_cpu(bpl->tus.w);
+ }
+
+ /* Save for completion so we can release these resources */
+ elsiocb->context1 = (uint8_t *) ndlp;
+ elsiocb->context2 = (uint8_t *) pcmd;
+ elsiocb->context3 = (uint8_t *) pbuflist;
+ elsiocb->retry = retry;
+ elsiocb->drvrTimeout = (phba->fc_ratov << 1) + LPFC_DRVR_TIMEOUT;
+
+ if (prsp) {
+ list_add(&prsp->list, &pcmd->list);
+ }
+
+ /* The els iocb is fully initialize. Flush it to main store for the
+ * HBA. Note that all els iocb context buffer are from the driver's
+ * dma pool and have length LPFC_BPL_SIZE. Get a short-hand pointer to
+ * the physical address.
+ */
+ pci_dma_sync_single_for_device(phba->pcidev, pbuflist->phys,
+ LPFC_BPL_SIZE, PCI_DMA_TODEVICE);
+
+ if (expectRsp) {
+ /* Xmit ELS command <elsCmd> to remote NPORT <did> */
+ lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
+ "%d:0116 Xmit ELS command x%x to remote "
+ "NPORT x%x Data: x%x x%x\n",
+ phba->brd_no, elscmd,
+ ndlp->nlp_DID, icmd->ulpIoTag, phba->hba_state);
+ } else {
+ /* Xmit ELS response <elsCmd> to remote NPORT <did> */
+ lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
+ "%d:0117 Xmit ELS response x%x to remote "
+ "NPORT x%x Data: x%x x%x\n",
+ phba->brd_no, elscmd,
+ ndlp->nlp_DID, icmd->ulpIoTag, cmdSize);
+ }
+
+ return (elsiocb);
+}
+
/*
 * lpfc_cmpl_els_flogi - completion handler for an ELS FLOGI request.
 *
 * @phba:    HBA context.
 * @cmdiocb: the FLOGI request iocb (context1 = fabric ndlp, context2 =
 *           command dmabuf whose list links the response dmabuf).
 * @rspiocb: the response iocb whose status / words are examined.
 *
 * On success, inspects the returned service parameters: an F_Port reply
 * means fabric mode (CONFIG_LINK + REG_LOGIN mailboxes are issued); an
 * N_Port reply means point-to-point mode.  On failure, falls back to
 * loop-map discovery.  Always frees cmdiocb via lpfc_els_free_iocb().
 */
static void
lpfc_cmpl_els_flogi(struct lpfc_hba * phba,
		    struct lpfc_iocbq * cmdiocb, struct lpfc_iocbq * rspiocb)
{
	IOCB_t *irsp;
	struct lpfc_dmabuf *pcmd, *prsp;
	struct serv_parm *sp;
	uint32_t *lp;
	LPFC_MBOXQ_t *mbox;
	struct lpfc_sli *psli;
	struct lpfc_nodelist *ndlp;
	int rc;

	psli = &phba->sli;
	irsp = &(rspiocb->iocb);
	ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;

	/* Check to see if link went down during discovery */
	if (lpfc_els_chk_latt(phba)) {
		lpfc_nlp_remove(phba, ndlp);
		goto out;
	}

	if (irsp->ulpStatus) {
		/* Check for retry */
		if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
			/* ELS command is being retried */
			goto out;
		}
		/* FLOGI failed, so there is no fabric */
		phba->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);

		/* If private loop, then allow max outstanding els to be
		 * LPFC_MAX_DISC_THREADS (32). Scanning in the case of no
		 * alpa map would take too long otherwise.
		 */
		if (phba->alpa_map[0] == 0) {
			phba->cfg_discovery_threads =
			    LPFC_MAX_DISC_THREADS;
		}

		/* FLOGI failure */
		lpfc_printf_log(phba,
				KERN_INFO,
				LOG_ELS,
				"%d:0100 FLOGI failure Data: x%x x%x\n",
				phba->brd_no,
				irsp->ulpStatus, irsp->un.ulpWord[4]);
	} else {
		/* The FLogI succeeded. Sync the data for the CPU before
		 * accessing it.
		 */
		prsp = (struct lpfc_dmabuf *) pcmd->list.next;
		lp = (uint32_t *) prsp->virt;

		/* The HBA populated the response buffer. Flush cpu cache to
		 * before the driver touches this memory.
		 */
		pci_dma_sync_single_for_cpu(phba->pcidev, prsp->phys,
					    LPFC_BPL_SIZE, PCI_DMA_FROMDEVICE);
		/* Service parameters follow the 4-byte ELS command word */
		sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t));

		/* FLOGI completes successfully */
		lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
				"%d:0101 FLOGI completes sucessfully "
				"Data: x%x x%x x%x x%x\n",
				phba->brd_no,
				irsp->un.ulpWord[4], sp->cmn.e_d_tov,
				sp->cmn.w2.r_a_tov, sp->cmn.edtovResolution);

		if (phba->hba_state == LPFC_FLOGI) {
			/* If Common Service Parameters indicate Nport
			 * we are point to point, if Fport we are Fabric.
			 */
			if (sp->cmn.fPort) {
				phba->fc_flag |= FC_FABRIC;
				if (sp->cmn.edtovResolution) {
					/* E_D_TOV ticks are in nanoseconds */
					phba->fc_edtov =
					    (be32_to_cpu(sp->cmn.e_d_tov) +
					     999999) / 1000000;
				} else {
					/* E_D_TOV ticks are in milliseconds */
					phba->fc_edtov =
					    be32_to_cpu(sp->cmn.e_d_tov);
				}
				phba->fc_ratov =
				    (be32_to_cpu(sp->cmn.w2.r_a_tov) +
				     999) / 1000;

				if (phba->fc_topology == TOPOLOGY_LOOP) {
					phba->fc_flag |= FC_PUBLIC_LOOP;
				} else {
					/* If we are a N-port connected to a
					 * Fabric, fixup sparam's so logins to
					 * devices on remote loops work.
					 */
					phba->fc_sparam.cmn.altBbCredit = 1;
				}

				/* Fabric assigned our D_ID in word 4 */
				phba->fc_myDID = irsp->un.ulpWord[4] & Mask_DID;

				memcpy(&ndlp->nlp_portname, &sp->portName,
				       sizeof (struct lpfc_name));
				memcpy(&ndlp->nlp_nodename, &sp->nodeName,
				       sizeof (struct lpfc_name));
				memcpy(&phba->fc_fabparam, sp,
				       sizeof (struct serv_parm));
				if ((mbox = mempool_alloc(phba->mbox_mem_pool,
							  GFP_ATOMIC)) == 0) {
					goto flogifail;
				}
				phba->hba_state = LPFC_FABRIC_CFG_LINK;
				lpfc_config_link(phba, mbox);
				if (lpfc_sli_issue_mbox
				    (phba, mbox, (MBX_NOWAIT | MBX_STOP_IOCB))
				    == MBX_NOT_FINISHED) {
					mempool_free(mbox, phba->mbox_mem_pool);
					goto flogifail;
				}

				if ((mbox = mempool_alloc(phba->mbox_mem_pool,
							  GFP_ATOMIC)) == 0) {
					goto flogifail;
				}
				if (lpfc_reg_login(phba, Fabric_DID,
						   (uint8_t *) sp, mbox,
						   0) == 0) {
					/* set_slim mailbox command needs to
					 * execute first, queue this command to
					 * be processed later.
					 */
					mbox->mbox_cmpl =
					    lpfc_mbx_cmpl_fabric_reg_login;
					mbox->context2 = ndlp;
					if (lpfc_sli_issue_mbox
					    (phba, mbox,
					     (MBX_NOWAIT | MBX_STOP_IOCB))
					    == MBX_NOT_FINISHED) {
						mempool_free(mbox,
						     phba->mbox_mem_pool);
						goto flogifail;
					}
				} else {
					mempool_free(mbox, phba->mbox_mem_pool);
					goto flogifail;
				}
			} else {
				/* We FLOGIed into an NPort, initiate pt2pt
				   protocol */
				phba->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
				phba->fc_edtov = FF_DEF_EDTOV;
				phba->fc_ratov = FF_DEF_RATOV;
				/* Higher portname initiates the PLOGI */
				rc = memcmp(&phba->fc_portname, &sp->portName,
					    sizeof(struct lpfc_name));
				if (rc >= 0) {
					/* This side will initiate the PLOGI */
					phba->fc_flag |= FC_PT2PT_PLOGI;

					/* N_Port ID cannot be 0, set our to
					 * LocalID the other side will be
					 * RemoteID.
					 */

					/* not equal */
					if (rc)
						phba->fc_myDID = PT2PT_LocalID;

					if ((mbox =
					     mempool_alloc(phba->mbox_mem_pool,
							   GFP_ATOMIC))
					    == 0) {
						goto flogifail;
					}
					lpfc_config_link(phba, mbox);
					if (lpfc_sli_issue_mbox
					    (phba, mbox,
					     (MBX_NOWAIT | MBX_STOP_IOCB))
					    == MBX_NOT_FINISHED) {
						mempool_free(mbox,
						     phba->mbox_mem_pool);
						goto flogifail;
					}
					/* Fabric ndlp is no longer needed;
					 * ndlp is reused below for the
					 * remote pt2pt node.
					 */
					mempool_free( ndlp, phba->nlp_mem_pool);

					if ((ndlp =
					     lpfc_findnode_did(phba,
							       NLP_SEARCH_ALL,
							       PT2PT_RemoteID))
					    == 0) {
						/* Cannot find existing Fabric
						   ndlp, so allocate a new
						   one */
						if ((ndlp =
						     mempool_alloc(
						     phba->nlp_mem_pool,
						     GFP_ATOMIC)) == 0) {
							goto flogifail;
						}
						lpfc_nlp_init(phba, ndlp,
							PT2PT_RemoteID);
					}
					memcpy(&ndlp->nlp_portname,
					       &sp->portName,
					       sizeof (struct lpfc_name));
					memcpy(&ndlp->nlp_nodename,
					       &sp->nodeName,
					       sizeof (struct lpfc_name));
					ndlp->nlp_state = NLP_STE_NPR_NODE;
					lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
					ndlp->nlp_flag |= NLP_NPR_2B_DISC;
				}
				else {
					/* This side will wait for the PLOGI */
					mempool_free( ndlp, phba->nlp_mem_pool);
				}

				phba->fc_flag |= FC_PT2PT;

				/* Start discovery - this should just do
				   CLEAR_LA */
				lpfc_disc_start(phba);
			}
			goto out;
		}
	}

flogifail:
	/* NOTE(review): in the pt2pt path above ndlp may already have been
	 * freed and reallocated before a later goto flogifail -- the value
	 * removed here is whatever ndlp currently points at.
	 */
	lpfc_nlp_remove(phba, ndlp);

	if((irsp->ulpStatus != IOSTAT_LOCAL_REJECT) ||
	   ((irsp->un.ulpWord[4] != IOERR_SLI_ABORTED) &&
	   (irsp->un.ulpWord[4] != IOERR_SLI_DOWN))) {

		/* FLOGI failed, so just use loop map to make discovery list */
		lpfc_disc_list_loopmap(phba);

		/* Start discovery */
		lpfc_disc_start(phba);
	}

out:
	lpfc_els_free_iocb(phba, cmdiocb);
	return;
}
+
+static int
+lpfc_issue_els_flogi(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
+ uint8_t retry)
+{
+ struct serv_parm *sp;
+ IOCB_t *icmd;
+ struct lpfc_iocbq *elsiocb;
+ struct lpfc_sli_ring *pring;
+ struct lpfc_sli *psli;
+ struct lpfc_dmabuf *bmp;
+ uint8_t *pcmd;
+ uint16_t cmdsize;
+ uint32_t tmo;
+
+ psli = &phba->sli;
+ pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
+
+ cmdsize = (sizeof (uint32_t) + sizeof (struct serv_parm));
+ if ((elsiocb = lpfc_prep_els_iocb(phba, 1, cmdsize, retry,
+ ndlp, ELS_CMD_FLOGI)) == 0) {
+ return (1);
+ }
+
+ icmd = &elsiocb->iocb;
+ pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+
+ /* For FLOGI request, remainder of payload is service parameters */
+ *((uint32_t *) (pcmd)) = ELS_CMD_FLOGI;
+ pcmd += sizeof (uint32_t);
+ memcpy(pcmd, &phba->fc_sparam, sizeof (struct serv_parm));
+ sp = (struct serv_parm *) pcmd;
+
+ /* Setup CSPs accordingly for Fabric */
+ sp->cmn.e_d_tov = 0;
+ sp->cmn.w2.r_a_tov = 0;
+ sp->cls1.classValid = 0;
+ sp->cls2.seqDelivery = 1;
+ sp->cls3.seqDelivery = 1;
+ if (sp->cmn.fcphLow < FC_PH3)
+ sp->cmn.fcphLow = FC_PH3;
+ if (sp->cmn.fcphHigh < FC_PH3)
+ sp->cmn.fcphHigh = FC_PH3;
+
+ tmo = phba->fc_ratov;
+ phba->fc_ratov = LPFC_DISC_FLOGI_TMO;
+ lpfc_set_disctmo(phba);
+ phba->fc_ratov = tmo;
+
+ /* Flush the els buffer to main store for the HBA. This context always
+ * comes from the driver's dma pool and is always LPFC_BPL_SIZE.
+ */
+ bmp = (struct lpfc_dmabuf *) (elsiocb->context2);
+ pci_dma_sync_single_for_device(phba->pcidev, bmp->phys,
+ LPFC_BPL_SIZE, PCI_DMA_TODEVICE);
+
+ phba->fc_stat.elsXmitFLOGI++;
+ elsiocb->iocb_cmpl = lpfc_cmpl_els_flogi;
+ if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
+ lpfc_els_free_iocb(phba, elsiocb);
+ return (1);
+ }
+ return (0);
+}
+
+int
+lpfc_els_abort_flogi(struct lpfc_hba * phba)
+{
+ struct lpfc_sli *psli;
+ struct lpfc_sli_ring *pring;
+ struct lpfc_iocbq *iocb, *next_iocb;
+ struct lpfc_nodelist *ndlp;
+ IOCB_t *icmd;
+ struct list_head *curr, *next;
+
+ /* Abort outstanding I/O on NPort <nlp_DID> */
+ lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
+ "%d:0201 Abort outstanding I/O on NPort x%x\n",
+ phba->brd_no, Fabric_DID);
+
+ psli = &phba->sli;
+ pring = &psli->ring[LPFC_ELS_RING];
+
+ /* check the txcmplq */
+ list_for_each_safe(curr, next, &pring->txcmplq) {
+ next_iocb = list_entry(curr, struct lpfc_iocbq, list);
+ iocb = next_iocb;
+ /* Check to see if iocb matches the nport we are
+ looking for */
+ icmd = &iocb->iocb;
+ if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR) {
+ ndlp = (struct lpfc_nodelist *)(iocb->context1);
+ if(ndlp && (ndlp->nlp_DID == Fabric_DID)) {
+ /* It matches, so deque and call compl
+ with an error */
+ list_del(&iocb->list);
+ pring->txcmplq_cnt--;
+
+ if ((icmd->un.elsreq64.bdl.ulpIoTag32)) {
+ lpfc_sli_issue_abort_iotag32
+ (phba, pring, iocb);
+ }
+ if (iocb->iocb_cmpl) {
+ icmd->ulpStatus =
+ IOSTAT_LOCAL_REJECT;
+ icmd->un.ulpWord[4] =
+ IOERR_SLI_ABORTED;
+ (iocb->iocb_cmpl) (phba, iocb, iocb);
+ } else {
+ mempool_free(iocb, phba->iocb_mem_pool);
+ }
+ }
+ }
+ }
+ return (0);
+}
+
+int
+lpfc_initial_flogi(struct lpfc_hba * phba)
+{
+ struct lpfc_nodelist *ndlp;
+
+ /* First look for Fabric ndlp on the unmapped list */
+
+ if ((ndlp =
+ lpfc_findnode_did(phba, NLP_SEARCH_UNMAPPED,
+ Fabric_DID)) == 0) {
+ /* Cannot find existing Fabric ndlp, so allocate a new one */
+ if ((ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_ATOMIC))
+ == 0) {
+ return (0);
+ }
+ lpfc_nlp_init(phba, ndlp, Fabric_DID);
+ }
+ else {
+ phba->fc_unmap_cnt--;
+ list_del(&ndlp->nlp_listp);
+ ndlp->nlp_flag &= ~NLP_LIST_MASK;
+ }
+ if (lpfc_issue_els_flogi(phba, ndlp, 0)) {
+ mempool_free( ndlp, phba->nlp_mem_pool);
+ }
+ return (1);
+}
+
+static void
+lpfc_more_plogi(struct lpfc_hba * phba)
+{
+ int sentplogi;
+
+ if (phba->num_disc_nodes)
+ phba->num_disc_nodes--;
+
+ /* Continue discovery with <num_disc_nodes> PLOGIs to go */
+ lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
+ "%d:0232 Continue discovery with %d PLOGIs to go "
+ "Data: x%x x%x x%x\n",
+ phba->brd_no, phba->num_disc_nodes, phba->fc_plogi_cnt,
+ phba->fc_flag, phba->hba_state);
+
+ /* Check to see if there are more PLOGIs to be sent */
+ if (phba->fc_flag & FC_NLP_MORE) {
+ /* go thru NPR list and issue any remaining ELS PLOGIs */
+ sentplogi = lpfc_els_disc_plogi(phba);
+ }
+ return;
+}
+
/*
 * lpfc_cmpl_els_plogi - completion handler for an ELS PLOGI request.
 *
 * @phba:    HBA context.
 * @cmdiocb: the PLOGI request iocb (context1 = destination ndlp).
 * @rspiocb: the response iocb, also handed to the disc state machine via
 *           cmdiocb->context_un.rsp_iocb.
 *
 * Feeds the result into the discovery state machine (which may free the
 * ndlp -- type/DID are saved beforehand), continues batched PLOGI
 * discovery, and when the last node completes either flushes or handles
 * any queued RSCNs.  Always frees cmdiocb via lpfc_els_free_iocb().
 */
static void
lpfc_cmpl_els_plogi(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
		    struct lpfc_iocbq * rspiocb)
{
	IOCB_t *irsp;
	struct lpfc_sli *psli;
	struct lpfc_nodelist *ndlp;
	int disc, rc, did, type;

	psli = &phba->sli;

	/* we pass cmdiocb to state machine which needs rspiocb as well */
	cmdiocb->context_un.rsp_iocb = rspiocb;

	irsp = &rspiocb->iocb;
	ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
	ndlp->nlp_flag &= ~NLP_PLOGI_SND;

	/* Since ndlp can be freed in the disc state machine, note if this node
	 * is being used during discovery.
	 */
	disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC);
	rc = 0;

	/* PLOGI completes to NPort <nlp_DID> */
	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
			"%d:0102 PLOGI completes to NPort x%x "
			"Data: x%x x%x x%x x%x\n",
			phba->brd_no, ndlp->nlp_DID, irsp->ulpStatus,
			irsp->un.ulpWord[4], disc, phba->num_disc_nodes);

	/* Check to see if link went down during discovery */
	if (lpfc_els_chk_latt(phba)) {
		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
		goto out;
	}

	/* ndlp could be freed in DSM, save these values now */
	type = ndlp->nlp_type;
	did = ndlp->nlp_DID;

	if (irsp->ulpStatus) {
		/* Check for retry */
		if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
			/* ELS command is being retried */
			if (disc) {
				ndlp->nlp_flag |= NLP_NPR_2B_DISC;
			}
			goto out;
		}

		/* PLOGI failed */
		/* Do not call DSM for lpfc_els_abort'ed ELS cmds */
		if((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
		   ((irsp->un.ulpWord[4] == IOERR_SLI_ABORTED) ||
		   (irsp->un.ulpWord[4] == IOERR_SLI_DOWN))) {
			disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC);
		}
		else {
			rc = lpfc_disc_state_machine(phba, ndlp, cmdiocb,
					NLP_EVT_CMPL_PLOGI);
		}
	} else {
		/* Good status, call state machine */
		rc = lpfc_disc_state_machine(phba, ndlp, cmdiocb,
					NLP_EVT_CMPL_PLOGI);
	}

	if(type & NLP_FABRIC) {
		/* If we cannot login to Nameserver, kick off discovery now */
		if ((did == NameServer_DID) && (rc == NLP_STE_FREED_NODE)) {
			lpfc_disc_start(phba);
		}
		goto out;
	}

	if (disc && phba->num_disc_nodes) {
		/* Check to see if there are more PLOGIs to be sent */
		lpfc_more_plogi(phba);
	}

	/* Only touch ndlp again if the state machine did not free it */
	if (rc != NLP_STE_FREED_NODE)
		ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;

	if (phba->num_disc_nodes == 0) {
		lpfc_can_disctmo(phba);
		if (phba->fc_flag & FC_RSCN_MODE) {
			/* Check to see if more RSCNs came in while we were
			 * processing this one.
			 */
			if ((phba->fc_rscn_id_cnt == 0) &&
			    (!(phba->fc_flag & FC_RSCN_DISCOVERY))) {
				lpfc_els_flush_rscn(phba);
			} else {
				lpfc_els_handle_rscn(phba);
			}
		}
	}

out:
	lpfc_els_free_iocb(phba, cmdiocb);
	return;
}
+
+/*
+ * Build and issue an ELS PLOGI to the node @ndlp.
+ * The payload is the PLOGI command word followed by this port's service
+ * parameters (with fcphLow/fcphHigh floored to FC_PH_4_3/FC_PH3).
+ * Returns 0 on successful submission, 1 on any failure; on failure all
+ * resources allocated here are released.  @retry is the retry count
+ * carried into the prepped iocb.
+ */
+int
+lpfc_issue_els_plogi(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
+ uint8_t retry)
+{
+ struct serv_parm *sp;
+ IOCB_t *icmd;
+ struct lpfc_iocbq *elsiocb;
+ struct lpfc_sli_ring *pring;
+ struct lpfc_sli *psli;
+ struct lpfc_dmabuf *bmp;
+ uint8_t *pcmd;
+ uint16_t cmdsize;
+
+ psli = &phba->sli;
+ pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
+
+ cmdsize = (sizeof (uint32_t) + sizeof (struct serv_parm));
+ if ((elsiocb = lpfc_prep_els_iocb(phba, 1, cmdsize, retry,
+ ndlp, ELS_CMD_PLOGI)) == 0) {
+ return (1);
+ }
+
+ icmd = &elsiocb->iocb;
+ pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+
+ /* For PLOGI request, remainder of payload is service parameters */
+ *((uint32_t *) (pcmd)) = ELS_CMD_PLOGI;
+ pcmd += sizeof (uint32_t);
+ memcpy(pcmd, &phba->fc_sparam, sizeof (struct serv_parm));
+ sp = (struct serv_parm *) pcmd;
+
+ /* Advertise at least FC-PH 4.3 support in the copied parameters */
+ if (sp->cmn.fcphLow < FC_PH_4_3)
+ sp->cmn.fcphLow = FC_PH_4_3;
+
+ if (sp->cmn.fcphHigh < FC_PH3)
+ sp->cmn.fcphHigh = FC_PH3;
+
+ /* The lpfc iocb is fully initialize. Flush it to main store for the
+ * HBA. Note that all els iocb context buffer are from the driver's
+ * dma pool and have length LPFC_BPL_SIZE.
+ */
+ bmp = (struct lpfc_dmabuf *) (elsiocb->context2);
+ pci_dma_sync_single_for_device(phba->pcidev, bmp->phys,
+ LPFC_BPL_SIZE, PCI_DMA_TODEVICE);
+
+ phba->fc_stat.elsXmitPLOGI++;
+ elsiocb->iocb_cmpl = lpfc_cmpl_els_plogi;
+ /* Mark PLOGI outstanding; completion handler clears this flag */
+ ndlp->nlp_flag |= NLP_PLOGI_SND;
+ if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
+ ndlp->nlp_flag &= ~NLP_PLOGI_SND;
+ lpfc_els_free_iocb(phba, elsiocb);
+ return (1);
+ }
+ return (0);
+}
+
+/*
+ * Completion handler for an outgoing ELS PRLI.
+ * Clears NLP_PRLI_SND, decrements fc_prli_sent, and drives the discovery
+ * state machine with NLP_EVT_CMPL_PRLI unless the command is being
+ * retried or was aborted by the driver.  Always frees cmdiocb.
+ */
+static void
+lpfc_cmpl_els_prli(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
+ struct lpfc_iocbq * rspiocb)
+{
+ IOCB_t *irsp;
+ struct lpfc_sli *psli;
+ struct lpfc_nodelist *ndlp;
+
+ psli = &phba->sli;
+ /* we pass cmdiocb to state machine which needs rspiocb as well */
+ cmdiocb->context_un.rsp_iocb = rspiocb;
+
+ irsp = &(rspiocb->iocb);
+ ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
+ ndlp->nlp_flag &= ~NLP_PRLI_SND;
+
+ /* PRLI completes to NPort <nlp_DID> */
+ lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
+ "%d:0103 PRLI completes to NPort x%x "
+ "Data: x%x x%x x%x\n",
+ phba->brd_no, ndlp->nlp_DID, irsp->ulpStatus,
+ irsp->un.ulpWord[4], phba->num_disc_nodes);
+
+ /* Balances the increment done when the PRLI was issued */
+ phba->fc_prli_sent--;
+ /* Check to see if link went down during discovery */
+ if (lpfc_els_chk_latt(phba))
+ goto out;
+
+ if (irsp->ulpStatus) {
+ /* Check for retry */
+ if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
+ /* ELS command is being retried */
+ goto out;
+ }
+ /* PRLI failed */
+ /* Do not call DSM for lpfc_els_abort'ed ELS cmds */
+ if((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
+ ((irsp->un.ulpWord[4] == IOERR_SLI_ABORTED) ||
+ (irsp->un.ulpWord[4] == IOERR_SLI_DOWN))) {
+ goto out;
+ }
+ else {
+ lpfc_disc_state_machine(phba, ndlp, cmdiocb,
+ NLP_EVT_CMPL_PRLI);
+ }
+ } else {
+ /* Good status, call state machine */
+ lpfc_disc_state_machine(phba, ndlp, cmdiocb, NLP_EVT_CMPL_PRLI);
+ }
+
+out:
+ lpfc_els_free_iocb(phba, cmdiocb);
+ return;
+}
+
+/*
+ * Build and issue an ELS PRLI to @ndlp, advertising FCP initiator
+ * function.  FC-TAPE bits (ConfmComplAllowed/Retry/TaskRetryIdReq) are
+ * set when the firmware feature level indicates support.  On successful
+ * submission fc_prli_sent is incremented (decremented in the completion
+ * handler).  Returns 0 on success, 1 on failure.
+ */
+int
+lpfc_issue_els_prli(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
+ uint8_t retry)
+{
+ PRLI *npr;
+ IOCB_t *icmd;
+ struct lpfc_iocbq *elsiocb;
+ struct lpfc_sli_ring *pring;
+ struct lpfc_sli *psli;
+ struct lpfc_dmabuf *bmp;
+ uint8_t *pcmd;
+ uint16_t cmdsize;
+
+ psli = &phba->sli;
+ pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
+
+ cmdsize = (sizeof (uint32_t) + sizeof (PRLI));
+ if ((elsiocb = lpfc_prep_els_iocb(phba, 1, cmdsize, retry,
+ ndlp, ELS_CMD_PRLI)) == 0) {
+ return (1);
+ }
+
+ icmd = &elsiocb->iocb;
+ pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+
+ /* For PRLI request, remainder of payload is service parameters */
+ memset(pcmd, 0, (sizeof (PRLI) + sizeof (uint32_t)));
+ *((uint32_t *) (pcmd)) = ELS_CMD_PRLI;
+ pcmd += sizeof (uint32_t);
+
+ /* For PRLI, remainder of payload is PRLI parameter page */
+ npr = (PRLI *) pcmd;
+ /*
+ * If our firmware version is 3.20 or later,
+ * set the following bits for FC-TAPE support.
+ */
+ if (phba->vpd.rev.feaLevelHigh >= 0x02) {
+ npr->ConfmComplAllowed = 1;
+ npr->Retry = 1;
+ npr->TaskRetryIdReq = 1;
+ }
+ npr->estabImagePair = 1;
+ npr->readXferRdyDis = 1;
+
+ /* For FCP support */
+ npr->prliType = PRLI_FCP_TYPE;
+ npr->initiatorFunc = 1;
+
+ /* The lpfc iocb is fully initialize. Flush it to main store for the
+ * HBA. Note that all els iocb context buffer are from the driver's
+ * dma pool and have length LPFC_BPL_SIZE.
+ */
+ bmp = (struct lpfc_dmabuf *) (elsiocb->context2);
+ pci_dma_sync_single_for_device(phba->pcidev, bmp->phys,
+ LPFC_BPL_SIZE, PCI_DMA_TODEVICE);
+
+ phba->fc_stat.elsXmitPRLI++;
+ elsiocb->iocb_cmpl = lpfc_cmpl_els_prli;
+ /* Mark PRLI outstanding; completion handler clears this flag */
+ ndlp->nlp_flag |= NLP_PRLI_SND;
+ if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
+ ndlp->nlp_flag &= ~NLP_PRLI_SND;
+ lpfc_els_free_iocb(phba, elsiocb);
+ return (1);
+ }
+ phba->fc_prli_sent++;
+ return (0);
+}
+
+/*
+ * Account for one completed ADISC and, if FC_NLP_MORE is set, restart
+ * the discovery timer and issue the remaining ADISCs from the NPR list.
+ * NOTE(review): the lpfc_els_disc_adisc() return value is captured in
+ * sentadisc but never used — confirm whether it was meant to be checked.
+ */
+static void
+lpfc_more_adisc(struct lpfc_hba * phba)
+{
+ int sentadisc;
+
+ if (phba->num_disc_nodes)
+ phba->num_disc_nodes--;
+
+ /* Continue discovery with <num_disc_nodes> ADISCs to go */
+ lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
+ "%d:0210 Continue discovery with %d ADISCs to go "
+ "Data: x%x x%x x%x\n",
+ phba->brd_no, phba->num_disc_nodes, phba->fc_adisc_cnt,
+ phba->fc_flag, phba->hba_state);
+
+ /* Check to see if there are more ADISCs to be sent */
+ if (phba->fc_flag & FC_NLP_MORE) {
+ lpfc_set_disctmo(phba);
+
+ /* go thru NPR list and issue any remaining ELS ADISCs */
+ sentadisc = lpfc_els_disc_adisc(phba);
+ }
+ return;
+}
+
+/*
+ * Continue RSCN-driven discovery: if NPR nodes remain, PLOGI them;
+ * otherwise, when in RSCN mode, either flush the RSCN state (no further
+ * RSCNs pending) or process the RSCNs that arrived meanwhile.
+ */
+static void
+lpfc_rscn_disc(struct lpfc_hba * phba)
+{
+ /* RSCN discovery */
+ /* go thru NPR list and issue ELS PLOGIs */
+ if (phba->fc_npr_cnt) {
+ lpfc_els_disc_plogi(phba);
+ return;
+ }
+ if (phba->fc_flag & FC_RSCN_MODE) {
+ /* Check to see if more RSCNs came in while we were
+ * processing this one.
+ */
+ if ((phba->fc_rscn_id_cnt == 0) &&
+ (!(phba->fc_flag & FC_RSCN_DISCOVERY))) {
+ lpfc_els_flush_rscn(phba);
+ } else {
+ lpfc_els_handle_rscn(phba);
+ }
+ }
+}
+
+/*
+ * Completion handler for an outgoing ELS ADISC.
+ * Clears NLP_ADISC_SND, drives the discovery state machine, and when
+ * ADISC authentication for all nodes is finished either issues a
+ * CLEAR_LA mailbox command (link-up discovery) or falls back to RSCN
+ * discovery.  Always frees cmdiocb.
+ */
+static void
+lpfc_cmpl_els_adisc(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
+ struct lpfc_iocbq * rspiocb)
+{
+ IOCB_t *irsp;
+ struct lpfc_sli *psli;
+ struct lpfc_nodelist *ndlp;
+ LPFC_MBOXQ_t *mbox;
+ int disc;
+
+ psli = &phba->sli;
+
+ /* we pass cmdiocb to state machine which needs rspiocb as well */
+ cmdiocb->context_un.rsp_iocb = rspiocb;
+
+ irsp = &(rspiocb->iocb);
+ ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
+ ndlp->nlp_flag &= ~NLP_ADISC_SND;
+
+ /* Since ndlp can be freed in the disc state machine, note if this node
+ * is being used during discovery.
+ */
+ disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC);
+
+ /* ADISC completes to NPort <nlp_DID> */
+ lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
+ "%d:0104 ADISC completes to NPort x%x "
+ "Data: x%x x%x x%x x%x\n",
+ phba->brd_no, ndlp->nlp_DID, irsp->ulpStatus,
+ irsp->un.ulpWord[4], disc, phba->num_disc_nodes);
+
+ /* Check to see if link went down during discovery */
+ if (lpfc_els_chk_latt(phba)) {
+ ndlp->nlp_flag |= NLP_NPR_2B_DISC;
+ goto out;
+ }
+
+ if (irsp->ulpStatus) {
+ /* Check for retry */
+ if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
+ /* ELS command is being retried */
+ if (disc) {
+ ndlp->nlp_flag |= NLP_NPR_2B_DISC;
+ /* Keep the discovery timer alive across the retry */
+ lpfc_set_disctmo(phba);
+ }
+ goto out;
+ }
+ /* ADISC failed */
+ /* Do not call DSM for lpfc_els_abort'ed ELS cmds */
+ if((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
+ ((irsp->un.ulpWord[4] == IOERR_SLI_ABORTED) ||
+ (irsp->un.ulpWord[4] == IOERR_SLI_DOWN))) {
+ disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC);
+ }
+ else {
+ lpfc_disc_state_machine(phba, ndlp, cmdiocb,
+ NLP_EVT_CMPL_ADISC);
+ }
+ } else {
+ /* Good status, call state machine */
+ lpfc_disc_state_machine(phba, ndlp, cmdiocb,
+ NLP_EVT_CMPL_ADISC);
+ }
+
+ if (disc && phba->num_disc_nodes) {
+ /* Check to see if there are more ADISCs to be sent */
+ lpfc_more_adisc(phba);
+
+ /* Check to see if we are done with ADISC authentication */
+ if (phba->num_disc_nodes == 0) {
+ /* If we get here, there is nothing left to wait for */
+ if ((phba->hba_state < LPFC_HBA_READY) &&
+ (phba->hba_state != LPFC_CLEAR_LA)) {
+ /* Link up discovery */
+ if ((mbox = mempool_alloc(phba->mbox_mem_pool,
+ GFP_ATOMIC))) {
+ phba->hba_state = LPFC_CLEAR_LA;
+ lpfc_clear_la(phba, mbox);
+ mbox->mbox_cmpl =
+ lpfc_mbx_cmpl_clear_la;
+ if (lpfc_sli_issue_mbox
+ (phba, mbox,
+ (MBX_NOWAIT | MBX_STOP_IOCB))
+ == MBX_NOT_FINISHED) {
+ /* CLEAR_LA could not be issued: undo,
+ * flush discovery, and unblock the
+ * rings stopped by MBX_STOP_IOCB.
+ */
+ mempool_free(mbox,
+ phba->mbox_mem_pool);
+ lpfc_disc_flush_list(phba);
+ psli->ring[(psli->ip_ring)].
+ flag &=
+ ~LPFC_STOP_IOCB_EVENT;
+ psli->ring[(psli->fcp_ring)].
+ flag &=
+ ~LPFC_STOP_IOCB_EVENT;
+ psli->ring[(psli->next_ring)].
+ flag &=
+ ~LPFC_STOP_IOCB_EVENT;
+ phba->hba_state =
+ LPFC_HBA_READY;
+ }
+ }
+ } else {
+ lpfc_rscn_disc(phba);
+ }
+ }
+ }
+ ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
+out:
+ lpfc_els_free_iocb(phba, cmdiocb);
+ return;
+}
+
+/*
+ * Build and issue an ELS ADISC to @ndlp.  The payload carries this
+ * port's hard AL_PA preference, port/node names, and N_Port ID.
+ * Returns 0 on successful submission, 1 on failure.
+ */
+int
+lpfc_issue_els_adisc(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
+ uint8_t retry)
+{
+ ADISC *ap;
+ IOCB_t *icmd;
+ struct lpfc_iocbq *elsiocb;
+ struct lpfc_sli_ring *pring;
+ struct lpfc_sli *psli;
+ struct lpfc_dmabuf *bmp;
+ uint8_t *pcmd;
+ uint16_t cmdsize;
+
+ psli = &phba->sli;
+ pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
+
+ cmdsize = (sizeof (uint32_t) + sizeof (ADISC));
+ if ((elsiocb = lpfc_prep_els_iocb(phba, 1, cmdsize, retry,
+ ndlp, ELS_CMD_ADISC)) == 0) {
+ return (1);
+ }
+
+ icmd = &elsiocb->iocb;
+ pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+
+ /* For ADISC request, remainder of payload is service parameters */
+ *((uint32_t *) (pcmd)) = ELS_CMD_ADISC;
+ pcmd += sizeof (uint32_t);
+
+ /* Fill in ADISC payload */
+ ap = (ADISC *) pcmd;
+ ap->hardAL_PA = phba->fc_pref_ALPA;
+ memcpy(&ap->portName, &phba->fc_portname, sizeof (struct lpfc_name));
+ memcpy(&ap->nodeName, &phba->fc_nodename, sizeof (struct lpfc_name));
+ /* DID goes on the wire big-endian */
+ ap->DID = be32_to_cpu(phba->fc_myDID);
+
+ /* The lpfc iocb is fully initialize. Flush it to main store for the
+ * HBA. Note that all els iocb context buffer are from the driver's
+ * dma pool and have length LPFC_BPL_SIZE. Get a short-hand pointer to
+ * the physical address.
+ */
+ bmp = (struct lpfc_dmabuf *) (elsiocb->context2);
+ pci_dma_sync_single_for_device(phba->pcidev, bmp->phys,
+ LPFC_BPL_SIZE, PCI_DMA_TODEVICE);
+
+ phba->fc_stat.elsXmitADISC++;
+ elsiocb->iocb_cmpl = lpfc_cmpl_els_adisc;
+ /* Mark ADISC outstanding; completion handler clears this flag */
+ ndlp->nlp_flag |= NLP_ADISC_SND;
+ if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
+ ndlp->nlp_flag &= ~NLP_ADISC_SND;
+ lpfc_els_free_iocb(phba, elsiocb);
+ return (1);
+ }
+ return (0);
+}
+
+/*
+ * Completion handler for an outgoing ELS LOGO.
+ * Clears NLP_LOGO_SND and drives the discovery state machine with
+ * NLP_EVT_CMPL_LOGO; on success with a pending delayed-retry timer the
+ * RPI is unregistered.  Always frees cmdiocb.
+ */
+static void
+lpfc_cmpl_els_logo(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
+ struct lpfc_iocbq * rspiocb)
+{
+ IOCB_t *irsp;
+ struct lpfc_sli *psli;
+ struct lpfc_nodelist *ndlp;
+
+ psli = &phba->sli;
+ /* we pass cmdiocb to state machine which needs rspiocb as well */
+ cmdiocb->context_un.rsp_iocb = rspiocb;
+
+ irsp = &(rspiocb->iocb);
+ ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
+ ndlp->nlp_flag &= ~NLP_LOGO_SND;
+
+ /* LOGO completes to NPort <nlp_DID> */
+ lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
+ "%d:0105 LOGO completes to NPort x%x "
+ "Data: x%x x%x x%x\n",
+ phba->brd_no, ndlp->nlp_DID, irsp->ulpStatus,
+ irsp->un.ulpWord[4], phba->num_disc_nodes);
+
+ /* Check to see if link went down during discovery */
+ if (lpfc_els_chk_latt(phba))
+ goto out;
+
+ if (irsp->ulpStatus) {
+ /* Check for retry */
+ if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
+ /* ELS command is being retried */
+ goto out;
+ }
+ /* LOGO failed */
+ /* Do not call DSM for lpfc_els_abort'ed ELS cmds */
+ if((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
+ ((irsp->un.ulpWord[4] == IOERR_SLI_ABORTED) ||
+ (irsp->un.ulpWord[4] == IOERR_SLI_DOWN))) {
+ goto out;
+ }
+ else {
+ lpfc_disc_state_machine(phba, ndlp, cmdiocb,
+ NLP_EVT_CMPL_LOGO);
+ }
+ } else {
+ /* Good status, call state machine */
+ lpfc_disc_state_machine(phba, ndlp, cmdiocb, NLP_EVT_CMPL_LOGO);
+
+ /* Node has a delayed ELS retry pending; drop its login now */
+ if(ndlp->nlp_flag & NLP_DELAY_TMO) {
+ lpfc_unreg_rpi(phba, ndlp);
+ }
+ }
+
+out:
+ lpfc_els_free_iocb(phba, cmdiocb);
+ return;
+}
+
+/*
+ * Build and issue an ELS LOGO to @ndlp.  Payload is the LOGO command
+ * word, this port's N_Port ID (big-endian), and its port name.
+ * Returns 0 on successful submission, 1 on failure.
+ */
+int
+lpfc_issue_els_logo(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
+ uint8_t retry)
+{
+ IOCB_t *icmd;
+ struct lpfc_iocbq *elsiocb;
+ struct lpfc_sli_ring *pring;
+ struct lpfc_sli *psli;
+ struct lpfc_dmabuf *bmp;
+ uint8_t *pcmd;
+ uint16_t cmdsize;
+
+ psli = &phba->sli;
+ pring = &psli->ring[LPFC_ELS_RING];
+
+ /* command word + DID word + port name, as laid out below */
+ cmdsize = 2 * (sizeof (uint32_t) + sizeof (struct lpfc_name));
+ if ((elsiocb = lpfc_prep_els_iocb(phba, 1, cmdsize, retry,
+ ndlp, ELS_CMD_LOGO)) == 0) {
+ return (1);
+ }
+
+ icmd = &elsiocb->iocb;
+ pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+ *((uint32_t *) (pcmd)) = ELS_CMD_LOGO;
+ pcmd += sizeof (uint32_t);
+
+ /* Fill in LOGO payload */
+ *((uint32_t *) (pcmd)) = be32_to_cpu(phba->fc_myDID);
+ pcmd += sizeof (uint32_t);
+ memcpy(pcmd, &phba->fc_portname, sizeof (struct lpfc_name));
+
+ /* The els iocb is fully initialize. Flush it to main store for the
+ * HBA. Note that all els iocb context buffer are from the driver's
+ * dma pool and have length LPFC_BPL_SIZE. Get a short-hand pointer to
+ * the physical address.
+ */
+ bmp = (struct lpfc_dmabuf *) (elsiocb->context2);
+ pci_dma_sync_single_for_device(phba->pcidev, bmp->phys,
+ LPFC_BPL_SIZE, PCI_DMA_TODEVICE);
+
+ phba->fc_stat.elsXmitLOGO++;
+ elsiocb->iocb_cmpl = lpfc_cmpl_els_logo;
+ /* Mark LOGO outstanding; completion handler clears this flag */
+ ndlp->nlp_flag |= NLP_LOGO_SND;
+ if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
+ ndlp->nlp_flag &= ~NLP_LOGO_SND;
+ lpfc_els_free_iocb(phba, elsiocb);
+ return (1);
+ }
+ return (0);
+}
+
+/*
+ * Generic completion handler for fire-and-forget ELS commands (SCR,
+ * FARPR): log the completion, check for a link-attention event, and
+ * free the command iocb.  No state machine involvement.
+ */
+static void
+lpfc_cmpl_els_cmd(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
+ struct lpfc_iocbq * rspiocb)
+{
+ IOCB_t *irsp;
+
+ irsp = &rspiocb->iocb;
+
+ /* ELS cmd tag <ulpIoTag> completes */
+ lpfc_printf_log(phba,
+ KERN_INFO,
+ LOG_ELS,
+ "%d:0106 ELS cmd tag x%x completes Data: x%x x%x\n",
+ phba->brd_no,
+ irsp->ulpIoTag, irsp->ulpStatus, irsp->un.ulpWord[4]);
+
+ /* Check to see if link went down during discovery */
+ lpfc_els_chk_latt(phba);
+ lpfc_els_free_iocb(phba, cmdiocb);
+ return;
+}
+
+/*
+ * Issue a State Change Registration (SCR, full registration) to
+ * @nportid.  A temporary nodelist entry is allocated only to prep the
+ * iocb and is freed on every path, including success.  Returns 0 on
+ * successful submission, 1 on failure.
+ */
+int
+lpfc_issue_els_scr(struct lpfc_hba * phba, uint32_t nportid, uint8_t retry)
+{
+ IOCB_t *icmd;
+ struct lpfc_iocbq *elsiocb;
+ struct lpfc_sli_ring *pring;
+ struct lpfc_sli *psli;
+ struct lpfc_dmabuf *bmp;
+ uint8_t *pcmd;
+ uint16_t cmdsize;
+ struct lpfc_nodelist *ndlp;
+
+ psli = &phba->sli;
+ pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
+ cmdsize = (sizeof (uint32_t) + sizeof (SCR));
+ if ((ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_ATOMIC)) == 0) {
+ return (1);
+ }
+
+ lpfc_nlp_init(phba, ndlp, nportid);
+
+ if ((elsiocb = lpfc_prep_els_iocb(phba, 1, cmdsize, retry,
+ ndlp, ELS_CMD_SCR)) == 0) {
+ mempool_free( ndlp, phba->nlp_mem_pool);
+ return (1);
+ }
+
+ icmd = &elsiocb->iocb;
+ pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+
+ *((uint32_t *) (pcmd)) = ELS_CMD_SCR;
+ pcmd += sizeof (uint32_t);
+
+ /* For SCR, remainder of payload is SCR parameter page */
+ memset(pcmd, 0, sizeof (SCR));
+ ((SCR *) pcmd)->Function = SCR_FUNC_FULL;
+
+ /* The els iocb is fully initialize. Flush it to main store for the
+ * HBA. Note that all els iocb context buffer are from the driver's
+ * dma pool and have length LPFC_BPL_SIZE. Get a short-hand pointer to
+ * the physical address.
+ */
+ bmp = (struct lpfc_dmabuf *) (elsiocb->context2);
+ pci_dma_sync_single_for_device(phba->pcidev, bmp->phys,
+ LPFC_BPL_SIZE, PCI_DMA_TODEVICE);
+
+ phba->fc_stat.elsXmitSCR++;
+ elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd;
+ if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
+ mempool_free( ndlp, phba->nlp_mem_pool);
+ lpfc_els_free_iocb(phba, elsiocb);
+ return (1);
+ }
+ /* Temporary node no longer needed once the iocb is queued */
+ mempool_free( ndlp, phba->nlp_mem_pool);
+ return (0);
+}
+
+/*
+ * Issue a FARP Response (FARPR) to @nportid, matching on both port and
+ * node name; originator names are filled in if the target node is
+ * already known.  A temporary nodelist entry is allocated just to prep
+ * the iocb and is freed on all paths.  Returns 0 on successful
+ * submission, 1 on failure.
+ * NOTE(review): the iocb is prepped with ELS_CMD_RNID while the payload
+ * command word is ELS_CMD_FARPR — confirm this mismatch is intentional.
+ */
+static int
+lpfc_issue_els_farpr(struct lpfc_hba * phba, uint32_t nportid, uint8_t retry)
+{
+ IOCB_t *icmd;
+ struct lpfc_iocbq *elsiocb;
+ struct lpfc_sli_ring *pring;
+ struct lpfc_sli *psli;
+ struct lpfc_dmabuf *bmp;
+ FARP *fp;
+ uint8_t *pcmd;
+ uint32_t *lp;
+ uint16_t cmdsize;
+ struct lpfc_nodelist *ondlp;
+ struct lpfc_nodelist *ndlp;
+
+ psli = &phba->sli;
+ pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
+ cmdsize = (sizeof (uint32_t) + sizeof (FARP));
+ if ((ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_ATOMIC)) == 0) {
+ return (1);
+ }
+ lpfc_nlp_init(phba, ndlp, nportid);
+
+ if ((elsiocb = lpfc_prep_els_iocb(phba, 1, cmdsize, retry,
+ ndlp, ELS_CMD_RNID)) == 0) {
+ mempool_free( ndlp, phba->nlp_mem_pool);
+ return (1);
+ }
+
+ icmd = &elsiocb->iocb;
+ pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+
+ *((uint32_t *) (pcmd)) = ELS_CMD_FARPR;
+ pcmd += sizeof (uint32_t);
+
+ /* Fill in FARPR payload */
+ fp = (FARP *) (pcmd);
+ memset(fp, 0, sizeof (FARP));
+ /* First two words of the FARP page: requester and responder DIDs */
+ lp = (uint32_t *) pcmd;
+ *lp++ = be32_to_cpu(nportid);
+ *lp++ = be32_to_cpu(phba->fc_myDID);
+ fp->Rflags = 0;
+ fp->Mflags = (FARP_MATCH_PORT | FARP_MATCH_NODE);
+
+ memcpy(&fp->RportName, &phba->fc_portname, sizeof (struct lpfc_name));
+ memcpy(&fp->RnodeName, &phba->fc_nodename, sizeof (struct lpfc_name));
+ if ((ondlp = lpfc_findnode_did(phba, NLP_SEARCH_ALL, nportid))) {
+ memcpy(&fp->OportName, &ondlp->nlp_portname,
+ sizeof (struct lpfc_name));
+ memcpy(&fp->OnodeName, &ondlp->nlp_nodename,
+ sizeof (struct lpfc_name));
+ }
+
+ /* The els iocb is fully initialize. Flush it to main store for the
+ * HBA. Note that all els iocb context buffer are from the driver's
+ * dma pool and have length LPFC_BPL_SIZE. Get a short-hand pointer to
+ * the physical address.
+ */
+ bmp = (struct lpfc_dmabuf *) (elsiocb->context2);
+ pci_dma_sync_single_for_device(phba->pcidev, bmp->phys,
+ LPFC_BPL_SIZE, PCI_DMA_TODEVICE);
+
+ phba->fc_stat.elsXmitFARPR++;
+ elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd;
+ if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
+ mempool_free( ndlp, phba->nlp_mem_pool);
+ lpfc_els_free_iocb(phba, elsiocb);
+ return (1);
+ }
+ /* Temporary node no longer needed once the iocb is queued */
+ mempool_free( ndlp, phba->nlp_mem_pool);
+ return (0);
+}
+
+/*
+ * Timer callback for a delayed ELS retry (armed in lpfc_els_retry).
+ * @ptr is the nodelist entry whose nlp_delayfunc expired.  Under the
+ * host lock, clears NLP_DELAY_TMO and re-issues the saved last ELS
+ * command (nlp_last_elscmd) with the saved retry count, moving the node
+ * to the matching state/list first.
+ */
+void
+lpfc_els_retry_delay(unsigned long ptr)
+{
+ struct lpfc_hba *phba;
+ struct lpfc_nodelist *ndlp;
+ uint32_t cmd;
+ uint32_t did;
+ uint8_t retry;
+ unsigned long iflag;
+
+ ndlp = (struct lpfc_nodelist *)ptr;
+ phba = ndlp->nlp_phba;
+ spin_lock_irqsave(phba->host->host_lock, iflag);
+ did = (uint32_t) (ndlp->nlp_DID);
+ cmd = (uint32_t) (ndlp->nlp_last_elscmd);
+
+ ndlp->nlp_flag &= ~NLP_DELAY_TMO;
+ retry = ndlp->nlp_retry;
+
+ switch (cmd) {
+ case ELS_CMD_FLOGI:
+ lpfc_issue_els_flogi(phba, ndlp, retry);
+ break;
+ case ELS_CMD_PLOGI:
+ ndlp->nlp_state = NLP_STE_PLOGI_ISSUE;
+ lpfc_nlp_list(phba, ndlp, NLP_PLOGI_LIST);
+ lpfc_issue_els_plogi(phba, ndlp, retry);
+ break;
+ case ELS_CMD_ADISC:
+ ndlp->nlp_state = NLP_STE_ADISC_ISSUE;
+ lpfc_nlp_list(phba, ndlp, NLP_ADISC_LIST);
+ lpfc_issue_els_adisc(phba, ndlp, retry);
+ break;
+ case ELS_CMD_PRLI:
+ ndlp->nlp_state = NLP_STE_PRLI_ISSUE;
+ lpfc_nlp_list(phba, ndlp, NLP_PRLI_LIST);
+ lpfc_issue_els_prli(phba, ndlp, retry);
+ break;
+ case ELS_CMD_LOGO:
+ ndlp->nlp_state = NLP_STE_NPR_NODE;
+ lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
+ lpfc_issue_els_logo(phba, ndlp, retry);
+ break;
+ }
+ spin_unlock_irqrestore(phba->host->host_lock, iflag);
+ return;
+}
+
+/*
+ * Decide whether a failed ELS command should be retried, and if so
+ * re-issue it (immediately, or after a one-second delay via the node's
+ * nlp_delayfunc timer).  The decision is based on the iocb status
+ * (local reject codes, NPort/Fabric RJT/BSY, LS_RJT reason codes) and
+ * on a per-status retry cap (maxretry).
+ * Returns 1 if the command was re-issued or a delayed retry was armed
+ * (callers must not drive the state machine further for this command),
+ * 0 if no retry will be done.
+ */
+static int
+lpfc_els_retry(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
+ struct lpfc_iocbq * rspiocb)
+{
+ IOCB_t *irsp;
+ struct lpfc_dmabuf *pcmd;
+ struct lpfc_nodelist *ndlp;
+ uint32_t *elscmd;
+ struct ls_rjt stat;
+ int retry, maxretry;
+ int delay;
+ uint32_t cmd;
+
+ retry = 0;
+ delay = 0;
+ maxretry = lpfc_max_els_tries;
+ irsp = &rspiocb->iocb;
+ ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
+ pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+ cmd = 0;
+ /* Note: context2 may be 0 for internal driver abort
+ * of delays ELS command.
+ */
+
+ /* Recover which ELS command this was from the payload's first word */
+ if (pcmd && pcmd->virt) {
+ elscmd = (uint32_t *) (pcmd->virt);
+ cmd = *elscmd++;
+ }
+
+ switch (irsp->ulpStatus) {
+ case IOSTAT_FCP_RSP_ERROR:
+ case IOSTAT_REMOTE_STOP:
+ break;
+
+ case IOSTAT_LOCAL_REJECT:
+ /* Local reject reason is in the low byte of word 4 */
+ switch ((irsp->un.ulpWord[4] & 0xff)) {
+ case IOERR_LOOP_OPEN_FAILURE:
+ if (cmd == ELS_CMD_PLOGI) {
+ if (cmdiocb->retry == 0) {
+ delay = 1;
+ }
+ }
+ retry = 1;
+ break;
+
+ case IOERR_SEQUENCE_TIMEOUT:
+ retry = 1;
+ if ((cmd == ELS_CMD_FLOGI)
+ && (phba->fc_topology != TOPOLOGY_LOOP)) {
+ delay = 1;
+ maxretry = 48;
+ }
+ break;
+
+ case IOERR_NO_RESOURCES:
+ if (cmd == ELS_CMD_PLOGI) {
+ delay = 1;
+ }
+ retry = 1;
+ break;
+
+ case IOERR_INVALID_RPI:
+ retry = 1;
+ break;
+ }
+ break;
+
+ case IOSTAT_NPORT_RJT:
+ case IOSTAT_FABRIC_RJT:
+ /* Retry only if the reject is flagged as temporary */
+ if (irsp->un.ulpWord[4] & RJT_UNAVAIL_TEMP) {
+ retry = 1;
+ break;
+ }
+ break;
+
+ case IOSTAT_NPORT_BSY:
+ case IOSTAT_FABRIC_BSY:
+ retry = 1;
+ break;
+
+ case IOSTAT_LS_RJT:
+ stat.un.lsRjtError = be32_to_cpu(irsp->un.ulpWord[4]);
+ /* Added for Vendor specifc support
+ * Just keep retrying for these Rsn / Exp codes
+ */
+ switch (stat.un.b.lsRjtRsnCode) {
+ case LSRJT_UNABLE_TPC:
+ if (stat.un.b.lsRjtRsnCodeExp ==
+ LSEXP_CMD_IN_PROGRESS) {
+ if (cmd == ELS_CMD_PLOGI) {
+ delay = 1;
+ maxretry = 48;
+ }
+ retry = 1;
+ break;
+ }
+ if (cmd == ELS_CMD_PLOGI) {
+ delay = 1;
+ maxretry = lpfc_max_els_tries + 1;
+ retry = 1;
+ break;
+ }
+ break;
+
+ case LSRJT_LOGICAL_BSY:
+ if (cmd == ELS_CMD_PLOGI) {
+ delay = 1;
+ maxretry = 48;
+ }
+ retry = 1;
+ break;
+ }
+ break;
+
+ case IOSTAT_INTERMED_RSP:
+ case IOSTAT_BA_RJT:
+ break;
+
+ default:
+ break;
+ }
+
+ /* FDMI management server commands are always retried */
+ if (ndlp->nlp_DID == FDMI_DID) {
+ retry = 1;
+ }
+
+ /* Retry cap overrides any retry decision made above */
+ if ((++cmdiocb->retry) >= maxretry) {
+ phba->fc_stat.elsRetryExceeded++;
+ retry = 0;
+ }
+
+ if (retry) {
+
+ /* Retry ELS command <elsCmd> to remote NPORT <did> */
+ lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
+ "%d:0107 Retry ELS command x%x to remote "
+ "NPORT x%x Data: x%x x%x\n",
+ phba->brd_no,
+ cmd, ndlp->nlp_DID, cmdiocb->retry, delay);
+
+ if ((cmd == ELS_CMD_PLOGI) || (cmd == ELS_CMD_ADISC)) {
+ /* If discovery / RSCN timer is running, reset it */
+ if (timer_pending(&phba->fc_disctmo) ||
+ (phba->fc_flag & FC_RSCN_MODE)) {
+ lpfc_set_disctmo(phba);
+ }
+ }
+
+ phba->fc_stat.elsXmitRetry++;
+ if (delay) {
+ /* Delayed retry: save command/retry count on the node
+ * and arm its 1 second timer (lpfc_els_retry_delay).
+ */
+ phba->fc_stat.elsDelayRetry++;
+ ndlp->nlp_retry = cmdiocb->retry;
+
+ mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
+ ndlp->nlp_flag |= NLP_DELAY_TMO;
+
+ ndlp->nlp_state = NLP_STE_NPR_NODE;
+ lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
+ ndlp->nlp_last_elscmd = cmd;
+
+ return (1);
+ }
+ switch (cmd) {
+ case ELS_CMD_FLOGI:
+ lpfc_issue_els_flogi(phba, ndlp, cmdiocb->retry);
+ return (1);
+ case ELS_CMD_PLOGI:
+ ndlp->nlp_state = NLP_STE_PLOGI_ISSUE;
+ lpfc_nlp_list(phba, ndlp, NLP_PLOGI_LIST);
+ lpfc_issue_els_plogi(phba, ndlp, cmdiocb->retry);
+ return (1);
+ case ELS_CMD_ADISC:
+ ndlp->nlp_state = NLP_STE_ADISC_ISSUE;
+ lpfc_nlp_list(phba, ndlp, NLP_ADISC_LIST);
+ lpfc_issue_els_adisc(phba, ndlp, cmdiocb->retry);
+ return (1);
+ case ELS_CMD_PRLI:
+ ndlp->nlp_state = NLP_STE_PRLI_ISSUE;
+ lpfc_nlp_list(phba, ndlp, NLP_PRLI_LIST);
+ lpfc_issue_els_prli(phba, ndlp, cmdiocb->retry);
+ return (1);
+ case ELS_CMD_LOGO:
+ ndlp->nlp_state = NLP_STE_NPR_NODE;
+ lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
+ lpfc_issue_els_logo(phba, ndlp, cmdiocb->retry);
+ return (1);
+ }
+ }
+
+ /* No retry ELS command <elsCmd> to remote NPORT <did> */
+ lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
+ "%d:0108 No retry ELS command x%x to remote NPORT x%x "
+ "Data: x%x x%x\n",
+ phba->brd_no,
+ cmd, ndlp->nlp_DID, cmdiocb->retry, ndlp->nlp_flag);
+
+ return (0);
+}
+
+/*
+ * Release an ELS iocb and its DMA buffers: the response buffer chained
+ * off context2's list, the command buffer itself (context2), and the
+ * buffer pointer list (context3), then the iocb back to its pool.
+ * Always returns 0.
+ */
+int
+lpfc_els_free_iocb(struct lpfc_hba * phba, struct lpfc_iocbq * elsiocb)
+{
+ struct lpfc_dmabuf *buf_ptr, *buf_ptr1;
+
+ /* context2 = cmd, context2->next = rsp, context3 = bpl */
+ if (elsiocb->context2) {
+ buf_ptr1 = (struct lpfc_dmabuf *) elsiocb->context2;
+ /* Free the response before processing the command. */
+ if (!list_empty(&buf_ptr1->list)) {
+ buf_ptr = list_entry(buf_ptr1->list.next,
+ struct lpfc_dmabuf, list);
+ lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
+ kfree(buf_ptr);
+ }
+ lpfc_mbuf_free(phba, buf_ptr1->virt, buf_ptr1->phys);
+ kfree(buf_ptr1);
+ }
+
+ if (elsiocb->context3) {
+ buf_ptr = (struct lpfc_dmabuf *) elsiocb->context3;
+ lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
+ kfree(buf_ptr);
+ }
+
+ mempool_free( elsiocb, phba->iocb_mem_pool);
+ return 0;
+}
+
+/*
+ * Completion handler for an ACC sent in response to a received LOGO.
+ * Clears NLP_LOGO_ACC and finishes tearing down the login: a node that
+ * was never used is dropped from all lists; a node in NPort Recovery
+ * has its RPI unregistered.  Always frees cmdiocb.
+ */
+static void
+lpfc_cmpl_els_logo_acc(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
+ struct lpfc_iocbq * rspiocb)
+{
+ struct lpfc_nodelist *ndlp;
+
+ ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
+
+ /* ACC to LOGO completes to NPort <nlp_DID> */
+ lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
+ "%d:0109 ACC to LOGO completes to NPort x%x "
+ "Data: x%x x%x x%x\n",
+ phba->brd_no, ndlp->nlp_DID, ndlp->nlp_flag,
+ ndlp->nlp_state, ndlp->nlp_rpi);
+
+ ndlp->nlp_flag &= ~NLP_LOGO_ACC;
+
+ switch (ndlp->nlp_state) {
+ case NLP_STE_UNUSED_NODE: /* node is just allocated */
+ lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
+ break;
+ case NLP_STE_NPR_NODE: /* NPort Recovery mode */
+ lpfc_unreg_rpi(phba, ndlp);
+ break;
+ default:
+ break;
+ }
+ lpfc_els_free_iocb(phba, cmdiocb);
+ return;
+}
+
+/*
+ * Completion handler for a generic ELS ACC (or LS_RJT) response.
+ * If a deferred REG_LOGIN mailbox command was attached (PLOGI ACC with
+ * NLP_ACC_REGLOGIN set) and the ACC completed cleanly, issue it now;
+ * otherwise free the mailbox and, for the reg-login case, drop the
+ * node.  Always clears NLP_ACC_REGLOGIN and frees cmdiocb.
+ */
+static void
+lpfc_cmpl_els_acc(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
+ struct lpfc_iocbq * rspiocb)
+{
+ struct lpfc_nodelist *ndlp;
+ LPFC_MBOXQ_t *mbox = NULL;
+
+ ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
+ if (cmdiocb->context_un.mbox)
+ mbox = cmdiocb->context_un.mbox;
+
+
+ /* Check to see if link went down during discovery */
+ if ((lpfc_els_chk_latt(phba)) || !ndlp) {
+ if (mbox) {
+ mempool_free( mbox, phba->mbox_mem_pool);
+ }
+ goto out;
+ }
+
+ /* ELS response tag <ulpIoTag> completes */
+ lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
+ "%d:0110 ELS response tag x%x completes "
+ "Data: x%x x%x x%x x%x x%x x%x\n",
+ phba->brd_no,
+ cmdiocb->iocb.ulpIoTag, rspiocb->iocb.ulpStatus,
+ rspiocb->iocb.un.ulpWord[4], ndlp->nlp_DID,
+ ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi);
+
+ if (mbox) {
+ if ((rspiocb->iocb.ulpStatus == 0)
+ && (ndlp->nlp_flag & NLP_ACC_REGLOGIN)) {
+ /* set_slim mailbox command needs to execute first,
+ * queue this command to be processed later.
+ */
+ lpfc_unreg_rpi(phba, ndlp);
+ mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
+ mbox->context2 = ndlp;
+ ndlp->nlp_state = NLP_STE_REG_LOGIN_ISSUE;
+ lpfc_nlp_list(phba, ndlp, NLP_REGLOGIN_LIST);
+ if (lpfc_sli_issue_mbox(phba, mbox,
+ (MBX_NOWAIT | MBX_STOP_IOCB))
+ != MBX_NOT_FINISHED) {
+ /* Mailbox accepted; its completion owns it now */
+ goto out;
+ }
+ /* NOTE: we should have messages for unsuccessful
+ reglogin */
+ mempool_free( mbox, phba->mbox_mem_pool);
+ } else {
+ mempool_free( mbox, phba->mbox_mem_pool);
+ /* ACC failed; drop the node that was awaiting reg-login */
+ if (ndlp->nlp_flag & NLP_ACC_REGLOGIN) {
+ lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
+ }
+ }
+ }
+out:
+ if(ndlp)
+ ndlp->nlp_flag &= ~NLP_ACC_REGLOGIN;
+ lpfc_els_free_iocb(phba, cmdiocb);
+ return;
+}
+
+/*
+ * Build and transmit an ELS ACC response to @oldiocb.
+ * @flag selects the payload: ELS_CMD_ACC (bare ACC word) or
+ * ELS_CMD_PLOGI (ACC word plus this port's service parameters; @mbox,
+ * if given, is attached for deferred REG_LOGIN in lpfc_cmpl_els_acc).
+ * @newnode clears context1 so a temporary node is not referenced after
+ * this call.  Returns 0 on successful submission, 1 on failure.
+ */
+int
+lpfc_els_rsp_acc(struct lpfc_hba * phba, uint32_t flag,
+ struct lpfc_iocbq * oldiocb, struct lpfc_nodelist * ndlp,
+ LPFC_MBOXQ_t * mbox, uint8_t newnode)
+{
+ IOCB_t *icmd;
+ IOCB_t *oldcmd;
+ struct lpfc_iocbq *elsiocb;
+ struct lpfc_sli_ring *pring;
+ struct lpfc_sli *psli;
+ struct lpfc_dmabuf *bmp;
+ uint8_t *pcmd;
+ uint16_t cmdsize;
+
+ psli = &phba->sli;
+ pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
+ oldcmd = &oldiocb->iocb;
+
+ switch (flag) {
+ case ELS_CMD_ACC:
+ cmdsize = sizeof (uint32_t);
+ if ((elsiocb =
+ lpfc_prep_els_iocb(phba, 0, cmdsize, oldiocb->retry,
+ ndlp, ELS_CMD_ACC)) == 0) {
+ return (1);
+ }
+ icmd = &elsiocb->iocb;
+ icmd->ulpContext = oldcmd->ulpContext; /* Xri */
+ pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+ *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
+ pcmd += sizeof (uint32_t);
+ break;
+ case ELS_CMD_PLOGI:
+ cmdsize = (sizeof (struct serv_parm) + sizeof (uint32_t));
+ if ((elsiocb =
+ lpfc_prep_els_iocb(phba, 0, cmdsize, oldiocb->retry,
+ ndlp, ELS_CMD_ACC)) == 0) {
+ return (1);
+ }
+ icmd = &elsiocb->iocb;
+ icmd->ulpContext = oldcmd->ulpContext; /* Xri */
+ pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+
+ if (mbox)
+ elsiocb->context_un.mbox = mbox;
+
+ *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
+ pcmd += sizeof (uint32_t);
+ memcpy(pcmd, &phba->fc_sparam, sizeof (struct serv_parm));
+ break;
+ default:
+ return (1);
+ }
+
+ if (newnode)
+ elsiocb->context1 = NULL;
+
+ /* Xmit ELS ACC response tag <ulpIoTag> */
+ lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
+ "%d:0128 Xmit ELS ACC response tag x%x "
+ "Data: x%x x%x x%x x%x x%x\n",
+ phba->brd_no,
+ elsiocb->iocb.ulpIoTag,
+ elsiocb->iocb.ulpContext, ndlp->nlp_DID,
+ ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi);
+
+ bmp = (struct lpfc_dmabuf *) (elsiocb->context2);
+ pci_dma_sync_single_for_device(phba->pcidev, bmp->phys,
+ LPFC_BPL_SIZE, PCI_DMA_TODEVICE);
+
+ /* A LOGO being accepted needs the dedicated completion handler */
+ if (ndlp->nlp_flag & NLP_LOGO_ACC) {
+ elsiocb->iocb_cmpl = lpfc_cmpl_els_logo_acc;
+ } else {
+ elsiocb->iocb_cmpl = lpfc_cmpl_els_acc;
+ }
+
+ phba->fc_stat.elsXmitACC++;
+ if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
+ lpfc_els_free_iocb(phba, elsiocb);
+ return (1);
+ }
+ return (0);
+}
+
+/*
+ * Build and transmit an ELS LS_RJT response to @oldiocb, carrying the
+ * encoded reason/explanation word @rejectError.  Reuses the exchange
+ * (Xri) of the rejected command.  Returns 0 on successful submission,
+ * 1 on failure.
+ */
+int
+lpfc_els_rsp_reject(struct lpfc_hba * phba, uint32_t rejectError,
+ struct lpfc_iocbq * oldiocb, struct lpfc_nodelist * ndlp)
+{
+ IOCB_t *icmd;
+ IOCB_t *oldcmd;
+ struct lpfc_iocbq *elsiocb;
+ struct lpfc_sli_ring *pring;
+ struct lpfc_sli *psli;
+ struct lpfc_dmabuf *bmp;
+ uint8_t *pcmd;
+ uint16_t cmdsize;
+
+ psli = &phba->sli;
+ pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
+
+ /* LS_RJT word plus the reject error word */
+ cmdsize = 2 * sizeof (uint32_t);
+ if ((elsiocb = lpfc_prep_els_iocb(phba, 0, cmdsize, oldiocb->retry,
+ ndlp, ELS_CMD_LS_RJT)) == 0) {
+ return (1);
+ }
+
+ icmd = &elsiocb->iocb;
+ oldcmd = &oldiocb->iocb;
+ icmd->ulpContext = oldcmd->ulpContext; /* Xri */
+ pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+
+ *((uint32_t *) (pcmd)) = ELS_CMD_LS_RJT;
+ pcmd += sizeof (uint32_t);
+ *((uint32_t *) (pcmd)) = rejectError;
+
+ /* The els iocb is fully initialize. Flush it to main store for the
+ * HBA. Note that all els iocb context buffer are from the driver's
+ * dma pool and have length LPFC_BPL_SIZE. Get a short-hand pointer to
+ * the physical address.
+ */
+ bmp = (struct lpfc_dmabuf *) (elsiocb->context2);
+ pci_dma_sync_single_for_device(phba->pcidev, bmp->phys,
+ LPFC_BPL_SIZE, PCI_DMA_TODEVICE);
+
+ /* Xmit ELS RJT <err> response tag <ulpIoTag> */
+ lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
+ "%d:0129 Xmit ELS RJT x%x response tag x%x "
+ "Data: x%x x%x x%x x%x x%x\n",
+ phba->brd_no,
+ rejectError, elsiocb->iocb.ulpIoTag,
+ elsiocb->iocb.ulpContext, ndlp->nlp_DID,
+ ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi);
+
+ phba->fc_stat.elsXmitLSRJT++;
+ elsiocb->iocb_cmpl = lpfc_cmpl_els_acc;
+
+ if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
+ lpfc_els_free_iocb(phba, elsiocb);
+ return (1);
+ }
+ return (0);
+}
+
/*
 * lpfc_els_rsp_adisc_acc - Transmit an ACC response to a received ADISC.
 *
 * Builds an ACC payload carrying this port's hard AL_PA preference,
 * WWPN/WWNN and N_Port ID, echoes the XRI of the received command, and
 * issues it on the ELS ring.
 *
 * @phba:    HBA context.
 * @oldiocb: the received ADISC command IOCB (source of XRI and retry count).
 * @ndlp:    remote node being accepted.
 *
 * Returns 0 on success, 1 on allocation or issue failure.
 */
int
lpfc_els_rsp_adisc_acc(struct lpfc_hba * phba,
		       struct lpfc_iocbq * oldiocb, struct lpfc_nodelist * ndlp)
{
	ADISC *ap;
	IOCB_t *icmd;
	IOCB_t *oldcmd;
	struct lpfc_iocbq *elsiocb;
	struct lpfc_sli_ring *pring;
	struct lpfc_sli *psli;
	struct lpfc_dmabuf *bmp;
	uint8_t *pcmd;
	uint16_t cmdsize;

	psli = &phba->sli;
	pring = &psli->ring[LPFC_ELS_RING];	/* ELS ring */

	/* Payload: ACC command word followed by the ADISC parameter page. */
	cmdsize = sizeof (uint32_t) + sizeof (ADISC);
	if ((elsiocb = lpfc_prep_els_iocb(phba, 0, cmdsize, oldiocb->retry,
					  ndlp, ELS_CMD_ACC)) == 0) {
		return (1);
	}

	/* Xmit ADISC ACC response tag <ulpIoTag> */
	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
			"%d:0130 Xmit ADISC ACC response tag x%x "
			"Data: x%x x%x x%x x%x x%x\n",
			phba->brd_no,
			elsiocb->iocb.ulpIoTag,
			elsiocb->iocb.ulpContext, ndlp->nlp_DID,
			ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi);

	icmd = &elsiocb->iocb;
	oldcmd = &oldiocb->iocb;
	icmd->ulpContext = oldcmd->ulpContext;	/* Xri */
	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);

	*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
	pcmd += sizeof (uint32_t);

	/* Fill in the ADISC payload with our identity. */
	ap = (ADISC *) (pcmd);
	ap->hardAL_PA = phba->fc_pref_ALPA;
	memcpy(&ap->portName, &phba->fc_portname, sizeof (struct lpfc_name));
	memcpy(&ap->nodeName, &phba->fc_nodename, sizeof (struct lpfc_name));
	ap->DID = be32_to_cpu(phba->fc_myDID);

	/* The ELS iocb payload is fully initialized.  Sync it to main store
	 * for the HBA.  All ELS iocb context buffers come from the driver's
	 * DMA pool and have length LPFC_BPL_SIZE.
	 */
	bmp = (struct lpfc_dmabuf *) (elsiocb->context2);
	pci_dma_sync_single_for_device(phba->pcidev, bmp->phys,
			LPFC_BPL_SIZE, PCI_DMA_TODEVICE);

	phba->fc_stat.elsXmitACC++;
	elsiocb->iocb_cmpl = lpfc_cmpl_els_acc;

	if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
		lpfc_els_free_iocb(phba, elsiocb);
		return (1);
	}
	return (0);
}
+
/*
 * lpfc_els_rsp_prli_acc - Transmit an ACC response to a received PRLI.
 *
 * Builds the PRLI accept page (FCP type, initiator function, and - when
 * the firmware feature level supports it - the FC-TAPE related bits),
 * echoes the received command's XRI, and issues the response on the ELS
 * ring.
 *
 * @phba:    HBA context.
 * @oldiocb: the received PRLI command IOCB.
 * @ndlp:    remote node being accepted.
 *
 * Returns 0 on success, 1 on allocation or issue failure.
 */
int
lpfc_els_rsp_prli_acc(struct lpfc_hba * phba,
		      struct lpfc_iocbq * oldiocb, struct lpfc_nodelist * ndlp)
{
	PRLI *npr;
	lpfc_vpd_t *vpd;
	IOCB_t *icmd;
	IOCB_t *oldcmd;
	struct lpfc_iocbq *elsiocb;
	struct lpfc_sli_ring *pring;
	struct lpfc_sli *psli;
	struct lpfc_dmabuf *bmp;
	uint8_t *pcmd;
	uint16_t cmdsize;

	psli = &phba->sli;
	pring = &psli->ring[LPFC_ELS_RING];	/* ELS ring */

	cmdsize = sizeof (uint32_t) + sizeof (PRLI);
	/* The ELS command code combines ACC with the PRLI opcode (with the
	 * response bit masked off) so the completion path can tell which
	 * command was accepted. */
	if ((elsiocb = lpfc_prep_els_iocb(phba, 0, cmdsize, oldiocb->retry,
					  ndlp,
					  (ELS_CMD_ACC |
					   (ELS_CMD_PRLI & ~ELS_RSP_MASK)))) ==
	    0) {
		return (1);
	}

	/* Xmit PRLI ACC response tag <ulpIoTag> */
	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
			"%d:0131 Xmit PRLI ACC response tag x%x "
			"Data: x%x x%x x%x x%x x%x\n",
			phba->brd_no,
			elsiocb->iocb.ulpIoTag,
			elsiocb->iocb.ulpContext, ndlp->nlp_DID,
			ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi);

	icmd = &elsiocb->iocb;
	oldcmd = &oldiocb->iocb;
	icmd->ulpContext = oldcmd->ulpContext;	/* Xri */
	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);

	*((uint32_t *) (pcmd)) = (ELS_CMD_ACC | (ELS_CMD_PRLI & ~ELS_RSP_MASK));
	pcmd += sizeof (uint32_t);

	/* For PRLI, remainder of payload is PRLI parameter page */
	memset(pcmd, 0, sizeof (PRLI));

	npr = (PRLI *) pcmd;
	vpd = &phba->vpd;
	/*
	 * If our firmware version is 3.20 or later,
	 * set the following bits for FC-TAPE support.
	 */
	if (vpd->rev.feaLevelHigh >= 0x02) {
		npr->ConfmComplAllowed = 1;
		npr->Retry = 1;
		npr->TaskRetryIdReq = 1;
	}

	npr->acceptRspCode = PRLI_REQ_EXECUTED;
	npr->estabImagePair = 1;
	npr->readXferRdyDis = 1;
	npr->ConfmComplAllowed = 1;

	/* Advertise FCP initiator capability. */
	npr->prliType = PRLI_FCP_TYPE;
	npr->initiatorFunc = 1;

	/* The ELS iocb payload is fully initialized.  Sync it to main store
	 * for the HBA.  All ELS iocb context buffers come from the driver's
	 * DMA pool and have length LPFC_BPL_SIZE.
	 */
	bmp = (struct lpfc_dmabuf *) (elsiocb->context2);
	pci_dma_sync_single_for_device(phba->pcidev, bmp->phys,
			LPFC_BPL_SIZE, PCI_DMA_TODEVICE);

	phba->fc_stat.elsXmitACC++;
	elsiocb->iocb_cmpl = lpfc_cmpl_els_acc;

	if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
		lpfc_els_free_iocb(phba, elsiocb);
		return (1);
	}
	return (0);
}
+
/*
 * lpfc_els_rsp_rnid_acc - Transmit an ACC response to a received RNID.
 *
 * Supports the common-identification format (0) and the topology
 * discovery format (RNID_TOPOLOGY_DISC); any other requested format gets
 * an ACC with zero-length common and specific blocks.
 *
 * @phba:    HBA context.
 * @format:  RNID data format requested by the originator.
 * @oldiocb: the received RNID command IOCB.
 * @ndlp:    remote node the response is addressed to.
 *
 * Returns 0 on success, 1 on allocation or issue failure.
 */
static int
lpfc_els_rsp_rnid_acc(struct lpfc_hba * phba,
		      uint8_t format,
		      struct lpfc_iocbq * oldiocb, struct lpfc_nodelist * ndlp)
{
	RNID *rn;
	IOCB_t *icmd;
	IOCB_t *oldcmd;
	struct lpfc_iocbq *elsiocb;
	struct lpfc_sli_ring *pring;
	struct lpfc_sli *psli;
	struct lpfc_dmabuf *bmp;
	uint8_t *pcmd;
	uint16_t cmdsize;

	psli = &phba->sli;
	pring = &psli->ring[LPFC_ELS_RING];

	/* ACC word + RNID header word + WWPN/WWNN; topology format adds
	 * the topology-discovery specific block. */
	cmdsize = sizeof (uint32_t) + sizeof (uint32_t)
		+ (2 * sizeof (struct lpfc_name));
	if (format)
		cmdsize += sizeof (RNID_TOP_DISC);

	if ((elsiocb = lpfc_prep_els_iocb(phba, 0, cmdsize, oldiocb->retry,
					  ndlp, ELS_CMD_ACC)) == 0) {
		return (1);
	}

	/* Xmit RNID ACC response tag <ulpIoTag> */
	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
			"%d:0132 Xmit RNID ACC response tag x%x "
			"Data: x%x\n",
			phba->brd_no,
			elsiocb->iocb.ulpIoTag,
			elsiocb->iocb.ulpContext);

	icmd = &elsiocb->iocb;
	oldcmd = &oldiocb->iocb;
	icmd->ulpContext = oldcmd->ulpContext;	/* Xri */
	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);

	*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
	pcmd += sizeof (uint32_t);

	memset(pcmd, 0, sizeof (RNID));
	rn = (RNID *) (pcmd);
	rn->Format = format;
	rn->CommonLen = (2 * sizeof (struct lpfc_name));
	memcpy(&rn->portName, &phba->fc_portname, sizeof (struct lpfc_name));
	memcpy(&rn->nodeName, &phba->fc_nodename, sizeof (struct lpfc_name));
	switch (format) {
	case 0:
		rn->SpecificLen = 0;
		break;
	case RNID_TOPOLOGY_DISC:
		rn->SpecificLen = sizeof (RNID_TOP_DISC);
		memcpy(&rn->un.topologyDisc.portName,
		       &phba->fc_portname, sizeof (struct lpfc_name));
		rn->un.topologyDisc.unitType = RNID_HBA;
		rn->un.topologyDisc.physPort = 0;
		rn->un.topologyDisc.attachedNodes = 0;
		break;
	default:
		/* Unknown format: answer with empty identification data. */
		rn->CommonLen = 0;
		rn->SpecificLen = 0;
		break;
	}

	/* The ELS iocb payload is fully initialized.  Sync it to main store
	 * for the HBA.  All ELS iocb context buffers come from the driver's
	 * DMA pool and have length LPFC_BPL_SIZE.
	 */
	bmp = (struct lpfc_dmabuf *) (elsiocb->context2);
	pci_dma_sync_single_for_device(phba->pcidev, bmp->phys,
			LPFC_BPL_SIZE, PCI_DMA_TODEVICE);

	phba->fc_stat.elsXmitACC++;
	elsiocb->iocb_cmpl = lpfc_cmpl_els_acc;
	elsiocb->context1 = NULL;	/* Don't need ndlp for cmpl,
					 * it could be freed */

	if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
		lpfc_els_free_iocb(phba, elsiocb);
		return (1);
	}
	return (0);
}
+
/*
 * lpfc_els_disc_adisc - Issue outstanding ADISCs for discovery.
 *
 * Walks the NPR (node-port-recovery) list and issues an ELS ADISC to
 * each node still marked for discovery (NLP_NPR_2B_DISC) that has the
 * NLP_NPR_ADISC hint set, moving the node to the ADISC list.  Stops once
 * cfg_discovery_threads concurrent discoveries are in flight and sets
 * FC_NLP_MORE so the caller knows more nodes remain.
 *
 * Returns the number of ADISCs issued.
 */
int
lpfc_els_disc_adisc(struct lpfc_hba * phba)
{
	int sentadisc;
	struct lpfc_nodelist *ndlp, *next_ndlp;

	sentadisc = 0;
	/* go thru NPR list and issue any remaining ELS ADISCs */
	list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_npr_list,
			nlp_listp) {
		if(ndlp->nlp_flag & NLP_NPR_2B_DISC) {
			if(ndlp->nlp_flag & NLP_NPR_ADISC) {
				ndlp->nlp_flag &= ~NLP_NPR_ADISC;
				ndlp->nlp_state = NLP_STE_ADISC_ISSUE;
				lpfc_nlp_list(phba, ndlp,
					NLP_ADISC_LIST);
				lpfc_issue_els_adisc(phba, ndlp, 0);
				sentadisc++;
				phba->num_disc_nodes++;
				/* Throttle: stop when the configured number
				 * of concurrent discovery threads is reached.
				 */
				if (phba->num_disc_nodes >=
				    phba->cfg_discovery_threads) {
					phba->fc_flag |= FC_NLP_MORE;
					break;
				}
			}
		}
	}
	if (sentadisc == 0) {
		/* Nothing left to discover via ADISC. */
		phba->fc_flag &= ~FC_NLP_MORE;
	}
	return(sentadisc);
}
+
/*
 * lpfc_els_disc_plogi - Issue outstanding PLOGIs for discovery.
 *
 * Walks the NPR list and issues an ELS PLOGI to each node still marked
 * for discovery (NLP_NPR_2B_DISC) that is not delayed (NLP_DELAY_TMO)
 * and not slated for ADISC, moving the node to the PLOGI list.  Stops
 * once cfg_discovery_threads concurrent discoveries are in flight and
 * sets FC_NLP_MORE so the caller knows more nodes remain.
 *
 * Returns the number of PLOGIs issued.
 */
int
lpfc_els_disc_plogi(struct lpfc_hba * phba)
{
	int sentplogi;
	struct lpfc_nodelist *ndlp, *next_ndlp;

	sentplogi = 0;
	/* go thru NPR list and issue any remaining ELS PLOGIs */
	list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_npr_list,
			nlp_listp) {
		if((ndlp->nlp_flag & NLP_NPR_2B_DISC) &&
		   (!(ndlp->nlp_flag & NLP_DELAY_TMO))) {
			if(!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
				ndlp->nlp_state = NLP_STE_PLOGI_ISSUE;
				lpfc_nlp_list(phba, ndlp, NLP_PLOGI_LIST);
				lpfc_issue_els_plogi(phba, ndlp, 0);
				sentplogi++;
				phba->num_disc_nodes++;
				/* Throttle: stop when the configured number
				 * of concurrent discovery threads is reached.
				 */
				if (phba->num_disc_nodes >=
				    phba->cfg_discovery_threads) {
					phba->fc_flag |= FC_NLP_MORE;
					break;
				}
			}
		}
	}
	if (sentplogi == 0) {
		/* Nothing left to discover via PLOGI. */
		phba->fc_flag &= ~FC_NLP_MORE;
	}
	return(sentplogi);
}
+
+int
+lpfc_els_flush_rscn(struct lpfc_hba * phba)
+{
+ struct lpfc_dmabuf *mp;
+ int i;
+
+ for (i = 0; i < phba->fc_rscn_id_cnt; i++) {
+ mp = phba->fc_rscn_id_list[i];
+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ kfree(mp);
+ phba->fc_rscn_id_list[i] = NULL;
+ }
+ phba->fc_rscn_id_cnt = 0;
+ phba->fc_flag &= ~(FC_RSCN_MODE | FC_RSCN_DISCOVERY);
+ lpfc_can_disctmo(phba);
+ return (0);
+}
+
/*
 * lpfc_rscn_payload_check - Test whether a DID is covered by any pending
 * RSCN payload.
 *
 * Each RSCN page entry carries an address-format qualifier (stored in
 * the reserved bits of the D_ID): 0 = single port, 1 = area, 2 = domain,
 * 3 = whole fabric.  A full-rediscovery RSCN (FC_RSCN_DISCOVERY) matches
 * everything.
 *
 * @phba: HBA context.
 * @did:  N_Port ID to test.
 *
 * Returns @did if it matches a pending RSCN entry, 0 otherwise.
 */
int
lpfc_rscn_payload_check(struct lpfc_hba * phba, uint32_t did)
{
	D_ID ns_did;
	D_ID rscn_did;
	struct lpfc_dmabuf *mp;
	uint32_t *lp;
	uint32_t payload_len, cmd, i, match;

	ns_did.un.word = did;
	match = 0;

	/* If we are doing a FULL RSCN rediscovery, match everything */
	if (phba->fc_flag & FC_RSCN_DISCOVERY) {
		return (did);
	}

	/* Scan every saved RSCN payload page for an entry covering did. */
	for (i = 0; i < phba->fc_rscn_id_cnt; i++) {
		mp = phba->fc_rscn_id_list[i];
		lp = (uint32_t *) mp->virt;
		cmd = *lp++;
		payload_len = be32_to_cpu(cmd) & 0xffff;	/* payload length */
		payload_len -= sizeof (uint32_t);	/* take off word 0 */
		while (payload_len) {
			rscn_did.un.word = *lp++;
			rscn_did.un.word = be32_to_cpu(rscn_did.un.word);
			payload_len -= sizeof (uint32_t);
			/* The address-format qualifier decides how much of
			 * the D_ID must match. */
			switch (rscn_did.un.b.resv) {
			case 0:	/* Single N_Port ID effected */
				if (ns_did.un.word == rscn_did.un.word) {
					match = did;
				}
				break;
			case 1:	/* Whole N_Port Area effected */
				if ((ns_did.un.b.domain == rscn_did.un.b.domain)
				    && (ns_did.un.b.area == rscn_did.un.b.area))
					{
					match = did;
				}
				break;
			case 2:	/* Whole N_Port Domain effected */
				if (ns_did.un.b.domain == rscn_did.un.b.domain)
					{
					match = did;
				}
				break;
			case 3:	/* Whole Fabric effected */
				match = did;
				break;
			default:
				/* Unknown Identifier in RSCN list */
				lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
					"%d:0217 Unknown Identifier in "
					"RSCN payload Data: x%x\n",
					phba->brd_no, rscn_did.un.word);
				break;
			}
			if (match) {
				break;
			}
		}
	}
	return (match);
}
+
/*
 * lpfc_rscn_recovery_check - Push every node affected by pending RSCNs
 * through the DEVICE_RECOVERY event of the discovery state machine.
 *
 * Scans all node lists (NPR list first, intentionally) and, for each
 * node whose DID matches a pending RSCN payload, flags the node as
 * discovery-in-progress, fires NLP_EVT_DEVICE_RECOVERY, and cancels any
 * pending retry-delay timer on the node.
 *
 * Always returns 0.
 */
static int
lpfc_rscn_recovery_check(struct lpfc_hba * phba)
{
	struct lpfc_nodelist *ndlp = NULL, *next_ndlp;
	struct list_head *listp;
	struct list_head *node_list[7];
	int i;

	/* Look at all nodes effected by pending RSCNs and move
	 * them to NPR list.
	 */
	node_list[0] = &phba->fc_npr_list;  /* MUST do this list first */
	node_list[1] = &phba->fc_nlpmap_list;
	node_list[2] = &phba->fc_nlpunmap_list;
	node_list[3] = &phba->fc_prli_list;
	node_list[4] = &phba->fc_reglogin_list;
	node_list[5] = &phba->fc_adisc_list;
	node_list[6] = &phba->fc_plogi_list;
	for (i = 0; i < 7; i++) {
		listp = node_list[i];
		if (list_empty(listp))
			continue;

		/* _safe variant: the state machine may move the node to
		 * another list while we iterate. */
		list_for_each_entry_safe(ndlp, next_ndlp, listp, nlp_listp) {
			if((lpfc_rscn_payload_check(phba, ndlp->nlp_DID))) {
				/* part of RSCN, process this entry */
				lpfc_set_failmask(phba, ndlp,
					LPFC_DEV_DISCOVERY_INP,
					LPFC_SET_BITMASK);

				lpfc_disc_state_machine(phba, ndlp, NULL,
					NLP_EVT_DEVICE_RECOVERY);
				if(ndlp->nlp_flag & NLP_DELAY_TMO) {
					/* Cancel any pending retry delay so
					 * recovery proceeds immediately. */
					ndlp->nlp_flag &= ~NLP_DELAY_TMO;
					del_timer_sync(&ndlp->nlp_delayfunc);
				}
			}
		}
	}
	return (0);
}
+
+static int
+lpfc_els_rcv_rscn(struct lpfc_hba * phba,
+ struct lpfc_iocbq * cmdiocb,
+ struct lpfc_nodelist * ndlp, uint8_t newnode)
+{
+ struct lpfc_dmabuf *pcmd;
+ uint32_t *lp;
+ IOCB_t *icmd;
+ uint32_t payload_len, cmd;
+
+ icmd = &cmdiocb->iocb;
+ pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+ lp = (uint32_t *) pcmd->virt;
+
+ /* The response iocb was populated by the HBA. Flush it to main store
+ * for the driver. Note that all iocb context buffers are from the
+ * driver's dma pool and have length LPFC_BPL_SIZE.
+ */
+ pci_dma_sync_single_for_device(phba->pcidev, pcmd->phys,
+ LPFC_BPL_SIZE, PCI_DMA_FROMDEVICE);
+
+ cmd = *lp++;
+ payload_len = be32_to_cpu(cmd) & 0xffff; /* payload length */
+ payload_len -= sizeof (uint32_t); /* take off word 0 */
+ cmd &= ELS_CMD_MASK;
+
+ /* RSCN received */
+ lpfc_printf_log(phba,
+ KERN_INFO,
+ LOG_DISCOVERY,
+ "%d:0214 RSCN received Data: x%x x%x x%x x%x\n",
+ phba->brd_no,
+ phba->fc_flag, payload_len, *lp, phba->fc_rscn_id_cnt);
+
+ /* if we are already processing an RSCN, save the received
+ * RSCN payload buffer, cmdiocb->context2 to process later.
+ * If we zero, cmdiocb->context2, the calling routine will
+ * not try to free it.
+ */
+ if (phba->fc_flag & FC_RSCN_MODE) {
+ if ((phba->fc_rscn_id_cnt < FC_MAX_HOLD_RSCN) &&
+ !(phba->fc_flag & FC_RSCN_DISCOVERY)) {
+ phba->fc_rscn_id_list[phba->fc_rscn_id_cnt++] = pcmd;
+ cmdiocb->context2 = NULL;
+ /* Deferred RSCN */
+ lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
+ "%d:0235 Deferred RSCN "
+ "Data: x%x x%x x%x\n",
+ phba->brd_no, phba->fc_rscn_id_cnt,
+ phba->fc_flag, phba->hba_state);
+ } else {
+ phba->fc_flag |= FC_RSCN_DISCOVERY;
+ /* ReDiscovery RSCN */
+ lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
+ "%d:0234 ReDiscovery RSCN "
+ "Data: x%x x%x x%x\n",
+ phba->brd_no, phba->fc_rscn_id_cnt,
+ phba->fc_flag, phba->hba_state);
+ }
+ /* Send back ACC */
+ lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL,
+ newnode);
+
+ /* send RECOVERY event for ALL nodes that match RSCN payload */
+ lpfc_rscn_recovery_check(phba);
+ return (0);
+ }
+
+ phba->fc_flag |= FC_RSCN_MODE;
+ phba->fc_rscn_id_list[phba->fc_rscn_id_cnt++] = pcmd;
+ /*
+ * If we zero, cmdiocb->context2, the calling routine will
+ * not try to free it.
+ */
+ cmdiocb->context2 = NULL;
+
+ lpfc_set_disctmo(phba);
+
+ /* Send back ACC */
+ lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, newnode);
+
+ /* send RECOVERY event for ALL nodes that match RSCN payload */
+ lpfc_rscn_recovery_check(phba);
+
+ return (lpfc_els_handle_rscn(phba));
+}
+
/*
 * lpfc_els_handle_rscn - Begin processing pending RSCNs.
 *
 * Posts an HBA RSCN event, restarts the discovery timer, then kicks off
 * NameServer reconciliation: if a NameServer login exists, a GID_FT CT
 * query is issued; otherwise a PLOGI to the NameServer is started
 * (allocating a new node if needed).  On any failure path the pending
 * RSCNs are flushed.
 *
 * Returns 1 when processing continues asynchronously (waiting for a
 * NameServer query or login to complete), 0 when the RSCNs were flushed.
 */
int
lpfc_els_handle_rscn(struct lpfc_hba * phba)
{
	struct lpfc_nodelist *ndlp;

	lpfc_put_event(phba, HBA_EVENT_RSCN, phba->fc_myDID,
		       (void *)(unsigned long)(phba->fc_myDID), 0, 0);

	/* Start timer for RSCN processing */
	lpfc_set_disctmo(phba);

	/* RSCN processed */
	lpfc_printf_log(phba,
			KERN_INFO,
			LOG_DISCOVERY,
			"%d:0215 RSCN processed Data: x%x x%x x%x x%x\n",
			phba->brd_no,
			phba->fc_flag, 0, phba->fc_rscn_id_cnt,
			phba->hba_state);

	/* To process RSCN, first compare RSCN data with NameServer */
	phba->fc_ns_retry = 0;
	if ((ndlp = lpfc_findnode_did(phba, NLP_SEARCH_UNMAPPED,
				      NameServer_DID))) {
		/* Good ndlp, issue CT Request to NameServer */
		if (lpfc_ns_cmd(phba, ndlp, SLI_CTNS_GID_FT) == 0) {
			/* Wait for NameServer query cmpl before we can
			   continue */
			return (1);
		}
	} else {
		/* If login to NameServer does not exist, issue one */
		/* Good status, issue PLOGI to NameServer */
		if ((ndlp =
		     lpfc_findnode_did(phba, NLP_SEARCH_ALL, NameServer_DID))) {
			/* NameServer login already in progress on another
			 * list; wait for it to complete. */
			return (1);
		}
		if ((ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_ATOMIC))
		    == 0) {
			/* Out of memory: give up and drop the RSCNs. */
			lpfc_els_flush_rscn(phba);
			return (0);
		} else {
			lpfc_nlp_init(phba, ndlp, NameServer_DID);
			ndlp->nlp_type |= NLP_FABRIC;
			ndlp->nlp_state = NLP_STE_PLOGI_ISSUE;
			lpfc_issue_els_plogi(phba, ndlp, 0);
			/* Wait for NameServer login cmpl before we can
			   continue */
			return (1);
		}
	}

	/* CT request could not be issued: drop the pending RSCNs. */
	lpfc_els_flush_rscn(phba);
	return (0);
}
+
+static int
+lpfc_els_rcv_flogi(struct lpfc_hba * phba,
+ struct lpfc_iocbq * cmdiocb,
+ struct lpfc_nodelist * ndlp, uint8_t newnode)
+{
+ struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+ uint32_t *lp = (uint32_t *) pcmd->virt;
+ IOCB_t *icmd = &cmdiocb->iocb;
+ struct serv_parm *sp;
+ LPFC_MBOXQ_t *mbox;
+ struct ls_rjt stat;
+ uint32_t cmd, did;
+
+
+ /* The response iocb was populated by the HBA. Flush it to main store
+ * for the driver. Note that all iocb context buffers are from the
+ * driver's dma pool and have length LPFC_BPL_SIZE.
+ */
+ pci_dma_sync_single_for_device(phba->pcidev, pcmd->phys,
+ LPFC_BPL_SIZE, PCI_DMA_FROMDEVICE);
+
+ cmd = *lp++;
+ sp = (struct serv_parm *) lp;
+
+ /* FLOGI received */
+
+ lpfc_set_disctmo(phba);
+
+ if (phba->fc_topology == TOPOLOGY_LOOP) {
+ /* We should never receive a FLOGI in loop mode, ignore it */
+ did = icmd->un.elsreq64.remoteID;
+
+ /* An FLOGI ELS command <elsCmd> was received from DID <did> in
+ Loop Mode */
+ lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
+ "%d:0113 An FLOGI ELS command x%x was received "
+ "from DID x%x in Loop Mode\n",
+ phba->brd_no, cmd, did);
+ return (1);
+ }
+
+ did = Fabric_DID;
+
+ if ((lpfc_check_sparm(phba, ndlp, sp, CLASS3))) {
+ /* For a FLOGI we accept, then if our portname is greater
+ * then the remote portname we initiate Nport login.
+ */
+ int rc;
+
+ rc = memcmp(&phba->fc_portname, &sp->portName,
+ sizeof (struct lpfc_name));
+
+ if (!rc) {
+ if ((mbox = mempool_alloc(phba->mbox_mem_pool,
+ GFP_ATOMIC)) == 0) {
+ return (1);
+ }
+ lpfc_linkdown(phba);
+ lpfc_init_link(phba, mbox,
+ phba->cfg_topology,
+ phba->cfg_link_speed);
+ mbox->mb.un.varInitLnk.lipsr_AL_PA = 0;
+ if (lpfc_sli_issue_mbox
+ (phba, mbox, (MBX_NOWAIT | MBX_STOP_IOCB))
+ == MBX_NOT_FINISHED) {
+ mempool_free( mbox, phba->mbox_mem_pool);
+ }
+ return (1);
+ }
+
+ else if (rc > 0) { /* greater than */
+ phba->fc_flag |= FC_PT2PT_PLOGI;
+ }
+ phba->fc_flag |= FC_PT2PT;
+ phba->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
+ } else {
+ /* Reject this request because invalid parameters */
+ stat.un.b.lsRjtRsvd0 = 0;
+ stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
+ stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS;
+ stat.un.b.vendorUnique = 0;
+ lpfc_els_rsp_reject(phba, stat.un.lsRjtError, cmdiocb, ndlp);
+ return (1);
+ }
+
+ /* Send back ACC */
+ lpfc_els_rsp_acc(phba, ELS_CMD_PLOGI, cmdiocb, ndlp, NULL, newnode);
+
+ return (0);
+}
+
+static int
+lpfc_els_rcv_rnid(struct lpfc_hba * phba,
+ struct lpfc_iocbq * cmdiocb, struct lpfc_nodelist * ndlp)
+{
+ struct lpfc_dmabuf *pcmd;
+ uint32_t *lp;
+ IOCB_t *icmd;
+ RNID *rn;
+ struct ls_rjt stat;
+ uint32_t cmd, did;
+
+ icmd = &cmdiocb->iocb;
+ did = icmd->un.elsreq64.remoteID;
+ pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+ lp = (uint32_t *) pcmd->virt;
+
+ /* The response iocb was populated by the HBA. Flush it to main store
+ * for the driver. Note that all iocb context buffers are from the
+ * driver's dma pool and have length LPFC_BPL_SIZE.
+ */
+ pci_dma_sync_single_for_device(phba->pcidev, pcmd->phys,
+ LPFC_BPL_SIZE, PCI_DMA_FROMDEVICE);
+
+ cmd = *lp++;
+ rn = (RNID *) lp;
+
+ /* RNID received */
+
+ switch (rn->Format) {
+ case 0:
+ case RNID_TOPOLOGY_DISC:
+ /* Send back ACC */
+ lpfc_els_rsp_rnid_acc(phba, rn->Format, cmdiocb, ndlp);
+ break;
+ default:
+ /* Reject this request because format not supported */
+ stat.un.b.lsRjtRsvd0 = 0;
+ stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
+ stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
+ stat.un.b.vendorUnique = 0;
+ lpfc_els_rsp_reject(phba, stat.un.lsRjtError, cmdiocb, ndlp);
+ }
+ return (0);
+}
+
/*
 * lpfc_els_rcv_rrq - Handle a received RRQ (Reinstate Recovery
 * Qualifier).
 *
 * Extracts the originator/responder exchange IDs from the payload and
 * aborts the matching exchange on the FCP ring: the OXID if the RRQ was
 * originated by us (SID equals our DID), otherwise the RXID.  The RRQ
 * is then ACCepted.
 *
 * Always returns 0.
 */
static int
lpfc_els_rcv_rrq(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
		 struct lpfc_nodelist * ndlp)
{
	struct lpfc_dmabuf *pcmd;
	uint32_t *lp;
	IOCB_t *icmd;
	struct lpfc_sli_ring *pring;
	struct lpfc_sli *psli;
	RRQ *rrq;
	uint32_t cmd, did;

	psli = &phba->sli;
	pring = &psli->ring[LPFC_FCP_RING];
	icmd = &cmdiocb->iocb;
	did = icmd->un.elsreq64.remoteID;
	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
	lp = (uint32_t *) pcmd->virt;

	/* The payload buffer was populated by the HBA.  Sync it for CPU
	 * access before reading it.  All iocb context buffers come from
	 * the driver's DMA pool and have length LPFC_BPL_SIZE.
	 */
	pci_dma_sync_single_for_cpu(phba->pcidev, pcmd->phys,
			LPFC_BPL_SIZE, PCI_DMA_FROMDEVICE);

	cmd = *lp++;
	rrq = (RRQ *) lp;

	/* RRQ received */
	/* Get oxid / rxid from payload and abort it */
	if ((rrq->SID == be32_to_cpu(phba->fc_myDID))) {
		lpfc_sli_abort_iocb_ctx(phba, pring, rrq->Oxid);
	} else {
		lpfc_sli_abort_iocb_ctx(phba, pring, rrq->Rxid);
	}
	/* ACCEPT the rrq request */
	lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0);

	return 0;
}
+
/*
 * lpfc_els_rcv_farp - Handle a received FARP request (Fibre Channel
 * Address Resolution, used for IP over FC).
 *
 * Only WWPN/WWNN match modes are supported.  If the request matches our
 * port or node name and the node is in a logged-in state, the requested
 * follow-up actions are taken: a PLOGI back to the originator and/or a
 * FARP response, per the request's Rflags.
 *
 * Always returns 0 (unsupported match modes are silently ignored).
 */
static int
lpfc_els_rcv_farp(struct lpfc_hba * phba,
		  struct lpfc_iocbq * cmdiocb, struct lpfc_nodelist * ndlp)
{
	struct lpfc_dmabuf *pcmd;
	uint32_t *lp;
	IOCB_t *icmd;
	FARP *fp;
	uint32_t cmd, cnt, did;

	icmd = &cmdiocb->iocb;
	did = icmd->un.elsreq64.remoteID;
	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
	lp = (uint32_t *) pcmd->virt;

	/* The payload buffer was populated by the HBA.  Sync it for CPU
	 * access before reading it.  All iocb context buffers come from
	 * the driver's DMA pool and have length LPFC_BPL_SIZE.
	 */
	pci_dma_sync_single_for_cpu(phba->pcidev, pcmd->phys,
			LPFC_BPL_SIZE, PCI_DMA_FROMDEVICE);

	cmd = *lp++;
	fp = (FARP *) lp;

	/* FARP-REQ received from DID <did> */
	lpfc_printf_log(phba,
			 KERN_INFO,
			 LOG_IP,
			 "%d:0601 FARP-REQ received from DID x%x\n",
			 phba->brd_no, did);

	/* We will only support match on WWPN or WWNN */
	if (fp->Mflags & ~(FARP_MATCH_NODE | FARP_MATCH_PORT)) {
		return (0);
	}

	cnt = 0;
	/* If this FARP command is searching for my portname */
	if (fp->Mflags & FARP_MATCH_PORT) {
		if (memcmp(&fp->RportName, &phba->fc_portname,
			   sizeof (struct lpfc_name)) == 0)
			cnt = 1;
	}

	/* If this FARP command is searching for my nodename */
	if (fp->Mflags & FARP_MATCH_NODE) {
		if (memcmp(&fp->RnodeName, &phba->fc_nodename,
			   sizeof (struct lpfc_name)) == 0)
			cnt = 1;
	}

	if (cnt) {
		if((ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) ||
		   (ndlp->nlp_state == NLP_STE_MAPPED_NODE)) {
			/* Log back into the node before sending the FARP. */
			if (fp->Rflags & FARP_REQUEST_PLOGI) {
				ndlp->nlp_state = NLP_STE_PLOGI_ISSUE;
				lpfc_nlp_list(phba, ndlp, NLP_PLOGI_LIST);
				lpfc_issue_els_plogi(phba, ndlp, 0);
			}

			/* Send a FARP response to that node */
			if (fp->Rflags & FARP_REQUEST_FARPR) {
				lpfc_issue_els_farpr(phba, did, 0);
			}
		}
	}
	return (0);
}
+
/*
 * lpfc_els_rcv_farpr - Handle a received FARP response.
 *
 * Logs the event and ACCepts it; no address-resolution state is updated
 * here.
 *
 * Always returns 0.
 */
static int
lpfc_els_rcv_farpr(struct lpfc_hba * phba,
		   struct lpfc_iocbq * cmdiocb, struct lpfc_nodelist * ndlp)
{
	struct lpfc_dmabuf *pcmd;
	uint32_t *lp;
	IOCB_t *icmd;
	uint32_t cmd, did;

	icmd = &cmdiocb->iocb;
	did = icmd->un.elsreq64.remoteID;
	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
	lp = (uint32_t *) pcmd->virt;

	/* The payload buffer was populated by the HBA.  Sync it for CPU
	 * access before reading it.  All iocb context buffers come from
	 * the driver's DMA pool and have length LPFC_BPL_SIZE.
	 */
	pci_dma_sync_single_for_cpu(phba->pcidev, pcmd->phys,
			LPFC_BPL_SIZE, PCI_DMA_FROMDEVICE);

	cmd = *lp++;
	/* FARP-RSP received from DID <did> */
	lpfc_printf_log(phba,
			 KERN_INFO,
			 LOG_IP,
			 "%d:0600 FARP-RSP received from DID x%x\n",
			 phba->brd_no, did);

	/* ACCEPT the Farp resp request */
	lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0);

	return 0;
}
+
/*
 * lpfc_els_rcv_fan - Handle a received FAN (Fabric Address
 * Notification).
 *
 * The FAN is ACCepted.  If we are waiting in LPFC_LOCAL_CFG_LINK state,
 * the fabric port/node names in the payload are compared against the
 * saved fabric parameters: if they differ we have switched fabrics and
 * must re-FLOGI (handled later by timeout), otherwise discovery is
 * started immediately.
 *
 * Always returns 0.
 */
static int
lpfc_els_rcv_fan(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
		 struct lpfc_nodelist * ndlp)
{
	struct lpfc_dmabuf *pcmd;
	uint32_t *lp;
	IOCB_t *icmd;
	FAN *fp;
	uint32_t cmd, did;

	icmd = &cmdiocb->iocb;
	did = icmd->un.elsreq64.remoteID;
	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
	lp = (uint32_t *) pcmd->virt;

	/* The payload buffer was populated by the HBA.  Sync it for CPU
	 * access before reading it.  All iocb context buffers come from
	 * the driver's DMA pool and have length LPFC_BPL_SIZE.
	 */
	pci_dma_sync_single_for_cpu(phba->pcidev, pcmd->phys,
			LPFC_BPL_SIZE, PCI_DMA_FROMDEVICE);

	cmd = *lp++;
	fp = (FAN *) lp;

	/* FAN received */

	/* ACCEPT the FAN request */
	lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0);

	if (phba->hba_state == LPFC_LOCAL_CFG_LINK) {
		/* The discovery state machine needs to take a different
		 * action if this node has switched fabrics
		 */
		if ((memcmp(&fp->FportName, &phba->fc_fabparam.portName,
			    sizeof (struct lpfc_name)) != 0)
		    ||
		    (memcmp(&fp->FnodeName, &phba->fc_fabparam.nodeName,
			    sizeof (struct lpfc_name)) != 0)) {
			/* This node has switched fabrics.  An FLOGI is required
			 * after the timeout
			 */
			return (0);
		}

		/* Start discovery */
		lpfc_disc_start(phba);
	}

	return (0);
}
+
+void
+lpfc_els_timeout_handler(unsigned long ptr)
+{
+ struct lpfc_hba *phba;
+ struct lpfc_sli *psli;
+ struct lpfc_sli_ring *pring;
+ struct lpfc_iocbq *tmp_iocb, *piocb;
+ IOCB_t *cmd = NULL;
+ struct lpfc_dmabuf *pcmd;
+ struct list_head *dlp;
+ uint32_t *elscmd;
+ uint32_t els_command;
+ uint32_t timeout;
+ uint32_t remote_ID;
+ unsigned long iflag;
+
+ phba = (struct lpfc_hba *)ptr;
+ if(phba == 0)
+ return;
+ spin_lock_irqsave(phba->host->host_lock, iflag);
+ timeout = (uint32_t)(phba->fc_ratov << 1);
+
+ psli = &phba->sli;
+ pring = &psli->ring[LPFC_ELS_RING];
+ dlp = &pring->txcmplq;
+
+ list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
+ cmd = &piocb->iocb;
+
+ if (piocb->iocb_flag & LPFC_IO_LIBDFC) {
+ continue;
+ }
+ pcmd = (struct lpfc_dmabuf *) piocb->context2;
+ elscmd = (uint32_t *) (pcmd->virt);
+ els_command = *elscmd;
+
+ if ((els_command == ELS_CMD_FARP)
+ || (els_command == ELS_CMD_FARPR)) {
+ continue;
+ }
+
+ if (piocb->drvrTimeout > 0) {
+ if (piocb->drvrTimeout >= timeout) {
+ piocb->drvrTimeout -= timeout;
+ } else {
+ piocb->drvrTimeout = 0;
+ }
+ continue;
+ }
+
+ list_del(&piocb->list);
+ pring->txcmplq_cnt--;
+
+ if (cmd->ulpCommand == CMD_GEN_REQUEST64_CR) {
+ struct lpfc_nodelist *ndlp;
+
+ ndlp = lpfc_findnode_rpi(phba, cmd->ulpContext);
+ remote_ID = ndlp->nlp_DID;
+ if (cmd->un.elsreq64.bdl.ulpIoTag32) {
+ lpfc_sli_issue_abort_iotag32(phba,
+ pring, piocb);
+ }
+ } else {
+ remote_ID = cmd->un.elsreq64.remoteID;
+ }
+
+ lpfc_printf_log(phba,
+ KERN_ERR,
+ LOG_ELS,
+ "%d:0127 ELS timeout Data: x%x x%x x%x x%x\n",
+ phba->brd_no, els_command,
+ remote_ID, cmd->ulpCommand, cmd->ulpIoTag);
+
+ /*
+ * The iocb has timed out; abort it.
+ */
+ if (piocb->iocb_cmpl) {
+ cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
+ cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
+ (piocb->iocb_cmpl) (phba, piocb, piocb);
+ } else {
+ mempool_free(piocb, phba->iocb_mem_pool);
+ }
+ }
+
+ phba->els_tmofunc.expires = jiffies + HZ * timeout;
+ add_timer(&phba->els_tmofunc);
+ spin_unlock_irqrestore(phba->host->host_lock, iflag);
+}
+
+void
+lpfc_els_flush_cmd(struct lpfc_hba * phba)
+{
+ struct lpfc_sli *psli;
+ struct lpfc_sli_ring *pring;
+ struct lpfc_iocbq *tmp_iocb, *piocb;
+ IOCB_t *cmd = NULL;
+ struct lpfc_dmabuf *pcmd;
+ uint32_t *elscmd;
+ uint32_t els_command;
+ uint32_t remote_ID;
+
+ psli = &phba->sli;
+ pring = &psli->ring[LPFC_ELS_RING];
+
+ list_for_each_entry_safe(piocb, tmp_iocb, &pring->txq, list) {
+ cmd = &piocb->iocb;
+
+ if (piocb->iocb_flag & LPFC_IO_LIBDFC) {
+ continue;
+ }
+
+ /* Do not flush out the QUE_RING and ABORT/CLOSE iocbs */
+ if ((cmd->ulpCommand == CMD_QUE_RING_BUF_CN) ||
+ (cmd->ulpCommand == CMD_QUE_RING_BUF64_CN) ||
+ (cmd->ulpCommand == CMD_CLOSE_XRI_CN) ||
+ (cmd->ulpCommand == CMD_ABORT_XRI_CN)) {
+ continue;
+ }
+
+ pcmd = (struct lpfc_dmabuf *) piocb->context2;
+ elscmd = (uint32_t *) (pcmd->virt);
+ els_command = *elscmd;
+
+ if (cmd->ulpCommand == CMD_GEN_REQUEST64_CR) {
+ struct lpfc_nodelist *ndlp;
+
+ ndlp = lpfc_findnode_rpi(phba, cmd->ulpContext);
+ remote_ID = ndlp->nlp_DID;
+ if (phba->hba_state == LPFC_HBA_READY) {
+ continue;
+ }
+ } else {
+ remote_ID = cmd->un.elsreq64.remoteID;
+ }
+
+ list_del(&piocb->list);
+ pring->txcmplq_cnt--;
+
+ cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
+ cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
+
+ if (piocb->iocb_cmpl) {
+ (piocb->iocb_cmpl) (phba, piocb, piocb);
+ } else {
+ mempool_free( piocb, phba->iocb_mem_pool);
+ }
+ }
+
+ list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
+ cmd = &piocb->iocb;
+
+ if (piocb->iocb_flag & LPFC_IO_LIBDFC) {
+ continue;
+ }
+ pcmd = (struct lpfc_dmabuf *) piocb->context2;
+ elscmd = (uint32_t *) (pcmd->virt);
+ els_command = *elscmd;
+
+ if (cmd->ulpCommand == CMD_GEN_REQUEST64_CR) {
+ struct lpfc_nodelist *ndlp;
+
+ ndlp = lpfc_findnode_rpi(phba, cmd->ulpContext);
+ remote_ID = ndlp->nlp_DID;
+ if (phba->hba_state == LPFC_HBA_READY) {
+ continue;
+ }
+ } else {
+ remote_ID = cmd->un.elsreq64.remoteID;
+ }
+
+ list_del(&piocb->list);
+ pring->txcmplq_cnt--;
+
+ cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
+ cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
+
+ if (piocb->iocb_cmpl) {
+ (piocb->iocb_cmpl) (phba, piocb, piocb);
+ } else {
+ mempool_free( piocb, phba->iocb_mem_pool);
+ }
+ }
+ return;
+}
+
+void
+lpfc_els_unsol_event(struct lpfc_hba * phba,
+ struct lpfc_sli_ring * pring, struct lpfc_iocbq * elsiocb)
+{
+ struct lpfc_sli *psli;
+ struct lpfc_nodelist *ndlp;
+ struct lpfc_dmabuf *mp;
+ uint32_t *lp;
+ IOCB_t *icmd;
+ struct ls_rjt stat;
+ uint32_t cmd;
+ uint32_t did;
+ uint32_t newnode;
+ uint32_t drop_cmd = 0; /* by default do NOT drop received cmd */
+ uint32_t rjt_err = 0;
+
+ psli = &phba->sli;
+ icmd = &elsiocb->iocb;
+
+ /* type of ELS cmd is first 32bit word in packet */
+ mp = lpfc_sli_ringpostbuf_get(phba, pring, getPaddr(icmd->un.
+ cont64[0].
+ addrHigh,
+ icmd->un.
+ cont64[0].addrLow));
+ if (mp == 0) {
+ drop_cmd = 1;
+ goto dropit;
+ }
+
+ newnode = 0;
+ lp = (uint32_t *) mp->virt;
+ cmd = *lp++;
+ lpfc_post_buffer(phba, &psli->ring[LPFC_ELS_RING], 1, 1);
+
+ if (icmd->ulpStatus) {
+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ kfree(mp);
+ drop_cmd = 1;
+ goto dropit;
+ }
+
+ /* Check to see if link went down during discovery */
+ if (lpfc_els_chk_latt(phba)) {
+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ kfree(mp);
+ drop_cmd = 1;
+ goto dropit;
+ }
+
+ did = icmd->un.rcvels.remoteID;
+ if ((ndlp = lpfc_findnode_did(phba, NLP_SEARCH_ALL, did)) == 0) {
+ /* Cannot find existing Fabric ndlp, so allocate a new one */
+ if ((ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_ATOMIC))
+ == 0) {
+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ kfree(mp);
+ drop_cmd = 1;
+ goto dropit;
+ }
+
+ lpfc_nlp_init(phba, ndlp, did);
+ newnode = 1;
+ if ((did & Fabric_DID_MASK) == Fabric_DID_MASK) {
+ ndlp->nlp_type |= NLP_FABRIC;
+ }
+ }
+
+ phba->fc_stat.elsRcvFrame++;
+ elsiocb->context1 = ndlp;
+ elsiocb->context2 = mp;
+
+ if ((cmd & ELS_CMD_MASK) == ELS_CMD_RSCN) {
+ cmd &= ELS_CMD_MASK;
+ }
+ /* ELS command <elsCmd> received from NPORT <did> */
+ lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
+ "%d:0112 ELS command x%x received from NPORT x%x "
+ "Data: x%x\n", phba->brd_no, cmd, did, phba->hba_state);
+
+ switch (cmd) {
+ case ELS_CMD_PLOGI:
+ phba->fc_stat.elsRcvPLOGI++;
+ if(phba->hba_state < LPFC_DISC_AUTH) {
+ rjt_err = LSEXP_NOTHING_MORE;
+ break;
+ }
+ lpfc_disc_state_machine(phba, ndlp, elsiocb, NLP_EVT_RCV_PLOGI);
+ break;
+ case ELS_CMD_FLOGI:
+ phba->fc_stat.elsRcvFLOGI++;
+ lpfc_els_rcv_flogi(phba, elsiocb, ndlp, newnode);
+ if (newnode) {
+ mempool_free( ndlp, phba->nlp_mem_pool);
+ }
+ break;
+ case ELS_CMD_LOGO:
+ phba->fc_stat.elsRcvLOGO++;
+ if(phba->hba_state < LPFC_DISC_AUTH) {
+ rjt_err = LSEXP_NOTHING_MORE;
+ break;
+ }
+ lpfc_disc_state_machine(phba, ndlp, elsiocb, NLP_EVT_RCV_LOGO);
+ break;
+ case ELS_CMD_PRLO:
+ phba->fc_stat.elsRcvPRLO++;
+ if(phba->hba_state < LPFC_DISC_AUTH) {
+ rjt_err = LSEXP_NOTHING_MORE;
+ break;
+ }
+ lpfc_disc_state_machine(phba, ndlp, elsiocb, NLP_EVT_RCV_PRLO);
+ break;
+ case ELS_CMD_RSCN:
+ phba->fc_stat.elsRcvRSCN++;
+ lpfc_els_rcv_rscn(phba, elsiocb, ndlp, newnode);
+ if (newnode) {
+ mempool_free( ndlp, phba->nlp_mem_pool);
+ }
+ break;
+ case ELS_CMD_ADISC:
+ phba->fc_stat.elsRcvADISC++;
+ if(phba->hba_state < LPFC_DISC_AUTH) {
+ rjt_err = LSEXP_NOTHING_MORE;
+ break;
+ }
+ lpfc_disc_state_machine(phba, ndlp, elsiocb, NLP_EVT_RCV_ADISC);
+ break;
+ case ELS_CMD_PDISC:
+ phba->fc_stat.elsRcvPDISC++;
+ if(phba->hba_state < LPFC_DISC_AUTH) {
+ rjt_err = LSEXP_NOTHING_MORE;
+ break;
+ }
+ lpfc_disc_state_machine(phba, ndlp, elsiocb, NLP_EVT_RCV_PDISC);
+ break;
+ case ELS_CMD_FARPR:
+ phba->fc_stat.elsRcvFARPR++;
+ lpfc_els_rcv_farpr(phba, elsiocb, ndlp);
+ break;
+ case ELS_CMD_FARP:
+ phba->fc_stat.elsRcvFARP++;
+ lpfc_els_rcv_farp(phba, elsiocb, ndlp);
+ break;
+ case ELS_CMD_FAN:
+ phba->fc_stat.elsRcvFAN++;
+ lpfc_els_rcv_fan(phba, elsiocb, ndlp);
+ break;
+ case ELS_CMD_RRQ:
+ phba->fc_stat.elsRcvRRQ++;
+ lpfc_els_rcv_rrq(phba, elsiocb, ndlp);
+ break;
+ case ELS_CMD_PRLI:
+ phba->fc_stat.elsRcvPRLI++;
+ if(phba->hba_state < LPFC_DISC_AUTH) {
+ rjt_err = LSEXP_NOTHING_MORE;
+ break;
+ }
+ lpfc_disc_state_machine(phba, ndlp, elsiocb, NLP_EVT_RCV_PRLI);
+ break;
+ case ELS_CMD_RNID:
+ phba->fc_stat.elsRcvRNID++;
+ lpfc_els_rcv_rnid(phba, elsiocb, ndlp);
+ break;
+ default:
+ /* Unsupported ELS command, reject */
+ rjt_err = LSEXP_NOTHING_MORE;
+
+ /* Unknown ELS command <elsCmd> received from NPORT <did> */
+ lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
+ "%d:0115 Unknown ELS command x%x received from "
+ "NPORT x%x\n", phba->brd_no, cmd, did);
+ if (newnode) {
+ mempool_free( ndlp, phba->nlp_mem_pool);
+ }
+ break;
+ }
+
+ /* check if need to LS_RJT received ELS cmd */
+ if (rjt_err) {
+ stat.un.b.lsRjtRsvd0 = 0;
+ stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
+ stat.un.b.lsRjtRsnCodeExp = rjt_err;
+ stat.un.b.vendorUnique = 0;
+ lpfc_els_rsp_reject(phba, stat.un.lsRjtError, elsiocb, ndlp);
+ }
+
+ if (elsiocb->context2) {
+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ kfree(mp);
+ }
+dropit:
+ /* check if need to drop received ELS cmd */
+ if (drop_cmd == 1) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
+ "%d:0111 Dropping received ELS cmd "
+ "Data: x%x x%x\n", phba->brd_no,
+ icmd->ulpStatus, icmd->un.ulpWord[4]);
+ phba->fc_stat.elsRcvDrop++;
+ }
+ return;
+}
--- /dev/null
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for *
+ * Enterprise Fibre Channel Host Bus Adapters. *
+ * Refer to the README file included with this package for *
+ * driver version and adapter support. *
+ * Copyright (C) 2004 Emulex Corporation. *
+ * www.emulex.com *
+ * *
+ * This program is free software; you can redistribute it and/or *
+ * modify it under the terms of the GNU General Public License *
+ * as published by the Free Software Foundation; either version 2 *
+ * of the License, or (at your option) any later version. *
+ * *
+ * This program is distributed in the hope that it will be useful, *
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of *
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
+ * GNU General Public License for more details, a copy of which *
+ * can be found in the file COPYING included with this package. *
+ *******************************************************************/
+
+/*
+ * $Id: lpfc_hbadisc.c 1.199 2004/11/18 20:19:30EST sf_support Exp $
+ */
+
+#include <linux/version.h>
+#include <linux/blkdev.h>
+#include <linux/dma-mapping.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/kernel.h>
+#include <linux/smp_lock.h>
+
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+
+#include <scsi/scsi_transport_fc.h>
+
+#include "lpfc_sli.h"
+#include "lpfc_disc.h"
+#include "lpfc_scsi.h"
+#include "lpfc.h"
+#include "lpfc_crtn.h"
+#include "lpfc_fcp.h"
+#include "lpfc_hw.h"
+#include "lpfc_logmsg.h"
+#include "lpfc_mem.h"
+
+/* AlpaArray for assignment of scsid for scan-down and bind_method */
+/*
+ * Table of the 126 valid arbitrated-loop physical addresses (AL_PAs),
+ * apparently listed in loop priority order (highest priority first).
+ * NOTE(review): presumably matches the FC-AL AL_PA priority table --
+ * confirm against the FC-AL specification before relying on ordering.
+ */
+uint8_t lpfcAlpaArray[] = {
+ 0xEF, 0xE8, 0xE4, 0xE2, 0xE1, 0xE0, 0xDC, 0xDA, 0xD9, 0xD6,
+ 0xD5, 0xD4, 0xD3, 0xD2, 0xD1, 0xCE, 0xCD, 0xCC, 0xCB, 0xCA,
+ 0xC9, 0xC7, 0xC6, 0xC5, 0xC3, 0xBC, 0xBA, 0xB9, 0xB6, 0xB5,
+ 0xB4, 0xB3, 0xB2, 0xB1, 0xAE, 0xAD, 0xAC, 0xAB, 0xAA, 0xA9,
+ 0xA7, 0xA6, 0xA5, 0xA3, 0x9F, 0x9E, 0x9D, 0x9B, 0x98, 0x97,
+ 0x90, 0x8F, 0x88, 0x84, 0x82, 0x81, 0x80, 0x7C, 0x7A, 0x79,
+ 0x76, 0x75, 0x74, 0x73, 0x72, 0x71, 0x6E, 0x6D, 0x6C, 0x6B,
+ 0x6A, 0x69, 0x67, 0x66, 0x65, 0x63, 0x5C, 0x5A, 0x59, 0x56,
+ 0x55, 0x54, 0x53, 0x52, 0x51, 0x4E, 0x4D, 0x4C, 0x4B, 0x4A,
+ 0x49, 0x47, 0x46, 0x45, 0x43, 0x3C, 0x3A, 0x39, 0x36, 0x35,
+ 0x34, 0x33, 0x32, 0x31, 0x2E, 0x2D, 0x2C, 0x2B, 0x2A, 0x29,
+ 0x27, 0x26, 0x25, 0x23, 0x1F, 0x1E, 0x1D, 0x1B, 0x18, 0x17,
+ 0x10, 0x0F, 0x08, 0x04, 0x02, 0x01
+};
+
+/*
+ * Return a completed IOCB and every response IOCB chained onto it to
+ * the driver's IOCB mempool.
+ */
+static void
+lpfc_evt_iocb_free(struct lpfc_hba * phba, struct lpfc_iocbq * saveq)
+{
+ struct lpfc_iocbq *piocb, *next_piocb;
+
+ /* Walk the chain with the _safe iterator since each entry is
+  * unlinked and freed as we go.
+  */
+ list_for_each_entry_safe(piocb, next_piocb, &saveq->list, list) {
+  list_del(&piocb->list);
+  mempool_free(piocb, phba->iocb_mem_pool);
+ }
+
+ /* Finally release the head IOCB itself */
+ mempool_free(saveq, phba->iocb_mem_pool);
+}
+
+/*
+ * Handle expiry of a node's "nodev" timer: log it (for non-fabric
+ * nodes), abort all queued I/O on any SCSI target bound to the node,
+ * and push the node into the discovery state machine for removal.
+ */
+void
+lpfc_process_nodev_timeout(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
+{
+ struct lpfc_target *targetp;
+ int tgt_id;
+
+ if (!(ndlp->nlp_type & NLP_FABRIC)) {
+  /* Nodev timeout on NPort <nlp_DID> */
+  lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
+    "%d:0203 Nodev timeout on NPort x%x "
+    "Data: x%x x%x x%x\n",
+    phba->brd_no, ndlp->nlp_DID, ndlp->nlp_flag,
+    ndlp->nlp_state, ndlp->nlp_rpi);
+ }
+
+ ndlp->nlp_flag &= ~NLP_NODEV_TMO;
+
+ /* Flush outstanding I/O for every SCSI ID whose lpfc_target is
+  * bound to this node.
+  */
+ for (tgt_id = 0; tgt_id < MAX_FCP_TARGET; tgt_id++) {
+  targetp = phba->device_queue_hash[tgt_id];
+  if (targetp && targetp->pnode == ndlp) {
+   lpfc_sli_abort_iocb_tgt(phba,
+    &phba->sli.ring[phba->sli.fcp_ring],
+    tgt_id, LPFC_ABORT_ALLQ);
+  }
+ }
+
+ /* Let the discovery state machine remove the node */
+ lpfc_disc_state_machine(phba, ndlp, NULL, NLP_EVT_DEVICE_RM);
+ return;
+}
+
+/*
+ * Drain the HBA's deferred discovery event queue (phba->dpc_disc).
+ * Called from lpfc_do_dpc() with the host lock held; the SCAN case
+ * temporarily drops that lock around the midlayer scan call.  The
+ * queue is spliced onto a local list first so handlers may safely
+ * post new events while we iterate.
+ */
+static void
+lpfc_disc_done(struct lpfc_hba * phba)
+{
+ struct lpfc_sli *psli = &phba->sli;
+ LPFC_DISC_EVT_t *evtp, *next_evtp;
+ LPFC_MBOXQ_t *pmb;
+ struct lpfc_iocbq *cmdiocbp, *saveq;
+ struct lpfc_nodelist *ndlp;
+ LPFC_RING_MASK_t *func;
+ struct Scsi_Host *shost;
+ LIST_HEAD(local_dpc_disc);
+
+ list_splice_init(&phba->dpc_disc, &local_dpc_disc);
+
+ /* check discovery event list */
+ list_for_each_entry_safe(evtp, next_evtp, &local_dpc_disc, evt_listp) {
+  list_del(&evtp->evt_listp);
+
+  switch(evtp->evt) {
+  case LPFC_EVT_MBOX:
+   /* Deferred mailbox completion: invoke its handler */
+   pmb = (LPFC_MBOXQ_t *)(evtp->evt_arg1);
+   (pmb->mbox_cmpl) (phba, pmb);
+   break;
+  case LPFC_EVT_SOL_IOCB:
+   /* Deferred solicited IOCB completion; the response
+    * chain is freed after the completion runs.
+    */
+   cmdiocbp = (struct lpfc_iocbq *)(evtp->evt_arg1);
+   saveq = (struct lpfc_iocbq *)(evtp->evt_arg2);
+   (cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
+   lpfc_evt_iocb_free(phba, saveq);
+   break;
+  case LPFC_EVT_UNSOL_IOCB:
+   /* Deferred unsolicited event on the ELS ring */
+   func = (LPFC_RING_MASK_t *)(evtp->evt_arg1);
+   saveq = (struct lpfc_iocbq *)(evtp->evt_arg2);
+   (func->lpfc_sli_rcv_unsol_event) (phba,
+    &psli->ring[LPFC_ELS_RING], saveq);
+   lpfc_evt_iocb_free(phba, saveq);
+   break;
+  case LPFC_EVT_NODEV_TMO:
+   ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1);
+   lpfc_process_nodev_timeout(phba, ndlp);
+   break;
+  case LPFC_EVT_SCAN:
+   /* SCSI HOTPLUG supported */
+   shost = phba->host;
+#ifdef USE_SCAN_TARGET
+   {
+    struct lpfc_target *targetp;
+
+    targetp = (struct lpfc_target *)(evtp->evt_arg1);
+    lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY | LOG_FCP,
+      "%d:0251 Rescanning scsi target %d\n",
+      phba->brd_no, targetp->scsi_id);
+
+    /* Only rescan if the target is still the one
+     * hashed for its SCSI ID; drop the host lock
+     * around the midlayer call.
+     */
+    if(targetp ==
+       phba->device_queue_hash[targetp->scsi_id]) {
+     spin_unlock_irq(phba->host->host_lock);
+     scsi_scan_single_target(shost, 0,
+        targetp->scsi_id);
+     spin_lock_irq(phba->host->host_lock);
+    }
+   }
+#else
+   lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY | LOG_FCP,
+     "%d:0251 Rescanning scsi host\n", phba->brd_no);
+   /* Drop the host lock around the midlayer scan */
+   spin_unlock_irq(shost->host_lock);
+   scsi_scan_host(shost);
+   spin_lock_irq(shost->host_lock);
+#endif
+   break;
+  }
+  kfree(evtp);
+ }
+}
+
+/*
+ * Discovery "DPC" kernel thread body.  Daemonizes, publishes its
+ * wakeup semaphore in phba->dpc_wait (upped by
+ * lpfc_discq_post_event()), then loops draining the discovery event
+ * queue under the host lock.  Exits when the semaphore wait is
+ * interrupted, a signal (SIGHUP allowed) is pending, or
+ * phba->dpc_kill is set; completes phba->dpc_exiting on the way out.
+ */
+int
+lpfc_do_dpc(void *p)
+{
+ unsigned long flags;
+ DECLARE_MUTEX_LOCKED(sem);
+ struct lpfc_hba *phba = (struct lpfc_hba *)p;
+
+ lock_kernel();
+
+ daemonize("lpfc_dpc_%d", phba->brd_no);
+ allow_signal(SIGHUP);
+
+ /* Expose the wakeup semaphore before signalling startup */
+ phba->dpc_wait = &sem;
+ set_user_nice(current, -20);
+
+ unlock_kernel();
+
+ /* Let the creator know the thread is up */
+ complete(&phba->dpc_startup);
+
+ while (1) {
+  if (down_interruptible(&sem))
+   break;
+
+  if (signal_pending(current))
+   break;
+
+  if (phba->dpc_kill)
+   break;
+
+  /* Process queued discovery events under the host lock */
+  spin_lock_irqsave(phba->host->host_lock, flags);
+  lpfc_disc_done(phba);
+  spin_unlock_irqrestore(phba->host->host_lock, flags);
+ }
+
+ /* Zero out semaphore we were waiting on. */
+ phba->dpc_wait = NULL;
+ complete_and_exit(&phba->dpc_exiting, 0);
+ return(0);
+}
+
+/*
+ * This is only called to handle FC discovery events. Since this a rare
+ * occurance, we allocate an LPFC_DISC_EVT_t structure here instead of
+ * embedding it in the IOCB.
+ */
+/*
+ * Queue a discovery event for the DPC thread and wake it.
+ *
+ * Returns 1 on success, 0 if the event node could not be allocated
+ * (GFP_ATOMIC -- may be called from interrupt context).
+ */
+int
+lpfc_discq_post_event(struct lpfc_hba * phba, void *arg1, void *arg2,
+        uint32_t evt)
+{
+ LPFC_DISC_EVT_t *evtp;
+
+ /* All Mailbox completions and LPFC_ELS_RING rcv ring IOCB events
+  * will be queued to DPC for processing
+  */
+ evtp = (LPFC_DISC_EVT_t *) kmalloc(sizeof(LPFC_DISC_EVT_t), GFP_ATOMIC);
+ if (!evtp)
+  return 0;
+
+ evtp->evt = evt;
+ evtp->evt_arg1 = arg1;
+ evtp->evt_arg2 = arg2;
+ evtp->evt_listp.prev = NULL;
+ evtp->evt_listp.next = NULL;
+
+ /* Append to the pending queue, then kick the DPC thread if it
+  * has published its wakeup semaphore.
+  */
+ list_add_tail(&evtp->evt_listp, &phba->dpc_disc);
+ if (phba->dpc_wait)
+  up(phba->dpc_wait);
+
+ return 1;
+}
+
+/*
+ * Transition the HBA to LPFC_LINK_DOWN: unregister firmware default
+ * RPIs, flush RSCN and ELS activity, then deliver a link-down event
+ * to every node list.  Fabric nodes (other than the well-known
+ * Fabric_DID) are removed outright; all others go through the
+ * discovery state machine with NLP_EVT_DEVICE_RECOVERY.
+ * Always returns 0.
+ */
+int
+lpfc_linkdown(struct lpfc_hba * phba)
+{
+ struct lpfc_sli *psli;
+ struct lpfc_nodelist *ndlp, *next_ndlp;
+ struct list_head *listp;
+ struct list_head *node_list[7];
+ LPFC_MBOXQ_t *mb;
+ int rc, i;
+
+ psli = &phba->sli;
+ phba->hba_state = LPFC_LINK_DOWN;
+
+#if !defined(FC_TRANS_VER1) && !defined(FC_TRANS_265_BLKPATCH)
+ /* Stop all requests to the driver from the midlayer. */
+ scsi_block_requests(phba->host);
+#endif
+
+ lpfc_put_event(phba, HBA_EVENT_LINK_DOWN, phba->fc_myDID, NULL, 0, 0);
+
+ /* Clean up any firmware default rpi's */
+ if ((mb = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC))) {
+  lpfc_unreg_did(phba, 0xffffffff, mb);
+  if (lpfc_sli_issue_mbox(phba, mb, (MBX_NOWAIT | MBX_STOP_IOCB))
+      == MBX_NOT_FINISHED) {
+   mempool_free( mb, phba->mbox_mem_pool);
+  }
+ }
+
+ /* Cleanup any outstanding RSCN activity */
+ lpfc_els_flush_rscn(phba);
+
+ /* Cleanup any outstanding ELS commands */
+ lpfc_els_flush_cmd(phba);
+
+ /*
+  * If this function was called by the lpfc_do_dpc, don't recurse into
+  * the routine again. If not, just process any outstanding
+  * discovery events.
+  */
+ if (!list_empty(&phba->dpc_disc)) {
+  lpfc_disc_done(phba);
+ }
+
+ /* Issue a LINK DOWN event to all nodes */
+ node_list[0] = &phba->fc_npr_list; /* MUST do this list first */
+ node_list[1] = &phba->fc_nlpmap_list;
+ node_list[2] = &phba->fc_nlpunmap_list;
+ node_list[3] = &phba->fc_prli_list;
+ node_list[4] = &phba->fc_reglogin_list;
+ node_list[5] = &phba->fc_adisc_list;
+ node_list[6] = &phba->fc_plogi_list;
+ for (i = 0; i < 7; i++) {
+  listp = node_list[i];
+  if (list_empty(listp))
+   continue;
+
+  list_for_each_entry_safe(ndlp, next_ndlp, listp, nlp_listp) {
+   /* Fabric nodes are not handled thru state machine for
+      link down */
+   if (ndlp->nlp_type & NLP_FABRIC) {
+    /* Remove ALL Fabric nodes except Fabric_DID */
+    if (ndlp->nlp_DID != Fabric_DID) {
+     /* Take it off current list and free */
+     lpfc_nlp_list(phba, ndlp,
+      NLP_NO_LIST);
+    }
+   }
+   else {
+    /* Mark the device unreachable, then let the
+     * state machine start recovery for it.
+     */
+    lpfc_set_failmask(phba, ndlp,
+        LPFC_DEV_LINK_DOWN,
+        LPFC_SET_BITMASK);
+
+    rc = lpfc_disc_state_machine(phba, ndlp, NULL,
+        NLP_EVT_DEVICE_RECOVERY);
+   }
+  }
+ }
+
+ /* free any ndlp's on unused list */
+ list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_unused_list,
+    nlp_listp) {
+  lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
+ }
+
+ /* Setup myDID for link up if we are in pt2pt mode */
+ if (phba->fc_flag & FC_PT2PT) {
+  phba->fc_myDID = 0;
+  if ((mb = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC))) {
+   lpfc_config_link(phba, mb);
+   if (lpfc_sli_issue_mbox
+       (phba, mb, (MBX_NOWAIT | MBX_STOP_IOCB))
+       == MBX_NOT_FINISHED) {
+    mempool_free( mb, phba->mbox_mem_pool);
+   }
+  }
+  phba->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI);
+ }
+ phba->fc_flag &= ~FC_LBIT;
+
+ /* Turn off discovery timer if its running */
+ lpfc_can_disctmo(phba);
+
+ /* Must process IOCBs on all rings to handle ABORTed I/Os */
+ return (0);
+}
+
+/*
+ * Transition the HBA to LPFC_LINK_UP: clear point-to-point/RSCN
+ * discovery flags, drop stale Fabric logins and unused nodes, and
+ * mark every remaining node as discovery-in-progress with its
+ * link-down failmask bit cleared.  Always returns 0.
+ */
+static int
+lpfc_linkup(struct lpfc_hba * phba)
+{
+ struct lpfc_nodelist *ndlp, *next_ndlp;
+ struct list_head *listp;
+ struct list_head *node_list[7];
+ int i;
+
+ phba->hba_state = LPFC_LINK_UP;
+ phba->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI | FC_ABORT_DISCOVERY |
+      FC_RSCN_MODE | FC_NLP_MORE | FC_RSCN_DISCOVERY);
+ phba->fc_ns_retry = 0;
+
+
+ lpfc_put_event(phba, HBA_EVENT_LINK_UP, phba->fc_myDID,
+   (void *)(unsigned long)(phba->fc_topology),
+   0, phba->fc_linkspeed);
+
+ /*
+  * Clean up old Fabric NLP_FABRIC logins.
+  */
+ list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_nlpunmap_list,
+    nlp_listp) {
+  if (ndlp->nlp_DID == Fabric_DID) {
+   /* Take it off current list and free */
+   lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
+  }
+ }
+
+ /* free any ndlp's on unused list */
+ list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_unused_list,
+    nlp_listp) {
+  lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
+ }
+
+ /* Mark all nodes for LINK UP */
+ node_list[0] = &phba->fc_plogi_list;
+ node_list[1] = &phba->fc_adisc_list;
+ node_list[2] = &phba->fc_reglogin_list;
+ node_list[3] = &phba->fc_prli_list;
+ node_list[4] = &phba->fc_nlpunmap_list;
+ node_list[5] = &phba->fc_nlpmap_list;
+ node_list[6] = &phba->fc_npr_list;
+ for (i = 0; i < 7; i++) {
+  listp = node_list[i];
+  if (list_empty(listp))
+   continue;
+
+  list_for_each_entry(ndlp, listp, nlp_listp) {
+   /* Discovery is (re)starting for this node and its
+    * link is no longer down.
+    */
+   lpfc_set_failmask(phba, ndlp, LPFC_DEV_DISCOVERY_INP,
+       LPFC_SET_BITMASK);
+   lpfc_set_failmask(phba, ndlp, LPFC_DEV_LINK_DOWN,
+       LPFC_CLR_BITMASK);
+  }
+ }
+
+#if !defined(FC_TRANS_VER1) && !defined(FC_TRANS_265_BLKPATCH)
+ /* Drop the host lock around the midlayer unblock call */
+ spin_unlock_irq(phba->host->host_lock);
+ scsi_unblock_requests(phba->host);
+ spin_lock_irq(phba->host->host_lock);
+#endif
+ return 0;
+}
+
+/*
+ * This routine handles processing a CLEAR_LA mailbox
+ * command upon completion. It is setup in the LPFC_MBOXQ
+ * as the completion routine when the command is
+ * handed off to the SLI layer.
+ */
+void
+lpfc_mbx_cmpl_clear_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
+{
+ struct lpfc_sli *psli;
+ MAILBOX_t *mb;
+ uint32_t control;
+
+ psli = &phba->sli;
+ mb = &pmb->mb;
+ /* Since we don't do discovery right now, turn these off here */
+ psli->ring[psli->ip_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
+ psli->ring[psli->fcp_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
+ psli->ring[psli->next_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
+ /* Check for error (0x1601 is tolerated here) */
+ if ((mb->mbxStatus) && (mb->mbxStatus != 0x1601)) {
+  /* CLEAR_LA mbox error <mbxStatus> state <hba_state> */
+  lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
+    "%d:0320 CLEAR_LA mbxStatus error x%x hba "
+    "state x%x\n",
+    phba->brd_no, mb->mbxStatus, phba->hba_state);
+
+  phba->hba_state = LPFC_HBA_ERROR;
+  goto out;
+ }
+
+ /* If discovery was aborted, skip the PLOGI phase entirely */
+ if(phba->fc_flag & FC_ABORT_DISCOVERY)
+  goto out;
+
+ phba->num_disc_nodes = 0;
+ /* go thru NPR list and issue ELS PLOGIs */
+ if (phba->fc_npr_cnt) {
+  lpfc_els_disc_plogi(phba);
+ }
+
+ phba->hba_state = LPFC_HBA_READY;
+
+out:
+ phba->fc_flag &= ~FC_ABORT_DISCOVERY;
+ /* Device Discovery completes */
+ lpfc_printf_log(phba,
+   KERN_INFO,
+   LOG_DISCOVERY,
+   "%d:0225 Device Discovery completes\n",
+   phba->brd_no);
+
+ mempool_free( pmb, phba->mbox_mem_pool);
+ if (phba->fc_flag & FC_ESTABLISH_LINK) {
+  phba->fc_flag &= ~FC_ESTABLISH_LINK;
+ }
+ del_timer_sync(&phba->fc_estabtmo);
+ lpfc_can_disctmo(phba);
+
+ /* turn on Link Attention interrupts */
+ psli->sliinit.sli_flag |= LPFC_PROCESS_LA;
+ control = readl(phba->HCregaddr);
+ control |= HC_LAINT_ENA;
+ writel(control, phba->HCregaddr);
+ readl(phba->HCregaddr); /* flush */
+
+ return;
+}
+
+/*
+ * CONFIG_LINK mailbox completion.  On success, either waits for FAN
+ * (public loop without the L bit), starts discovery via FLOGI, or --
+ * in LPFC_FABRIC_CFG_LINK state -- simply frees the mailbox.  On a
+ * mailbox error or an unexpected HBA state it reuses pmb to issue
+ * CLEAR_LA.
+ */
+static void
+lpfc_mbx_cmpl_config_link(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
+{
+ struct lpfc_sli *psli;
+ MAILBOX_t *mb;
+
+ psli = &phba->sli;
+ mb = &pmb->mb;
+ /* Check for error */
+ if (mb->mbxStatus) {
+  /* CONFIG_LINK mbox error <mbxStatus> state <hba_state> */
+  lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
+    "%d:0306 CONFIG_LINK mbxStatus error x%x "
+    "HBA state x%x\n",
+    phba->brd_no, mb->mbxStatus, phba->hba_state);
+
+  lpfc_linkdown(phba);
+  phba->hba_state = LPFC_HBA_ERROR;
+  goto out;
+ }
+
+ if (phba->hba_state == LPFC_LOCAL_CFG_LINK) {
+  if (phba->fc_topology == TOPOLOGY_LOOP) {
+   /* If we are public loop and L bit was set */
+   if ((phba->fc_flag & FC_PUBLIC_LOOP) &&
+       !(phba->fc_flag & FC_LBIT)) {
+    /* Need to wait for FAN - use discovery timer
+     * for timeout. hba_state is identically
+     * LPFC_LOCAL_CFG_LINK while waiting for FAN
+     */
+    lpfc_set_disctmo(phba);
+    mempool_free( pmb, phba->mbox_mem_pool);
+    return;
+   }
+  }
+
+  /* Start discovery by sending a FLOGI hba_state is identically
+   * LPFC_FLOGI while waiting for FLOGI cmpl
+   */
+  phba->hba_state = LPFC_FLOGI;
+  lpfc_set_disctmo(phba);
+  lpfc_initial_flogi(phba);
+  mempool_free( pmb, phba->mbox_mem_pool);
+  return;
+ }
+ if (phba->hba_state == LPFC_FABRIC_CFG_LINK) {
+  mempool_free( pmb, phba->mbox_mem_pool);
+  return;
+ }
+
+out:
+ /* CONFIG_LINK bad hba state <hba_state> */
+ lpfc_printf_log(phba,
+   KERN_ERR,
+   LOG_DISCOVERY,
+   "%d:0200 CONFIG_LINK bad hba state x%x\n",
+   phba->brd_no, phba->hba_state);
+
+ if (phba->hba_state != LPFC_CLEAR_LA) {
+  /* Reuse pmb to issue CLEAR_LA; if even that cannot be
+   * issued, flush discovery and force the rings/state ready.
+   */
+  lpfc_clear_la(phba, pmb);
+  pmb->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
+  if (lpfc_sli_issue_mbox(phba, pmb, (MBX_NOWAIT | MBX_STOP_IOCB))
+      == MBX_NOT_FINISHED) {
+   mempool_free( pmb, phba->mbox_mem_pool);
+   lpfc_disc_flush_list(phba);
+   psli->ring[(psli->ip_ring)].flag &=
+       ~LPFC_STOP_IOCB_EVENT;
+   psli->ring[(psli->fcp_ring)].flag &=
+       ~LPFC_STOP_IOCB_EVENT;
+   psli->ring[(psli->next_ring)].flag &=
+       ~LPFC_STOP_IOCB_EVENT;
+   phba->hba_state = LPFC_HBA_READY;
+  }
+ } else {
+  mempool_free( pmb, phba->mbox_mem_pool);
+ }
+ return;
+}
+
+/*
+ * READ_SPARAM mailbox completion.  On success, copies the service
+ * parameters (and node/port names) from the DMA buffer into the HBA
+ * structure.  On error, takes the link down and reuses pmb to issue
+ * CLEAR_LA.  The DMA buffer in pmb->context1 is freed on all paths.
+ */
+static void
+lpfc_mbx_cmpl_read_sparam(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
+{
+ struct lpfc_sli *psli = &phba->sli;
+ MAILBOX_t *mb = &pmb->mb;
+ struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) pmb->context1;
+
+
+ /* Check for error */
+ if (mb->mbxStatus) {
+  /* READ_SPARAM mbox error <mbxStatus> state <hba_state> */
+  lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
+    "%d:0319 READ_SPARAM mbxStatus error x%x "
+    "hba state x%x>\n",
+    phba->brd_no, mb->mbxStatus, phba->hba_state);
+
+  lpfc_linkdown(phba);
+  phba->hba_state = LPFC_HBA_ERROR;
+  goto out;
+ }
+
+ /* The mailbox was populated by the HBA. Flush it to main store for the
+  * driver. Note that all context buffers are from the driver's
+  * dma pool and have length LPFC_BPL_SIZE.
+  */
+ pci_dma_sync_single_for_cpu(phba->pcidev, mp->phys, LPFC_BPL_SIZE,
+        PCI_DMA_FROMDEVICE);
+
+ memcpy((uint8_t *) & phba->fc_sparam, (uint8_t *) mp->virt,
+        sizeof (struct serv_parm));
+ memcpy((uint8_t *) & phba->fc_nodename,
+        (uint8_t *) & phba->fc_sparam.nodeName,
+        sizeof (struct lpfc_name));
+ memcpy((uint8_t *) & phba->fc_portname,
+        (uint8_t *) & phba->fc_sparam.portName,
+        sizeof (struct lpfc_name));
+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ kfree(mp);
+ mempool_free( pmb, phba->mbox_mem_pool);
+ return;
+
+out:
+ pmb->context1 = NULL;
+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ kfree(mp);
+ /* Reuse pmb for CLEAR_LA; if it cannot be issued, flush
+  * discovery and force the rings/state ready.
+  */
+ if (phba->hba_state != LPFC_CLEAR_LA) {
+  lpfc_clear_la(phba, pmb);
+  pmb->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
+  if (lpfc_sli_issue_mbox(phba, pmb, (MBX_NOWAIT | MBX_STOP_IOCB))
+      == MBX_NOT_FINISHED) {
+   mempool_free( pmb, phba->mbox_mem_pool);
+   lpfc_disc_flush_list(phba);
+   psli->ring[(psli->ip_ring)].flag &=
+       ~LPFC_STOP_IOCB_EVENT;
+   psli->ring[(psli->fcp_ring)].flag &=
+       ~LPFC_STOP_IOCB_EVENT;
+   psli->ring[(psli->next_ring)].flag &=
+       ~LPFC_STOP_IOCB_EVENT;
+   phba->hba_state = LPFC_HBA_READY;
+  }
+ } else {
+  mempool_free( pmb, phba->mbox_mem_pool);
+ }
+ return;
+}
+
+/*
+ * This routine handles processing a READ_LA mailbox
+ * command upon completion. It is setup in the LPFC_MBOXQ
+ * as the completion routine when the command is
+ * handed off to the SLI layer.
+ */
+void
+lpfc_mbx_cmpl_read_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
+{
+ struct lpfc_sli *psli = &phba->sli;
+ READ_LA_VAR *la;
+ LPFC_MBOXQ_t *mbox;
+ MAILBOX_t *mb = &pmb->mb;
+ struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
+ uint32_t control;
+ int i;
+
+ /* Check for error */
+ if (mb->mbxStatus) {
+  /* READ_LA mbox error <mbxStatus> state <hba_state> */
+  lpfc_printf_log(phba,
+    KERN_INFO,
+    LOG_LINK_EVENT,
+    "%d:1307 READ_LA mbox error x%x state x%x\n",
+    phba->brd_no,
+    mb->mbxStatus, phba->hba_state);
+
+  lpfc_linkdown(phba);
+  phba->hba_state = LPFC_HBA_ERROR;
+
+  /* turn on Link Attention interrupts */
+  psli->sliinit.sli_flag |= LPFC_PROCESS_LA;
+  control = readl(phba->HCregaddr);
+  control |= HC_LAINT_ENA;
+  writel(control, phba->HCregaddr);
+  readl(phba->HCregaddr); /* flush */
+
+  /* Release the loop map buffer and the mailbox; the old
+   * early return on this path leaked both.
+   */
+  goto out_free;
+ }
+ la = (READ_LA_VAR *) & pmb->mb.un.varReadLA;
+
+ /* Get Loop Map information.  Only sync and copy the DMA buffer
+  * when the HBA actually supplied one: the old code called
+  * pci_dma_sync_single_for_cpu() on mp before its own NULL check.
+  * All context buffers are from the driver's dma pool and have
+  * length LPFC_BPL_SIZE.
+  */
+ if (mp) {
+  pci_dma_sync_single_for_cpu(phba->pcidev, mp->phys,
+         LPFC_BPL_SIZE, PCI_DMA_FROMDEVICE);
+  memcpy(&phba->alpa_map[0], mp->virt, 128);
+ } else {
+  memset(&phba->alpa_map[0], 0, 128);
+ }
+
+ /* Detect a missed link event: a non-consecutive or repeated
+  * event tag on a link-up means the link bounced while we
+  * weren't looking, so force a linkdown first.
+  */
+ if (((phba->fc_eventTag + 1) < la->eventTag) ||
+     (phba->fc_eventTag == la->eventTag)) {
+  phba->fc_stat.LinkMultiEvent++;
+  if (la->attType == AT_LINK_UP) {
+   if (phba->fc_eventTag != 0) {
+
+    lpfc_linkdown(phba);
+   }
+  }
+ }
+
+ phba->fc_eventTag = la->eventTag;
+
+ if (la->attType == AT_LINK_UP) {
+  phba->fc_stat.LinkUp++;
+  /* Link Up Event <eventTag> received */
+  lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
+    "%d:1303 Link Up Event x%x received "
+    "Data: x%x x%x x%x x%x\n",
+    phba->brd_no, la->eventTag, phba->fc_eventTag,
+    la->granted_AL_PA, la->UlnkSpeed,
+    phba->alpa_map[0]);
+
+  /* Record the negotiated link speed */
+  switch(la->UlnkSpeed) {
+  case LA_1GHZ_LINK:
+   phba->fc_linkspeed = LA_1GHZ_LINK;
+   break;
+  case LA_2GHZ_LINK:
+   phba->fc_linkspeed = LA_2GHZ_LINK;
+   break;
+  case LA_4GHZ_LINK:
+   phba->fc_linkspeed = LA_4GHZ_LINK;
+   break;
+  default:
+   phba->fc_linkspeed = LA_UNKNW_LINK;
+   break;
+  }
+
+  if ((phba->fc_topology = la->topology) == TOPOLOGY_LOOP) {
+
+   if (la->il) {
+    phba->fc_flag |= FC_LBIT;
+   }
+
+   phba->fc_myDID = la->granted_AL_PA;
+
+   /* Optionally dump the ALPA map, 16 bytes per log line */
+   i = la->un.lilpBde64.tus.f.bdeSize;
+   if (i == 0) {
+    phba->alpa_map[0] = 0;
+   } else {
+    if (phba->cfg_log_verbose
+        & LOG_LINK_EVENT) {
+     int numalpa, j, k;
+     union {
+      uint8_t pamap[16];
+      struct {
+       uint32_t wd1;
+       uint32_t wd2;
+       uint32_t wd3;
+       uint32_t wd4;
+      } pa;
+     } un;
+
+     numalpa = phba->alpa_map[0];
+     j = 0;
+     while (j < numalpa) {
+      memset(un.pamap, 0, 16);
+      for (k = 1; j < numalpa; k++) {
+       un.pamap[k - 1] =
+        phba->alpa_map[j +
+            1];
+       j++;
+       if (k == 16)
+        break;
+      }
+      /* Link Up Event ALPA map */
+      lpfc_printf_log(phba,
+        KERN_WARNING,
+        LOG_LINK_EVENT,
+        "%d:1304 Link Up Event "
+        "ALPA map Data: x%x "
+        "x%x x%x x%x\n",
+        phba->brd_no,
+        un.pa.wd1, un.pa.wd2,
+        un.pa.wd3, un.pa.wd4);
+     }
+    }
+   }
+  } else {
+   /* Non-loop topology: use the preferred DID */
+   phba->fc_myDID = phba->fc_pref_DID;
+   phba->fc_flag |= FC_LBIT;
+  }
+
+  lpfc_linkup(phba);
+  /* Kick off READ_SPARAM and CONFIG_LINK mailbox commands;
+   * their completion handlers drive the rest of discovery.
+   */
+  if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC))) {
+   lpfc_read_sparam(phba, mbox);
+   mbox->mbox_cmpl = lpfc_mbx_cmpl_read_sparam;
+   lpfc_sli_issue_mbox
+       (phba, mbox, (MBX_NOWAIT | MBX_STOP_IOCB));
+  }
+
+  if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC))) {
+   phba->hba_state = LPFC_LOCAL_CFG_LINK;
+   lpfc_config_link(phba, mbox);
+   mbox->mbox_cmpl = lpfc_mbx_cmpl_config_link;
+   lpfc_sli_issue_mbox
+       (phba, mbox, (MBX_NOWAIT | MBX_STOP_IOCB));
+  }
+ } else {
+  phba->fc_stat.LinkDown++;
+  /* Link Down Event <eventTag> received */
+  lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
+    "%d:1305 Link Down Event x%x received "
+    "Data: x%x x%x x%x\n",
+    phba->brd_no, la->eventTag, phba->fc_eventTag,
+    phba->hba_state, phba->fc_flag);
+
+  lpfc_linkdown(phba);
+
+  /* turn on Link Attention interrupts - no CLEAR_LA needed */
+  psli->sliinit.sli_flag |= LPFC_PROCESS_LA;
+  control = readl(phba->HCregaddr);
+  control |= HC_LAINT_ENA;
+  writel(control, phba->HCregaddr);
+  readl(phba->HCregaddr); /* flush */
+ }
+
+out_free:
+ /* Common cleanup: mp may legitimately be NULL (no loop map),
+  * so guard the buffer free -- the old tail dereferenced mp
+  * unconditionally.
+  */
+ pmb->context1 = NULL;
+ if (mp) {
+  lpfc_mbuf_free(phba, mp->virt, mp->phys);
+  kfree(mp);
+ }
+ mempool_free( pmb, phba->mbox_mem_pool);
+ return;
+}
+
+/*
+ * This routine handles processing a REG_LOGIN mailbox
+ * command upon completion. It is setup in the LPFC_MBOXQ
+ * as the completion routine when the command is
+ * handed off to the SLI layer.
+ */
+void
+lpfc_mbx_cmpl_reg_login(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
+{
+ struct lpfc_sli *psli;
+ MAILBOX_t *mb;
+ struct lpfc_dmabuf *mp;
+ struct lpfc_nodelist *ndlp;
+
+ psli = &phba->sli;
+ mb = &pmb->mb;
+
+ /* Recover the node and DMA buffer stashed in the mailbox
+  * context fields when the command was issued.
+  */
+ ndlp = (struct lpfc_nodelist *) pmb->context2;
+ mp = (struct lpfc_dmabuf *) (pmb->context1);
+
+ /* The HBA wrote the mailbox buffer; sync it for CPU access.
+  * All context buffers come from the driver's dma pool and are
+  * LPFC_BPL_SIZE bytes long.
+  */
+ pci_dma_sync_single_for_cpu(phba->pcidev, mp->phys, LPFC_BPL_SIZE,
+        PCI_DMA_FROMDEVICE);
+
+ pmb->context1 = NULL;
+
+ /* Feed the completion to the discovery state machine, then
+  * release the DMA buffer and the mailbox.
+  */
+ lpfc_disc_state_machine(phba, ndlp, pmb, NLP_EVT_CMPL_REG_LOGIN);
+
+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ kfree(mp);
+ mempool_free(pmb, phba->mbox_mem_pool);
+
+ return;
+}
+
+/*
+ * This routine handles processing a Fabric REG_LOGIN mailbox
+ * command upon completion. It is setup in the LPFC_MBOXQ
+ * as the completion routine when the command is
+ * handed off to the SLI layer.
+ */
+void
+lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
+{
+ struct lpfc_sli *psli;
+ MAILBOX_t *mb;
+ struct lpfc_dmabuf *mp;
+ struct lpfc_nodelist *ndlp;
+ struct lpfc_nodelist *ndlp_fdmi;
+
+
+ psli = &phba->sli;
+ mb = &pmb->mb;
+
+ ndlp = (struct lpfc_nodelist *) pmb->context2;
+ mp = (struct lpfc_dmabuf *) (pmb->context1);
+
+ if (mb->mbxStatus) {
+  /* Registration failed: release everything and fall back
+   * to loop-map based discovery.
+   */
+  lpfc_mbuf_free(phba, mp->virt, mp->phys);
+  kfree(mp);
+  mempool_free( pmb, phba->mbox_mem_pool);
+  mempool_free( ndlp, phba->nlp_mem_pool);
+
+  /* FLOGI failed, so just use loop map to make discovery list */
+  lpfc_disc_list_loopmap(phba);
+
+  /* Start discovery */
+  lpfc_disc_start(phba);
+  return;
+ }
+
+ /* The mailbox was populated by the HBA. Flush it to main store for the
+  * driver. Note that all context buffers are from the driver's
+  * dma pool and have length LPFC_BPL_SIZE.
+  */
+ pci_dma_sync_single_for_cpu(phba->pcidev, mp->phys, LPFC_BPL_SIZE,
+        PCI_DMA_FROMDEVICE);
+
+ pmb->context1 = NULL;
+
+ /* Replace any stale RPI binding with the one just granted */
+ if (ndlp->nlp_rpi != 0)
+  lpfc_findnode_remove_rpi(phba, ndlp->nlp_rpi);
+ ndlp->nlp_rpi = mb->un.varWords[0];
+ lpfc_addnode_rpi(phba, ndlp, ndlp->nlp_rpi);
+ ndlp->nlp_type |= NLP_FABRIC;
+ ndlp->nlp_state = NLP_STE_UNMAPPED_NODE;
+ lpfc_nlp_list(phba, ndlp, NLP_UNMAPPED_LIST);
+
+ if (phba->hba_state == LPFC_FABRIC_CFG_LINK) {
+  /* This NPort has been assigned an NPort_ID by the fabric as a
+   * result of the completed fabric login. Issue a State Change
+   * Registration (SCR) ELS request to the fabric controller
+   * (SCR_DID) so that this NPort gets RSCN events from the
+   * fabric.
+   */
+  lpfc_issue_els_scr(phba, SCR_DID, 0);
+
+  /* Allocate a new node instance. If the pool is empty, just
+   * start the discovery process and skip the Nameserver login
+   * process. This is attempted again later on. Otherwise, issue
+   * a Port Login (PLOGI) to the NameServer
+   */
+  if ((ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_ATOMIC))
+      == 0) {
+   lpfc_disc_start(phba);
+  } else {
+   lpfc_nlp_init(phba, ndlp, NameServer_DID);
+   ndlp->nlp_type |= NLP_FABRIC;
+   ndlp->nlp_state = NLP_STE_PLOGI_ISSUE;
+   lpfc_nlp_list(phba, ndlp, NLP_PLOGI_LIST);
+   lpfc_issue_els_plogi(phba, ndlp, 0);
+   /* Optionally also log in to the FDMI server */
+   if (phba->cfg_fdmi_on) {
+    if ((ndlp_fdmi = mempool_alloc(
+           phba->nlp_mem_pool,
+           GFP_ATOMIC))) {
+     lpfc_nlp_init(phba, ndlp_fdmi,
+      FDMI_DID);
+     ndlp_fdmi->nlp_type |= NLP_FABRIC;
+     ndlp_fdmi->nlp_state =
+         NLP_STE_PLOGI_ISSUE;
+     lpfc_issue_els_plogi(phba, ndlp_fdmi,
+            0);
+    }
+   }
+  }
+ }
+
+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ kfree(mp);
+ mempool_free( pmb, phba->mbox_mem_pool);
+
+ return;
+}
+
+/*
+ * This routine handles processing a NameServer REG_LOGIN mailbox
+ * command upon completion. It is setup in the LPFC_MBOXQ
+ * as the completion routine when the command is
+ * handed off to the SLI layer.
+ */
+void
+lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
+{
+ struct lpfc_sli *psli;
+ MAILBOX_t *mb;
+ struct lpfc_dmabuf *mp;
+ struct lpfc_nodelist *ndlp;
+
+ psli = &phba->sli;
+ mb = &pmb->mb;
+
+ ndlp = (struct lpfc_nodelist *) pmb->context2;
+ mp = (struct lpfc_dmabuf *) (pmb->context1);
+
+ if (mb->mbxStatus) {
+  /* Registration failed: release resources and fall back
+   * to loop-map based discovery.
+   */
+  lpfc_mbuf_free(phba, mp->virt, mp->phys);
+  kfree(mp);
+  mempool_free( pmb, phba->mbox_mem_pool);
+  lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
+
+  /* RegLogin failed, so just use loop map to make discovery
+     list */
+  lpfc_disc_list_loopmap(phba);
+
+  /* Start discovery */
+  lpfc_disc_start(phba);
+  return;
+ }
+
+ /* The mailbox was populated by the HBA. Flush it to main store for the
+  * driver. Note that all context buffers are from the driver's
+  * dma pool and have length LPFC_BPL_SIZE.
+  */
+ pci_dma_sync_single_for_cpu(phba->pcidev, mp->phys, LPFC_BPL_SIZE,
+        PCI_DMA_FROMDEVICE);
+
+ pmb->context1 = NULL;
+
+ /* Replace any stale RPI binding with the one just granted */
+ if (ndlp->nlp_rpi != 0)
+  lpfc_findnode_remove_rpi(phba, ndlp->nlp_rpi);
+ ndlp->nlp_rpi = mb->un.varWords[0];
+ lpfc_addnode_rpi(phba, ndlp, ndlp->nlp_rpi);
+ ndlp->nlp_type |= NLP_FABRIC;
+ ndlp->nlp_state = NLP_STE_UNMAPPED_NODE;
+ lpfc_nlp_list(phba, ndlp, NLP_UNMAPPED_LIST);
+
+ if (phba->hba_state < LPFC_HBA_READY) {
+  /* Link up discovery requires Fabric registration. */
+  lpfc_ns_cmd(phba, ndlp, SLI_CTNS_RNN_ID);
+  lpfc_ns_cmd(phba, ndlp, SLI_CTNS_RSNN_NN);
+  lpfc_ns_cmd(phba, ndlp, SLI_CTNS_RFT_ID);
+ }
+
+ phba->fc_ns_retry = 0;
+ /* Good status, issue CT Request to NameServer */
+ if (lpfc_ns_cmd(phba, ndlp, SLI_CTNS_GID_FT)) {
+  /* Cannot issue NameServer Query, so finish up discovery */
+  lpfc_disc_start(phba);
+ }
+
+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ kfree(mp);
+ mempool_free( pmb, phba->mbox_mem_pool);
+
+ return;
+}
+
+/* Append a bind entry (blp) to the HBA's consistent-bind list.
+ * Always returns 0.
+ */
+int
+lpfc_consistent_bind_save(struct lpfc_hba * phba, struct lpfc_bindlist * blp)
+{
+ /* New entries always go on the tail; bump the bind count */
+ list_add_tail(&blp->nlp_listp, &phba->fc_nlpbind_list);
+ phba->fc_bind_cnt++;
+
+ /* Add scsiid <sid> to BIND list */
+ lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
+   "%d:0903 Add scsiid %d to BIND list "
+   "Data: x%x x%x x%x x%p\n",
+   phba->brd_no, blp->nlp_sid, phba->fc_bind_cnt,
+   blp->nlp_DID, blp->nlp_bind_type, blp);
+
+ return (0);
+}
+
+int
+lpfc_nlp_list(struct lpfc_hba * phba, struct lpfc_nodelist * nlp, int list)
+{
+ struct lpfc_bindlist *blp;
+ struct lpfc_target *targetp;
+ struct lpfc_sli *psli;
+ psli = &phba->sli;
+
+ /* Sanity check to ensure we are not moving to / from the same list */
+ if((nlp->nlp_flag & NLP_LIST_MASK) == list) {
+ if(list != NLP_NO_LIST)
+ return(0);
+ }
+
+ blp = nlp->nlp_listp_bind;
+
+ switch(nlp->nlp_flag & NLP_LIST_MASK) {
+ case NLP_NO_LIST: /* Not on any list */
+ break;
+ case NLP_UNUSED_LIST:
+ phba->fc_unused_cnt--;
+ list_del(&nlp->nlp_listp);
+ nlp->nlp_flag &= ~NLP_LIST_MASK;
+ break;
+ case NLP_PLOGI_LIST:
+ phba->fc_plogi_cnt--;
+ list_del(&nlp->nlp_listp);
+ nlp->nlp_flag &= ~NLP_LIST_MASK;
+ break;
+ case NLP_ADISC_LIST:
+ phba->fc_adisc_cnt--;
+ list_del(&nlp->nlp_listp);
+ nlp->nlp_flag &= ~NLP_LIST_MASK;
+ break;
+ case NLP_REGLOGIN_LIST:
+ phba->fc_reglogin_cnt--;
+ list_del(&nlp->nlp_listp);
+ nlp->nlp_flag &= ~NLP_LIST_MASK;
+ break;
+ case NLP_PRLI_LIST:
+ phba->fc_prli_cnt--;
+ list_del(&nlp->nlp_listp);
+ nlp->nlp_flag &= ~NLP_LIST_MASK;
+ break;
+ case NLP_UNMAPPED_LIST:
+ phba->fc_unmap_cnt--;
+ list_del(&nlp->nlp_listp);
+ nlp->nlp_flag &= ~NLP_LIST_MASK;
+ nlp->nlp_flag &= ~NLP_TGT_NO_SCSIID;
+ nlp->nlp_type &= ~NLP_FC_NODE;
+ phba->nport_event_cnt++;
+ break;
+ case NLP_MAPPED_LIST:
+ phba->fc_map_cnt--;
+ list_del(&nlp->nlp_listp);
+ nlp->nlp_flag &= ~NLP_LIST_MASK;
+ phba->nport_event_cnt++;
+ lpfc_set_failmask(phba, nlp, LPFC_DEV_DISAPPEARED,
+ LPFC_SET_BITMASK);
+ nlp->nlp_type &= ~NLP_FCP_TARGET;
+ targetp = nlp->nlp_Target;
+ if (targetp && (list != NLP_MAPPED_LIST)) {
+ nlp->nlp_Target = NULL;
+#if defined(FC_TRANS_VER1) || defined(FC_TRANS_265_BLKPATCH)
+ /*
+ * Do not block the target if the driver has just reset
+ * its interface to the hardware.
+ */
+ if (phba->hba_state != LPFC_INIT_START)
+ lpfc_target_block(phba, targetp);
+#endif
+ }
+
+ break;
+ case NLP_NPR_LIST:
+ phba->fc_npr_cnt--;
+ list_del(&nlp->nlp_listp);
+ nlp->nlp_flag &= ~NLP_LIST_MASK;
+ /* Stop delay tmo if taking node off NPR list */
+ if ((nlp->nlp_flag & NLP_DELAY_TMO) &&
+ (list != NLP_NPR_LIST)) {
+ nlp->nlp_flag &= ~NLP_DELAY_TMO;
+ del_timer_sync(&nlp->nlp_delayfunc);
+ }
+ break;
+ }
+
+ /* Add NPort <did> to <num> list */
+ lpfc_printf_log(phba,
+ KERN_INFO,
+ LOG_NODE,
+ "%d:0904 Add NPort x%x to %d list Data: x%x x%p\n",
+ phba->brd_no,
+ nlp->nlp_DID, list, nlp->nlp_flag, blp);
+
+ nlp->nlp_listp_bind = NULL;
+
+ switch(list) {
+ case NLP_NO_LIST: /* No list, just remove it */
+ lpfc_nlp_remove(phba, nlp);
+ break;
+ case NLP_UNUSED_LIST:
+ nlp->nlp_flag |= list;
+ /* Put it at the end of the unused list */
+ list_add_tail(&nlp->nlp_listp, &phba->fc_unused_list);
+ phba->fc_unused_cnt++;
+ break;
+ case NLP_PLOGI_LIST:
+ nlp->nlp_flag |= list;
+ /* Put it at the end of the plogi list */
+ list_add_tail(&nlp->nlp_listp, &phba->fc_plogi_list);
+ phba->fc_plogi_cnt++;
+ break;
+ case NLP_ADISC_LIST:
+ nlp->nlp_flag |= list;
+ /* Put it at the end of the adisc list */
+ list_add_tail(&nlp->nlp_listp, &phba->fc_adisc_list);
+ phba->fc_adisc_cnt++;
+ break;
+ case NLP_REGLOGIN_LIST:
+ nlp->nlp_flag |= list;
+ /* Put it at the end of the reglogin list */
+ list_add_tail(&nlp->nlp_listp, &phba->fc_reglogin_list);
+ phba->fc_reglogin_cnt++;
+ break;
+ case NLP_PRLI_LIST:
+ nlp->nlp_flag |= list;
+ /* Put it at the end of the prli list */
+ list_add_tail(&nlp->nlp_listp, &phba->fc_prli_list);
+ phba->fc_prli_cnt++;
+ break;
+ case NLP_UNMAPPED_LIST:
+ nlp->nlp_flag |= list;
+ /* Put it at the end of the unmap list */
+ list_add_tail(&nlp->nlp_listp, &phba->fc_nlpunmap_list);
+ phba->fc_unmap_cnt++;
+ phba->nport_event_cnt++;
+ /* stop nodev tmo if running */
+ if (nlp->nlp_flag & NLP_NODEV_TMO) {
+ del_timer_sync(&nlp->nlp_tmofunc);
+ }
+ nlp->nlp_flag &= ~NLP_NODEV_TMO;
+ nlp->nlp_type |= NLP_FC_NODE;
+ lpfc_set_failmask(phba, nlp, LPFC_DEV_DISCOVERY_INP,
+ LPFC_CLR_BITMASK);
+ break;
+ case NLP_MAPPED_LIST:
+ nlp->nlp_flag |= list;
+ /* Put it at the end of the map list */
+ list_add_tail(&nlp->nlp_listp, &phba->fc_nlpmap_list);
+ phba->fc_map_cnt++;
+ phba->nport_event_cnt++;
+ /* stop nodev tmo if running */
+ if (nlp->nlp_flag & NLP_NODEV_TMO) {
+ del_timer_sync(&nlp->nlp_tmofunc);
+ }
+ nlp->nlp_flag &= ~NLP_NODEV_TMO;
+ nlp->nlp_type |= NLP_FCP_TARGET;
+ lpfc_set_failmask(phba, nlp, LPFC_DEV_DISAPPEARED,
+ LPFC_CLR_BITMASK);
+ lpfc_set_failmask(phba, nlp, LPFC_DEV_DISCOVERY_INP,
+ LPFC_CLR_BITMASK);
+
+ targetp = phba->device_queue_hash[nlp->nlp_sid];
+ if (targetp && targetp->pnode) {
+ nlp->nlp_Target = targetp;
+#if defined(FC_TRANS_VER1) || defined(FC_TRANS_265_BLKPATCH)
+ /* Unblock I/Os on target */
+ if(targetp->blocked)
+ lpfc_target_unblock(phba, targetp);
+#endif
+ }
+ break;
+ case NLP_NPR_LIST:
+ nlp->nlp_flag |= list;
+ /* Put it at the end of the npr list */
+ list_add_tail(&nlp->nlp_listp, &phba->fc_npr_list);
+ phba->fc_npr_cnt++;
+
+ /*
+ * Sanity check for Fabric entity.
+ * Set nodev_tmo for NPR state, for Fabric use 1 sec.
+ */
+ if (nlp->nlp_type & NLP_FABRIC) {
+ mod_timer(&nlp->nlp_tmofunc, jiffies + HZ);
+ }
+ else {
+ mod_timer(&nlp->nlp_tmofunc,
+ jiffies + HZ * phba->cfg_nodev_tmo);
+ }
+ nlp->nlp_flag |= NLP_NODEV_TMO;
+ nlp->nlp_flag &= ~NLP_RCV_PLOGI;
+ break;
+ case NLP_JUST_DQ:
+ break;
+ }
+
+ if (blp) {
+ nlp->nlp_sid = 0;
+ nlp->nlp_flag &= ~NLP_SEED_MASK;
+ nlp->nlp_Target = NULL;
+ lpfc_consistent_bind_save(phba, blp);
+ }
+ return (0);
+}
+
+/*
+ * Start / ReStart rescue timer for Discovery / RSCN handling
+ */
+void
+lpfc_set_disctmo(struct lpfc_hba * phba)
+{
+ uint32_t tmo;
+
+ tmo = ((phba->fc_ratov * 2) + 1);
+
+ mod_timer(&phba->fc_disctmo, jiffies + HZ * tmo);
+ phba->fc_flag |= FC_DISC_TMO;
+
+ /* Start Discovery Timer state <hba_state> */
+ lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
+ "%d:0247 Start Discovery Timer state x%x "
+ "Data: x%x x%lx x%x x%x\n",
+ phba->brd_no,
+ phba->hba_state, tmo, (unsigned long)&phba->fc_disctmo,
+ phba->fc_plogi_cnt, phba->fc_adisc_cnt);
+
+ return;
+}
+
+/*
+ * Cancel rescue timer for Discovery / RSCN handling
+ */
+int
+lpfc_can_disctmo(struct lpfc_hba * phba)
+{
+ /* Turn off discovery timer if its running */
+ if(phba->fc_flag & FC_DISC_TMO) {
+ del_timer_sync(&phba->fc_disctmo);
+ }
+ phba->fc_flag &= ~FC_DISC_TMO;
+
+ /* Cancel Discovery Timer state <hba_state> */
+ lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
+ "%d:0248 Cancel Discovery Timer state x%x "
+ "Data: x%x x%x x%x\n",
+ phba->brd_no, phba->hba_state, phba->fc_flag,
+ phba->fc_plogi_cnt, phba->fc_adisc_cnt);
+
+ return (0);
+}
+
/*
 * Check specified ring for outstanding IOCB on the SLI queue
 * Return true if iocb matches the specified nport
 */
int
lpfc_check_sli_ndlp(struct lpfc_hba * phba,
		    struct lpfc_sli_ring * pring,
		    struct lpfc_iocbq * iocb, struct lpfc_nodelist * ndlp)
{
	struct lpfc_sli *psli;
	IOCB_t *icmd;

	psli = &phba->sli;
	icmd = &iocb->iocb;
	if (pring->ringno == LPFC_ELS_RING) {
		switch (icmd->ulpCommand) {
		case CMD_GEN_REQUEST64_CR:
			/* GEN requests are matched by RPI in ulpContext */
			if (icmd->ulpContext == (volatile ushort)ndlp->nlp_rpi)
				return (1);
			/* NOTE(review): no break here — control falls through
			 * to the context1 comparison below.  Looks intentional
			 * (a GEN request can also carry the node in context1),
			 * but confirm before adding a break.
			 */
		case CMD_ELS_REQUEST64_CR:
		case CMD_XMIT_ELS_RSP64_CX:
			/* ELS cmds/rsps carry the nodelist pointer in
			 * context1 */
			if (iocb->context1 == (uint8_t *) ndlp)
				return (1);
		}
	} else if (pring->ringno == psli->ip_ring) {
		/* No per-node matching is done on the IP ring */
	} else if (pring->ringno == psli->fcp_ring) {
		/* Skip match check if waiting to relogin to FCP target */
		if ((ndlp->nlp_type & NLP_FCP_TARGET) &&
		    (ndlp->nlp_flag & NLP_DELAY_TMO)) {
			return (0);
		}
		/* FCP IOCBs are matched by RPI */
		if (icmd->ulpContext == (volatile ushort)ndlp->nlp_rpi) {
			return (1);
		}
	} else if (pring->ringno == psli->next_ring) {
		/* No per-node matching on this ring */
	}
	return (0);
}
+
/*
 * Free resources / clean up outstanding I/Os
 * associated with nlp_rpi in the LPFC_NODELIST entry.
 *
 * Walks the txq of every SLI ring and completes (with
 * IOSTAT_LOCAL_REJECT / IOERR_SLI_ABORTED) or frees each IOCB that
 * matches @ndlp, as determined by lpfc_check_sli_ndlp().  IOCBs already
 * on the txcmplq are deliberately left alone: once the RPI is
 * unregistered the firmware returns them with a "no rpi" error.
 * Always returns 0.
 */
static int
lpfc_no_rpi(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
{
	struct lpfc_sli *psli;
	struct lpfc_sli_ring *pring;
	struct lpfc_iocbq *iocb, *next_iocb;
	IOCB_t *icmd;
	uint32_t rpi, i;

	psli = &phba->sli;
	rpi = ndlp->nlp_rpi;
	/* Nothing to do for a node that never registered an RPI */
	if (rpi) {
		/* Now process each ring */
		for (i = 0; i < psli->sliinit.num_rings; i++) {
			pring = &psli->ring[i];

			/* Safe iteration: entries may be deleted in-loop */
			list_for_each_entry_safe(iocb, next_iocb, &pring->txq,
						 list) {
				/*
				 * Check to see if iocb matches the nport we are
				 * looking for
				 */
				if ((lpfc_check_sli_ndlp
				     (phba, pring, iocb, ndlp))) {
					/* It matches, so deque and call compl
					   with an error */
					list_del(&iocb->list);
					pring->txq_cnt--;
					if (iocb->iocb_cmpl) {
						/* Complete back to the caller
						 * with an abort status */
						icmd = &iocb->iocb;
						icmd->ulpStatus =
						    IOSTAT_LOCAL_REJECT;
						icmd->un.ulpWord[4] =
						    IOERR_SLI_ABORTED;
						(iocb->iocb_cmpl) (phba,
								   iocb, iocb);
					} else {
						/* No completion handler: just
						 * release the IOCB */
						mempool_free(iocb,
						     phba->iocb_mem_pool);
					}
				}
			}
			/* Everything that matches on txcmplq will be returned
			 * by firmware with a no rpi error.
			 */
		}
	}
	return (0);
}
+
+/*
+ * Free rpi associated with LPFC_NODELIST entry.
+ * This routine is called from lpfc_freenode(), when we are removing
+ * a LPFC_NODELIST entry. It is also called if the driver initiates a
+ * LOGO that completes successfully, and we are waiting to PLOGI back
+ * to the remote NPort. In addition, it is called after we receive
+ * and unsolicated ELS cmd, send back a rsp, the rsp completes and
+ * we are waiting to PLOGI back to the remote NPort.
+ */
+int
+lpfc_unreg_rpi(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
+{
+ LPFC_MBOXQ_t *mbox;
+
+ if (ndlp->nlp_rpi) {
+ if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC))) {
+ lpfc_unreg_login(phba, ndlp->nlp_rpi, mbox);
+ if (lpfc_sli_issue_mbox
+ (phba, mbox, (MBX_NOWAIT | MBX_STOP_IOCB))
+ == MBX_NOT_FINISHED) {
+ mempool_free( mbox, phba->mbox_mem_pool);
+ }
+ }
+ lpfc_findnode_remove_rpi(phba, ndlp->nlp_rpi);
+ lpfc_no_rpi(phba, ndlp);
+ ndlp->nlp_rpi = 0;
+ lpfc_set_failmask(phba, ndlp, LPFC_DEV_DISCONNECTED,
+ LPFC_SET_BITMASK);
+ return 1;
+ }
+ return 0;
+}
+
/*
 * Free resources associated with LPFC_NODELIST entry
 * so it can be freed.
 *
 * Dequeues the node from its driver list, cancels its nodev and
 * delayed-ELS timers, unregisters its RPI, and detaches it from any
 * SCSI target that points back at it.  Always returns 0; the caller
 * (lpfc_nlp_remove) frees the node memory itself.
 */
static int
lpfc_freenode(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
{
	struct lpfc_target *targetp;
	struct lpfc_sli *psli;
	int scsid;

	/* The psli variable gets rid of the long pointer deference. */
	psli = &phba->sli;

	/* Cleanup node for NPort <nlp_DID> */
	lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
			"%d:0900 Cleanup node for NPort x%x "
			"Data: x%x x%x x%x\n",
			phba->brd_no, ndlp->nlp_DID, ndlp->nlp_flag,
			ndlp->nlp_state, ndlp->nlp_rpi);

	/* Dequeue only; the node is freed by our caller */
	lpfc_nlp_list(phba, ndlp, NLP_JUST_DQ);

	/* Stop the nodev timer if it is armed */
	if (ndlp->nlp_flag & NLP_NODEV_TMO) {
		del_timer_sync(&ndlp->nlp_tmofunc);
	}
	ndlp->nlp_flag &= ~NLP_NODEV_TMO;

	/* Stop any pending delayed-ELS retry timer */
	if (ndlp->nlp_flag & NLP_DELAY_TMO) {
		del_timer_sync(&ndlp->nlp_delayfunc);
	}
	ndlp->nlp_flag &= ~NLP_DELAY_TMO;

	lpfc_unreg_rpi(phba, ndlp);

	/* Break the node <-> SCSI target association, if any */
	for (scsid = 0; scsid < MAX_FCP_TARGET; scsid++) {
		targetp = phba->device_queue_hash[scsid];
		/* First see if the SCSI ID has an allocated struct
		   lpfc_target */
		if (targetp) {
			if (targetp->pnode == ndlp) {
				targetp->pnode = NULL;
				ndlp->nlp_Target = NULL;
#ifdef FC_TRANS_VER1
				/*
				 * This code does not apply to SLES9 since there
				 * is no starget defined in the midlayer.
				 * Additionally, dynamic target discovery to the
				 * midlayer is not supported yet.
				 */
				if (targetp->starget) {
					/* Remove SCSI target / SCSI Hotplug */
					lpfc_target_remove(phba, targetp);
				}
#endif
				/* A node maps to at most one target */
				break;
			}
		}
	}
	return (0);
}
+
+/*
+ * Check to see if we can free the nlp back to the freelist.
+ * If we are in the middle of using the nlp in the discovery state
+ * machine, defer the free till we reach the end of the state machine.
+ */
+int
+lpfc_nlp_remove(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
+{
+ if(ndlp->nlp_flag & NLP_NODEV_TMO) {
+ del_timer_sync(&ndlp->nlp_tmofunc);
+ }
+ ndlp->nlp_flag &= ~NLP_NODEV_TMO;
+
+ if(ndlp->nlp_flag & NLP_DELAY_TMO) {
+ del_timer_sync(&ndlp->nlp_delayfunc);
+ }
+ ndlp->nlp_flag &= ~NLP_DELAY_TMO;
+
+ if (ndlp->nlp_disc_refcnt) {
+ ndlp->nlp_flag |= NLP_DELAY_REMOVE;
+ }
+ else {
+ lpfc_freenode(phba, ndlp);
+ mempool_free( ndlp, phba->nlp_mem_pool);
+ }
+ return(0);
+}
+
/*
 * Check whether @did refers to the node @ndlp.
 *
 * Besides a direct DID match, this accepts "wildcard" matches where one
 * side's area and domain fields are zero (private-loop style addressing)
 * provided the AL_PA (id) fields agree and the non-zero side matches our
 * own domain/area.  Returns 1 on match, 0 otherwise.
 */
static int
lpfc_matchdid(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, uint32_t did)
{
	D_ID mydid;
	D_ID ndlpdid;
	D_ID matchdid;

	/* Broadcast DID never matches a node */
	if (did == Bcast_DID)
		return (0);

	/* A node with no DID assigned cannot match */
	if (ndlp->nlp_DID == 0) {
		return (0);
	}

	/* First check for Direct match */
	if (ndlp->nlp_DID == did)
		return (1);

	/* Next check for area/domain identically equals 0 match */
	mydid.un.word = phba->fc_myDID;
	/* Wildcard matching only applies when we ourselves have a real
	 * domain/area (i.e. we are on a fabric, not a private loop). */
	if ((mydid.un.b.domain == 0) && (mydid.un.b.area == 0)) {
		return (0);
	}

	matchdid.un.word = did;
	ndlpdid.un.word = ndlp->nlp_DID;
	if (matchdid.un.b.id == ndlpdid.un.b.id) {
		/* Target DID carries our domain/area; node DID may be the
		 * zero-domain/zero-area form of the same address. */
		if ((mydid.un.b.domain == matchdid.un.b.domain) &&
		    (mydid.un.b.area == matchdid.un.b.area)) {
			if ((ndlpdid.un.b.domain == 0) &&
			    (ndlpdid.un.b.area == 0)) {
				if (ndlpdid.un.b.id)
					return (1);
			}
			return (0);
		}

		/* Symmetric case: node DID carries our domain/area and the
		 * target DID is the zero-domain/zero-area form. */
		matchdid.un.word = ndlp->nlp_DID;
		if ((mydid.un.b.domain == ndlpdid.un.b.domain) &&
		    (mydid.un.b.area == ndlpdid.un.b.area)) {
			if ((matchdid.un.b.domain == 0) &&
			    (matchdid.un.b.area == 0)) {
				if (matchdid.un.b.id)
					return (1);
			}
		}
	}
	return (0);
}
+
+/* Search for a nodelist entry on a specific list */
+struct lpfc_nodelist *
+lpfc_findnode_wwpn(struct lpfc_hba * phba, uint32_t order,
+ struct lpfc_name * wwpn)
+{
+ struct lpfc_nodelist *ndlp, *next_ndlp;
+ uint32_t data1;
+
+ if (order & NLP_SEARCH_UNMAPPED) {
+ list_for_each_entry_safe(ndlp, next_ndlp,
+ &phba->fc_nlpunmap_list, nlp_listp) {
+ if (memcmp(&ndlp->nlp_portname, wwpn,
+ sizeof(struct lpfc_name)) == 0) {
+
+ data1 = (((uint32_t) ndlp->nlp_state << 24) |
+ ((uint32_t) ndlp->nlp_xri << 16) |
+ ((uint32_t) ndlp->nlp_type << 8) |
+ ((uint32_t) ndlp->nlp_rpi & 0xff));
+ /* FIND node DID unmapped */
+ lpfc_printf_log(phba,
+ KERN_INFO,
+ LOG_NODE,
+ "%d:0911 FIND node DID unmapped"
+ " Data: x%p x%x x%x x%x\n",
+ phba->brd_no,
+ ndlp, ndlp->nlp_DID,
+ ndlp->nlp_flag, data1);
+ return (ndlp);
+ }
+ }
+ }
+
+ if (order & NLP_SEARCH_MAPPED) {
+ list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_nlpmap_list,
+ nlp_listp) {
+ if (memcmp(&ndlp->nlp_portname, wwpn,
+ sizeof(struct lpfc_name)) == 0) {
+
+ data1 = (((uint32_t) ndlp->nlp_state << 24) |
+ ((uint32_t) ndlp->nlp_xri << 16) |
+ ((uint32_t) ndlp->nlp_type << 8) |
+ ((uint32_t) ndlp->nlp_rpi & 0xff));
+ /* FIND node DID mapped */
+ lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
+ "%d:0901 FIND node DID mapped "
+ "Data: x%p x%x x%x x%x\n",
+ phba->brd_no,
+ ndlp, ndlp->nlp_DID,
+ ndlp->nlp_flag, data1);
+ return (ndlp);
+ }
+ }
+ }
+
+ /* no match found */
+ return ((struct lpfc_nodelist *) 0);
+}
+/* Search for a nodelist entry on a specific list */
+struct lpfc_nodelist *
+lpfc_findnode_wwnn(struct lpfc_hba * phba, uint32_t order,
+ struct lpfc_name * wwnn)
+{
+ struct lpfc_nodelist *ndlp, *next_ndlp;
+ uint32_t data1;
+
+ if (order & NLP_SEARCH_UNMAPPED) {
+ list_for_each_entry_safe(ndlp, next_ndlp,
+ &phba->fc_nlpunmap_list, nlp_listp) {
+ if (memcmp(&ndlp->nlp_nodename, wwnn,
+ sizeof(struct lpfc_name)) == 0) {
+
+ data1 = (((uint32_t) ndlp->nlp_state << 24) |
+ ((uint32_t) ndlp->nlp_xri << 16) |
+ ((uint32_t) ndlp->nlp_type << 8) |
+ ((uint32_t) ndlp->nlp_rpi & 0xff));
+ /* FIND node DID unmapped */
+ lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
+ "%d:0910 FIND node DID unmapped"
+ "Data: x%p x%x x%x x%x\n",
+ phba->brd_no,
+ ndlp, ndlp->nlp_DID,
+ ndlp->nlp_flag, data1);
+ return (ndlp);
+ }
+ }
+ }
+
+ if (order & NLP_SEARCH_MAPPED) {
+ list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_nlpmap_list,
+ nlp_listp) {
+ if (memcmp(&ndlp->nlp_nodename, wwnn,
+ sizeof(struct lpfc_name)) == 0) {
+
+ data1 = (((uint32_t) ndlp->nlp_state << 24) |
+ ((uint32_t) ndlp->nlp_xri << 16) |
+ ((uint32_t) ndlp->nlp_type << 8) |
+ ((uint32_t) ndlp->nlp_rpi & 0xff));
+ /* FIND node did mapped */
+ lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
+ "%d:0902 FIND node DID mapped "
+ "Data: x%p x%x x%x x%x\n",
+ phba->brd_no,
+ ndlp, ndlp->nlp_DID,
+ ndlp->nlp_flag, data1);
+ return (ndlp);
+ }
+ }
+ }
+
+ /* no match found */
+ return ((struct lpfc_nodelist *) 0);
+}
+/* Search for a nodelist entry on a specific list */
+struct lpfc_nodelist *
+lpfc_findnode_did(struct lpfc_hba * phba, uint32_t order, uint32_t did)
+{
+ struct lpfc_nodelist *ndlp, *next_ndlp;
+ uint32_t data1;
+
+ if (order & NLP_SEARCH_UNMAPPED) {
+ list_for_each_entry_safe(ndlp, next_ndlp,
+ &phba->fc_nlpunmap_list, nlp_listp) {
+ if (lpfc_matchdid(phba, ndlp, did)) {
+ data1 = (((uint32_t) ndlp->nlp_state << 24) |
+ ((uint32_t) ndlp->nlp_xri << 16) |
+ ((uint32_t) ndlp->nlp_type << 8) |
+ ((uint32_t) ndlp->nlp_rpi & 0xff));
+ /* FIND node DID unmapped */
+ lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
+ "%d:0929 FIND node DID unmapped"
+ " Data: x%p x%x x%x x%x\n",
+ phba->brd_no,
+ ndlp, ndlp->nlp_DID,
+ ndlp->nlp_flag, data1);
+ return (ndlp);
+ }
+ }
+ }
+
+ if (order & NLP_SEARCH_MAPPED) {
+ list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_nlpmap_list,
+ nlp_listp) {
+ if (lpfc_matchdid(phba, ndlp, did)) {
+
+ data1 = (((uint32_t) ndlp->nlp_state << 24) |
+ ((uint32_t) ndlp->nlp_xri << 16) |
+ ((uint32_t) ndlp->nlp_type << 8) |
+ ((uint32_t) ndlp->nlp_rpi & 0xff));
+ /* FIND node DID mapped */
+ lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
+ "%d:0930 FIND node DID mapped "
+ "Data: x%p x%x x%x x%x\n",
+ phba->brd_no,
+ ndlp, ndlp->nlp_DID,
+ ndlp->nlp_flag, data1);
+ return (ndlp);
+ }
+ }
+ }
+
+ if (order & NLP_SEARCH_PLOGI) {
+ list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_plogi_list,
+ nlp_listp) {
+ if (lpfc_matchdid(phba, ndlp, did)) {
+
+ data1 = (((uint32_t) ndlp->nlp_state << 24) |
+ ((uint32_t) ndlp->nlp_xri << 16) |
+ ((uint32_t) ndlp->nlp_type << 8) |
+ ((uint32_t) ndlp->nlp_rpi & 0xff));
+ /* LOG change to PLOGI */
+ /* FIND node DID plogi */
+ lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
+ "%d:0908 FIND node DID plogi "
+ "Data: x%p x%x x%x x%x\n",
+ phba->brd_no,
+ ndlp, ndlp->nlp_DID,
+ ndlp->nlp_flag, data1);
+ return (ndlp);
+ }
+ }
+ }
+
+ if (order & NLP_SEARCH_ADISC) {
+ list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_adisc_list,
+ nlp_listp) {
+ if (lpfc_matchdid(phba, ndlp, did)) {
+
+ data1 = (((uint32_t) ndlp->nlp_state << 24) |
+ ((uint32_t) ndlp->nlp_xri << 16) |
+ ((uint32_t) ndlp->nlp_type << 8) |
+ ((uint32_t) ndlp->nlp_rpi & 0xff));
+ /* LOG change to ADISC */
+ /* FIND node DID adisc */
+ lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
+ "%d:0931 FIND node DID adisc "
+ "Data: x%p x%x x%x x%x\n",
+ phba->brd_no,
+ ndlp, ndlp->nlp_DID,
+ ndlp->nlp_flag, data1);
+ return (ndlp);
+ }
+ }
+ }
+
+ if (order & NLP_SEARCH_REGLOGIN) {
+ list_for_each_entry_safe(ndlp, next_ndlp,
+ &phba->fc_reglogin_list, nlp_listp) {
+ if (lpfc_matchdid(phba, ndlp, did)) {
+
+ data1 = (((uint32_t) ndlp->nlp_state << 24) |
+ ((uint32_t) ndlp->nlp_xri << 16) |
+ ((uint32_t) ndlp->nlp_type << 8) |
+ ((uint32_t) ndlp->nlp_rpi & 0xff));
+ /* LOG change to REGLOGIN */
+ /* FIND node DID reglogin */
+ lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
+ "%d:0931 FIND node DID reglogin"
+ " Data: x%p x%x x%x x%x\n",
+ phba->brd_no,
+ ndlp, ndlp->nlp_DID,
+ ndlp->nlp_flag, data1);
+ return (ndlp);
+ }
+ }
+ }
+
+ if (order & NLP_SEARCH_PRLI) {
+ list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_prli_list,
+ nlp_listp) {
+ if (lpfc_matchdid(phba, ndlp, did)) {
+
+ data1 = (((uint32_t) ndlp->nlp_state << 24) |
+ ((uint32_t) ndlp->nlp_xri << 16) |
+ ((uint32_t) ndlp->nlp_type << 8) |
+ ((uint32_t) ndlp->nlp_rpi & 0xff));
+ /* LOG change to PRLI */
+ /* FIND node DID prli */
+ lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
+ "%d:0931 FIND node DID prli "
+ "Data: x%p x%x x%x x%x\n",
+ phba->brd_no,
+ ndlp, ndlp->nlp_DID,
+ ndlp->nlp_flag, data1);
+ return (ndlp);
+ }
+ }
+ }
+
+ if (order & NLP_SEARCH_NPR) {
+ list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_npr_list,
+ nlp_listp) {
+ if (lpfc_matchdid(phba, ndlp, did)) {
+
+ data1 = (((uint32_t) ndlp->nlp_state << 24) |
+ ((uint32_t) ndlp->nlp_xri << 16) |
+ ((uint32_t) ndlp->nlp_type << 8) |
+ ((uint32_t) ndlp->nlp_rpi & 0xff));
+ /* LOG change to NPR */
+ /* FIND node DID npr */
+ lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
+ "%d:0931 FIND node DID npr "
+ "Data: x%p x%x x%x x%x\n",
+ phba->brd_no,
+ ndlp, ndlp->nlp_DID,
+ ndlp->nlp_flag, data1);
+ return (ndlp);
+ }
+ }
+ }
+
+ if (order & NLP_SEARCH_UNUSED) {
+ list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_adisc_list,
+ nlp_listp) {
+ if (lpfc_matchdid(phba, ndlp, did)) {
+
+ data1 = (((uint32_t) ndlp->nlp_state << 24) |
+ ((uint32_t) ndlp->nlp_xri << 16) |
+ ((uint32_t) ndlp->nlp_type << 8) |
+ ((uint32_t) ndlp->nlp_rpi & 0xff));
+ /* LOG change to UNUSED */
+ /* FIND node DID unused */
+ lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
+ "%d:0931 FIND node DID unused "
+ "Data: x%p x%x x%x x%x\n",
+ phba->brd_no,
+ ndlp, ndlp->nlp_DID,
+ ndlp->nlp_flag, data1);
+ return (ndlp);
+ }
+ }
+ }
+
+ /* FIND node did <did> NOT FOUND */
+ lpfc_printf_log(phba,
+ KERN_INFO,
+ LOG_NODE,
+ "%d:0932 FIND node did x%x NOT FOUND Data: x%x\n",
+ phba->brd_no, did, order);
+
+ /* no match found */
+ return ((struct lpfc_nodelist *) 0);
+}
+
/*
 * Look up (or allocate) the nodelist entry for @did and decide whether
 * it must be (re)discovered.  Returns the node with NLP_NPR_2B_DISC set
 * when discovery is needed, or NULL when the DID should be skipped
 * (not named in the current RSCN payload, mid-PLOGI/ADISC, or
 * allocation failure).
 */
struct lpfc_nodelist *
lpfc_setup_disc_node(struct lpfc_hba * phba, uint32_t did)
{
	struct lpfc_nodelist *ndlp;
	uint32_t flg;

	if ((ndlp = lpfc_findnode_did(phba, NLP_SEARCH_ALL, did)) == 0) {
		/* Unknown DID: during RSCN handling only DIDs named in
		 * the RSCN payload warrant a new node. */
		if ((phba->hba_state == LPFC_HBA_READY) &&
		    ((lpfc_rscn_payload_check(phba, did) == 0)))
			return NULL;
		ndlp = (struct lpfc_nodelist *)
			mempool_alloc(phba->nlp_mem_pool, GFP_ATOMIC);
		if (!ndlp)
			return NULL;
		/* New node starts on the NPR list, flagged for discovery */
		lpfc_nlp_init(phba, ndlp, did);
		ndlp->nlp_state = NLP_STE_NPR_NODE;
		lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
		return ndlp;
	}
	if ((phba->hba_state == LPFC_HBA_READY) &&
	    (phba->fc_flag & FC_RSCN_MODE)) {
		/* Known node during RSCN: rediscover only if affected */
		if (lpfc_rscn_payload_check(phba, did)) {
			ndlp->nlp_flag |= NLP_NPR_2B_DISC;
		}
		else {
			ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
			ndlp = NULL;
		}
	}
	else {
		flg = ndlp->nlp_flag & NLP_LIST_MASK;
		/* Node already mid-ADISC or mid-PLOGI: leave it alone */
		if ((flg == NLP_ADISC_LIST) ||
		    (flg == NLP_PLOGI_LIST)) {
			return NULL;
		}
		ndlp->nlp_state = NLP_STE_NPR_NODE;
		lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
	}
	return ndlp;
}
+
+/* Build a list of nodes to discover based on the loopmap */
+void
+lpfc_disc_list_loopmap(struct lpfc_hba * phba)
+{
+ int j;
+ uint32_t alpa, index;
+
+ if (phba->hba_state <= LPFC_LINK_DOWN) {
+ return;
+ }
+ if (phba->fc_topology != TOPOLOGY_LOOP) {
+ return;
+ }
+
+ /* Check for loop map present or not */
+ if (phba->alpa_map[0]) {
+ for (j = 1; j <= phba->alpa_map[0]; j++) {
+ alpa = phba->alpa_map[j];
+
+ if (((phba->fc_myDID & 0xff) == alpa) || (alpa == 0)) {
+ continue;
+ }
+ lpfc_setup_disc_node(phba, alpa);
+ }
+ } else {
+ /* No alpamap, so try all alpa's */
+ for (j = 0; j < FC_MAXLOOP; j++) {
+ /* If cfg_scan_down is set, start from highest
+ * ALPA (0xef) to lowest (0x1).
+ */
+ if (phba->cfg_scan_down)
+ index = j;
+ else
+ index = FC_MAXLOOP - j - 1;
+ alpa = lpfcAlpaArray[index];
+ if ((phba->fc_myDID & 0xff) == alpa) {
+ continue;
+ }
+
+ lpfc_setup_disc_node(phba, alpa);
+ }
+ }
+ return;
+}
+
/* Start Link up / RSCN discovery on NPR list.
 *
 * Issues ADISCs first; if none are outstanding and the HBA is not yet
 * ready, issues CLEAR_LA.  Otherwise proceeds to PLOGIs and, in RSCN
 * mode, finishes or continues RSCN processing.  If the port's DID
 * changed since the last discovery, all nodes are forced to PLOGI
 * (ADISC is disallowed).
 */
void
lpfc_disc_start(struct lpfc_hba * phba)
{
	struct lpfc_sli *psli;
	LPFC_MBOXQ_t *mbox;
	struct lpfc_nodelist *ndlp, *next_ndlp;
	uint32_t did_changed, num_sent;
	uint32_t clear_la_pending;

	psli = &phba->sli;

	if (phba->hba_state <= LPFC_LINK_DOWN) {
		return;
	}
	/* Remember whether a CLEAR_LA is already in flight */
	if (phba->hba_state == LPFC_CLEAR_LA)
		clear_la_pending = 1;
	else
		clear_la_pending = 0;

	if (phba->hba_state < LPFC_HBA_READY) {
		phba->hba_state = LPFC_DISC_AUTH;
	}
	/* (Re)arm the discovery rescue timer */
	lpfc_set_disctmo(phba);

	if (phba->fc_prevDID == phba->fc_myDID) {
		did_changed = 0;
	} else {
		did_changed = 1;
	}
	phba->fc_prevDID = phba->fc_myDID;
	phba->num_disc_nodes = 0;

	/* Start Discovery state <hba_state> */
	lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
			"%d:0202 Start Discovery hba state x%x "
			"Data: x%x x%x x%x\n",
			phba->brd_no, phba->hba_state, phba->fc_flag,
			phba->fc_plogi_cnt, phba->fc_adisc_cnt);

	/* If our did changed, we MUST do PLOGI */
	list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_npr_list,
				 nlp_listp) {
		if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
			if (did_changed)
				ndlp->nlp_flag &= ~NLP_NPR_ADISC;
		}
	}

	/* First do ADISCs - if any */
	num_sent = lpfc_els_disc_adisc(phba);

	/* ADISC completions will drive the next discovery step */
	if (num_sent)
		return;

	if ((phba->hba_state < LPFC_HBA_READY) && (!clear_la_pending)) {
		/* If we get here, there is nothing to ADISC */
		if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC))) {
			phba->hba_state = LPFC_CLEAR_LA;
			lpfc_clear_la(phba, mbox);
			mbox->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
			if (lpfc_sli_issue_mbox
			    (phba, mbox, (MBX_NOWAIT | MBX_STOP_IOCB))
			    == MBX_NOT_FINISHED) {
				/* CLEAR_LA could not be issued: flush the
				 * discovery lists and unblock the rings */
				mempool_free(mbox, phba->mbox_mem_pool);
				lpfc_disc_flush_list(phba);
				psli->ring[(psli->ip_ring)].flag &=
				    ~LPFC_STOP_IOCB_EVENT;
				psli->ring[(psli->fcp_ring)].flag &=
				    ~LPFC_STOP_IOCB_EVENT;
				psli->ring[(psli->next_ring)].flag &=
				    ~LPFC_STOP_IOCB_EVENT;
				phba->hba_state = LPFC_HBA_READY;
			}
		}
	} else {
		/* Next do PLOGIs - if any */
		num_sent = lpfc_els_disc_plogi(phba);

		if (num_sent)
			return;

		if (phba->fc_flag & FC_RSCN_MODE) {
			/* Check to see if more RSCNs came in while we
			 * were processing this one.
			 */
			if ((phba->fc_rscn_id_cnt == 0) &&
			    (!(phba->fc_flag & FC_RSCN_DISCOVERY))) {
				lpfc_els_flush_rscn(phba);
			} else {
				lpfc_els_handle_rscn(phba);
			}
		}
	}
	return;
}
+
/*
 * Ignore completion for all IOCBs on the tx and txcmpl queues of the ELS
 * ring that match the specified nodelist entry.
 *
 * IOCBs still on the txq are freed outright; IOCBs already on the
 * txcmplq have their completion handler nulled and their DMA buffers
 * released (the response buffer is parked on phba->freebufList first).
 */
static void
lpfc_free_tx(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
{
	struct lpfc_sli *psli;
	IOCB_t *icmd;
	struct lpfc_iocbq *iocb, *next_iocb;
	struct lpfc_sli_ring *pring;
	struct lpfc_dmabuf *mp;

	psli = &phba->sli;
	pring = &psli->ring[LPFC_ELS_RING];

	/* Error matching iocb on txq or txcmplq
	 * First check the txq.
	 */
	list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
		if (iocb->context1 != ndlp) {
			continue;
		}
		icmd = &iocb->iocb;
		/* Only ELS requests/responses are node-bound */
		if ((icmd->ulpCommand == CMD_ELS_REQUEST64_CR) ||
		    (icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX)) {

			list_del(&iocb->list);
			pring->txq_cnt--;
			lpfc_els_free_iocb(phba, iocb);
		}
	}

	/* Next check the txcmplq */
	list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
		if (iocb->context1 != ndlp) {
			continue;
		}
		icmd = &iocb->iocb;
		if ((icmd->ulpCommand == CMD_ELS_REQUEST64_CR) ||
		    (icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX)) {

			/* Suppress the completion callback */
			iocb->iocb_cmpl = NULL;
			/* context2 = cmd, context2->next = rsp, context3 =
			   bpl */
			if (iocb->context2) {
				/* Free the response IOCB before handling the
				   command. */

				mp = (struct lpfc_dmabuf *)
				    (((struct lpfc_dmabuf *) (iocb->context2))
				     ->list.next);
				if (mp) {
					/* Delay before releasing rsp buffer to
					 * give UNREG mbox a chance to take
					 * effect.
					 */
					list_add(&mp->list,
						 &phba->freebufList);
				}
				/* Now free the command buffer itself */
				lpfc_mbuf_free(phba,
					       ((struct lpfc_dmabuf *)
						iocb->context2)->virt,
					       ((struct lpfc_dmabuf *)
						iocb->context2)->phys);
				kfree(iocb->context2);
			}

			/* Free the buffer pointer list, if present */
			if (iocb->context3) {
				lpfc_mbuf_free(phba,
					       ((struct lpfc_dmabuf *)
						iocb->context3)->virt,
					       ((struct lpfc_dmabuf *)
						iocb->context3)->phys);
				kfree(iocb->context3);
			}
		}
	}

	return;
}
+
+void
+lpfc_disc_flush_list(struct lpfc_hba * phba)
+{
+ struct lpfc_nodelist *ndlp, *next_ndlp;
+
+ if (phba->fc_plogi_cnt) {
+ list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_plogi_list,
+ nlp_listp) {
+ lpfc_set_failmask(phba, ndlp, LPFC_DEV_DISCONNECTED,
+ LPFC_SET_BITMASK);
+ lpfc_free_tx(phba, ndlp);
+ lpfc_nlp_remove(phba, ndlp);
+ }
+ }
+ if (phba->fc_adisc_cnt) {
+ list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_adisc_list,
+ nlp_listp) {
+ lpfc_set_failmask(phba, ndlp, LPFC_DEV_DISCONNECTED,
+ LPFC_SET_BITMASK);
+ lpfc_free_tx(phba, ndlp);
+ lpfc_nlp_remove(phba, ndlp);
+ }
+ }
+ return;
+}
+
/*****************************************************************************/
/*
 * NAME: lpfc_disc_timeout
 *
 * FUNCTION: Fibre Channel driver discovery timeout routine.
 *	Dispatches recovery based on the hba_state the timeout
 *	occurred in (FAN wait, FLOGI, NameServer login/query,
 *	authentication, CLEAR_LA, or RSCN processing).
 *
 * EXECUTION ENVIRONMENT: interrupt only
 *
 * CALLED FROM:
 *      Timer function
 *
 * RETURNS:
 *      none
 */
/*****************************************************************************/
void
lpfc_disc_timeout(unsigned long ptr)
{
	struct lpfc_hba *phba;
	struct lpfc_sli *psli;
	struct lpfc_nodelist *ndlp;
	LPFC_MBOXQ_t *mbox;
	unsigned long iflag;

	phba = (struct lpfc_hba *)ptr;
	if (!phba) {
		return;
	}
	spin_lock_irqsave(phba->host->host_lock, iflag);

	psli = &phba->sli;
	phba->fc_flag &= ~FC_DISC_TMO;

	/* hba_state is identically LPFC_LOCAL_CFG_LINK while waiting for FAN */
	if (phba->hba_state == LPFC_LOCAL_CFG_LINK) {
		/* FAN timeout */
		lpfc_printf_log(phba,
				KERN_WARNING,
				LOG_DISCOVERY,
				"%d:0221 FAN timeout\n",
				phba->brd_no);

		/* Forget about FAN, Start discovery by sending a FLOGI
		 * hba_state is identically LPFC_FLOGI while waiting for FLOGI
		 * cmpl
		 */
		phba->hba_state = LPFC_FLOGI;
		lpfc_set_disctmo(phba);
		lpfc_initial_flogi(phba);
		goto out;
	}

	/* hba_state is identically LPFC_FLOGI while waiting for FLOGI cmpl */
	if (phba->hba_state == LPFC_FLOGI) {
		/* Initial FLOGI timeout */
		lpfc_printf_log(phba,
				KERN_ERR,
				LOG_DISCOVERY,
				"%d:0222 Initial FLOGI timeout\n",
				phba->brd_no);

		/* Assume no Fabric and go on with discovery.
		 * Check for outstanding ELS FLOGI to abort.
		 */

		/* FLOGI failed, so just use loop map to make discovery list */
		lpfc_disc_list_loopmap(phba);

		/* Start discovery */
		lpfc_disc_start(phba);
		goto out;
	}

	/* hba_state is identically LPFC_FABRIC_CFG_LINK while waiting for
	   NameServer login */
	if (phba->hba_state == LPFC_FABRIC_CFG_LINK) {
		/* Timeout while waiting for NameServer login */
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
				"%d:0223 Timeout while waiting for NameServer "
				"login\n", phba->brd_no);

		/* Next look for NameServer ndlp */
		if ((ndlp = lpfc_findnode_did(phba,
					      NLP_SEARCH_ALL, NameServer_DID))) {
			/* Drop the stale NameServer node before retrying */
			lpfc_nlp_remove(phba, ndlp);
		}
		/* Start discovery */
		lpfc_disc_start(phba);
		goto out;
	}

	/* Check for wait for NameServer Rsp timeout */
	if (phba->hba_state == LPFC_NS_QRY) {
		/* NameServer Query timeout */
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
				"%d:0224 NameServer Query timeout "
				"Data: x%x x%x\n",
				phba->brd_no,
				phba->fc_ns_retry, LPFC_MAX_NS_RETRY);

		/* Retry the GID_FT query up to LPFC_MAX_NS_RETRY times */
		if ((ndlp =
		     lpfc_findnode_did(phba, NLP_SEARCH_UNMAPPED,
				       NameServer_DID))) {
			if (phba->fc_ns_retry < LPFC_MAX_NS_RETRY) {
				/* Try it one more time */
				if (lpfc_ns_cmd(phba, ndlp, SLI_CTNS_GID_FT) ==
				    0) {
					goto out;
				}
			}
			phba->fc_ns_retry = 0;
		}

		/* Nothing to authenticate, so CLEAR_LA right now */
		if (phba->hba_state != LPFC_CLEAR_LA) {
			if ((mbox = mempool_alloc(phba->mbox_mem_pool,
						  GFP_ATOMIC))) {
				phba->hba_state = LPFC_CLEAR_LA;
				lpfc_clear_la(phba, mbox);
				mbox->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
				if (lpfc_sli_issue_mbox
				    (phba, mbox, (MBX_NOWAIT | MBX_STOP_IOCB))
				    == MBX_NOT_FINISHED) {
					mempool_free(mbox, phba->mbox_mem_pool);
					/* Jumps into the CLEAR_LA-timeout
					 * recovery block below */
					goto clrlaerr;
				}
			} else {
				/* Device Discovery completion error */
				lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
						"%d:0226 Device Discovery "
						"completion error\n",
						phba->brd_no);
				phba->hba_state = LPFC_HBA_ERROR;
			}
		}
		if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC))) {
			/* Setup and issue mailbox INITIALIZE LINK command */
			lpfc_linkdown(phba);
			lpfc_init_link(phba, mbox,
				       phba->cfg_topology,
				       phba->cfg_link_speed);
			mbox->mb.un.varInitLnk.lipsr_AL_PA = 0;
			if (lpfc_sli_issue_mbox
			    (phba, mbox, (MBX_NOWAIT | MBX_STOP_IOCB))
			    == MBX_NOT_FINISHED) {
				mempool_free( mbox, phba->mbox_mem_pool);
			}
		}
		goto out;
	}

	if (phba->hba_state == LPFC_DISC_AUTH) {
		/* Node Authentication timeout */
		lpfc_printf_log(phba,
				KERN_ERR,
				LOG_DISCOVERY,
				"%d:0227 Node Authentication timeout\n",
				phba->brd_no);
		/* Give up on outstanding discovery and CLEAR_LA */
		lpfc_disc_flush_list(phba);
		if (phba->hba_state != LPFC_CLEAR_LA) {
			if ((mbox = mempool_alloc(phba->mbox_mem_pool,
						  GFP_ATOMIC))) {
				phba->hba_state = LPFC_CLEAR_LA;
				lpfc_clear_la(phba, mbox);
				mbox->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
				if (lpfc_sli_issue_mbox
				    (phba, mbox, (MBX_NOWAIT | MBX_STOP_IOCB))
				    == MBX_NOT_FINISHED) {
					mempool_free(mbox, phba->mbox_mem_pool);
					goto clrlaerr;
				}
			}
		}
		goto out;
	}

	if (phba->hba_state == LPFC_CLEAR_LA) {
		/* CLEAR LA timeout */
		lpfc_printf_log(phba,
				KERN_ERR,
				LOG_DISCOVERY,
				"%d:0228 CLEAR LA timeout\n",
				phba->brd_no);
clrlaerr:
		/* Common recovery for a failed/timed-out CLEAR_LA: flush
		 * discovery, unblock all rings, declare the HBA ready. */
		lpfc_disc_flush_list(phba);
		psli->ring[(psli->ip_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
		psli->ring[(psli->fcp_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
		psli->ring[(psli->next_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
		phba->hba_state = LPFC_HBA_READY;
		goto out;
	}

	if ((phba->hba_state == LPFC_HBA_READY) &&
	    (phba->fc_flag & FC_RSCN_MODE)) {
		/* RSCN timeout */
		lpfc_printf_log(phba,
				KERN_ERR,
				LOG_DISCOVERY,
				"%d:0231 RSCN timeout Data: x%x x%x\n",
				phba->brd_no,
				phba->fc_ns_retry, LPFC_MAX_NS_RETRY);

		/* Cleanup any outstanding ELS commands */
		lpfc_els_flush_cmd(phba);

		lpfc_els_flush_rscn(phba);
		lpfc_disc_flush_list(phba);
		goto out;
	}

out:
	spin_unlock_irqrestore(phba->host->host_lock, iflag);
	return;
}
+
+/*****************************************************************************/
+/*
+ * NAME: lpfc_scan_timeout
+ *
+ * FUNCTION: Fibre Channel driver scsi_scan_host timeout routine.
+ *
+ * EXECUTION ENVIRONMENT: interrupt only
+ *
+ * CALLED FROM:
+ * Timer function
+ *
+ * RETURNS:
+ * none
+ */
+/*****************************************************************************/
+void
+lpfc_scan_timeout(unsigned long ptr)
+{
+	struct lpfc_hba *phba = (struct lpfc_hba *) ptr;
+	unsigned long iflag;
+
+	if (!phba)
+		return;
+
+	/* Clear the scan-timeout flag and hand the scan event off to the
+	 * discovery tasklet; the host lock serializes fc_flag updates.
+	 */
+	spin_lock_irqsave(phba->host->host_lock, iflag);
+	phba->fc_flag &= ~FC_SCSI_SCAN_TMO;
+	lpfc_discq_post_event(phba, NULL, NULL, LPFC_EVT_SCAN);
+	spin_unlock_irqrestore(phba->host->host_lock, iflag);
+}
+
+static void
+lpfc_nodev_timeout(unsigned long ptr)
+{
+	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) ptr;
+	struct lpfc_hba *phba = ndlp->nlp_phba;
+	unsigned long iflag;
+
+	/* Timer callback for a node's nodev timer.  All nodev timeouts are
+	 * posted to the discovery tasklet; actual handling happens there.
+	 */
+	spin_lock_irqsave(phba->host->host_lock, iflag);
+	lpfc_discq_post_event(phba, ndlp, NULL, LPFC_EVT_NODEV_TMO);
+	spin_unlock_irqrestore(phba->host->host_lock, iflag);
+}
+
+/*****************************************************************************/
+/*
+ * NAME: lpfc_find_target
+ *
+ * FUNCTION: Fibre Channel bus/target/LUN to struct lpfc_target lookup
+ *
+ * EXECUTION ENVIRONMENT:
+ *
+ * RETURNS:
+ * ptr to desired struct lpfc_target
+ */
+/*****************************************************************************/
+struct lpfc_target *
+lpfc_find_target(struct lpfc_hba * phba, uint32_t tgt,
+	struct lpfc_nodelist *nlp)
+{
+	struct lpfc_target *targetp;
+
+	/* No node supplied: search the mapped node list for this target ID */
+	if (!nlp) {
+		list_for_each_entry(nlp, &phba->fc_nlpmap_list, nlp_listp) {
+			if (tgt == nlp->nlp_sid)
+				break;
+		}
+
+		/* Loop ran off the end of the list without a match */
+		if (&(nlp->nlp_listp) == &(phba->fc_nlpmap_list))
+			return NULL;
+	}
+
+	targetp = phba->device_queue_hash[tgt];
+
+	/* First see if the SCSI ID has an allocated struct lpfc_target */
+	if (!targetp) {
+		targetp = kmalloc(sizeof (struct lpfc_target), GFP_ATOMIC);
+		if (!targetp)
+			return NULL;
+
+		memset(targetp, 0, sizeof (struct lpfc_target));
+		phba->device_queue_hash[tgt] = targetp;
+		targetp->scsi_id = tgt;
+
+		/* Create SCSI Target <tgt> */
+		lpfc_printf_log(phba,
+				KERN_INFO,
+				LOG_DISCOVERY | LOG_FCP,
+				"%d:0204 Create SCSI Target %d\n",
+				phba->brd_no, tgt);
+	}
+
+	/* Link the node to its target once, up front, so the link is in
+	 * place before any hotplug/list updates below.  (The original code
+	 * redundantly assigned nlp_Target both inside the pnode==NULL
+	 * branch and again at the end.)
+	 */
+	nlp->nlp_Target = targetp;
+
+	if (targetp->pnode == NULL) {
+		targetp->pnode = nlp;
+#ifdef FC_TRANS_VER1
+		/*
+		 * This code does not apply to SLES9 since there is no
+		 * starget defined in the midlayer. Additionally,
+		 * dynamic target discovery to the midlayer is not
+		 * supported yet.
+		 */
+		if (!(phba->fc_flag & FC_LOADING)) {
+			/* Add SCSI target / SCSI Hotplug if called
+			 * after initial driver load.
+			 */
+			lpfc_target_add(phba, targetp);
+		}
+#endif
+	} else if (targetp->pnode != nlp) {
+		/*
+		 * Get rid of the old nlp before updating
+		 * targetp with the new one.
+		 */
+		lpfc_nlp_list(phba, targetp->pnode, NLP_NO_LIST);
+		targetp->pnode = nlp;
+	}
+
+	return targetp;
+}
+
+/*
+ * lpfc_set_failmask
+ * Set, or clear, failMask bits in struct lpfc_nodelist
+ */
+void
+lpfc_set_failmask(struct lpfc_hba * phba,
+		  struct lpfc_nodelist * ndlp, uint32_t bitmask, uint32_t flag)
+{
+	/* Failmask change on NPort <nlp_DID>.  Logged before the update so
+	 * the message shows the pre-change mask alongside the request.
+	 */
+	lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
+			"%d:0208 Failmask change on NPort x%x "
+			"Data: x%x x%x x%x\n",
+			phba->brd_no,
+			ndlp->nlp_DID, ndlp->nlp_failMask, bitmask, flag);
+
+	/* The original code also tracked whether the mask actually changed
+	 * in local variables (oldmask/changed) that were never read; that
+	 * dead code has been removed.
+	 */
+	if (flag == LPFC_SET_BITMASK) {
+		/* Set failMask event */
+		ndlp->nlp_failMask |= bitmask;
+	} else {
+		/* Clear failMask event */
+		ndlp->nlp_failMask &= ~bitmask;
+	}
+}
+
+/*
+ * This routine handles processing a NameServer REG_LOGIN mailbox
+ * command upon completion. It is setup in the LPFC_MBOXQ
+ * as the completion routine when the command is
+ * handed off to the SLI layer.
+ */
+void
+lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
+{
+	/* Completion handler for the FDMI REG_LOGIN mailbox command.
+	 * (The unused local 'psli' from the original has been removed.)
+	 */
+	MAILBOX_t *mb = &pmb->mb;
+	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
+	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
+
+	/* The mailbox was populated by the HBA.  Flush it to main store for
+	 * the driver.  Note that all context buffers are from the driver's
+	 * dma pool and have length LPFC_BPL_SIZE.
+	 */
+	pci_dma_sync_single_for_cpu(phba->pcidev, mp->phys, LPFC_BPL_SIZE,
+				    PCI_DMA_FROMDEVICE);
+
+	pmb->context1 = NULL;
+
+	/* Rehash the node under the RPI reported by the mailbox, dropping
+	 * any stale hash entry for a previous RPI first.
+	 */
+	if (ndlp->nlp_rpi != 0)
+		lpfc_findnode_remove_rpi(phba, ndlp->nlp_rpi);
+	ndlp->nlp_rpi = mb->un.varWords[0];
+	lpfc_addnode_rpi(phba, ndlp, ndlp->nlp_rpi);
+	ndlp->nlp_type |= NLP_FABRIC;
+	ndlp->nlp_state = NLP_STE_UNMAPPED_NODE;
+	lpfc_nlp_list(phba, ndlp, NLP_UNMAPPED_LIST);
+
+	/* Start issuing Fabric-Device Management Interface (FDMI)
+	 * command to 0xfffffa (FDMI well known port)
+	 */
+	if (phba->cfg_fdmi_on == 1) {
+		lpfc_fdmi_cmd(phba, ndlp, SLI_MGMT_DHBA);
+	} else {
+		/*
+		 * Delay issuing FDMI command if fdmi-on=2
+		 * (supporting RPA/hostname)
+		 */
+		mod_timer(&phba->fc_fdmitmo, jiffies + HZ * 60);
+	}
+
+	/* Release the mailbox DMA buffer and the mailbox itself */
+	lpfc_mbuf_free(phba, mp->virt, mp->phys);
+	kfree(mp);
+	mempool_free(pmb, phba->mbox_mem_pool);
+}
+
+/*
+ * This routine looks up the ndlp hash
+ * table for the given RPI. If rpi found
+ * it return the node list pointer
+ * else return 0.
+ */
+struct lpfc_nodelist *
+lpfc_findnode_rpi(struct lpfc_hba * phba, uint16_t rpi)
+{
+	struct lpfc_nodelist *ndlp;
+
+	/* Walk the RPI hash chain until a matching entry (or NULL) */
+	for (ndlp = phba->fc_nlplookup[LPFC_RPI_HASH_FUNC(rpi)];
+	     ndlp != NULL && ndlp->nlp_rpi != rpi;
+	     ndlp = ndlp->nlp_rpi_hash_next)
+		;
+	return ndlp;
+}
+
+/*
+ * This routine looks up the ndlp hash table for the
+ * given RPI. If rpi found it return the node list
+ * pointer else return 0 after deleting the entry
+ * from hash table.
+ */
+struct lpfc_nodelist *
+lpfc_findnode_remove_rpi(struct lpfc_hba * phba, uint16_t rpi)
+{
+	/* (Fixed a stray double semicolon in the declaration below and
+	 * switched pointer comparisons from 0 to NULL.)
+	 */
+	struct lpfc_nodelist *ret, *temp;
+
+	ret = phba->fc_nlplookup[LPFC_RPI_HASH_FUNC(rpi)];
+	if (ret == NULL)
+		return NULL;
+
+	/* Match at the head of the chain: advance the bucket pointer */
+	if (ret->nlp_rpi == rpi) {
+		phba->fc_nlplookup[LPFC_RPI_HASH_FUNC(rpi)] =
+		    ret->nlp_rpi_hash_next;
+		ret->nlp_rpi_hash_next = NULL;
+		return ret;
+	}
+
+	/* Walk the chain, stopping on the node BEFORE a match */
+	while ((ret->nlp_rpi_hash_next != NULL) &&
+	       (ret->nlp_rpi_hash_next->nlp_rpi != rpi)) {
+		ret = ret->nlp_rpi_hash_next;
+	}
+
+	if (ret->nlp_rpi_hash_next != NULL) {
+		/* Unlink the matching node and return it */
+		temp = ret->nlp_rpi_hash_next;
+		ret->nlp_rpi_hash_next = temp->nlp_rpi_hash_next;
+		temp->nlp_rpi_hash_next = NULL;
+		return temp;
+	}
+
+	/* No entry with this RPI in the chain */
+	return NULL;
+}
+
+/*
+ * This routine adds the node list entry to the
+ * ndlp hash table.
+ */
+void
+lpfc_addnode_rpi(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
+		 uint16_t rpi)
+{
+	uint32_t bucket = LPFC_RPI_HASH_FUNC(rpi);
+
+	/* Push the node onto the front of its RPI hash chain */
+	ndlp->nlp_rpi_hash_next = phba->fc_nlplookup[bucket];
+	phba->fc_nlplookup[bucket] = ndlp;
+}
+
+void
+lpfc_nlp_init(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
+	      uint32_t did)
+{
+	/* Start from a clean node and record its identity */
+	memset(ndlp, 0, sizeof (struct lpfc_nodelist));
+	ndlp->nlp_DID = did;
+	ndlp->nlp_phba = phba;
+
+	/* Set up (but do not start) the nodev timer */
+	init_timer(&ndlp->nlp_tmofunc);
+	ndlp->nlp_tmofunc.function = lpfc_nodev_timeout;
+	ndlp->nlp_tmofunc.data = (unsigned long) ndlp;
+
+	/* Set up (but do not start) the ELS retry-delay timer */
+	init_timer(&ndlp->nlp_delayfunc);
+	ndlp->nlp_delayfunc.function = lpfc_els_retry_delay;
+	ndlp->nlp_delayfunc.data = (unsigned long) ndlp;
+}
+
--- /dev/null
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for *
+ * Enterprise Fibre Channel Host Bus Adapters. *
+ * Refer to the README file included with this package for *
+ * driver version and adapter support. *
+ * Copyright (C) 2004 Emulex Corporation. *
+ * www.emulex.com *
+ * *
+ * This program is free software; you can redistribute it and/or *
+ * modify it under the terms of the GNU General Public License *
+ * as published by the Free Software Foundation; either version 2 *
+ * of the License, or (at your option) any later version. *
+ * *
+ * This program is distributed in the hope that it will be useful, *
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of *
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
+ * GNU General Public License for more details, a copy of which *
+ * can be found in the file COPYING included with this package. *
+ *******************************************************************/
+
+/*
+ * $Id: lpfc_hw.h 1.29 2004/11/18 17:28:05EST sf_support Exp $
+ */
+
+#ifndef _H_LPFC_HW
+#define _H_LPFC_HW
+
+/* Well-known fibre channel destination IDs (D_IDs) and address masks */
+#define FDMI_DID ((uint32_t)0xfffffa)
+#define NameServer_DID ((uint32_t)0xfffffc)
+#define SCR_DID ((uint32_t)0xfffffd)
+#define Fabric_DID ((uint32_t)0xfffffe)
+#define Bcast_DID ((uint32_t)0xffffff)
+#define Mask_DID ((uint32_t)0xffffff)
+#define CT_DID_MASK ((uint32_t)0xffff00)
+#define Fabric_DID_MASK ((uint32_t)0xfff000)
+#define WELL_KNOWN_DID_MASK ((uint32_t)0xfffff0)
+
+/* Fixed address assignments used in point-to-point topology */
+#define PT2PT_LocalID ((uint32_t)1)
+#define PT2PT_RemoteID ((uint32_t)2)
+
+/* Default fibre channel timeout values */
+#define FF_DEF_EDTOV 2000 /* Default E_D_TOV (2000ms) */
+#define FF_DEF_ALTOV 15 /* Default AL_TIME (15ms) */
+#define FF_DEF_RATOV 2 /* Default RA_TOV (2s) */
+#define FF_DEF_ARBTOV 1900 /* Default ARB_TOV (1900ms) */
+
+#define LPFC_BUF_RING0 64 /* Number of buffers to post to RING
+ 0 */
+
+#define FCELSSIZE 1024 /* maximum ELS transfer size */
+
+/* SLI ring numbers (comments corrected to match the values) */
+#define LPFC_FCP_RING 0 /* ring 0 for FCP initiator commands */
+#define LPFC_IP_RING 1 /* ring 1 for IP commands */
+#define LPFC_ELS_RING 2 /* ring 2 for ELS commands */
+#define LPFC_FCP_NEXT_RING 3
+
+/* Per-ring IOCB entry counts for the SLI-2 ring layout */
+#define SLI2_IOCB_CMD_R0_ENTRIES 172 /* SLI-2 FCP command ring entries */
+#define SLI2_IOCB_RSP_R0_ENTRIES 134 /* SLI-2 FCP response ring entries */
+#define SLI2_IOCB_CMD_R1_ENTRIES 4 /* SLI-2 IP command ring entries */
+#define SLI2_IOCB_RSP_R1_ENTRIES 4 /* SLI-2 IP response ring entries */
+#define SLI2_IOCB_CMD_R1XTRA_ENTRIES 36 /* SLI-2 extra FCP cmd ring entries */
+#define SLI2_IOCB_RSP_R1XTRA_ENTRIES 52 /* SLI-2 extra FCP rsp ring entries */
+#define SLI2_IOCB_CMD_R2_ENTRIES 20 /* SLI-2 ELS command ring entries */
+#define SLI2_IOCB_RSP_R2_ENTRIES 20 /* SLI-2 ELS response ring entries */
+#define SLI2_IOCB_CMD_R3_ENTRIES 0
+#define SLI2_IOCB_RSP_R3_ENTRIES 0
+#define SLI2_IOCB_CMD_R3XTRA_ENTRIES 24
+#define SLI2_IOCB_RSP_R3XTRA_ENTRIES 32
+
+/* Common Transport structures and definitions */
+
+/* CT_IU preamble word 0: revision and IN_ID fields (FC-GS) */
+union CtRevisionId {
+ /* Structure is in Big Endian format */
+ struct {
+ uint32_t Revision:8;
+ uint32_t InId:24;
+ } bits;
+ uint32_t word;
+};
+
+/* CT_IU command/response code and maximum residual size word */
+union CtCommandResponse {
+ /* Structure is in Big Endian format */
+ struct {
+ uint32_t CmdRsp:16;
+ uint32_t Size:16;
+ } bits;
+ uint32_t word;
+};
+
+/*
+ * Common Transport (CT) request: the CT_IU preamble followed by a
+ * per-command payload union (Name Server GID/RFT/RNN/RSNN requests).
+ */
+struct lpfc_sli_ct_request {
+ /* Structure is in Big Endian format */
+ union CtRevisionId RevisionId;
+ uint8_t FsType;
+ uint8_t FsSubType;
+ uint8_t Options;
+ uint8_t Rsrvd1;
+ union CtCommandResponse CommandResponse;
+ uint8_t Rsrvd2;
+ uint8_t ReasonCode;
+ uint8_t Explanation;
+ uint8_t VendorUnique;
+
+ union {
+ uint32_t PortID;
+ struct gid {
+ uint8_t PortType; /* for GID_PT requests */
+ uint8_t DomainScope;
+ uint8_t AreaScope;
+ uint8_t Fc4Type; /* for GID_FT requests */
+ } gid;
+ struct rft {
+ uint32_t PortId; /* For RFT_ID requests */
+
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t rsvd0:16;
+ uint32_t rsvd1:7;
+ uint32_t fcpReg:1; /* Type 8 */
+ uint32_t rsvd2:2;
+ uint32_t ipReg:1; /* Type 5 */
+ uint32_t rsvd3:5;
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint32_t rsvd0:16;
+ uint32_t fcpReg:1; /* Type 8 */
+ uint32_t rsvd1:7;
+ uint32_t rsvd3:5;
+ uint32_t ipReg:1; /* Type 5 */
+ uint32_t rsvd2:2;
+#endif
+
+ uint32_t rsvd[7];
+ } rft;
+ struct rnn {
+ uint32_t PortId; /* For RNN_ID requests */
+ uint8_t wwnn[8];
+ } rnn;
+ struct rsnn { /* For RSNN_ID requests */
+ uint8_t wwnn[8];
+ uint8_t len;
+ uint8_t symbname[255];
+ } rsnn;
+ } un;
+};
+
+#define SLI_CT_REVISION 1
+/* On-the-wire request sizes: full structure minus the unused tail of
+ * the payload union for each particular command.
+ */
+#define GID_REQUEST_SZ (sizeof(struct lpfc_sli_ct_request) - 260)
+#define RFT_REQUEST_SZ (sizeof(struct lpfc_sli_ct_request) - 228)
+#define RNN_REQUEST_SZ (sizeof(struct lpfc_sli_ct_request) - 252)
+#define RSNN_REQUEST_SZ (sizeof(struct lpfc_sli_ct_request))
+
+/*
+ * FsType Definitions
+ */
+
+#define SLI_CT_MANAGEMENT_SERVICE 0xFA
+#define SLI_CT_TIME_SERVICE 0xFB
+#define SLI_CT_DIRECTORY_SERVICE 0xFC
+#define SLI_CT_FABRIC_CONTROLLER_SERVICE 0xFD
+
+/*
+ * Directory Service Subtypes
+ */
+
+#define SLI_CT_DIRECTORY_NAME_SERVER 0x02
+
+/*
+ * Response Codes
+ */
+
+#define SLI_CT_RESPONSE_FS_RJT 0x8001
+#define SLI_CT_RESPONSE_FS_ACC 0x8002
+
+/*
+ * Reason Codes
+ */
+
+#define SLI_CT_NO_ADDITIONAL_EXPL 0x0
+#define SLI_CT_INVALID_COMMAND 0x01
+#define SLI_CT_INVALID_VERSION 0x02
+#define SLI_CT_LOGICAL_ERROR 0x03
+#define SLI_CT_INVALID_IU_SIZE 0x04
+#define SLI_CT_LOGICAL_BUSY 0x05
+#define SLI_CT_PROTOCOL_ERROR 0x07
+#define SLI_CT_UNABLE_TO_PERFORM_REQ 0x09
+#define SLI_CT_REQ_NOT_SUPPORTED 0x0b
+#define SLI_CT_HBA_INFO_NOT_REGISTERED 0x10
+#define SLI_CT_MULTIPLE_HBA_ATTR_OF_SAME_TYPE 0x11
+#define SLI_CT_INVALID_HBA_ATTR_BLOCK_LEN 0x12
+#define SLI_CT_HBA_ATTR_NOT_PRESENT 0x13
+#define SLI_CT_PORT_INFO_NOT_REGISTERED 0x20
+#define SLI_CT_MULTIPLE_PORT_ATTR_OF_SAME_TYPE 0x21
+#define SLI_CT_INVALID_PORT_ATTR_BLOCK_LEN 0x22
+#define SLI_CT_VENDOR_UNIQUE 0xff
+
+/*
+ * Name Server SLI_CT_UNABLE_TO_PERFORM_REQ Explanations
+ */
+
+#define SLI_CT_NO_PORT_ID 0x01
+#define SLI_CT_NO_PORT_NAME 0x02
+#define SLI_CT_NO_NODE_NAME 0x03
+#define SLI_CT_NO_CLASS_OF_SERVICE 0x04
+#define SLI_CT_NO_IP_ADDRESS 0x05
+#define SLI_CT_NO_IPA 0x06
+#define SLI_CT_NO_FC4_TYPES 0x07
+#define SLI_CT_NO_SYMBOLIC_PORT_NAME 0x08
+#define SLI_CT_NO_SYMBOLIC_NODE_NAME 0x09
+#define SLI_CT_NO_PORT_TYPE 0x0A
+#define SLI_CT_ACCESS_DENIED 0x10
+#define SLI_CT_INVALID_PORT_ID 0x11
+#define SLI_CT_DATABASE_EMPTY 0x12
+
+/*
+ * Name Server Command Codes
+ */
+
+#define SLI_CTNS_GA_NXT 0x0100
+#define SLI_CTNS_GPN_ID 0x0112
+#define SLI_CTNS_GNN_ID 0x0113
+#define SLI_CTNS_GCS_ID 0x0114
+#define SLI_CTNS_GFT_ID 0x0117
+#define SLI_CTNS_GSPN_ID 0x0118
+#define SLI_CTNS_GPT_ID 0x011A
+#define SLI_CTNS_GID_PN 0x0121
+#define SLI_CTNS_GID_NN 0x0131
+#define SLI_CTNS_GIP_NN 0x0135
+#define SLI_CTNS_GIPA_NN 0x0136
+#define SLI_CTNS_GSNN_NN 0x0139
+#define SLI_CTNS_GNN_IP 0x0153
+#define SLI_CTNS_GIPA_IP 0x0156
+#define SLI_CTNS_GID_FT 0x0171
+#define SLI_CTNS_GID_PT 0x01A1
+#define SLI_CTNS_RPN_ID 0x0212
+#define SLI_CTNS_RNN_ID 0x0213
+#define SLI_CTNS_RCS_ID 0x0214
+#define SLI_CTNS_RFT_ID 0x0217
+#define SLI_CTNS_RSPN_ID 0x0218
+#define SLI_CTNS_RPT_ID 0x021A
+#define SLI_CTNS_RIP_NN 0x0235
+#define SLI_CTNS_RIPA_NN 0x0236
+#define SLI_CTNS_RSNN_NN 0x0239
+#define SLI_CTNS_DA_ID 0x0300
+
+/*
+ * Port Types
+ */
+
+#define SLI_CTPT_N_PORT 0x01
+#define SLI_CTPT_NL_PORT 0x02
+#define SLI_CTPT_FNL_PORT 0x03
+#define SLI_CTPT_IP 0x04
+#define SLI_CTPT_FCP 0x08
+#define SLI_CTPT_NX_PORT 0x7F
+#define SLI_CTPT_F_PORT 0x81
+#define SLI_CTPT_FL_PORT 0x82
+#define SLI_CTPT_E_PORT 0x84
+
+#define SLI_CT_LAST_ENTRY 0x80000000
+
+/* Fibre Channel Service Parameter definitions */
+
+#define FC_PH_4_0 6 /* FC-PH version 4.0 */
+#define FC_PH_4_1 7 /* FC-PH version 4.1 */
+#define FC_PH_4_2 8 /* FC-PH version 4.2 */
+#define FC_PH_4_3 9 /* FC-PH version 4.3 */
+
+#define FC_PH_LOW 8 /* Lowest supported FC-PH version */
+#define FC_PH_HIGH 9 /* Highest supported FC-PH version */
+#define FC_PH3 0x20 /* FC-PH-3 version */
+
+#define FF_FRAME_SIZE 2048
+
+/* Fibre channel name (WWNN/WWPN): name type nibble plus IEEE address */
+struct lpfc_name {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint8_t nameType:4; /* FC Word 0, bit 28:31 */
+ uint8_t IEEEextMsn:4; /* FC Word 0, bit 24:27, bit 8:11 of IEEE ext */
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint8_t IEEEextMsn:4; /* FC Word 0, bit 24:27, bit 8:11 of IEEE ext */
+ uint8_t nameType:4; /* FC Word 0, bit 28:31 */
+#endif
+
+/* nameType values */
+#define NAME_IEEE 0x1 /* IEEE name - nameType */
+#define NAME_IEEE_EXT 0x2 /* IEEE extended name */
+#define NAME_FC_TYPE 0x3 /* FC native name type */
+#define NAME_IP_TYPE 0x4 /* IP address */
+#define NAME_CCITT_TYPE 0xC
+#define NAME_CCITT_GR_TYPE 0xE
+ uint8_t IEEEextLsb; /* FC Word 0, bit 16:23, IEEE extended Lsb */
+ uint8_t IEEE[6]; /* FC IEEE address */
+};
+
+/*
+ * Common service parameters (FC-PH login payload, FC Words 0-3).
+ * Exchanged on the wire in Big Endian format; the bitfield order is
+ * flipped per host endianness so each field lands on the same wire bit.
+ */
+struct csp {
+ uint8_t fcphHigh; /* FC Word 0, byte 0 */
+ uint8_t fcphLow;
+ uint8_t bbCreditMsb;
+ uint8_t bbCreditlsb; /* FC Word 0, byte 3 */
+
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint16_t increasingOffset:1; /* FC Word 1, bit 31 */
+ uint16_t randomOffset:1; /* FC Word 1, bit 30 */
+ uint16_t word1Reserved2:1; /* FC Word 1, bit 29 */
+ uint16_t fPort:1; /* FC Word 1, bit 28 */
+ uint16_t altBbCredit:1; /* FC Word 1, bit 27 */
+ uint16_t edtovResolution:1; /* FC Word 1, bit 26 */
+ uint16_t multicast:1; /* FC Word 1, bit 25 */
+ uint16_t broadcast:1; /* FC Word 1, bit 24 */
+
+ uint16_t huntgroup:1; /* FC Word 1, bit 23 */
+ uint16_t simplex:1; /* FC Word 1, bit 22 */
+ uint16_t word1Reserved1:3; /* FC Word 1, bit 21:19 */
+ uint16_t dhd:1; /* FC Word 1, bit 18 */
+ uint16_t contIncSeqCnt:1; /* FC Word 1, bit 17 */
+ uint16_t payloadlength:1; /* FC Word 1, bit 16 */
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint16_t broadcast:1; /* FC Word 1, bit 24 */
+ uint16_t multicast:1; /* FC Word 1, bit 25 */
+ uint16_t edtovResolution:1; /* FC Word 1, bit 26 */
+ uint16_t altBbCredit:1; /* FC Word 1, bit 27 */
+ uint16_t fPort:1; /* FC Word 1, bit 28 */
+ uint16_t word1Reserved2:1; /* FC Word 1, bit 29 */
+ uint16_t randomOffset:1; /* FC Word 1, bit 30 */
+ uint16_t increasingOffset:1; /* FC Word 1, bit 31 */
+
+ uint16_t payloadlength:1; /* FC Word 1, bit 16 */
+ uint16_t contIncSeqCnt:1; /* FC Word 1, bit 17 */
+ uint16_t dhd:1; /* FC Word 1, bit 18 */
+ uint16_t word1Reserved1:3; /* FC Word 1, bit 21:19 */
+ uint16_t simplex:1; /* FC Word 1, bit 22 */
+ uint16_t huntgroup:1; /* FC Word 1, bit 23 */
+#endif
+
+ uint8_t bbRcvSizeMsb; /* Upper nibble is reserved */
+ uint8_t bbRcvSizeLsb; /* FC Word 1, byte 3 */
+ union {
+ struct {
+ uint8_t word2Reserved1; /* FC Word 2 byte 0 */
+
+ uint8_t totalConcurrSeq; /* FC Word 2 byte 1 */
+ uint8_t roByCategoryMsb; /* FC Word 2 byte 2 */
+
+ uint8_t roByCategoryLsb; /* FC Word 2 byte 3 */
+ } nPort;
+ uint32_t r_a_tov; /* R_A_TOV must be in B.E. format */
+ } w2;
+
+ uint32_t e_d_tov; /* E_D_TOV must be in B.E. format */
+};
+
+/*
+ * Per-class service parameters (FC-PH login payload, one instance per
+ * class of service in struct serv_parm).  Wire format is Big Endian.
+ */
+struct class_parms {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint8_t classValid:1; /* FC Word 0, bit 31 */
+ uint8_t intermix:1; /* FC Word 0, bit 30 */
+ uint8_t stackedXparent:1; /* FC Word 0, bit 29 */
+ uint8_t stackedLockDown:1; /* FC Word 0, bit 28 */
+ uint8_t seqDelivery:1; /* FC Word 0, bit 27 */
+ uint8_t word0Reserved1:3; /* FC Word 0, bit 24:26 */
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint8_t word0Reserved1:3; /* FC Word 0, bit 24:26 */
+ uint8_t seqDelivery:1; /* FC Word 0, bit 27 */
+ uint8_t stackedLockDown:1; /* FC Word 0, bit 28 */
+ uint8_t stackedXparent:1; /* FC Word 0, bit 29 */
+ uint8_t intermix:1; /* FC Word 0, bit 30 */
+ uint8_t classValid:1; /* FC Word 0, bit 31 */
+
+#endif
+
+ uint8_t word0Reserved2; /* FC Word 0, bit 16:23 */
+
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint8_t iCtlXidReAssgn:2; /* FC Word 0, Bit 14:15 */
+ uint8_t iCtlInitialPa:2; /* FC Word 0, bit 12:13 */
+ uint8_t iCtlAck0capable:1; /* FC Word 0, bit 11 */
+ uint8_t iCtlAckNcapable:1; /* FC Word 0, bit 10 */
+ uint8_t word0Reserved3:2; /* FC Word 0, bit 8: 9 */
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint8_t word0Reserved3:2; /* FC Word 0, bit 8: 9 */
+ uint8_t iCtlAckNcapable:1; /* FC Word 0, bit 10 */
+ uint8_t iCtlAck0capable:1; /* FC Word 0, bit 11 */
+ uint8_t iCtlInitialPa:2; /* FC Word 0, bit 12:13 */
+ uint8_t iCtlXidReAssgn:2; /* FC Word 0, Bit 14:15 */
+#endif
+
+ uint8_t word0Reserved4; /* FC Word 0, bit 0: 7 */
+
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint8_t rCtlAck0capable:1; /* FC Word 1, bit 31 */
+ uint8_t rCtlAckNcapable:1; /* FC Word 1, bit 30 */
+ uint8_t rCtlXidInterlck:1; /* FC Word 1, bit 29 */
+ uint8_t rCtlErrorPolicy:2; /* FC Word 1, bit 27:28 */
+ uint8_t word1Reserved1:1; /* FC Word 1, bit 26 */
+ uint8_t rCtlCatPerSeq:2; /* FC Word 1, bit 24:25 */
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint8_t rCtlCatPerSeq:2; /* FC Word 1, bit 24:25 */
+ uint8_t word1Reserved1:1; /* FC Word 1, bit 26 */
+ uint8_t rCtlErrorPolicy:2; /* FC Word 1, bit 27:28 */
+ uint8_t rCtlXidInterlck:1; /* FC Word 1, bit 29 */
+ uint8_t rCtlAckNcapable:1; /* FC Word 1, bit 30 */
+ uint8_t rCtlAck0capable:1; /* FC Word 1, bit 31 */
+#endif
+
+ uint8_t word1Reserved2; /* FC Word 1, bit 16:23 */
+ uint8_t rcvDataSizeMsb; /* FC Word 1, bit 8:15 */
+ uint8_t rcvDataSizeLsb; /* FC Word 1, bit 0: 7 */
+
+ uint8_t concurrentSeqMsb; /* FC Word 2, bit 24:31 */
+ uint8_t concurrentSeqLsb; /* FC Word 2, bit 16:23 */
+ uint8_t EeCreditSeqMsb; /* FC Word 2, bit 8:15 */
+ uint8_t EeCreditSeqLsb; /* FC Word 2, bit 0: 7 */
+
+ uint8_t openSeqPerXchgMsb; /* FC Word 3, bit 24:31 */
+ uint8_t openSeqPerXchgLsb; /* FC Word 3, bit 16:23 */
+ uint8_t word3Reserved1; /* Fc Word 3, bit 8:15 */
+ uint8_t word3Reserved2; /* Fc Word 3, bit 0: 7 */
+};
+
+/* Complete FLOGI/PLOGI service parameter payload */
+struct serv_parm { /* Structure is in Big Endian format */
+ struct csp cmn;
+ struct lpfc_name portName;
+ struct lpfc_name nodeName;
+ struct class_parms cls1;
+ struct class_parms cls2;
+ struct class_parms cls3;
+ struct class_parms cls4;
+ uint8_t vendorVersion[16];
+};
+
+/*
+ * Extended Link Service LS_COMMAND codes (Payload Word 0)
+ */
+#ifdef __BIG_ENDIAN_BITFIELD
+#define ELS_CMD_MASK 0xffff0000
+#define ELS_RSP_MASK 0xff000000
+#define ELS_CMD_LS_RJT 0x01000000
+#define ELS_CMD_ACC 0x02000000
+#define ELS_CMD_PLOGI 0x03000000
+#define ELS_CMD_FLOGI 0x04000000
+#define ELS_CMD_LOGO 0x05000000
+#define ELS_CMD_ABTX 0x06000000
+#define ELS_CMD_RCS 0x07000000
+#define ELS_CMD_RES 0x08000000
+#define ELS_CMD_RSS 0x09000000
+#define ELS_CMD_RSI 0x0A000000
+#define ELS_CMD_ESTS 0x0B000000
+#define ELS_CMD_ESTC 0x0C000000
+#define ELS_CMD_ADVC 0x0D000000
+#define ELS_CMD_RTV 0x0E000000
+#define ELS_CMD_RLS 0x0F000000
+#define ELS_CMD_ECHO 0x10000000
+#define ELS_CMD_TEST 0x11000000
+#define ELS_CMD_RRQ 0x12000000
+#define ELS_CMD_PRLI 0x20100014
+#define ELS_CMD_PRLO 0x21100014
+#define ELS_CMD_PDISC 0x50000000
+#define ELS_CMD_FDISC 0x51000000
+#define ELS_CMD_ADISC 0x52000000
+#define ELS_CMD_FARP 0x54000000
+#define ELS_CMD_FARPR 0x55000000
+#define ELS_CMD_FAN 0x60000000
+#define ELS_CMD_RSCN 0x61040000
+#define ELS_CMD_SCR 0x62000000
+#define ELS_CMD_RNID 0x78000000
+#else /* __LITTLE_ENDIAN_BITFIELD */
+#define ELS_CMD_MASK 0xffff
+#define ELS_RSP_MASK 0xff
+#define ELS_CMD_LS_RJT 0x01
+#define ELS_CMD_ACC 0x02
+#define ELS_CMD_PLOGI 0x03
+#define ELS_CMD_FLOGI 0x04
+#define ELS_CMD_LOGO 0x05
+#define ELS_CMD_ABTX 0x06
+#define ELS_CMD_RCS 0x07
+#define ELS_CMD_RES 0x08
+#define ELS_CMD_RSS 0x09
+#define ELS_CMD_RSI 0x0A
+#define ELS_CMD_ESTS 0x0B
+#define ELS_CMD_ESTC 0x0C
+#define ELS_CMD_ADVC 0x0D
+#define ELS_CMD_RTV 0x0E
+#define ELS_CMD_RLS 0x0F
+#define ELS_CMD_ECHO 0x10
+#define ELS_CMD_TEST 0x11
+#define ELS_CMD_RRQ 0x12
+#define ELS_CMD_PRLI 0x14001020
+#define ELS_CMD_PRLO 0x14001021
+#define ELS_CMD_PDISC 0x50
+#define ELS_CMD_FDISC 0x51
+#define ELS_CMD_ADISC 0x52
+#define ELS_CMD_FARP 0x54
+#define ELS_CMD_FARPR 0x55
+#define ELS_CMD_FAN 0x60
+#define ELS_CMD_RSCN 0x0461
+#define ELS_CMD_SCR 0x62
+#define ELS_CMD_RNID 0x78
+#endif
+
+/*
+ * LS_RJT Payload Definition
+ */
+
+/* LS_RJT payload: reason code, reason explanation and vendor byte */
+struct ls_rjt { /* Structure is in Big Endian format */
+ union {
+ uint32_t lsRjtError;
+ struct {
+ uint8_t lsRjtRsvd0; /* FC Word 0, bit 24:31 */
+
+ uint8_t lsRjtRsnCode; /* FC Word 0, bit 16:23 */
+ /* LS_RJT reason codes */
+#define LSRJT_INVALID_CMD 0x01
+#define LSRJT_LOGICAL_ERR 0x03
+#define LSRJT_LOGICAL_BSY 0x05
+#define LSRJT_PROTOCOL_ERR 0x07
+#define LSRJT_UNABLE_TPC 0x09 /* Unable to perform command */
+#define LSRJT_CMD_UNSUPPORTED 0x0B
+#define LSRJT_VENDOR_UNIQUE 0xFF /* See Byte 3 */
+
+ uint8_t lsRjtRsnCodeExp; /* FC Word 0, bit 8:15 */
+ /* LS_RJT reason explanation */
+#define LSEXP_NOTHING_MORE 0x00
+#define LSEXP_SPARM_OPTIONS 0x01
+#define LSEXP_SPARM_ICTL 0x03
+#define LSEXP_SPARM_RCTL 0x05
+#define LSEXP_SPARM_RCV_SIZE 0x07
+#define LSEXP_SPARM_CONCUR_SEQ 0x09
+#define LSEXP_SPARM_CREDIT 0x0B
+#define LSEXP_INVALID_PNAME 0x0D
+#define LSEXP_INVALID_NNAME 0x0E
+#define LSEXP_INVALID_CSP 0x0F
+#define LSEXP_INVALID_ASSOC_HDR 0x11
+#define LSEXP_ASSOC_HDR_REQ 0x13
+#define LSEXP_INVALID_O_SID 0x15
+#define LSEXP_INVALID_OX_RX 0x17
+#define LSEXP_CMD_IN_PROGRESS 0x19
+#define LSEXP_INVALID_NPORT_ID 0x1F
+#define LSEXP_INVALID_SEQ_ID 0x21
+#define LSEXP_INVALID_XCHG 0x23
+#define LSEXP_INACTIVE_XCHG 0x25
+#define LSEXP_RQ_REQUIRED 0x27
+#define LSEXP_OUT_OF_RESOURCE 0x29
+#define LSEXP_CANT_GIVE_DATA 0x2A
+#define LSEXP_REQ_UNSUPPORTED 0x2C
+ uint8_t vendorUnique; /* FC Word 0, bit 0: 7 */
+ } b;
+ } un;
+};
+
+/*
+ * N_Port Login (FLOGO/PLOGO Request) Payload Definition
+ */
+
+/* LOGO payload: the departing port's N_Port ID and port name */
+typedef struct _LOGO { /* Structure is in Big Endian format */
+ union {
+ uint32_t nPortId32; /* Access nPortId as a word */
+ struct {
+ uint8_t word1Reserved1; /* FC Word 1, bit 31:24 */
+ uint8_t nPortIdByte0; /* N_port ID bit 16:23 */
+ uint8_t nPortIdByte1; /* N_port ID bit 8:15 */
+ uint8_t nPortIdByte2; /* N_port ID bit 0: 7 */
+ } b;
+ } un;
+ struct lpfc_name portName; /* N_port name field */
+} LOGO;
+
+/*
+ * FCP Login (PRLI Request / ACC) Payload Definition
+ */
+
+#define PRLX_PAGE_LEN 0x10
+#define TPRLO_PAGE_LEN 0x14
+
+/*
+ * PRLI service parameter page (request and ACC share this layout; the
+ * acceptRspCode field is meaningful in the ACC only).  Big Endian on
+ * the wire; bitfield order flips with host endianness.
+ */
+typedef struct _PRLI { /* Structure is in Big Endian format */
+ uint8_t prliType; /* FC Parm Word 0, bit 24:31 */
+
+#define PRLI_FCP_TYPE 0x08
+ uint8_t word0Reserved1; /* FC Parm Word 0, bit 16:23 */
+
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint8_t origProcAssocV:1; /* FC Parm Word 0, bit 15 */
+ uint8_t respProcAssocV:1; /* FC Parm Word 0, bit 14 */
+ uint8_t estabImagePair:1; /* FC Parm Word 0, bit 13 */
+
+ /* ACC = imagePairEstablished */
+ uint8_t word0Reserved2:1; /* FC Parm Word 0, bit 12 */
+ uint8_t acceptRspCode:4; /* FC Parm Word 0, bit 8:11, ACC ONLY */
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint8_t acceptRspCode:4; /* FC Parm Word 0, bit 8:11, ACC ONLY */
+ uint8_t word0Reserved2:1; /* FC Parm Word 0, bit 12 */
+ uint8_t estabImagePair:1; /* FC Parm Word 0, bit 13 */
+ uint8_t respProcAssocV:1; /* FC Parm Word 0, bit 14 */
+ uint8_t origProcAssocV:1; /* FC Parm Word 0, bit 15 */
+ /* ACC = imagePairEstablished */
+#endif
+
+/* acceptRspCode values */
+#define PRLI_REQ_EXECUTED 0x1 /* acceptRspCode */
+#define PRLI_NO_RESOURCES 0x2
+#define PRLI_INIT_INCOMPLETE 0x3
+#define PRLI_NO_SUCH_PA 0x4
+#define PRLI_PREDEF_CONFIG 0x5
+#define PRLI_PARTIAL_SUCCESS 0x6
+#define PRLI_INVALID_PAGE_CNT 0x7
+ uint8_t word0Reserved3; /* FC Parm Word 0, bit 0:7 */
+
+ uint32_t origProcAssoc; /* FC Parm Word 1, bit 0:31 */
+
+ uint32_t respProcAssoc; /* FC Parm Word 2, bit 0:31 */
+
+ uint8_t word3Reserved1; /* FC Parm Word 3, bit 24:31 */
+ uint8_t word3Reserved2; /* FC Parm Word 3, bit 16:23 */
+
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint16_t Word3bit15Resved:1; /* FC Parm Word 3, bit 15 */
+ uint16_t Word3bit14Resved:1; /* FC Parm Word 3, bit 14 */
+ uint16_t Word3bit13Resved:1; /* FC Parm Word 3, bit 13 */
+ uint16_t Word3bit12Resved:1; /* FC Parm Word 3, bit 12 */
+ uint16_t Word3bit11Resved:1; /* FC Parm Word 3, bit 11 */
+ uint16_t Word3bit10Resved:1; /* FC Parm Word 3, bit 10 */
+ uint16_t TaskRetryIdReq:1; /* FC Parm Word 3, bit 9 */
+ uint16_t Retry:1; /* FC Parm Word 3, bit 8 */
+ uint16_t ConfmComplAllowed:1; /* FC Parm Word 3, bit 7 */
+ uint16_t dataOverLay:1; /* FC Parm Word 3, bit 6 */
+ uint16_t initiatorFunc:1; /* FC Parm Word 3, bit 5 */
+ uint16_t targetFunc:1; /* FC Parm Word 3, bit 4 */
+ uint16_t cmdDataMixEna:1; /* FC Parm Word 3, bit 3 */
+ uint16_t dataRspMixEna:1; /* FC Parm Word 3, bit 2 */
+ uint16_t readXferRdyDis:1; /* FC Parm Word 3, bit 1 */
+ uint16_t writeXferRdyDis:1; /* FC Parm Word 3, bit 0 */
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint16_t Retry:1; /* FC Parm Word 3, bit 8 */
+ uint16_t TaskRetryIdReq:1; /* FC Parm Word 3, bit 9 */
+ uint16_t Word3bit10Resved:1; /* FC Parm Word 3, bit 10 */
+ uint16_t Word3bit11Resved:1; /* FC Parm Word 3, bit 11 */
+ uint16_t Word3bit12Resved:1; /* FC Parm Word 3, bit 12 */
+ uint16_t Word3bit13Resved:1; /* FC Parm Word 3, bit 13 */
+ uint16_t Word3bit14Resved:1; /* FC Parm Word 3, bit 14 */
+ uint16_t Word3bit15Resved:1; /* FC Parm Word 3, bit 15 */
+ uint16_t writeXferRdyDis:1; /* FC Parm Word 3, bit 0 */
+ uint16_t readXferRdyDis:1; /* FC Parm Word 3, bit 1 */
+ uint16_t dataRspMixEna:1; /* FC Parm Word 3, bit 2 */
+ uint16_t cmdDataMixEna:1; /* FC Parm Word 3, bit 3 */
+ uint16_t targetFunc:1; /* FC Parm Word 3, bit 4 */
+ uint16_t initiatorFunc:1; /* FC Parm Word 3, bit 5 */
+ uint16_t dataOverLay:1; /* FC Parm Word 3, bit 6 */
+ uint16_t ConfmComplAllowed:1; /* FC Parm Word 3, bit 7 */
+#endif
+} PRLI;
+
+/*
+ * FCP Logout (PRLO Request / ACC) Payload Definition
+ */
+
+/* PRLO service parameter page (request and ACC share this layout) */
+typedef struct _PRLO { /* Structure is in Big Endian format */
+ uint8_t prloType; /* FC Parm Word 0, bit 24:31 */
+
+#define PRLO_FCP_TYPE 0x08
+ uint8_t word0Reserved1; /* FC Parm Word 0, bit 16:23 */
+
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint8_t origProcAssocV:1; /* FC Parm Word 0, bit 15 */
+ uint8_t respProcAssocV:1; /* FC Parm Word 0, bit 14 */
+ uint8_t word0Reserved2:2; /* FC Parm Word 0, bit 12:13 */
+ uint8_t acceptRspCode:4; /* FC Parm Word 0, bit 8:11, ACC ONLY */
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint8_t acceptRspCode:4; /* FC Parm Word 0, bit 8:11, ACC ONLY */
+ uint8_t word0Reserved2:2; /* FC Parm Word 0, bit 12:13 */
+ uint8_t respProcAssocV:1; /* FC Parm Word 0, bit 14 */
+ uint8_t origProcAssocV:1; /* FC Parm Word 0, bit 15 */
+#endif
+
+/* acceptRspCode values */
+#define PRLO_REQ_EXECUTED 0x1 /* acceptRspCode */
+#define PRLO_NO_SUCH_IMAGE 0x4
+#define PRLO_INVALID_PAGE_CNT 0x7
+
+ uint8_t word0Reserved3; /* FC Parm Word 0, bit 0:7 */
+
+ uint32_t origProcAssoc; /* FC Parm Word 1, bit 0:31 */
+
+ uint32_t respProcAssoc; /* FC Parm Word 2, bit 0:31 */
+
+ uint32_t word3Reserved1; /* FC Parm Word 3, bit 0:31 */
+} PRLO;
+
+/* ADISC (address discovery) payload */
+typedef struct _ADISC { /* Structure is in Big Endian format */
+ uint32_t hardAL_PA;
+ struct lpfc_name portName;
+ struct lpfc_name nodeName;
+ uint32_t DID;
+} ADISC;
+
+/* FARP (fibre channel address resolution) request/response payload */
+typedef struct _FARP { /* Structure is in Big Endian format */
+ uint32_t Mflags:8;
+ uint32_t Odid:24;
+/* Mflags: match criteria for the responder */
+#define FARP_NO_ACTION 0 /* FARP information enclosed, no
+ action */
+#define FARP_MATCH_PORT 0x1 /* Match on Responder Port Name */
+#define FARP_MATCH_NODE 0x2 /* Match on Responder Node Name */
+#define FARP_MATCH_IP 0x4 /* Match on IP address, not supported */
+#define FARP_MATCH_IPV4 0x5 /* Match on IPV4 address, not
+ supported */
+#define FARP_MATCH_IPV6 0x6 /* Match on IPV6 address, not
+ supported */
+ uint32_t Rflags:8;
+ uint32_t Rdid:24;
+/* Rflags: action requested of the responder */
+#define FARP_REQUEST_PLOGI 0x1 /* Request for PLOGI */
+#define FARP_REQUEST_FARPR 0x2 /* Request for FARP Response */
+ struct lpfc_name OportName;
+ struct lpfc_name OnodeName;
+ struct lpfc_name RportName;
+ struct lpfc_name RnodeName;
+ uint8_t Oipaddr[16];
+ uint8_t Ripaddr[16];
+} FARP;
+
+/* FAN (fabric address notification) payload */
+typedef struct _FAN { /* Structure is in Big Endian format */
+ uint32_t Fdid;
+ struct lpfc_name FportName;
+ struct lpfc_name FnodeName;
+} FAN;
+
+typedef struct _SCR { /* Structure is in Big Endian format */
+ uint8_t resvd1;
+ uint8_t resvd2;
+ uint8_t resvd3;
+ uint8_t Function;
+#define SCR_FUNC_FABRIC 0x01
+#define SCR_FUNC_NPORT 0x02
+#define SCR_FUNC_FULL 0x03
+#define SCR_CLEAR 0xff
+} SCR;
+
+typedef struct _RNID_TOP_DISC {
+ struct lpfc_name portName;
+ uint8_t resvd[8];
+ uint32_t unitType;
+#define RNID_HBA 0x7
+#define RNID_HOST 0xa
+#define RNID_DRIVER 0xd
+ uint32_t physPort;
+ uint32_t attachedNodes;
+ uint16_t ipVersion;
+#define RNID_IPV4 0x1
+#define RNID_IPV6 0x2
+ uint16_t UDPport;
+ uint8_t ipAddr[16];
+ uint16_t resvd1;
+ uint16_t flags;
+#define RNID_TD_SUPPORT 0x1
+#define RNID_LP_VALID 0x2
+} RNID_TOP_DISC;
+
+typedef struct _RNID { /* Structure is in Big Endian format */
+ uint8_t Format;
+#define RNID_TOPOLOGY_DISC 0xdf
+ uint8_t CommonLen;
+ uint8_t resvd1;
+ uint8_t SpecificLen;
+ struct lpfc_name portName;
+ struct lpfc_name nodeName;
+ union {
+ RNID_TOP_DISC topologyDisc; /* topology disc (0xdf) */
+ } un;
+} RNID;
+
+typedef struct _RRQ { /* Structure is in Big Endian format */
+ uint32_t SID;
+ uint16_t Oxid;
+ uint16_t Rxid;
+ uint8_t resv[32]; /* optional association hdr */
+} RRQ;
+
+/* This is used for RSCN command */
+typedef struct _D_ID { /* Structure is in Big Endian format */
+ union {
+ uint32_t word;
+ struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint8_t resv;
+ uint8_t domain;
+ uint8_t area;
+ uint8_t id;
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint8_t id;
+ uint8_t area;
+ uint8_t domain;
+ uint8_t resv;
+#endif
+ } b;
+ } un;
+} D_ID;
+
+/*
+ * Structure to define all ELS Payload types
+ */
+
+typedef struct _ELS_PKT { /* Structure is in Big Endian format */
+ uint8_t elsCode; /* FC Word 0, bit 24:31 */
+ uint8_t elsByte1;
+ uint8_t elsByte2;
+ uint8_t elsByte3;
+ union {
+ struct ls_rjt lsRjt; /* Payload for LS_RJT ELS response */
+ struct serv_parm logi; /* Payload for PLOGI/FLOGI/PDISC/ACC */
+ LOGO logo; /* Payload for PLOGO/FLOGO/ACC */
+ PRLI prli; /* Payload for PRLI/ACC */
+ PRLO prlo; /* Payload for PRLO/ACC */
+ ADISC adisc; /* Payload for ADISC/ACC */
+ FARP farp; /* Payload for FARP/ACC */
+ FAN fan; /* Payload for FAN */
+ SCR scr; /* Payload for SCR/ACC */
+ RRQ rrq; /* Payload for RRQ */
+ RNID rnid; /* Payload for RNID */
+ uint8_t pad[128 - 4]; /* Pad out to payload of 128 bytes */
+ } un;
+} ELS_PKT;
+
+/*
+ * FDMI
+ * HBA Management Operations Command Codes
+ */
+#define SLI_MGMT_GRHL 0x100 /* Get registered HBA list */
+#define SLI_MGMT_GHAT 0x101 /* Get HBA attributes */
+#define SLI_MGMT_GRPL 0x102 /* Get registered Port list */
+#define SLI_MGMT_GPAT 0x110 /* Get Port attributes */
+#define SLI_MGMT_RHBA 0x200 /* Register HBA */
+#define SLI_MGMT_RHAT 0x201 /* Register HBA attributes */
+#define SLI_MGMT_RPRT 0x210 /* Register Port */
+#define SLI_MGMT_RPA 0x211 /* Register Port attributes */
+#define SLI_MGMT_DHBA 0x300 /* De-register HBA */
+#define SLI_MGMT_DPRT 0x310 /* De-register Port */
+
+/*
+ * Management Service Subtypes
+ */
+#define SLI_CT_FDMI_Subtypes 0x10
+
+/*
+ * HBA Management Service Reject Code
+ */
+#define REJECT_CODE 0x9 /* Unable to perform command request */
+
+/*
+ * HBA Management Service Reject Reason Code
+ * Please refer to the Reason Codes above
+ */
+
+/*
+ * HBA Attribute Types
+ */
+#define NODE_NAME 0x1
+#define MANUFACTURER 0x2
+#define SERIAL_NUMBER 0x3
+#define MODEL 0x4
+#define MODEL_DESCRIPTION 0x5
+#define HARDWARE_VERSION 0x6
+#define DRIVER_VERSION 0x7
+#define OPTION_ROM_VERSION 0x8
+#define FIRMWARE_VERSION 0x9
+#define OS_NAME_VERSION 0xa
+#define MAX_CT_PAYLOAD_LEN 0xb
+
+/*
+ * Port Attribute Types
+ */
+#define SUPPORTED_FC4_TYPES 0x1
+#define SUPPORTED_SPEED 0x2
+#define PORT_SPEED 0x3
+#define MAX_FRAME_SIZE 0x4
+#define OS_DEVICE_NAME 0x5
+#define HOST_NAME 0x6
+
+union AttributesDef {
+ /* Structure is in Big Endian format */
+ struct {
+ uint32_t AttrType:16;
+ uint32_t AttrLen:16;
+ } bits;
+ uint32_t word;
+};
+
+
+/*
+ * HBA Attribute Entry (8 - 260 bytes)
+ */
+typedef struct {
+ union AttributesDef ad;
+ union {
+ uint32_t VendorSpecific;
+ uint8_t Manufacturer[64];
+ uint8_t SerialNumber[64];
+ uint8_t Model[256];
+ uint8_t ModelDescription[256];
+ uint8_t HardwareVersion[256];
+ uint8_t DriverVersion[256];
+ uint8_t OptionROMVersion[256];
+ uint8_t FirmwareVersion[256];
+ struct lpfc_name NodeName;
+ uint8_t SupportFC4Types[32];
+ uint32_t SupportSpeed;
+ uint32_t PortSpeed;
+ uint32_t MaxFrameSize;
+ uint8_t OsDeviceName[256];
+ uint8_t OsNameVersion[256];
+ uint32_t MaxCTPayloadLen;
+ uint8_t HostName[256];
+ } un;
+} ATTRIBUTE_ENTRY;
+
+/*
+ * HBA Attribute Block
+ */
+typedef struct {
+ uint32_t EntryCnt; /* Number of HBA attribute entries */
+ ATTRIBUTE_ENTRY Entry; /* Variable-length array */
+} ATTRIBUTE_BLOCK;
+
+/*
+ * Port Entry
+ */
+typedef struct {
+ struct lpfc_name PortName;
+} PORT_ENTRY;
+
+/*
+ * HBA Identifier
+ */
+typedef struct {
+ struct lpfc_name PortName;
+} HBA_IDENTIFIER;
+
+/*
+ * Registered Port List Format
+ */
+typedef struct {
+ uint32_t EntryCnt;
+ PORT_ENTRY pe; /* Variable-length array */
+} REG_PORT_LIST;
+
+/*
+ * Register HBA(RHBA)
+ */
+typedef struct {
+ HBA_IDENTIFIER hi;
+ REG_PORT_LIST rpl; /* variable-length array */
+/* ATTRIBUTE_BLOCK ab; */
+} REG_HBA;
+
+/*
+ * Register HBA Attributes (RHAT)
+ */
+typedef struct {
+ struct lpfc_name HBA_PortName;
+ ATTRIBUTE_BLOCK ab;
+} REG_HBA_ATTRIBUTE;
+
+/*
+ * Register Port Attributes (RPA)
+ */
+typedef struct {
+ struct lpfc_name PortName;
+ ATTRIBUTE_BLOCK ab;
+} REG_PORT_ATTRIBUTE;
+
+/*
+ * Get Registered HBA List (GRHL) Accept Payload Format
+ */
+typedef struct {
+ uint32_t HBA__Entry_Cnt; /* Number of Registered HBA Identifiers */
+ struct lpfc_name HBA_PortName; /* Variable-length array */
+} GRHL_ACC_PAYLOAD;
+
+/*
+ * Get Registered Port List (GRPL) Accept Payload Format
+ */
+typedef struct {
+ uint32_t RPL_Entry_Cnt; /* Number of Registered Port Entries */
+ PORT_ENTRY Reg_Port_Entry[1]; /* Variable-length array */
+} GRPL_ACC_PAYLOAD;
+
+/*
+ * Get Port Attributes (GPAT) Accept Payload Format
+ */
+
+typedef struct {
+ ATTRIBUTE_BLOCK pab;
+} GPAT_ACC_PAYLOAD;
+
+
+/*
+ * Begin HBA configuration parameters.
+ * The PCI configuration register BAR assignments are:
+ * BAR0, offset 0x10 - SLIM base memory address
+ * BAR1, offset 0x14 - SLIM base memory high address
+ * BAR2, offset 0x18 - REGISTER base memory address
+ * BAR3, offset 0x1c - REGISTER base memory high address
+ * BAR4, offset 0x20 - BIU I/O registers
+ * BAR5, offset 0x24 - REGISTER base io high address
+ */
+
+/* Number of rings currently used and available. */
+#define MAX_CONFIGURED_RINGS 3
+#define MAX_RINGS 4
+
+/* IOCB / Mailbox is owned by FireFly */
+#define OWN_CHIP 1
+
+/* IOCB / Mailbox is owned by Host */
+#define OWN_HOST 0
+
+/* Number of 4-byte words in an IOCB. */
+#define IOCB_WORD_SZ 8
+
+/* defines for type field in fc header */
+#define FC_ELS_DATA 0x1
+#define FC_LLC_SNAP 0x5
+#define FC_FCP_DATA 0x8
+#define FC_COMMON_TRANSPORT_ULP 0x20
+
+/* defines for rctl field in fc header */
+#define FC_DEV_DATA 0x0
+#define FC_UNSOL_CTL 0x2
+#define FC_SOL_CTL 0x3
+#define FC_UNSOL_DATA 0x4
+#define FC_FCP_CMND 0x6
+#define FC_ELS_REQ 0x22
+#define FC_ELS_RSP 0x23
+
+/* network headers for Dfctl field */
+#define FC_NET_HDR 0x20
+
+/* Start FireFly Register definitions */
+#define PCI_VENDOR_ID_EMULEX 0x10df
+#define PCI_DEVICE_ID_FIREFLY 0x1ae5
+#define PCI_DEVICE_ID_SUPERFLY 0xf700
+#define PCI_DEVICE_ID_DRAGONFLY 0xf800
+#define PCI_DEVICE_ID_RFLY 0xf095
+#define PCI_DEVICE_ID_PFLY 0xf098
+#define PCI_DEVICE_ID_TFLY 0xf0a5
+#define PCI_DEVICE_ID_CENTAUR 0xf900
+#define PCI_DEVICE_ID_PEGASUS 0xf980
+#define PCI_DEVICE_ID_THOR 0xfa00
+#define PCI_DEVICE_ID_VIPER 0xfb00
+#define PCI_DEVICE_ID_HELIOS 0xfd00
+#define PCI_DEVICE_ID_BMID 0xf0d5
+#define PCI_DEVICE_ID_BSMB 0xf0d1
+#define PCI_DEVICE_ID_ZEPHYR 0xfe00
+#define PCI_DEVICE_ID_ZMID 0xf0e5
+#define PCI_DEVICE_ID_ZSMB 0xf0e1
+#define PCI_DEVICE_ID_LP101 0xf0a1
+
+#define JEDEC_ID_ADDRESS 0x0080001c
+#define FIREFLY_JEDEC_ID 0x1ACC
+#define SUPERFLY_JEDEC_ID 0x0020
+#define DRAGONFLY_JEDEC_ID 0x0021
+#define DRAGONFLY_V2_JEDEC_ID 0x0025
+#define CENTAUR_2G_JEDEC_ID 0x0026
+#define CENTAUR_1G_JEDEC_ID 0x0028
+#define PEGASUS_ORION_JEDEC_ID 0x0036
+#define PEGASUS_JEDEC_ID 0x0038
+#define THOR_JEDEC_ID 0x0012
+#define HELIOS_JEDEC_ID 0x0364
+#define ZEPHYR_JEDEC_ID 0x0577
+#define VIPER_JEDEC_ID 0x4838
+
+#define JEDEC_ID_MASK 0x0FFFF000
+#define JEDEC_ID_SHIFT 12
+#define FC_JEDEC_ID(id) ((id & JEDEC_ID_MASK) >> JEDEC_ID_SHIFT)
+
+typedef struct { /* FireFly BIU registers */
+ uint32_t hostAtt; /* See definitions for Host Attention
+ register */
+ uint32_t chipAtt; /* See definitions for Chip Attention
+ register */
+ uint32_t hostStatus; /* See definitions for Host Status register */
+ uint32_t hostControl; /* See definitions for Host Control register */
+ uint32_t buiConfig; /* See definitions for BIU configuration
+ register */
+} FF_REGS;
+
+/* IO Register size in bytes */
+#define FF_REG_AREA_SIZE 256
+
+/* Host Attention Register */
+
+#define HA_REG_OFFSET 0 /* Word offset from register base address */
+
+#define HA_R0RE_REQ 0x00000001 /* Bit 0 */
+#define HA_R0CE_RSP 0x00000002 /* Bit 1 */
+#define HA_R0ATT 0x00000008 /* Bit 3 */
+#define HA_R1RE_REQ 0x00000010 /* Bit 4 */
+#define HA_R1CE_RSP 0x00000020 /* Bit 5 */
+#define HA_R1ATT 0x00000080 /* Bit 7 */
+#define HA_R2RE_REQ 0x00000100 /* Bit 8 */
+#define HA_R2CE_RSP 0x00000200 /* Bit 9 */
+#define HA_R2ATT 0x00000800 /* Bit 11 */
+#define HA_R3RE_REQ 0x00001000 /* Bit 12 */
+#define HA_R3CE_RSP 0x00002000 /* Bit 13 */
+#define HA_R3ATT 0x00008000 /* Bit 15 */
+#define HA_LATT 0x20000000 /* Bit 29 */
+#define HA_MBATT 0x40000000 /* Bit 30 */
+#define HA_ERATT 0x80000000 /* Bit 31 */
+
+#define HA_RXRE_REQ 0x00000001 /* Bit 0 */
+#define HA_RXCE_RSP 0x00000002 /* Bit 1 */
+#define HA_RXATT 0x00000008 /* Bit 3 */
+#define HA_RXMASK 0x0000000f
+
+/* Chip Attention Register */
+
+#define CA_REG_OFFSET 1 /* Word offset from register base address */
+
+#define CA_R0CE_REQ 0x00000001 /* Bit 0 */
+#define CA_R0RE_RSP 0x00000002 /* Bit 1 */
+#define CA_R0ATT 0x00000008 /* Bit 3 */
+#define CA_R1CE_REQ 0x00000010 /* Bit 4 */
+#define CA_R1RE_RSP 0x00000020 /* Bit 5 */
+#define CA_R1ATT 0x00000080 /* Bit 7 */
+#define CA_R2CE_REQ 0x00000100 /* Bit 8 */
+#define CA_R2RE_RSP 0x00000200 /* Bit 9 */
+#define CA_R2ATT 0x00000800 /* Bit 11 */
+#define CA_R3CE_REQ 0x00001000 /* Bit 12 */
+#define CA_R3RE_RSP 0x00002000 /* Bit 13 */
+#define CA_R3ATT 0x00008000 /* Bit 15 */
+#define CA_MBATT 0x40000000 /* Bit 30 */
+
+/* Host Status Register */
+
+#define HS_REG_OFFSET 2 /* Word offset from register base address */
+
+#define HS_MBRDY 0x00400000 /* Bit 22 */
+#define HS_FFRDY 0x00800000 /* Bit 23 */
+#define HS_FFER8 0x01000000 /* Bit 24 */
+#define HS_FFER7 0x02000000 /* Bit 25 */
+#define HS_FFER6 0x04000000 /* Bit 26 */
+#define HS_FFER5 0x08000000 /* Bit 27 */
+#define HS_FFER4 0x10000000 /* Bit 28 */
+#define HS_FFER3 0x20000000 /* Bit 29 */
+#define HS_FFER2 0x40000000 /* Bit 30 */
+#define HS_FFER1 0x80000000 /* Bit 31 */
+#define HS_FFERM 0xFF000000 /* Mask for error bits 31:24 */
+
+/* Host Control Register */
+
+#define HC_REG_OFFSET 3 /* Word offset from register base address */
+
+#define HC_MBINT_ENA 0x00000001 /* Bit 0 */
+#define HC_R0INT_ENA 0x00000002 /* Bit 1 */
+#define HC_R1INT_ENA 0x00000004 /* Bit 2 */
+#define HC_R2INT_ENA 0x00000008 /* Bit 3 */
+#define HC_R3INT_ENA 0x00000010 /* Bit 4 */
+#define HC_INITHBI 0x02000000 /* Bit 25 */
+#define HC_INITMB 0x04000000 /* Bit 26 */
+#define HC_INITFF 0x08000000 /* Bit 27 */
+#define HC_LAINT_ENA 0x20000000 /* Bit 29 */
+#define HC_ERINT_ENA 0x80000000 /* Bit 31 */
+
+/* Mailbox Commands */
+#define MBX_SHUTDOWN 0x00 /* terminate testing */
+#define MBX_LOAD_SM 0x01
+#define MBX_READ_NV 0x02
+#define MBX_WRITE_NV 0x03
+#define MBX_RUN_BIU_DIAG 0x04
+#define MBX_INIT_LINK 0x05
+#define MBX_DOWN_LINK 0x06
+#define MBX_CONFIG_LINK 0x07
+#define MBX_CONFIG_RING 0x09
+#define MBX_RESET_RING 0x0A
+#define MBX_READ_CONFIG 0x0B
+#define MBX_READ_RCONFIG 0x0C
+#define MBX_READ_SPARM 0x0D
+#define MBX_READ_STATUS 0x0E
+#define MBX_READ_RPI 0x0F
+#define MBX_READ_XRI 0x10
+#define MBX_READ_REV 0x11
+#define MBX_READ_LNK_STAT 0x12
+#define MBX_REG_LOGIN 0x13
+#define MBX_UNREG_LOGIN 0x14
+#define MBX_READ_LA 0x15
+#define MBX_CLEAR_LA 0x16
+#define MBX_DUMP_MEMORY 0x17
+#define MBX_DUMP_CONTEXT 0x18
+#define MBX_RUN_DIAGS 0x19
+#define MBX_RESTART 0x1A
+#define MBX_UPDATE_CFG 0x1B
+#define MBX_DOWN_LOAD 0x1C
+#define MBX_DEL_LD_ENTRY 0x1D
+#define MBX_RUN_PROGRAM 0x1E
+#define MBX_SET_MASK 0x20
+#define MBX_SET_SLIM 0x21
+#define MBX_UNREG_D_ID 0x23
+#define MBX_CONFIG_FARP 0x25
+
+#define MBX_LOAD_AREA 0x81
+#define MBX_RUN_BIU_DIAG64 0x84
+#define MBX_CONFIG_PORT 0x88
+#define MBX_READ_SPARM64 0x8D
+#define MBX_READ_RPI64 0x8F
+#define MBX_REG_LOGIN64 0x93
+#define MBX_READ_LA64 0x95
+
+#define MBX_FLASH_WR_ULA 0x98
+#define MBX_SET_DEBUG 0x99
+#define MBX_LOAD_EXP_ROM 0x9C
+
+#define MBX_MAX_CMDS 0x9D
+#define MBX_SLI2_CMD_MASK 0x80
+
+/* IOCB Commands */
+
+#define CMD_RCV_SEQUENCE_CX 0x01
+#define CMD_XMIT_SEQUENCE_CR 0x02
+#define CMD_XMIT_SEQUENCE_CX 0x03
+#define CMD_XMIT_BCAST_CN 0x04
+#define CMD_XMIT_BCAST_CX 0x05
+#define CMD_QUE_RING_BUF_CN 0x06
+#define CMD_QUE_XRI_BUF_CX 0x07
+#define CMD_IOCB_CONTINUE_CN 0x08
+#define CMD_RET_XRI_BUF_CX 0x09
+#define CMD_ELS_REQUEST_CR 0x0A
+#define CMD_ELS_REQUEST_CX 0x0B
+#define CMD_RCV_ELS_REQ_CX 0x0D
+#define CMD_ABORT_XRI_CN 0x0E
+#define CMD_ABORT_XRI_CX 0x0F
+#define CMD_CLOSE_XRI_CN 0x10
+#define CMD_CLOSE_XRI_CX 0x11
+#define CMD_CREATE_XRI_CR 0x12
+#define CMD_CREATE_XRI_CX 0x13
+#define CMD_GET_RPI_CN 0x14
+#define CMD_XMIT_ELS_RSP_CX 0x15
+#define CMD_GET_RPI_CR 0x16
+#define CMD_XRI_ABORTED_CX 0x17
+#define CMD_FCP_IWRITE_CR 0x18
+#define CMD_FCP_IWRITE_CX 0x19
+#define CMD_FCP_IREAD_CR 0x1A
+#define CMD_FCP_IREAD_CX 0x1B
+#define CMD_FCP_ICMND_CR 0x1C
+#define CMD_FCP_ICMND_CX 0x1D
+
+#define CMD_ADAPTER_MSG 0x20
+#define CMD_ADAPTER_DUMP 0x22
+
+/* SLI_2 IOCB Command Set */
+
+#define CMD_RCV_SEQUENCE64_CX 0x81
+#define CMD_XMIT_SEQUENCE64_CR 0x82
+#define CMD_XMIT_SEQUENCE64_CX 0x83
+#define CMD_XMIT_BCAST64_CN 0x84
+#define CMD_XMIT_BCAST64_CX 0x85
+#define CMD_QUE_RING_BUF64_CN 0x86
+#define CMD_QUE_XRI_BUF64_CX 0x87
+#define CMD_IOCB_CONTINUE64_CN 0x88
+#define CMD_RET_XRI_BUF64_CX 0x89
+#define CMD_ELS_REQUEST64_CR 0x8A
+#define CMD_ELS_REQUEST64_CX 0x8B
+#define CMD_ABORT_MXRI64_CN 0x8C
+#define CMD_RCV_ELS_REQ64_CX 0x8D
+#define CMD_XMIT_ELS_RSP64_CX 0x95
+#define CMD_FCP_IWRITE64_CR 0x98
+#define CMD_FCP_IWRITE64_CX 0x99
+#define CMD_FCP_IREAD64_CR 0x9A
+#define CMD_FCP_IREAD64_CX 0x9B
+#define CMD_FCP_ICMND64_CR 0x9C
+#define CMD_FCP_ICMND64_CX 0x9D
+
+#define CMD_GEN_REQUEST64_CR 0xC2
+#define CMD_GEN_REQUEST64_CX 0xC3
+
+#define CMD_MAX_IOCB_CMD 0xE6
+#define CMD_IOCB_MASK 0xff
+
+#define MAX_MSG_DATA 28 /* max msg data in CMD_ADAPTER_MSG
+ iocb */
+#define LPFC_MAX_ADPTMSG 32 /* max msg data */
+/*
+ * Define Status
+ */
+#define MBX_SUCCESS 0
+#define MBXERR_NUM_RINGS 1
+#define MBXERR_NUM_IOCBS 2
+#define MBXERR_IOCBS_EXCEEDED 3
+#define MBXERR_BAD_RING_NUMBER 4
+#define MBXERR_MASK_ENTRIES_RANGE 5
+#define MBXERR_MASKS_EXCEEDED 6
+#define MBXERR_BAD_PROFILE 7
+#define MBXERR_BAD_DEF_CLASS 8
+#define MBXERR_BAD_MAX_RESPONDER 9
+#define MBXERR_BAD_MAX_ORIGINATOR 10
+#define MBXERR_RPI_REGISTERED 11
+#define MBXERR_RPI_FULL 12
+#define MBXERR_NO_RESOURCES 13
+#define MBXERR_BAD_RCV_LENGTH 14
+#define MBXERR_DMA_ERROR 15
+#define MBXERR_ERROR 16
+#define MBX_NOT_FINISHED 255
+
+#define MBX_BUSY 0xffffff /* Attempted cmd to busy Mailbox */
+#define MBX_TIMEOUT 0xfffffe /* time-out expired waiting for */
+
+/*
+ * Begin Structure Definitions for Mailbox Commands
+ */
+
+typedef struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint8_t tval;
+ uint8_t tmask;
+ uint8_t rval;
+ uint8_t rmask;
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint8_t rmask;
+ uint8_t rval;
+ uint8_t tmask;
+ uint8_t tval;
+#endif
+} RR_REG;
+
+struct ulp_bde {
+ uint32_t bdeAddress;
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t bdeReserved:4;
+ uint32_t bdeAddrHigh:4;
+ uint32_t bdeSize:24;
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint32_t bdeSize:24;
+ uint32_t bdeAddrHigh:4;
+ uint32_t bdeReserved:4;
+#endif
+};
+
+struct ulp_bde64 { /* SLI-2 */
+ union ULP_BDE_TUS {
+ uint32_t w;
+ struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t bdeFlags:8; /* BDE Flags 0 IS A SUPPORTED
+ VALUE !! */
+ uint32_t bdeSize:24; /* Size of buffer (in bytes) */
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint32_t bdeSize:24; /* Size of buffer (in bytes) */
+ uint32_t bdeFlags:8; /* BDE Flags 0 IS A SUPPORTED
+ VALUE !! */
+#endif
+
+#define BUFF_USE_RSVD 0x01 /* bdeFlags */
+#define BUFF_USE_INTRPT 0x02 /* Not Implemented with LP6000 */
+#define BUFF_USE_CMND 0x04 /* Optional, 1=cmd/rsp 0=data buffer */
+#define BUFF_USE_RCV 0x08 /* "" "", 1=rcv buffer, 0=xmit
+ buffer */
+#define BUFF_TYPE_32BIT 0x10 /* "" "", 1=32 bit addr 0=64 bit
+ addr */
+#define BUFF_TYPE_SPECIAL 0x20 /* Not Implemented with LP6000 */
+#define BUFF_TYPE_BDL 0x40 /* Optional, may be set in BDL */
+#define BUFF_TYPE_INVALID 0x80 /* "" "" */
+ } f;
+ } tus;
+ uint32_t addrLow;
+ uint32_t addrHigh;
+};
+#define BDE64_SIZE_WORD 0
+#define BPL64_SIZE_WORD 0x40
+
+typedef struct ULP_BDL { /* SLI-2 */
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t bdeFlags:8; /* BDL Flags */
+ uint32_t bdeSize:24; /* Size of BDL array in host memory (bytes) */
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint32_t bdeSize:24; /* Size of BDL array in host memory (bytes) */
+ uint32_t bdeFlags:8; /* BDL Flags */
+#endif
+
+ uint32_t addrLow; /* Address 0:31 */
+ uint32_t addrHigh; /* Address 32:63 */
+ uint32_t ulpIoTag32; /* Can be used for 32 bit I/O Tag */
+} ULP_BDL;
+
+/* Structure for MB Command LOAD_SM and DOWN_LOAD */
+
+typedef struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t rsvd2:25;
+ uint32_t acknowledgment:1;
+ uint32_t version:1;
+ uint32_t erase_or_prog:1;
+ uint32_t update_flash:1;
+ uint32_t update_ram:1;
+ uint32_t method:1;
+ uint32_t load_cmplt:1;
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint32_t load_cmplt:1;
+ uint32_t method:1;
+ uint32_t update_ram:1;
+ uint32_t update_flash:1;
+ uint32_t erase_or_prog:1;
+ uint32_t version:1;
+ uint32_t acknowledgment:1;
+ uint32_t rsvd2:25;
+#endif
+
+ uint32_t dl_to_adr_low;
+ uint32_t dl_to_adr_high;
+ uint32_t dl_len;
+ union {
+ uint32_t dl_from_mbx_offset;
+ struct ulp_bde dl_from_bde;
+ struct ulp_bde64 dl_from_bde64;
+ } un;
+
+} LOAD_SM_VAR;
+
+/* Structure for MB Command READ_NVPARM (02) */
+
+typedef struct {
+ uint32_t rsvd1[3]; /* Read as all one's */
+ uint32_t rsvd2; /* Read as all zero's */
+ uint32_t portname[2]; /* N_PORT name */
+ uint32_t nodename[2]; /* NODE name */
+
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t pref_DID:24;
+ uint32_t hardAL_PA:8;
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint32_t hardAL_PA:8;
+ uint32_t pref_DID:24;
+#endif
+
+ uint32_t rsvd3[21]; /* Read as all one's */
+} READ_NV_VAR;
+
+/* Structure for MB Command WRITE_NVPARMS (03) */
+
+typedef struct {
+ uint32_t rsvd1[3]; /* Must be all one's */
+ uint32_t rsvd2; /* Must be all zero's */
+ uint32_t portname[2]; /* N_PORT name */
+ uint32_t nodename[2]; /* NODE name */
+
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t pref_DID:24;
+ uint32_t hardAL_PA:8;
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint32_t hardAL_PA:8;
+ uint32_t pref_DID:24;
+#endif
+
+ uint32_t rsvd3[21]; /* Must be all one's */
+} WRITE_NV_VAR;
+
+/* Structure for MB Command RUN_BIU_DIAG (04) */
+/* Structure for MB Command RUN_BIU_DIAG64 (0x84) */
+
+typedef struct {
+ uint32_t rsvd1;
+ union {
+ struct {
+ struct ulp_bde xmit_bde;
+ struct ulp_bde rcv_bde;
+ } s1;
+ struct {
+ struct ulp_bde64 xmit_bde64;
+ struct ulp_bde64 rcv_bde64;
+ } s2;
+ } un;
+} BIU_DIAG_VAR;
+
+/* Structure for MB Command INIT_LINK (05) */
+
+typedef struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t rsvd1:24;
+ uint32_t lipsr_AL_PA:8; /* AL_PA to issue Lip Selective Reset to */
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint32_t lipsr_AL_PA:8; /* AL_PA to issue Lip Selective Reset to */
+ uint32_t rsvd1:24;
+#endif
+
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint8_t fabric_AL_PA; /* If using a Fabric Assigned AL_PA */
+ uint8_t rsvd2;
+ uint16_t link_flags;
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint16_t link_flags;
+ uint8_t rsvd2;
+ uint8_t fabric_AL_PA; /* If using a Fabric Assigned AL_PA */
+#endif
+
+#define FLAGS_LOCAL_LB 0x01 /* link_flags (=1) ENDEC loopback */
+#define FLAGS_TOPOLOGY_MODE_LOOP_PT 0x00 /* Attempt loop then pt-pt */
+#define FLAGS_TOPOLOGY_MODE_PT_PT 0x02 /* Attempt pt-pt only */
+#define FLAGS_TOPOLOGY_MODE_LOOP 0x04 /* Attempt loop only */
+#define FLAGS_TOPOLOGY_MODE_PT_LOOP 0x06 /* Attempt pt-pt then loop */
+#define FLAGS_LIRP_LILP 0x80 /* LIRP / LILP is disabled */
+
+#define FLAGS_TOPOLOGY_FAILOVER 0x0400 /* Bit 10 */
+#define FLAGS_LINK_SPEED 0x0800 /* Bit 11 */
+
+ uint32_t link_speed;
+#define LINK_SPEED_AUTO 0 /* Auto selection */
+#define LINK_SPEED_1G 1 /* 1 Gigabaud */
+#define LINK_SPEED_2G 2 /* 2 Gigabaud */
+#define LINK_SPEED_4G 4 /* 4 Gigabaud */
+#define LINK_SPEED_8G 8 /* 8 Gigabaud */
+#define LINK_SPEED_10G 16 /* 10 Gigabaud */
+
+} INIT_LINK_VAR;
+
+/* Structure for MB Command DOWN_LINK (06) */
+
+typedef struct {
+ uint32_t rsvd1;
+} DOWN_LINK_VAR;
+
+/* Structure for MB Command CONFIG_LINK (07) */
+
+typedef struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t cr:1;
+ uint32_t ci:1;
+ uint32_t cr_delay:6;
+ uint32_t cr_count:8;
+ uint32_t rsvd1:8;
+ uint32_t MaxBBC:8;
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint32_t MaxBBC:8;
+ uint32_t rsvd1:8;
+ uint32_t cr_count:8;
+ uint32_t cr_delay:6;
+ uint32_t ci:1;
+ uint32_t cr:1;
+#endif
+
+ uint32_t myId;
+ uint32_t rsvd2;
+ uint32_t edtov;
+ uint32_t arbtov;
+ uint32_t ratov;
+ uint32_t rttov;
+ uint32_t altov;
+ uint32_t crtov;
+ uint32_t citov;
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t rrq_enable:1;
+ uint32_t rrq_immed:1;
+ uint32_t rsvd4:29;
+ uint32_t ack0_enable:1;
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint32_t ack0_enable:1;
+ uint32_t rsvd4:29;
+ uint32_t rrq_immed:1;
+ uint32_t rrq_enable:1;
+#endif
+} CONFIG_LINK;
+
+/* Structure for MB Command PART_SLIM (08)
+ * will be removed since SLI1 is no longer supported!
+ */
+typedef struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint16_t offCiocb;
+ uint16_t numCiocb;
+ uint16_t offRiocb;
+ uint16_t numRiocb;
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint16_t numCiocb;
+ uint16_t offCiocb;
+ uint16_t numRiocb;
+ uint16_t offRiocb;
+#endif
+} RING_DEF;
+
+typedef struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t unused1:24;
+ uint32_t numRing:8;
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint32_t numRing:8;
+ uint32_t unused1:24;
+#endif
+
+ RING_DEF ringdef[4];
+ uint32_t hbainit;
+} PART_SLIM_VAR;
+
+/* Structure for MB Command CONFIG_RING (09) */
+
+typedef struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t unused2:6;
+ uint32_t recvSeq:1;
+ uint32_t recvNotify:1;
+ uint32_t numMask:8;
+ uint32_t profile:8;
+ uint32_t unused1:4;
+ uint32_t ring:4;
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint32_t ring:4;
+ uint32_t unused1:4;
+ uint32_t profile:8;
+ uint32_t numMask:8;
+ uint32_t recvNotify:1;
+ uint32_t recvSeq:1;
+ uint32_t unused2:6;
+#endif
+
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint16_t maxRespXchg;
+ uint16_t maxOrigXchg;
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint16_t maxOrigXchg;
+ uint16_t maxRespXchg;
+#endif
+
+ RR_REG rrRegs[6];
+} CONFIG_RING_VAR;
+
+/* Structure for MB Command RESET_RING (10) */
+
+typedef struct {
+ uint32_t ring_no;
+} RESET_RING_VAR;
+
+/* Structure for MB Command READ_CONFIG (11) */
+
+typedef struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t cr:1;
+ uint32_t ci:1;
+ uint32_t cr_delay:6;
+ uint32_t cr_count:8;
+ uint32_t InitBBC:8;
+ uint32_t MaxBBC:8;
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint32_t MaxBBC:8;
+ uint32_t InitBBC:8;
+ uint32_t cr_count:8;
+ uint32_t cr_delay:6;
+ uint32_t ci:1;
+ uint32_t cr:1;
+#endif
+
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t topology:8;
+ uint32_t myDid:24;
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint32_t myDid:24;
+ uint32_t topology:8;
+#endif
+
+ /* Defines for topology (defined previously) */
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t AR:1;
+ uint32_t IR:1;
+ uint32_t rsvd1:29;
+ uint32_t ack0:1;
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint32_t ack0:1;
+ uint32_t rsvd1:29;
+ uint32_t IR:1;
+ uint32_t AR:1;
+#endif
+
+ uint32_t edtov;
+ uint32_t arbtov;
+ uint32_t ratov;
+ uint32_t rttov;
+ uint32_t altov;
+ uint32_t lmt;
+#define LMT_RESERVED 0x0 /* Not used */
+#define LMT_266_10bit 0x1 /* 265.625 Mbaud 10 bit iface */
+#define LMT_532_10bit 0x2 /* 531.25 Mbaud 10 bit iface */
+#define LMT_1063_20bit 0x3 /* 1062.5 Mbaud 20 bit iface */
+#define LMT_1063_10bit 0x4 /* 1062.5 Mbaud 10 bit iface */
+#define LMT_2125_10bit 0x8 /* 2125 Mbaud 10 bit iface */
+#define LMT_4250_10bit 0x40 /* 4250 Mbaud 10 bit iface */
+
+ uint32_t rsvd2;
+ uint32_t rsvd3;
+ uint32_t max_xri;
+ uint32_t max_iocb;
+ uint32_t max_rpi;
+ uint32_t avail_xri;
+ uint32_t avail_iocb;
+ uint32_t avail_rpi;
+ uint32_t default_rpi;
+} READ_CONFIG_VAR;
+
+/* Structure for MB Command READ_RCONFIG (12) */
+
+typedef struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t rsvd2:7;
+ uint32_t recvNotify:1;
+ uint32_t numMask:8;
+ uint32_t profile:8;
+ uint32_t rsvd1:4;
+ uint32_t ring:4;
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint32_t ring:4;
+ uint32_t rsvd1:4;
+ uint32_t profile:8;
+ uint32_t numMask:8;
+ uint32_t recvNotify:1;
+ uint32_t rsvd2:7;
+#endif
+
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint16_t maxResp;
+ uint16_t maxOrig;
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint16_t maxOrig;
+ uint16_t maxResp;
+#endif
+
+ RR_REG rrRegs[6];
+
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint16_t cmdRingOffset;
+ uint16_t cmdEntryCnt;
+ uint16_t rspRingOffset;
+ uint16_t rspEntryCnt;
+ uint16_t nextCmdOffset;
+ uint16_t rsvd3;
+ uint16_t nextRspOffset;
+ uint16_t rsvd4;
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint16_t cmdEntryCnt;
+ uint16_t cmdRingOffset;
+ uint16_t rspEntryCnt;
+ uint16_t rspRingOffset;
+ uint16_t rsvd3;
+ uint16_t nextCmdOffset;
+ uint16_t rsvd4;
+ uint16_t nextRspOffset;
+#endif
+} READ_RCONF_VAR;
+
+/* Structure for MB Command READ_SPARM (13) */
+/* Structure for MB Command READ_SPARM64 (0x8D) */
+
+typedef struct {
+ uint32_t rsvd1;
+ uint32_t rsvd2;
+ union {
+ struct ulp_bde sp; /* This BDE points to struct serv_parm
+ structure */
+ struct ulp_bde64 sp64;
+ } un;
+} READ_SPARM_VAR;
+
+/* Structure for MB Command READ_STATUS (14) */
+
+typedef struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t rsvd1:31;
+ uint32_t clrCounters:1;
+ uint16_t activeXriCnt;
+ uint16_t activeRpiCnt;
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint32_t clrCounters:1;
+ uint32_t rsvd1:31;
+ uint16_t activeRpiCnt;
+ uint16_t activeXriCnt;
+#endif
+
+ uint32_t xmitByteCnt;
+ uint32_t rcvByteCnt;
+ uint32_t xmitFrameCnt;
+ uint32_t rcvFrameCnt;
+ uint32_t xmitSeqCnt;
+ uint32_t rcvSeqCnt;
+ uint32_t totalOrigExchanges;
+ uint32_t totalRespExchanges;
+ uint32_t rcvPbsyCnt;
+ uint32_t rcvFbsyCnt;
+} READ_STATUS_VAR;
+
+/* Structure for MB Command READ_RPI (15) */
+/* Structure for MB Command READ_RPI64 (0x8F) */
+
+typedef struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint16_t nextRpi;
+ uint16_t reqRpi;
+ uint32_t rsvd2:8;
+ uint32_t DID:24;
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint16_t reqRpi;
+ uint16_t nextRpi;
+ uint32_t DID:24;
+ uint32_t rsvd2:8;
+#endif
+
+ union {
+ struct ulp_bde sp;
+ struct ulp_bde64 sp64;
+ } un;
+
+} READ_RPI_VAR;
+
+/* Structure for MB Command READ_XRI (16) */
+
+typedef struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint16_t nextXri;
+ uint16_t reqXri;
+ uint16_t rsvd1;
+ uint16_t rpi;
+ uint32_t rsvd2:8;
+ uint32_t DID:24;
+ uint32_t rsvd3:8;
+ uint32_t SID:24;
+ uint32_t rsvd4;
+ uint8_t seqId;
+ uint8_t rsvd5;
+ uint16_t seqCount;
+ uint16_t oxId;
+ uint16_t rxId;
+ uint32_t rsvd6:30;
+ uint32_t si:1;
+ uint32_t exchOrig:1;
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint16_t reqXri;
+ uint16_t nextXri;
+ uint16_t rpi;
+ uint16_t rsvd1;
+ uint32_t DID:24;
+ uint32_t rsvd2:8;
+ uint32_t SID:24;
+ uint32_t rsvd3:8;
+ uint32_t rsvd4;
+ uint16_t seqCount;
+ uint8_t rsvd5;
+ uint8_t seqId;
+ uint16_t rxId;
+ uint16_t oxId;
+ uint32_t exchOrig:1;
+ uint32_t si:1;
+ uint32_t rsvd6:30;
+#endif
+} READ_XRI_VAR;
+
+/* Structure for MB Command READ_REV (17) */
+
+typedef struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t cv:1;
+ uint32_t rr:1;
+ uint32_t rsvd1:29;
+ uint32_t rv:1;
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint32_t rv:1;
+ uint32_t rsvd1:29;
+ uint32_t rr:1;
+ uint32_t cv:1;
+#endif
+
+ uint32_t biuRev;
+ uint32_t smRev;
+ union {
+ uint32_t smFwRev;
+ struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint8_t ProgType;
+ uint8_t ProgId;
+ uint16_t ProgVer:4;
+ uint16_t ProgRev:4;
+ uint16_t ProgFixLvl:2;
+ uint16_t ProgDistType:2;
+ uint16_t DistCnt:4;
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint16_t DistCnt:4;
+ uint16_t ProgDistType:2;
+ uint16_t ProgFixLvl:2;
+ uint16_t ProgRev:4;
+ uint16_t ProgVer:4;
+ uint8_t ProgId;
+ uint8_t ProgType;
+#endif
+
+ } b;
+ } un;
+ uint32_t endecRev;
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint8_t feaLevelHigh;
+ uint8_t feaLevelLow;
+ uint8_t fcphHigh;
+ uint8_t fcphLow;
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint8_t fcphLow;
+ uint8_t fcphHigh;
+ uint8_t feaLevelLow;
+ uint8_t feaLevelHigh;
+#endif
+
+ uint32_t postKernRev;
+ uint32_t opFwRev;
+ uint8_t opFwName[16];
+ uint32_t sli1FwRev;
+ uint8_t sli1FwName[16];
+ uint32_t sli2FwRev;
+ uint8_t sli2FwName[16];
+ uint32_t rsvd2;
+ uint32_t RandomData[7];
+} READ_REV_VAR;
+
+/* Structure for MB Command READ_LINK_STAT (18) */
+
+typedef struct {
+ uint32_t rsvd1;
+ uint32_t linkFailureCnt;
+ uint32_t lossSyncCnt;
+
+ uint32_t lossSignalCnt;
+ uint32_t primSeqErrCnt;
+ uint32_t invalidXmitWord;
+ uint32_t crcCnt;
+ uint32_t primSeqTimeout;
+ uint32_t elasticOverrun;
+ uint32_t arbTimeout;
+} READ_LNK_VAR;
+
+/* Structure for MB Command REG_LOGIN (19) */
+/* Structure for MB Command REG_LOGIN64 (0x93) */
+
+typedef struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint16_t rsvd1;
+ uint16_t rpi;
+ uint32_t rsvd2:8;
+ uint32_t did:24;
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint16_t rpi;
+ uint16_t rsvd1;
+ uint32_t did:24;
+ uint32_t rsvd2:8;
+#endif
+
+ union {
+ struct ulp_bde sp;
+ struct ulp_bde64 sp64;
+ } un;
+
+} REG_LOGIN_VAR;
+
+/* Word 30 contents for REG_LOGIN */
+typedef union {
+ struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint16_t rsvd1:12;
+ uint16_t wd30_class:4;
+ uint16_t xri;
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint16_t xri;
+ uint16_t wd30_class:4;
+ uint16_t rsvd1:12;
+#endif
+ } f;
+ uint32_t word;
+} REG_WD30;
+
+/* Structure for MB Command UNREG_LOGIN (20) */
+
+typedef struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint16_t rsvd1;
+ uint16_t rpi;
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint16_t rpi;
+ uint16_t rsvd1;
+#endif
+} UNREG_LOGIN_VAR;
+
+/* Structure for MB Command UNREG_D_ID (0x23) */
+
+typedef struct {
+ uint32_t did;
+} UNREG_D_ID_VAR;
+
+/* Structure for MB Command READ_LA (21) */
+/* Structure for MB Command READ_LA64 (0x95) */
+
+typedef struct {
+ uint32_t eventTag; /* Event tag */
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t rsvd1:22;
+ uint32_t pb:1;
+ uint32_t il:1;
+ uint32_t attType:8;
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint32_t attType:8;
+ uint32_t il:1;
+ uint32_t pb:1;
+ uint32_t rsvd1:22;
+#endif
+
+#define AT_RESERVED 0x00 /* Reserved - attType */
+#define AT_LINK_UP 0x01 /* Link is up */
+#define AT_LINK_DOWN 0x02 /* Link is down */
+
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint8_t granted_AL_PA;
+ uint8_t lipAlPs;
+ uint8_t lipType;
+ uint8_t topology;
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint8_t topology;
+ uint8_t lipType;
+ uint8_t lipAlPs;
+ uint8_t granted_AL_PA;
+#endif
+
+#define TOPOLOGY_PT_PT 0x01 /* Topology is pt-pt / pt-fabric */
+#define TOPOLOGY_LOOP 0x02 /* Topology is FC-AL */
+
+ union {
+  struct ulp_bde lilpBde; /* Points to a 128-byte buffer used to
+       store the LILP AL_PA position map */
+  struct ulp_bde64 lilpBde64;
+ } un;
+
+ /* "D" fields: downstream link attributes; "U" fields below mirror
+  * them for the upstream link.
+  */
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t Dlu:1;
+ uint32_t Dtf:1;
+ uint32_t Drsvd2:14;
+ uint32_t DlnkSpeed:8;
+ uint32_t DnlPort:4;
+ uint32_t Dtx:2;
+ uint32_t Drx:2;
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint32_t Drx:2;
+ uint32_t Dtx:2;
+ uint32_t DnlPort:4;
+ uint32_t DlnkSpeed:8;
+ uint32_t Drsvd2:14;
+ uint32_t Dtf:1;
+ uint32_t Dlu:1;
+#endif
+
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t Ulu:1;
+ uint32_t Utf:1;
+ uint32_t Ursvd2:14;
+ uint32_t UlnkSpeed:8;
+ uint32_t UnlPort:4;
+ uint32_t Utx:2;
+ uint32_t Urx:2;
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint32_t Urx:2;
+ uint32_t Utx:2;
+ uint32_t UnlPort:4;
+ uint32_t UlnkSpeed:8;
+ uint32_t Ursvd2:14;
+ uint32_t Utf:1;
+ uint32_t Ulu:1;
+#endif
+
+#define LA_UNKNW_LINK 0x0 /* lnkSpeed */
+#define LA_1GHZ_LINK 0x04 /* lnkSpeed */
+#define LA_2GHZ_LINK 0x08 /* lnkSpeed */
+#define LA_4GHZ_LINK 0x10 /* lnkSpeed */
+#define LA_8GHZ_LINK 0x20 /* lnkSpeed */
+#define LA_10GHZ_LINK 0x40 /* lnkSpeed */
+
+} READ_LA_VAR;
+
+/* Structure for MB Command CLEAR_LA (22) */
+
+typedef struct {
+ uint32_t eventTag; /* Event tag */
+ uint32_t rsvd1;
+} CLEAR_LA_VAR;
+
+/* Structure for MB Command DUMP */
+
+typedef struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t rsvd:25;
+ uint32_t ra:1;
+ uint32_t co:1;
+ uint32_t cv:1;
+ uint32_t type:4;
+ uint32_t entry_index:16;
+ uint32_t region_id:16;
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint32_t type:4;
+ uint32_t cv:1;
+ uint32_t co:1;
+ uint32_t ra:1;
+ uint32_t rsvd:25;
+ uint32_t region_id:16;
+ uint32_t entry_index:16;
+#endif
+
+ uint32_t rsvd1;
+ uint32_t word_cnt;
+ uint32_t resp_offset;
+} DUMP_VAR;
+
+/* DUMP 'type' values */
+#define DMP_MEM_REG 0x1
+#define DMP_NV_PARAMS 0x2
+
+#define DMP_REGION_VPD 0xe
+#define DMP_VPD_SIZE 0x100
+
+/* Structure for MB Command CONFIG_PORT (0x88) */
+
+typedef struct {
+ uint32_t pcbLen;
+ uint32_t pcbLow; /* bit 31:0 of memory based port config block */
+ uint32_t pcbHigh; /* bit 63:32 of memory based port config block */
+ uint32_t hbainit[5];
+} CONFIG_PORT_VAR;
+
+/* SLI-2 Port Control Block */
+
+/* SLIM POINTER */
+#define SLIMOFF 0x30 /* WORD */
+
+/* Per-ring descriptor: DMA addresses and sizes of the command and
+ * response IOCB rings.
+ */
+typedef struct _SLI2_RDSC {
+ uint32_t cmdEntries;
+ uint32_t cmdAddrLow;
+ uint32_t cmdAddrHigh;
+
+ uint32_t rspEntries;
+ uint32_t rspAddrLow;
+ uint32_t rspAddrHigh;
+} SLI2_RDSC;
+
+/* SLI-2 Port Control Block handed to the HBA via CONFIG_PORT.
+ * Note: the TYPE_NATIVE_SLI2 / FEATURE_INITIAL_SLI2 macros are defined
+ * in both endian branches with identical tokens, which is a benign
+ * redefinition; the former trailing semicolons have been removed so the
+ * macros can be used safely inside expressions.
+ */
+typedef struct _PCB {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t type:8;
+#define TYPE_NATIVE_SLI2 0x01
+ uint32_t feature:8;
+#define FEATURE_INITIAL_SLI2 0x01
+ uint32_t rsvd:12;
+ uint32_t maxRing:4;
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint32_t maxRing:4;
+ uint32_t rsvd:12;
+ uint32_t feature:8;
+#define FEATURE_INITIAL_SLI2 0x01
+ uint32_t type:8;
+#define TYPE_NATIVE_SLI2 0x01
+#endif
+
+ uint32_t mailBoxSize;
+ uint32_t mbAddrLow;
+ uint32_t mbAddrHigh;
+
+ uint32_t hgpAddrLow;
+ uint32_t hgpAddrHigh;
+
+ uint32_t pgpAddrLow;
+ uint32_t pgpAddrHigh;
+ SLI2_RDSC rdsc[MAX_RINGS];
+} PCB_t;
+
+/* NEW_FEATURE */
+/* Structure for MB Command CONFIG_FARP (0x25).
+ * The reserved bitfield is named "rsvd0" in both endian branches; the
+ * little-endian branch previously used "rsvd", which made conditionally
+ * compiled code referencing it non-portable across endiannesses.
+ */
+typedef struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t rsvd0:27;
+ uint32_t discardFarp:1;
+ uint32_t IPEnable:1;
+ uint32_t nodeName:1;
+ uint32_t portName:1;
+ uint32_t filterEnable:1;
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint32_t filterEnable:1;
+ uint32_t portName:1;
+ uint32_t nodeName:1;
+ uint32_t IPEnable:1;
+ uint32_t discardFarp:1;
+ uint32_t rsvd0:27;
+#endif
+
+ uint8_t portname[8]; /* Used to be struct lpfc_name */
+ uint8_t nodename[8];
+ uint32_t rsvd1;
+ uint32_t rsvd2;
+ uint32_t rsvd3;
+ uint32_t IPAddress;
+} CONFIG_FARP_VAR;
+
+/* Union of all Mailbox Command types */
+#define MAILBOX_CMD_WSIZE 32
+
+/* Variant part of a mailbox command: one member per mailbox command,
+ * overlaid on the 31 words that follow the command/status word.
+ */
+typedef union {
+ uint32_t varWords[MAILBOX_CMD_WSIZE - 1]; /* raw word access */
+ LOAD_SM_VAR varLdSM; /* cmd = 1 (LOAD_SM) */
+ READ_NV_VAR varRDnvp; /* cmd = 2 (READ_NVPARMS) */
+ WRITE_NV_VAR varWTnvp; /* cmd = 3 (WRITE_NVPARMS) */
+ BIU_DIAG_VAR varBIUdiag; /* cmd = 4 (RUN_BIU_DIAG) */
+ INIT_LINK_VAR varInitLnk; /* cmd = 5 (INIT_LINK) */
+ DOWN_LINK_VAR varDwnLnk; /* cmd = 6 (DOWN_LINK) */
+ CONFIG_LINK varCfgLnk; /* cmd = 7 (CONFIG_LINK) */
+ PART_SLIM_VAR varSlim; /* cmd = 8 (PART_SLIM) */
+ CONFIG_RING_VAR varCfgRing; /* cmd = 9 (CONFIG_RING) */
+ RESET_RING_VAR varRstRing; /* cmd = 10 (RESET_RING) */
+ READ_CONFIG_VAR varRdConfig; /* cmd = 11 (READ_CONFIG) */
+ READ_RCONF_VAR varRdRConfig; /* cmd = 12 (READ_RCONFIG) */
+ READ_SPARM_VAR varRdSparm; /* cmd = 13 (READ_SPARM(64)) */
+ READ_STATUS_VAR varRdStatus; /* cmd = 14 (READ_STATUS) */
+ READ_RPI_VAR varRdRPI; /* cmd = 15 (READ_RPI(64)) */
+ READ_XRI_VAR varRdXRI; /* cmd = 16 (READ_XRI) */
+ READ_REV_VAR varRdRev; /* cmd = 17 (READ_REV) */
+ READ_LNK_VAR varRdLnk; /* cmd = 18 (READ_LNK_STAT) */
+ REG_LOGIN_VAR varRegLogin; /* cmd = 19 (REG_LOGIN(64)) */
+ UNREG_LOGIN_VAR varUnregLogin; /* cmd = 20 (UNREG_LOGIN) */
+ READ_LA_VAR varReadLA; /* cmd = 21 (READ_LA(64)) */
+ CLEAR_LA_VAR varClearLA; /* cmd = 22 (CLEAR_LA) */
+ DUMP_VAR varDmp; /* Warm Start DUMP mbx cmd */
+ UNREG_D_ID_VAR varUnregDID; /* cmd = 0x23 (UNREG_D_ID) */
+ CONFIG_FARP_VAR varCfgFarp; /* cmd = 0x25 (CONFIG_FARP) NEW_FEATURE */
+ CONFIG_PORT_VAR varCfgPort; /* cmd = 0x88 (CONFIG_PORT) */
+} MAILVARIANTS;
+
+/*
+ * SLI-2 specific structures
+ */
+
+/* Host-owned ring get/put indices */
+typedef struct {
+ uint32_t cmdPutInx;
+ uint32_t rspGetInx;
+} HGP;
+
+/* Port-owned ring get/put indices */
+typedef struct {
+ uint32_t cmdGetInx;
+ uint32_t rspPutInx;
+} PGP;
+
+typedef struct _SLI2_DESC {
+ HGP host[MAX_RINGS];
+ uint32_t unused1[16];
+ PGP port[MAX_RINGS];
+} SLI2_DESC;
+
+typedef union {
+ SLI2_DESC s2;
+} SLI_VAR;
+
+/* Full mailbox image: command/status word followed by the command
+ * variant and the SLI ring-pointer area. volatile because the HBA
+ * updates it via DMA.
+ */
+typedef volatile struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint16_t mbxStatus;
+ uint8_t mbxCommand;
+ uint8_t mbxReserved:6;
+ uint8_t mbxHc:1;
+ uint8_t mbxOwner:1; /* Low order bit first word */
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint8_t mbxOwner:1; /* Low order bit first word */
+ uint8_t mbxHc:1;
+ uint8_t mbxReserved:6;
+ uint8_t mbxCommand;
+ uint16_t mbxStatus;
+#endif
+
+ MAILVARIANTS un;
+ SLI_VAR us;
+} MAILBOX_t;
+
+/*
+ * Begin Structure Definitions for IOCB Commands
+ */
+
+/* Error/status breakdown of an IOCB completion word */
+typedef struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint8_t statAction;
+ uint8_t statRsn;
+ uint8_t statBaExp;
+ uint8_t statLocalError;
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint8_t statLocalError;
+ uint8_t statBaExp;
+ uint8_t statRsn;
+ uint8_t statAction;
+#endif
+ /* statRsn P/F_RJT reason codes */
+#define RJT_BAD_D_ID 0x01 /* Invalid D_ID field */
+#define RJT_BAD_S_ID 0x02 /* Invalid S_ID field */
+#define RJT_UNAVAIL_TEMP 0x03 /* N_Port unavailable temp. */
+#define RJT_UNAVAIL_PERM 0x04 /* N_Port unavailable perm. */
+#define RJT_UNSUP_CLASS 0x05 /* Class not supported */
+#define RJT_DELIM_ERR 0x06 /* Delimiter usage error */
+#define RJT_UNSUP_TYPE 0x07 /* Type not supported */
+#define RJT_BAD_CONTROL 0x08 /* Invalid link control */
+#define RJT_BAD_RCTL 0x09 /* R_CTL invalid */
+#define RJT_BAD_FCTL 0x0A /* F_CTL invalid */
+#define RJT_BAD_OXID 0x0B /* OX_ID invalid */
+#define RJT_BAD_RXID 0x0C /* RX_ID invalid */
+#define RJT_BAD_SEQID 0x0D /* SEQ_ID invalid */
+#define RJT_BAD_DFCTL 0x0E /* DF_CTL invalid */
+#define RJT_BAD_SEQCNT 0x0F /* SEQ_CNT invalid */
+#define RJT_BAD_PARM 0x10 /* Param. field invalid */
+#define RJT_XCHG_ERR 0x11 /* Exchange error */
+#define RJT_PROT_ERR 0x12 /* Protocol error */
+#define RJT_BAD_LENGTH 0x13 /* Invalid Length */
+#define RJT_UNEXPECTED_ACK 0x14 /* Unexpected ACK */
+#define RJT_LOGIN_REQUIRED 0x16 /* Login required */
+#define RJT_TOO_MANY_SEQ 0x17 /* Excessive sequences */
+#define RJT_XCHG_NOT_STRT 0x18 /* Exchange not started */
+#define RJT_UNSUP_SEC_HDR 0x19 /* Security hdr not supported */
+#define RJT_UNAVAIL_PATH 0x1A /* Fabric Path not available */
+#define RJT_VENDOR_UNIQUE 0xFF /* Vendor unique error */
+
+ /* statLocalError codes */
+#define IOERR_SUCCESS 0x00 /* statLocalError */
+#define IOERR_MISSING_CONTINUE 0x01
+#define IOERR_SEQUENCE_TIMEOUT 0x02
+#define IOERR_INTERNAL_ERROR 0x03
+#define IOERR_INVALID_RPI 0x04
+#define IOERR_NO_XRI 0x05
+#define IOERR_ILLEGAL_COMMAND 0x06
+#define IOERR_XCHG_DROPPED 0x07
+#define IOERR_ILLEGAL_FIELD 0x08
+#define IOERR_BAD_CONTINUE 0x09
+#define IOERR_TOO_MANY_BUFFERS 0x0A
+#define IOERR_RCV_BUFFER_WAITING 0x0B
+#define IOERR_NO_CONNECTION 0x0C
+#define IOERR_TX_DMA_FAILED 0x0D
+#define IOERR_RX_DMA_FAILED 0x0E
+#define IOERR_ILLEGAL_FRAME 0x0F
+#define IOERR_EXTRA_DATA 0x10
+#define IOERR_NO_RESOURCES 0x11
+#define IOERR_RESERVED 0x12
+#define IOERR_ILLEGAL_LENGTH 0x13
+#define IOERR_UNSUPPORTED_FEATURE 0x14
+#define IOERR_ABORT_IN_PROGRESS 0x15
+#define IOERR_ABORT_REQUESTED 0x16
+#define IOERR_RECEIVE_BUFFER_TIMEOUT 0x17
+#define IOERR_LOOP_OPEN_FAILURE 0x18
+#define IOERR_RING_RESET 0x19
+#define IOERR_LINK_DOWN 0x1A
+#define IOERR_CORRUPTED_DATA 0x1B
+#define IOERR_CORRUPTED_RPI 0x1C
+#define IOERR_OUT_OF_ORDER_DATA 0x1D
+#define IOERR_OUT_OF_ORDER_ACK 0x1E
+#define IOERR_DUP_FRAME 0x1F
+#define IOERR_LINK_CONTROL_FRAME 0x20 /* ACK_N received */
+#define IOERR_BAD_HOST_ADDRESS 0x21
+#define IOERR_RCV_HDRBUF_WAITING 0x22
+#define IOERR_MISSING_HDR_BUFFER 0x23
+#define IOERR_MSEQ_CHAIN_CORRUPTED 0x24
+#define IOERR_ABORTMULT_REQUESTED 0x25
+#define IOERR_BUFFER_SHORTAGE 0x28
+#define IOERR_DEFAULT 0x29
+#define IOERR_CNT 0x2A
+
+ /* Driver-defined (not HBA) error codes, above the HW range */
+#define IOERR_DRVR_MASK 0x100
+#define IOERR_SLI_DOWN 0x101 /* ulpStatus - Driver defined */
+#define IOERR_SLI_BRESET 0x102
+#define IOERR_SLI_ABORTED 0x103
+} PARM_ERR;
+
+/* IOCB word 5: FC frame header control fields, or raw word access */
+typedef union {
+ struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+  uint8_t Rctl; /* R_CTL field */
+  uint8_t Type; /* TYPE field */
+  uint8_t Dfctl; /* DF_CTL field */
+  uint8_t Fctl; /* Bits 0-7 of IOCB word 5 */
+#else /* __LITTLE_ENDIAN_BITFIELD */
+  uint8_t Fctl; /* Bits 0-7 of IOCB word 5 */
+  uint8_t Dfctl; /* DF_CTL field */
+  uint8_t Type; /* TYPE field */
+  uint8_t Rctl; /* R_CTL field */
+#endif
+
+#define BC 0x02 /* Broadcast Received - Fctl */
+#define SI 0x04 /* Sequence Initiative */
+#define LA 0x08 /* Ignore Link Attention state */
+#define LS 0x80 /* Last Sequence */
+ } hcsw;
+ uint32_t reserved;
+} WORD5;
+
+/* IOCB Command template for a generic response */
+typedef struct {
+ uint32_t reserved[4];
+ PARM_ERR perr;
+} GENERIC_RSP;
+
+/* IOCB Command template for XMIT / XMIT_BCAST / RCV_SEQUENCE / XMIT_ELS */
+typedef struct {
+ struct ulp_bde xrsqbde[2];
+ uint32_t xrsqRo; /* Starting Relative Offset */
+ WORD5 w5; /* Header control/status word */
+} XR_SEQ_FIELDS;
+
+/* IOCB Command template for ELS_REQUEST */
+typedef struct {
+ struct ulp_bde elsReq; /* request payload descriptor */
+ struct ulp_bde elsRsp; /* response buffer descriptor */
+
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t word4Rsvd:7;
+ uint32_t fl:1;
+ uint32_t myID:24;
+ uint32_t word5Rsvd:8;
+ uint32_t remoteID:24;
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint32_t myID:24;
+ uint32_t fl:1;
+ uint32_t word4Rsvd:7;
+ uint32_t remoteID:24;
+ uint32_t word5Rsvd:8;
+#endif
+} ELS_REQUEST;
+
+/* IOCB Command template for RCV_ELS_REQ */
+typedef struct {
+ struct ulp_bde elsReq[2];
+ uint32_t parmRo;
+
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t word5Rsvd:8;
+ uint32_t remoteID:24;
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint32_t remoteID:24;
+ uint32_t word5Rsvd:8;
+#endif
+} RCV_ELS_REQ;
+
+/* IOCB Command template for ABORT / CLOSE_XRI */
+typedef struct {
+ uint32_t rsvd[3];
+ uint32_t abortType;
+#define ABORT_TYPE_ABTX 0x00000000
+#define ABORT_TYPE_ABTS 0x00000001
+ uint32_t parm;
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint16_t abortContextTag; /* ulpContext from command to abort/close */
+ uint16_t abortIoTag; /* ulpIoTag from command to abort/close */
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint16_t abortIoTag; /* ulpIoTag from command to abort/close */
+ uint16_t abortContextTag; /* ulpContext from command to abort/close */
+#endif
+} AC_XRI;
+
+/* IOCB Command template for ABORT_MXRI64 */
+typedef struct {
+ uint32_t rsvd[3];
+ uint32_t abortType;
+ uint32_t parm;
+ uint32_t iotag32;
+} A_MXRI64;
+
+/* IOCB Command template for GET_RPI */
+typedef struct {
+ uint32_t rsvd[4];
+ uint32_t parmRo;
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t word5Rsvd:8;
+ uint32_t remoteID:24;
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint32_t remoteID:24;
+ uint32_t word5Rsvd:8;
+#endif
+} GET_RPI;
+
+/* IOCB Command template for all FCP Initiator commands */
+typedef struct {
+ struct ulp_bde fcpi_cmnd; /* FCP_CMND payload descriptor */
+ struct ulp_bde fcpi_rsp; /* Rcv buffer */
+ uint32_t fcpi_parm;
+ uint32_t fcpi_XRdy; /* transfer ready for IWRITE */
+} FCPI_FIELDS;
+
+/* IOCB Command template for all FCP Target commands */
+typedef struct {
+ struct ulp_bde fcpt_Buffer[2]; /* FCP_CMND payload descriptor */
+ uint32_t fcpt_Offset;
+ uint32_t fcpt_Length; /* transfer ready for IWRITE */
+} FCPT_FIELDS;
+
+/* SLI-2 IOCB structure definitions */
+
+/* These mirror the 32-bit templates above but use 64-bit capable BDE /
+ * BDL descriptors.
+ */
+
+/* IOCB Command template for 64 bit XMIT / XMIT_BCAST / XMIT_ELS */
+typedef struct {
+ ULP_BDL bdl;
+ uint32_t xrsqRo; /* Starting Relative Offset */
+ WORD5 w5; /* Header control/status word */
+} XMT_SEQ_FIELDS64;
+
+/* IOCB Command template for 64 bit RCV_SEQUENCE64 */
+typedef struct {
+ struct ulp_bde64 rcvBde;
+ uint32_t rsvd1;
+ uint32_t xrsqRo; /* Starting Relative Offset */
+ WORD5 w5; /* Header control/status word */
+} RCV_SEQ_FIELDS64;
+
+/* IOCB Command template for ELS_REQUEST64 */
+typedef struct {
+ ULP_BDL bdl;
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t word4Rsvd:7;
+ uint32_t fl:1;
+ uint32_t myID:24;
+ uint32_t word5Rsvd:8;
+ uint32_t remoteID:24;
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint32_t myID:24;
+ uint32_t fl:1;
+ uint32_t word4Rsvd:7;
+ uint32_t remoteID:24;
+ uint32_t word5Rsvd:8;
+#endif
+} ELS_REQUEST64;
+
+/* IOCB Command template for GEN_REQUEST64 */
+typedef struct {
+ ULP_BDL bdl;
+ uint32_t xrsqRo; /* Starting Relative Offset */
+ WORD5 w5; /* Header control/status word */
+} GEN_REQUEST64;
+
+/* IOCB Command template for RCV_ELS_REQ64 */
+typedef struct {
+ struct ulp_bde64 elsReq;
+ uint32_t rcvd1;
+ uint32_t parmRo;
+
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t word5Rsvd:8;
+ uint32_t remoteID:24;
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint32_t remoteID:24;
+ uint32_t word5Rsvd:8;
+#endif
+} RCV_ELS_REQ64;
+
+/* IOCB Command template for all 64 bit FCP Initiator commands */
+typedef struct {
+ ULP_BDL bdl;
+ uint32_t fcpi_parm;
+ uint32_t fcpi_XRdy; /* transfer ready for IWRITE */
+} FCPI_FIELDS64;
+
+/* IOCB Command template for all 64 bit FCP Target commands */
+typedef struct {
+ ULP_BDL bdl;
+ uint32_t fcpt_Offset;
+ uint32_t fcpt_Length; /* transfer ready for IWRITE */
+} FCPT_FIELDS64;
+
+/* IOCB: one command/response ring entry. volatile because the HBA
+ * writes completions into it via DMA.
+ */
+typedef volatile struct _IOCB { /* IOCB structure */
+ union {
+  GENERIC_RSP grsp; /* Generic response */
+  XR_SEQ_FIELDS xrseq; /* XMIT / BCAST / RCV_SEQUENCE cmd */
+  struct ulp_bde cont[3]; /* up to 3 continuation bdes */
+  RCV_ELS_REQ rcvels; /* RCV_ELS_REQ template */
+  AC_XRI acxri; /* ABORT / CLOSE_XRI template */
+  A_MXRI64 amxri; /* abort multiple xri command overlay */
+  GET_RPI getrpi; /* GET_RPI template */
+  FCPI_FIELDS fcpi; /* FCP Initiator template */
+  FCPT_FIELDS fcpt; /* FCP target template */
+
+  /* SLI-2 structures */
+
+  struct ulp_bde64 cont64[2]; /* up to 2 64 bit continuation
+      bde_64s */
+  ELS_REQUEST64 elsreq64; /* ELS_REQUEST template */
+  GEN_REQUEST64 genreq64; /* GEN_REQUEST template */
+  RCV_ELS_REQ64 rcvels64; /* RCV_ELS_REQ template */
+  XMT_SEQ_FIELDS64 xseq64; /* XMIT / BCAST cmd */
+  FCPI_FIELDS64 fcpi64; /* FCP 64 bit Initiator template */
+  FCPT_FIELDS64 fcpt64; /* FCP 64 bit target template */
+
+  uint32_t ulpWord[IOCB_WORD_SZ - 2]; /* generic 6 'words' */
+ } un;
+ /* Word 6: two views — t1 (full 16-bit IoTag) and t2 (IoTag split
+  * into a 14-bit and a 2-bit field).
+  */
+ union {
+  struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+   uint16_t ulpContext; /* High order bits word 6 */
+   uint16_t ulpIoTag; /* Low order bits word 6 */
+#else /* __LITTLE_ENDIAN_BITFIELD */
+   uint16_t ulpIoTag; /* Low order bits word 6 */
+   uint16_t ulpContext; /* High order bits word 6 */
+#endif
+  } t1;
+  struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+   uint16_t ulpContext; /* High order bits word 6 */
+   uint16_t ulpIoTag1:2; /* Low order bits word 6 */
+   uint16_t ulpIoTag0:14; /* Low order bits word 6 */
+#else /* __LITTLE_ENDIAN_BITFIELD */
+   uint16_t ulpIoTag0:14; /* Low order bits word 6 */
+   uint16_t ulpIoTag1:2; /* Low order bits word 6 */
+   uint16_t ulpContext; /* High order bits word 6 */
+#endif
+  } t2;
+ } un1;
+/* Convenience accessors for the word-6 views above */
+#define ulpContext un1.t1.ulpContext
+#define ulpIoTag un1.t1.ulpIoTag
+#define ulpIoTag0 un1.t2.ulpIoTag0
+
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t ulpTimeout:8;
+ uint32_t ulpXS:1;
+ uint32_t ulpFCP2Rcvy:1;
+ uint32_t ulpPU:2;
+ uint32_t ulpIr:1;
+ uint32_t ulpClass:3;
+ uint32_t ulpCommand:8;
+ uint32_t ulpStatus:4;
+ uint32_t ulpBdeCount:2;
+ uint32_t ulpLe:1;
+ uint32_t ulpOwner:1; /* Low order bit word 7 */
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint32_t ulpOwner:1; /* Low order bit word 7 */
+ uint32_t ulpLe:1;
+ uint32_t ulpBdeCount:2;
+ uint32_t ulpStatus:4;
+ uint32_t ulpCommand:8;
+ uint32_t ulpClass:3;
+ uint32_t ulpIr:1;
+ uint32_t ulpPU:2;
+ uint32_t ulpFCP2Rcvy:1;
+ uint32_t ulpXS:1;
+ uint32_t ulpTimeout:8;
+#endif
+
+#define PARM_UNUSED 0 /* PU field (Word 4) not used */
+#define PARM_REL_OFF 1 /* PU field (Word 4) = R. O. */
+#define PARM_READ_CHECK 2 /* PU field (Word 4) = Data Transfer Length */
+#define CLASS1 0 /* Class 1 */
+#define CLASS2 1 /* Class 2 */
+#define CLASS3 2 /* Class 3 */
+#define CLASS_FCP_INTERMIX 7 /* FCP Data->Cls 1, all else->Cls 2 */
+
+#define IOSTAT_SUCCESS 0x0 /* ulpStatus - HBA defined */
+#define IOSTAT_FCP_RSP_ERROR 0x1
+#define IOSTAT_REMOTE_STOP 0x2
+#define IOSTAT_LOCAL_REJECT 0x3
+#define IOSTAT_NPORT_RJT 0x4
+#define IOSTAT_FABRIC_RJT 0x5
+#define IOSTAT_NPORT_BSY 0x6
+#define IOSTAT_FABRIC_BSY 0x7
+#define IOSTAT_INTERMED_RSP 0x8
+#define IOSTAT_LS_RJT 0x9
+#define IOSTAT_BA_RJT 0xA
+#define IOSTAT_RSVD1 0xB
+#define IOSTAT_RSVD2 0xC
+#define IOSTAT_RSVD3 0xD
+#define IOSTAT_RSVD4 0xE
+#define IOSTAT_RSVD5 0xF
+#define IOSTAT_DRIVER_REJECT 0x10 /* ulpStatus - Driver defined */
+#define IOSTAT_DEFAULT 0xF /* Same as rsvd5 for now */
+#define IOSTAT_CNT 0x11
+
+} IOCB_t;
+
+
+#define SLI1_SLIM_SIZE (4 * 1024)
+
+/* Up to 498 IOCBs will fit into 16k
+ * 256 (MAILBOX_t) + 140 (PCB_t) + ( 32 (IOCB_t) * 498 ) = < 16384
+ */
+#define SLI2_SLIM_SIZE (16 * 1024)
+
+/* Maximum IOCBs that will fit in SLI2 slim */
+#define MAX_SLI2_IOCB 498
+
+/* Memory image of the SLI-2 SLIM: mailbox, port control block, then
+ * the IOCB rings.
+ */
+struct lpfc_sli2_slim {
+ MAILBOX_t mbx;
+ PCB_t pcb;
+ IOCB_t IOCBs[MAX_SLI2_IOCB];
+};
+
+/*******************************************************************
+Checks the PCI device ID to allow special handling for LC HBAs.
+
+Parameters:
+device : struct pci_dev 's device field
+
+return 1 => device is an LC HBA
+       0 => otherwise
+ *******************************************************************/
+static inline int
+lpfc_is_LC_HBA(unsigned short device)
+{
+ switch (device) {
+ case PCI_DEVICE_ID_TFLY:
+ case PCI_DEVICE_ID_PFLY:
+ case PCI_DEVICE_ID_LP101:
+ case PCI_DEVICE_ID_BMID:
+ case PCI_DEVICE_ID_BSMB:
+ case PCI_DEVICE_ID_ZMID:
+ case PCI_DEVICE_ID_ZSMB:
+ case PCI_DEVICE_ID_RFLY:
+  return 1;
+ default:
+  return 0;
+ }
+}
+
+#endif /* _H_LPFC_HW */
--- /dev/null
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for *
+ * Enterprise Fibre Channel Host Bus Adapters. *
+ * Refer to the README file included with this package for *
+ * driver version and adapter support. *
+ * Copyright (C) 2004 Emulex Corporation. *
+ * www.emulex.com *
+ * *
+ * This program is free software; you can redistribute it and/or *
+ * modify it under the terms of the GNU General Public License *
+ * as published by the Free Software Foundation; either version 2 *
+ * of the License, or (at your option) any later version. *
+ * *
+ * This program is distributed in the hope that it will be useful, *
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of *
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
+ * GNU General Public License for more details, a copy of which *
+ * can be found in the file COPYING included with this package. *
+ *******************************************************************/
+
+/*
+ * $Id: lpfc_init.c 1.168 2004/11/15 11:01:33EST sf_support Exp $
+ */
+
+#include <linux/version.h>
+#include <linux/blkdev.h>
+#include <linux/ctype.h>
+#include <linux/dma-mapping.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+
+#include "lpfc_sli.h"
+#include "lpfc_disc.h"
+#include "lpfc_scsi.h"
+#include "lpfc.h"
+#include "lpfc_crtn.h"
+#include "lpfc_hw.h"
+#include "lpfc_logmsg.h"
+#include "lpfc_mem.h"
+#include "lpfc_version.h"
+#include "lpfc_compat.h"
+
+static int lpfc_parse_vpd(struct lpfc_hba *, uint8_t *);
+static int lpfc_post_rcv_buf(struct lpfc_hba *);
+
+/* Word 30 of the last READ_REV mailbox response; file-scope statics are
+ * zeroed by the loader, so no explicit initializer is needed.
+ */
+static int lpfc_rdrev_wd30;
+
+/************************************************************************/
+/* */
+/* lpfc_config_port_prep */
+/* This routine will do LPFC initialization prior to the */
+/* CONFIG_PORT mailbox command. This will be initialized */
+/* as a SLI layer callback routine. */
+/* This routine returns 0 on success or -ERESTART if it wants */
+/* the SLI layer to reset the HBA and try again. Any */
+/* other return value indicates an error. */
+/* */
+/************************************************************************/
+int
+lpfc_config_port_prep(struct lpfc_hba * phba)
+{
+ lpfc_vpd_t *vp = &phba->vpd;
+ int i = 0;
+ LPFC_MBOXQ_t *pmb;
+ MAILBOX_t *mb;
+
+
+ /* Get a Mailbox buffer to setup mailbox commands for HBA
+    initialization */
+ pmb = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC);
+ if (!pmb) {
+  phba->hba_state = LPFC_HBA_ERROR;
+  return -ENOMEM;
+ }
+
+ mb = &pmb->mb;
+ phba->hba_state = LPFC_INIT_MBX_CMDS;
+
+ /* special handling for LC HBAs */
+ if (lpfc_is_LC_HBA(phba->pcidev->device)) {
+  char licensed[56] =
+      "key unlock for use with gnu public licensed code only\0";
+  uint32_t *ptext = (uint32_t *) licensed;
+
+  /* The HBA expects the key big-endian, one word at a time */
+  for (i = 0; i < 56; i += sizeof (uint32_t), ptext++)
+   *ptext = cpu_to_be32(*ptext);
+
+  /* Setup and issue mailbox READ NVPARAMS command */
+  lpfc_read_nv(phba, pmb);
+  memset((char*)mb->un.varRDnvp.rsvd3, 0,
+   sizeof (mb->un.varRDnvp.rsvd3));
+  memcpy((char*)mb->un.varRDnvp.rsvd3, licensed,
+   sizeof (licensed));
+
+  if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
+   /* Adapter initialization error, mbxCmd <cmd>
+      READ_NVPARM, mbxStatus <status> */
+   lpfc_printf_log(phba,
+     KERN_ERR,
+     LOG_MBOX,
+     "%d:0324 Config Port initialization "
+     "error, mbxCmd x%x READ_NVPARM, "
+     "mbxStatus x%x\n",
+     phba->brd_no,
+     mb->mbxCommand, mb->mbxStatus);
+   /* Free the mailbox buffer before bailing out; the
+    * original code leaked pmb on this path.
+    */
+   mempool_free(pmb, phba->mbox_mem_pool);
+   return -ERESTART;
+  }
+  memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename,
+         sizeof (mb->un.varRDnvp.nodename));
+ }
+
+ /* Setup and issue mailbox READ REV command */
+ lpfc_read_rev(phba, pmb);
+ if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
+  /* Adapter failed to init, mbxCmd <mbxCmd> READ_REV, mbxStatus
+     <status> */
+  lpfc_printf_log(phba,
+    KERN_ERR,
+    LOG_INIT,
+    "%d:0439 Adapter failed to init, mbxCmd x%x "
+    "READ_REV, mbxStatus x%x\n",
+    phba->brd_no,
+    mb->mbxCommand, mb->mbxStatus);
+  mempool_free( pmb, phba->mbox_mem_pool);
+  return -ERESTART;
+ }
+
+ /* The HBA's current state is provided by the ProgType and rr fields.
+  * Read and check the value of these fields before continuing to config
+  * this port.
+  */
+ if (mb->un.varRdRev.rr == 0 || mb->un.varRdRev.un.b.ProgType != 2) {
+  /* Old firmware */
+  vp->rev.rBit = 0;
+  /* Adapter failed to init, mbxCmd <cmd> READ_REV detected
+     outdated firmware */
+  lpfc_printf_log(phba,
+    KERN_ERR,
+    LOG_INIT,
+    "%d:0440 Adapter failed to init, mbxCmd x%x "
+    "READ_REV detected outdated firmware. "
+    "Data: x%x\n",
+    phba->brd_no,
+    mb->mbxCommand, 0);
+  mempool_free(pmb, phba->mbox_mem_pool);
+  return -ERESTART;
+ } else {
+  vp->rev.rBit = 1;
+  vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev;
+  memcpy(vp->rev.sli1FwName,
+   (char*)mb->un.varRdRev.sli1FwName, 16);
+  vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev;
+  memcpy(vp->rev.sli2FwName,
+   (char *)mb->un.varRdRev.sli2FwName, 16);
+ }
+
+ /* Save information as VPD data */
+ vp->rev.biuRev = mb->un.varRdRev.biuRev;
+ vp->rev.smRev = mb->un.varRdRev.smRev;
+ vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev;
+ vp->rev.endecRev = mb->un.varRdRev.endecRev;
+ vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh;
+ vp->rev.fcphLow = mb->un.varRdRev.fcphLow;
+ vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh;
+ vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow;
+ vp->rev.postKernRev = mb->un.varRdRev.postKernRev;
+ vp->rev.opFwRev = mb->un.varRdRev.opFwRev;
+ lpfc_rdrev_wd30 = mb->un.varWords[30];
+
+ if (lpfc_is_LC_HBA(phba->pcidev->device))
+  memcpy(phba->RandomData, (char *)&mb->un.varWords[24],
+   sizeof (phba->RandomData));
+
+ /* Get adapter VPD information */
+ lpfc_dump_mem(phba, pmb);
+ if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
+  /* Let it go through even if failed. */
+  /* Adapter failed to init, mbxCmd <cmd> DUMP VPD,
+     mbxStatus <status> */
+  lpfc_printf_log(phba,
+    KERN_INFO,
+    LOG_INIT,
+    "%d:0441 VPD not present on adapter, mbxCmd "
+    "x%x DUMP VPD, mbxStatus x%x\n",
+    phba->brd_no,
+    mb->mbxCommand, mb->mbxStatus);
+ } else if (mb->un.varDmp.ra == 1) {
+  lpfc_parse_vpd(phba, (uint8_t *)&mb->un.varDmp.resp_offset);
+ }
+ mempool_free(pmb, phba->mbox_mem_pool);
+ return 0;
+}
+
+/************************************************************************/
+/* */
+/* lpfc_config_port_post */
+/* This routine will do LPFC initialization after the */
+/* CONFIG_PORT mailbox command. This will be initialized */
+/* as a SLI layer callback routine. */
+/* This routine returns 0 on success. Any other return value */
+/* indicates an error. */
+/* */
+/************************************************************************/
+int
+lpfc_config_port_post(struct lpfc_hba * phba)
+{
+ LPFC_MBOXQ_t *pmb;
+ MAILBOX_t *mb;
+ struct lpfc_dmabuf *mp;
+ struct lpfc_sli *psli = &phba->sli;
+ uint32_t status, timeout;
+ int i, j, flogi_sent;
+ unsigned long isr_cnt, clk_cnt;
+
+
+ /* Get a Mailbox buffer to setup mailbox commands for HBA
+    initialization */
+ pmb = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC);
+ if (!pmb) {
+  phba->hba_state = LPFC_HBA_ERROR;
+  return -ENOMEM;
+ }
+ mb = &pmb->mb;
+
+ /* Setup link timers */
+ lpfc_config_link(phba, pmb);
+ if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
+  lpfc_printf_log(phba,
+    KERN_ERR,
+    LOG_INIT,
+    "%d:0447 Adapter failed init, mbxCmd x%x "
+    "CONFIG_LINK mbxStatus x%x\n",
+    phba->brd_no,
+    mb->mbxCommand, mb->mbxStatus);
+  phba->hba_state = LPFC_HBA_ERROR;
+  mempool_free( pmb, phba->mbox_mem_pool);
+  return -EIO;
+ }
+
+ /* Get login parameters for NID. */
+ lpfc_read_sparam(phba, pmb);
+ if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
+  lpfc_printf_log(phba,
+    KERN_ERR,
+    LOG_INIT,
+    "%d:0448 Adapter failed init, mbxCmd x%x "
+    "READ_SPARM mbxStatus x%x\n",
+    phba->brd_no,
+    mb->mbxCommand, mb->mbxStatus);
+  phba->hba_state = LPFC_HBA_ERROR;
+  mempool_free( pmb, phba->mbox_mem_pool);
+  return -EIO;
+ }
+
+ /* NOTE(review): context1 is assumed to have been populated by
+  * lpfc_read_sparam(); there is no NULL check here — confirm that
+  * lpfc_read_sparam() cannot leave it NULL on success.
+  */
+ mp = (struct lpfc_dmabuf *) pmb->context1;
+
+ /* The mailbox was populated by the HBA. Flush it to main store for the
+  * driver. Note that all context buffers are from the driver's
+  * dma pool and have length LPFC_BPL_SIZE.
+  */
+ pci_dma_sync_single_for_cpu(phba->pcidev, mp->phys, LPFC_BPL_SIZE,
+        PCI_DMA_FROMDEVICE);
+
+ memcpy(&phba->fc_sparam, mp->virt, sizeof (struct serv_parm));
+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ kfree(mp);
+ pmb->context1 = NULL;
+
+ memcpy(&phba->fc_nodename, &phba->fc_sparam.nodeName,
+        sizeof (struct lpfc_name));
+ memcpy(&phba->fc_portname, &phba->fc_sparam.portName,
+        sizeof (struct lpfc_name));
+ /* If no serial number in VPD data, use low 6 bytes of WWNN */
+ /* This should be consolidated into parse_vpd ? - mr */
+ if (phba->SerialNumber[0] == 0) {
+  uint8_t *outptr;
+
+  /* Render each WWNN byte as two lowercase hex characters */
+  outptr = (uint8_t *) & phba->fc_nodename.IEEE[0];
+  for (i = 0; i < 12; i++) {
+   status = *outptr++;
+   j = ((status & 0xf0) >> 4);
+   if (j <= 9)
+    phba->SerialNumber[i] =
+        (char)((uint8_t) 0x30 + (uint8_t) j);
+   else
+    phba->SerialNumber[i] =
+        (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
+   i++;
+   j = (status & 0xf);
+   if (j <= 9)
+    phba->SerialNumber[i] =
+        (char)((uint8_t) 0x30 + (uint8_t) j);
+   else
+    phba->SerialNumber[i] =
+        (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
+  }
+ }
+
+ /* This should turn on DELAYED ABTS for ELS timeouts */
+ lpfc_set_slim(phba, pmb, 0x052198, 0x1);
+ if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
+  phba->hba_state = LPFC_HBA_ERROR;
+  mempool_free( pmb, phba->mbox_mem_pool);
+  return -EIO;
+ }
+
+
+ lpfc_read_config(phba, pmb);
+ if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
+  lpfc_printf_log(phba,
+    KERN_ERR,
+    LOG_INIT,
+    "%d:0453 Adapter failed to init, mbxCmd x%x "
+    "READ_CONFIG, mbxStatus x%x\n",
+    phba->brd_no,
+    mb->mbxCommand, mb->mbxStatus);
+  phba->hba_state = LPFC_HBA_ERROR;
+  mempool_free( pmb, phba->mbox_mem_pool);
+  return -EIO;
+ }
+
+ /* Reset the DFT_HBA_Q_DEPTH to the max xri */
+ if (phba->cfg_hba_queue_depth > (mb->un.varRdConfig.max_xri+1))
+  phba->cfg_hba_queue_depth =
+   mb->un.varRdConfig.max_xri + 1;
+
+ phba->lmt = mb->un.varRdConfig.lmt;
+ /* HBA is not 4GB capable, or HBA is not 2GB capable,
+    don't let link speed ask for it */
+ if ((((phba->lmt & LMT_4250_10bit) != LMT_4250_10bit) &&
+      (phba->cfg_link_speed > LINK_SPEED_2G)) ||
+     (((phba->lmt & LMT_2125_10bit) != LMT_2125_10bit) &&
+      (phba->cfg_link_speed > LINK_SPEED_1G))) {
+  /* Reset link speed to auto. 1G/2GB HBA cfg'd for 4G */
+  lpfc_printf_log(phba,
+    KERN_WARNING,
+    LOG_LINK_EVENT,
+    "%d:1302 Invalid speed for this board: "
+    "Reset link speed to auto: x%x\n",
+    phba->brd_no,
+    phba->cfg_link_speed);
+  phba->cfg_link_speed = LINK_SPEED_AUTO;
+ }
+
+ if (!phba->intr_inited) {
+  /* Add our interrupt routine to kernel's interrupt chain &
+     enable it */
+
+  if (request_irq(phba->pcidev->irq,
+    lpfc_intr_handler,
+    SA_SHIRQ,
+    LPFC_DRIVER_NAME,
+    phba) != 0) {
+   /* Enable interrupt handler failed */
+   lpfc_printf_log(phba,
+     KERN_ERR,
+     LOG_INIT,
+     "%d:0451 Enable interrupt handler "
+     "failed\n",
+     phba->brd_no);
+   phba->hba_state = LPFC_HBA_ERROR;
+   mempool_free(pmb, phba->mbox_mem_pool);
+   return -EIO;
+  }
+  /* intr_inited doubles as the set of HC interrupt-enable bits
+   * to OR into the host control register below.
+   */
+  phba->intr_inited =
+   (HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA);
+ }
+
+ phba->hba_state = LPFC_LINK_DOWN;
+
+ /* Only process IOCBs on ring 0 till hba_state is READY */
+ if (psli->ring[psli->ip_ring].cmdringaddr)
+  psli->ring[psli->ip_ring].flag |= LPFC_STOP_IOCB_EVENT;
+ if (psli->ring[psli->fcp_ring].cmdringaddr)
+  psli->ring[psli->fcp_ring].flag |= LPFC_STOP_IOCB_EVENT;
+ if (psli->ring[psli->next_ring].cmdringaddr)
+  psli->ring[psli->next_ring].flag |= LPFC_STOP_IOCB_EVENT;
+
+ /* Post receive buffers for desired rings */
+ lpfc_post_rcv_buf(phba);
+
+ /* Enable appropriate host interrupts */
+ status = readl(phba->HCregaddr);
+ status |= phba->intr_inited;
+ if (psli->sliinit.num_rings > 0)
+  status |= HC_R0INT_ENA;
+ if (psli->sliinit.num_rings > 1)
+  status |= HC_R1INT_ENA;
+ if (psli->sliinit.num_rings > 2)
+  status |= HC_R2INT_ENA;
+ if (psli->sliinit.num_rings > 3)
+  status |= HC_R3INT_ENA;
+
+ writel(status, phba->HCregaddr);
+ readl(phba->HCregaddr); /* flush */
+
+ /* Setup and issue mailbox INITIALIZE LINK command */
+ lpfc_init_link(phba, pmb, phba->cfg_topology,
+         phba->cfg_link_speed);
+
+ isr_cnt = psli->slistat.sliIntr;
+ clk_cnt = jiffies;
+
+ if (lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT) != MBX_SUCCESS) {
+  lpfc_printf_log(phba,
+    KERN_ERR,
+    LOG_INIT,
+    "%d:0454 Adapter failed to init, mbxCmd x%x "
+    "INIT_LINK, mbxStatus x%x\n",
+    phba->brd_no,
+    mb->mbxCommand, mb->mbxStatus);
+
+  /* Clear all interrupt enable conditions */
+  writel(0, phba->HCregaddr);
+  readl(phba->HCregaddr); /* flush */
+  /* Clear all pending interrupts */
+  writel(0xffffffff, phba->HAregaddr);
+  readl(phba->HAregaddr); /* flush */
+
+  free_irq(phba->pcidev->irq, phba);
+  phba->hba_state = LPFC_HBA_ERROR;
+  mempool_free(pmb, phba->mbox_mem_pool);
+  return -EIO;
+ }
+ /* MBOX buffer will be freed in mbox compl */
+
+ /*
+  * Setup the ring 0 (els) timeout handler
+  */
+ timeout = phba->fc_ratov << 1;
+
+ phba->els_tmofunc.expires = jiffies + HZ * timeout;
+ add_timer(&phba->els_tmofunc);
+
+ phba->fc_prevDID = Mask_DID;
+ flogi_sent = 0;
+ i = 0;
+ /* Poll (up to ~30 s) until discovery settles: HBA ready, no
+  * outstanding discovery nodes / PRLIs / mailbox commands.
+  */
+ while ((phba->hba_state != LPFC_HBA_READY) ||
+        (phba->num_disc_nodes) || (phba->fc_prli_sent) ||
+        ((phba->fc_map_cnt == 0) && (i<2)) ||
+        (psli->sliinit.sli_flag & LPFC_SLI_MBOX_ACTIVE)) {
+  /* Check every second for 30 retries. */
+  i++;
+  if (i > 30) {
+   break;
+  }
+  if ((i >= 15) && (phba->hba_state <= LPFC_LINK_DOWN)) {
+   /* The link is down. Set linkdown timeout */
+   break;
+  }
+
+  /* Delay for 1 second to give discovery time to complete. */
+  for (j = 0; j < 20; j++) {
+   /* On some systems, the driver's attach/detect routines
+    * are uninterruptible. Since the driver cannot predict
+    * when this is true, just manually call the ISR every
+    * 50 ms to service any interrupts.
+    */
+   msleep(50);
+   if (isr_cnt == psli->slistat.sliIntr) {
+    lpfc_sli_intr(phba);
+    isr_cnt = psli->slistat.sliIntr;
+   }
+  }
+  isr_cnt = psli->slistat.sliIntr;
+
+  if (clk_cnt == jiffies) {
+   /* REMOVE: IF THIS HAPPENS, SYSTEM CLOCK IS NOT RUNNING.
+    * WE HAVE TO MANUALLY CALL OUR TIMEOUT ROUTINES.
+    */
+   clk_cnt = jiffies;
+  }
+ }
+
+ /* Since num_disc_nodes keys off of PLOGI, delay a bit to let
+  * any potential PRLIs to flush thru the SLI sub-system.
+  */
+ msleep(50);
+ if (isr_cnt == psli->slistat.sliIntr) {
+  lpfc_sli_intr(phba);
+ }
+
+ return (0);
+}
+
+/************************************************************************/
+/* */
+/* lpfc_hba_down_prep */
+/* This routine will do LPFC uninitialization before the */
+/* HBA is reset when bringing down the SLI Layer. This will be */
+/* initialized as a SLI layer callback routine. */
+/* This routine returns 0 on success. Any other return value */
+/* indicates an error. */
+/* */
+/************************************************************************/
+int
+lpfc_hba_down_prep(struct lpfc_hba * phba)
+{
+	/*
+	 * SLI-layer callback run before the HBA is reset while bringing
+	 * down the SLI layer.  Always returns 0 (success).
+	 */
+
+	/* Mask off all host interrupts ahead of the reset */
+	writel(0, phba->HCregaddr);
+	readl(phba->HCregaddr);	/* flush */
+
+	/* Release any discovery state the driver is still holding */
+	lpfc_els_flush_rscn(phba);
+	lpfc_els_flush_cmd(phba);
+	lpfc_disc_flush_list(phba);
+
+	return 0;
+}
+
+/************************************************************************/
+/* */
+/* lpfc_handle_eratt */
+/* This routine will handle processing a Host Attention */
+/* Error Status event. This will be initialized */
+/* as a SLI layer callback routine. */
+/* */
+/************************************************************************/
+void
+lpfc_handle_eratt(struct lpfc_hba * phba, uint32_t status)
+{
+ struct lpfc_sli *psli;
+ struct lpfc_sli_ring *pring;
+ struct lpfc_iocbq *iocb, *next_iocb;
+ IOCB_t *icmd = NULL, *cmd = NULL;
+ struct lpfc_scsi_buf *lpfc_cmd;
+ volatile uint32_t status1, status2;
+ void *from_slim;
+ unsigned long iflag;
+
+ psli = &phba->sli;
+ /* Capture two adapter status words from SLIM (offsets 0xa8 and
+ * 0xac); they are used only in the log messages below. */
+ from_slim = ((uint8_t *)phba->MBslimaddr + 0xa8);
+ status1 = readl( from_slim);
+ from_slim = ((uint8_t *)phba->MBslimaddr + 0xac);
+ status2 = readl( from_slim);
+
+ if (status & HS_FFER6) {
+ /* Re-establishing Link */
+ spin_lock_irqsave(phba->host->host_lock, iflag);
+ lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
+ "%d:1301 Re-establishing Link "
+ "Data: x%x x%x x%x\n",
+ phba->brd_no, status, status1, status2);
+ phba->fc_flag |= FC_ESTABLISH_LINK;
+
+ /*
+ * Firmware stops when it triggered erratt with HS_FFER6.
+ * That could cause the I/Os dropped by the firmware.
+ * Error iocb (I/O) on txcmplq and let the SCSI layer
+ * retry it after re-establishing link.
+ */
+ pring = &psli->ring[psli->fcp_ring];
+
+ list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq,
+ list) {
+ cmd = &iocb->iocb;
+
+ /* Must be a FCP command */
+ if ((cmd->ulpCommand != CMD_FCP_ICMND64_CR) &&
+ (cmd->ulpCommand != CMD_FCP_IWRITE64_CR) &&
+ (cmd->ulpCommand != CMD_FCP_IREAD64_CR)) {
+ continue;
+ }
+
+ /* context1 MUST be a struct lpfc_scsi_buf */
+ lpfc_cmd = (struct lpfc_scsi_buf *)(iocb->context1);
+ if (lpfc_cmd == 0) {
+ continue;
+ }
+
+ /* Unlink from txcmplq before completing or freeing */
+ list_del(&iocb->list);
+ pring->txcmplq_cnt--;
+
+ if (iocb->iocb_cmpl) {
+ /* Complete with LOCAL_REJECT/SLI_ABORTED so
+ * the SCSI layer will retry the command. */
+ icmd = &iocb->iocb;
+ icmd->ulpStatus = IOSTAT_LOCAL_REJECT;
+ icmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
+ (iocb->iocb_cmpl)(phba, iocb, iocb);
+ } else {
+ mempool_free( iocb, phba->iocb_mem_pool);
+ }
+ }
+
+ /*
+ * There was a firmware error. Take the hba offline and then
+ * attempt to restart it.
+ */
+ spin_unlock_irqrestore(phba->host->host_lock, iflag);
+ lpfc_offline(phba);
+ if (lpfc_online(phba) == 0) { /* Initialize the HBA */
+ /* Restart succeeded: arm the 60s re-establish-link
+ * window and leave. */
+ mod_timer(&phba->fc_estabtmo, jiffies + HZ * 60);
+ return;
+ }
+ } else {
+ /* The if clause above forces this code path when the status
+ * failure is a value other than FFER6. Do not call the offline
+ * twice. This is the adapter hardware error path.
+ */
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "%d:0457 Adapter Hardware Error "
+ "Data: x%x x%x x%x\n",
+ phba->brd_no, status, status1, status2);
+
+ lpfc_offline(phba);
+
+ /*
+ * Restart all traffic to this host. Since the fc_transport
+ * block functions (future) were not called in lpfc_offline,
+ * don't call them here.
+ */
+ scsi_unblock_requests(phba->host);
+ }
+ return;
+}
+
+/************************************************************************/
+/* */
+/* lpfc_handle_latt */
+/* This routine will handle processing a Host Attention */
+/* Link Status event. This will be initialized */
+/* as a SLI layer callback routine. */
+/* */
+/************************************************************************/
+void
+lpfc_handle_latt(struct lpfc_hba * phba)
+{
+ struct lpfc_sli *psli;
+ LPFC_MBOXQ_t *pmb;
+ volatile uint32_t control;
+ unsigned long iflag;
+
+
+ spin_lock_irqsave(phba->host->host_lock, iflag);
+
+ /* called from host_interrupt, to process LATT */
+ psli = &phba->sli;
+ psli->slistat.linkEvent++;
+
+ /* Get a buffer which will be used for mailbox commands */
+ if ((pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
+ GFP_ATOMIC))) {
+ /* Issue READ_LA to fetch the new link state; completion is
+ * handled by lpfc_mbx_cmpl_read_la. */
+ if (lpfc_read_la(phba, pmb) == 0) {
+ pmb->mbox_cmpl = lpfc_mbx_cmpl_read_la;
+ if (lpfc_sli_issue_mbox
+ (phba, pmb, (MBX_NOWAIT | MBX_STOP_IOCB))
+ != MBX_NOT_FINISHED) {
+ /* Turn off Link Attention interrupts until
+ CLEAR_LA done */
+ psli->sliinit.sli_flag &= ~LPFC_PROCESS_LA;
+ control = readl(phba->HCregaddr);
+ control &= ~HC_LAINT_ENA;
+ writel(control, phba->HCregaddr);
+ readl(phba->HCregaddr); /* flush */
+
+ /* Clear Link Attention in HA REG */
+ writel(HA_LATT, phba->HAregaddr);
+ readl(phba->HAregaddr); /* flush */
+ spin_unlock_irqrestore(phba->host->host_lock,
+ iflag);
+ return;
+ } else {
+ /* Mailbox could not be queued: fall through
+ * to the error path below. */
+ mempool_free(pmb, phba->mbox_mem_pool);
+ }
+ } else {
+ /* READ_LA setup failed (no DMA buffer) */
+ mempool_free(pmb, phba->mbox_mem_pool);
+ }
+ }
+
+ /* Error path: ack the attention anyway, force the link down and
+ * mark the HBA in error so recovery can run. */
+ /* Clear Link Attention in HA REG */
+ writel(HA_LATT, phba->HAregaddr);
+ readl(phba->HAregaddr); /* flush */
+ lpfc_linkdown(phba);
+ phba->hba_state = LPFC_HBA_ERROR;
+ spin_unlock_irqrestore(phba->host->host_lock, iflag);
+ return;
+}
+
+/************************************************************************/
+/* */
+/* lpfc_parse_vpd */
+/* This routine will parse the VPD data */
+/* */
+/************************************************************************/
+static int
+lpfc_parse_vpd(struct lpfc_hba * phba, uint8_t * vpd)
+{
+ uint8_t lenlo, lenhi;
+ uint8_t *Length;
+ int i, j;
+ int finished = 0;
+ int index = 0;
+
+ /* Vital Product */
+ lpfc_printf_log(phba,
+ KERN_INFO,
+ LOG_INIT,
+ "%d:0455 Vital Product Data: x%x x%x x%x x%x\n",
+ phba->brd_no,
+ (uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2],
+ (uint32_t) vpd[3]);
+ do {
+ switch (vpd[index]) {
+ case 0x82:
+ /* Descriptor with a 16-bit little-endian length:
+ * skip over its payload entirely. */
+ index += 1;
+ lenlo = vpd[index];
+ index += 1;
+ lenhi = vpd[index];
+ index += 1;
+ i = ((((unsigned short)lenhi) << 8) + lenlo);
+ index += i;
+ break;
+ case 0x90:
+ /* Keyword area: walk 2-byte keywords looking for
+ * "SN" (serial number). */
+ index += 1;
+ lenlo = vpd[index];
+ index += 1;
+ lenhi = vpd[index];
+ index += 1;
+ i = ((((unsigned short)lenhi) << 8) + lenlo);
+ do {
+ /* Look for Serial Number */
+ if ((vpd[index] == 'S')
+ && (vpd[index + 1] == 'N')) {
+ index += 2;
+ Length = &vpd[index];
+ index += 1;
+ i = *Length;
+ j = 0;
+ /* Copy at most 31 chars, then NUL
+ * terminate phba->SerialNumber. */
+ while (i--) {
+ phba->SerialNumber[j++] =
+ vpd[index++];
+ if (j == 31)
+ break;
+ }
+ phba->SerialNumber[j] = 0;
+ return (1);
+ } else {
+ /* Not "SN": skip keyword (2 bytes),
+ * length byte (1) and payload (j). */
+ index += 2;
+ Length = &vpd[index];
+ index += 1;
+ j = (int)(*Length);
+ index += j;
+ i -= (3 + j);
+ }
+ } while (i > 0);
+ finished = 0;
+ break;
+ case 0x78:
+ /* End-of-data marker */
+ finished = 1;
+ break;
+ default:
+ /* Unknown descriptor type: stop parsing */
+ return (0);
+ }
+ } while (!finished);
+ return (1);
+}
+
+/**************************************************/
+/* lpfc_post_buffer */
+/* */
+/* This routine will post count buffers to the */
+/* ring with the QUE_RING_BUF_CN command. This */
+/* allows 3 buffers / command to be posted. */
+/* Returns the number of buffers NOT posted. */
+/**************************************************/
+/* Post up to cnt receive buffers to pring via QUE_RING_BUF64_CN iocbs,
+ * two buffers per command.  Returns the number of buffers NOT posted
+ * (also remembered in pring->missbufcnt for the next call).
+ */
+int
+lpfc_post_buffer(struct lpfc_hba * phba, struct lpfc_sli_ring * pring, int cnt,
+		 int type)
+{
+	IOCB_t *icmd;
+	struct lpfc_iocbq *iocb;
+	struct lpfc_dmabuf *mp1, *mp2;
+
+	/* Include buffers that could not be posted on a previous call */
+	cnt += pring->missbufcnt;
+
+	/* While there are buffers to post */
+	while (cnt > 0) {
+		/* Allocate buffer for command iocb */
+		iocb = mempool_alloc(phba->iocb_mem_pool, GFP_ATOMIC);
+		if (!iocb) {
+			pring->missbufcnt = cnt;
+			return (cnt);
+		}
+		memset(iocb, 0, sizeof (struct lpfc_iocbq));
+		icmd = &iocb->iocb;
+
+		/* 2 buffers can be posted per command */
+		/* Allocate first buffer to post */
+		mp1 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_ATOMIC);
+		if (mp1)
+			mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
+						    &mp1->phys);
+		if (!mp1 || !mp1->virt) {
+			kfree(mp1);	/* kfree(NULL) is a no-op */
+			mempool_free(iocb, phba->iocb_mem_pool);
+			pring->missbufcnt = cnt;
+			return (cnt);
+		}
+
+		INIT_LIST_HEAD(&mp1->list);
+		/* Allocate second buffer to post */
+		if (cnt > 1) {
+			/* Use GFP_ATOMIC like every other allocation in
+			 * this loop; the previous GFP_KERNEL here could
+			 * sleep if a caller runs in atomic context.
+			 */
+			mp2 = kmalloc(sizeof (struct lpfc_dmabuf),
+				      GFP_ATOMIC);
+			if (mp2)
+				mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
+							    &mp2->phys);
+			if (!mp2 || !mp2->virt) {
+				kfree(mp2);
+				lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
+				kfree(mp1);
+				mempool_free(iocb, phba->iocb_mem_pool);
+				pring->missbufcnt = cnt;
+				return (cnt);
+			}
+
+			INIT_LIST_HEAD(&mp2->list);
+		} else {
+			mp2 = NULL;
+		}
+
+		/* Describe the buffer(s) in the command's BDEs */
+		icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys);
+		icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys);
+		icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE;
+		icmd->ulpBdeCount = 1;
+		cnt--;
+		if (mp2) {
+			icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys);
+			icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys);
+			icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE;
+			cnt--;
+			icmd->ulpBdeCount = 2;
+		}
+
+		icmd->ulpCommand = CMD_QUE_RING_BUF64_CN;
+		icmd->ulpIoTag = lpfc_sli_next_iotag(phba, pring);
+		icmd->ulpLe = 1;
+
+		if (lpfc_sli_issue_iocb(phba, pring, iocb, 0) == IOCB_ERROR) {
+			lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
+			kfree(mp1);
+			if (mp2) {
+				lpfc_mbuf_free(phba, mp2->virt, mp2->phys);
+				kfree(mp2);
+			}
+			mempool_free(iocb, phba->iocb_mem_pool);
+			pring->missbufcnt = cnt;
+			return (cnt);
+		}
+		/* Remember the posted buffers so they can be looked up
+		 * when the adapter hands them back.
+		 */
+		lpfc_sli_ringpostbuf_put(phba, pring, mp1);
+		if (mp2) {
+			lpfc_sli_ringpostbuf_put(phba, pring, mp2);
+		}
+	}
+	pring->missbufcnt = 0;
+	return (0);
+}
+
+/************************************************************************/
+/* */
+/* lpfc_post_rcv_buf */
+/* This routine post initial rcv buffers to the configured rings */
+/* */
+/************************************************************************/
+static int
+lpfc_post_rcv_buf(struct lpfc_hba * phba)
+{
+	struct lpfc_sli *psli = &phba->sli;
+
+	/* Only ring 0 (ELS / CT) needs receive buffers posted up front;
+	 * ring 2 (FCP) needs none.
+	 */
+	lpfc_post_buffer(phba, &psli->ring[LPFC_ELS_RING], LPFC_BUF_RING0, 1);
+
+	return 0;
+}
+
+#define S(N,V) (((V)<<(N))|((V)>>(32-(N))))
+
+/************************************************************************/
+/* */
+/* lpfc_sha_init */
+/* */
+/************************************************************************/
+static void
+lpfc_sha_init(uint32_t * HashResultPointer)
+{
+	/* Standard SHA-1 initial hash values (FIPS 180-1) */
+	static const uint32_t sha1_iv[5] = {
+		0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0
+	};
+	int i;
+
+	for (i = 0; i < 5; i++)
+		HashResultPointer[i] = sha1_iv[i];
+}
+
+/************************************************************************/
+/* */
+/* lpfc_sha_iterate */
+/* */
+/************************************************************************/
+static void
+lpfc_sha_iterate(uint32_t * HashResultPointer, uint32_t * HashWorkingPointer)
+{
+ int t;
+ uint32_t TEMP;
+ uint32_t A, B, C, D, E;
+ /* Expand the first 16 working words into the full 80-word message
+ * schedule; HashWorkingPointer must therefore hold 80 words. */
+ t = 16;
+ do {
+ HashWorkingPointer[t] =
+ S(1,
+ HashWorkingPointer[t - 3] ^ HashWorkingPointer[t -
+ 8] ^
+ HashWorkingPointer[t - 14] ^ HashWorkingPointer[t - 16]);
+ } while (++t <= 79);
+ t = 0;
+ A = HashResultPointer[0];
+ B = HashResultPointer[1];
+ C = HashResultPointer[2];
+ D = HashResultPointer[3];
+ E = HashResultPointer[4];
+
+ /* 80 compression rounds in four groups of 20, each group with its
+ * own round function and additive constant. */
+ do {
+ if (t < 20) {
+ TEMP = ((B & C) | ((~B) & D)) + 0x5A827999;
+ } else if (t < 40) {
+ TEMP = (B ^ C ^ D) + 0x6ED9EBA1;
+ } else if (t < 60) {
+ TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC;
+ } else {
+ TEMP = (B ^ C ^ D) + 0xCA62C1D6;
+ }
+ TEMP += S(5, A) + E + HashWorkingPointer[t];
+ E = D;
+ D = C;
+ C = S(30, B);
+ B = A;
+ A = TEMP;
+ } while (++t <= 79);
+
+ /* Fold this block's result back into the running digest */
+ HashResultPointer[0] += A;
+ HashResultPointer[1] += B;
+ HashResultPointer[2] += C;
+ HashResultPointer[3] += D;
+ HashResultPointer[4] += E;
+
+}
+
+/************************************************************************/
+/* */
+/* lpfc_challenge_key */
+/* */
+/************************************************************************/
+static void
+lpfc_challenge_key(uint32_t * RandomChallenge, uint32_t * HashWorking)
+{
+	/* Mix one word of the adapter's random challenge into the
+	 * corresponding SHA-1 working word.
+	 */
+	*HashWorking ^= *RandomChallenge;
+}
+
+/************************************************************************/
+/* */
+/* lpfc_hba_init */
+/* */
+/************************************************************************/
+void
+lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit)
+{
+	uint32_t *pwwnn = phba->wwnn;
+	uint32_t *hash_working;
+	int i;
+
+	/* Scratch area for the 80-word SHA-1 message schedule */
+	hash_working = kmalloc(80 * sizeof(uint32_t), GFP_ATOMIC);
+	if (!hash_working)
+		return;
+	memset(hash_working, 0, 80 * sizeof(uint32_t));
+
+	/* Seed words 0/1 and 78/79 with the adapter WWNN */
+	hash_working[0] = hash_working[78] = pwwnn[0];
+	hash_working[1] = hash_working[79] = pwwnn[1];
+
+	/* Fold the adapter's random challenge into the first 7 words */
+	for (i = 0; i < 7; i++)
+		lpfc_challenge_key(phba->RandomData + i, hash_working + i);
+
+	lpfc_sha_init(hbainit);
+	lpfc_sha_iterate(hbainit, hash_working);
+	kfree(hash_working);
+}
+
+static void
+lpfc_consistent_bind_cleanup(struct lpfc_hba * phba)
+{
+	struct lpfc_bindlist *entry, *tmp;
+
+	/* Return every node on the consistent-bind list to its pool */
+	list_for_each_entry_safe(entry, tmp, &phba->fc_nlpbind_list,
+				 nlp_listp) {
+		list_del(&entry->nlp_listp);
+		mempool_free(entry, phba->bind_mem_pool);
+	}
+	phba->fc_bind_cnt = 0;
+}
+
+void
+lpfc_cleanup(struct lpfc_hba * phba, uint32_t save_bind)
+{
+	struct lpfc_nodelist *ndlp, *next_ndlp;
+	struct list_head *mapped_lists[] = {
+		&phba->fc_nlpunmap_list,
+		&phba->fc_nlpmap_list,
+	};
+	struct list_head *disc_lists[] = {
+		&phba->fc_plogi_list,
+		&phba->fc_adisc_list,
+		&phba->fc_reglogin_list,
+		&phba->fc_prli_list,
+		&phba->fc_npr_list,
+	};
+	unsigned int i;
+
+	/* Stop any outstanding discovery timeout first */
+	lpfc_can_disctmo(phba);
+
+	/* Remove every node on the unmapped and mapped lists */
+	for (i = 0; i < 2; i++) {
+		list_for_each_entry_safe(ndlp, next_ndlp, mapped_lists[i],
+					 nlp_listp) {
+			lpfc_nlp_remove(phba, ndlp);
+		}
+	}
+
+	/* Unused nodes are simply taken off any list */
+	list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_unused_list,
+				 nlp_listp) {
+		lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
+	}
+
+	/* Remove every node still in a discovery state */
+	for (i = 0; i < 5; i++) {
+		list_for_each_entry_safe(ndlp, next_ndlp, disc_lists[i],
+					 nlp_listp) {
+			lpfc_nlp_remove(phba, ndlp);
+		}
+	}
+
+	/* Drop the consistent-bind list unless the caller keeps it */
+	if (save_bind == 0)
+		lpfc_consistent_bind_cleanup(phba);
+
+	/* Re-initialize every node list head and zero its counter */
+	INIT_LIST_HEAD(&phba->fc_nlpmap_list);
+	INIT_LIST_HEAD(&phba->fc_nlpunmap_list);
+	INIT_LIST_HEAD(&phba->fc_unused_list);
+	INIT_LIST_HEAD(&phba->fc_plogi_list);
+	INIT_LIST_HEAD(&phba->fc_adisc_list);
+	INIT_LIST_HEAD(&phba->fc_reglogin_list);
+	INIT_LIST_HEAD(&phba->fc_prli_list);
+	INIT_LIST_HEAD(&phba->fc_npr_list);
+
+	phba->fc_map_cnt = 0;
+	phba->fc_unmap_cnt = 0;
+	phba->fc_plogi_cnt = 0;
+	phba->fc_adisc_cnt = 0;
+	phba->fc_reglogin_cnt = 0;
+	phba->fc_prli_cnt = 0;
+	phba->fc_npr_cnt = 0;
+	phba->fc_unused_cnt = 0;
+	return;
+}
+
+void
+lpfc_establish_link_tmo(unsigned long ptr)
+{
+	struct lpfc_hba *phba = (struct lpfc_hba *) ptr;
+	unsigned long flags;
+
+	/* Timer callback: the re-establish-link window has expired, so
+	 * stop treating the link as "coming back".
+	 */
+	spin_lock_irqsave(phba->host->host_lock, flags);
+	lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
+			"%d:1300 Re-establishing Link, timer expired "
+			"Data: x%x x%x\n",
+			phba->brd_no, phba->fc_flag, phba->hba_state);
+	phba->fc_flag &= ~FC_ESTABLISH_LINK;
+	spin_unlock_irqrestore(phba->host->host_lock, flags);
+}
+
+int
+lpfc_online(struct lpfc_hba * phba)
+{
+	/* Nothing to do without an adapter, or if already online */
+	if (!phba || !(phba->fc_flag & FC_OFFLINE_MODE))
+		return 0;
+
+	lpfc_printf_log(phba,
+			KERN_WARNING,
+			LOG_INIT,
+			"%d:0458 Bring Adapter online\n",
+			phba->brd_no);
+
+	/* Re-create the SLI queues, then re-initialize the HBA */
+	if (!lpfc_sli_queue_setup(phba))
+		return 1;
+	if (lpfc_sli_hba_setup(phba))
+		return 1;
+
+	phba->fc_flag &= ~FC_OFFLINE_MODE;
+
+	/*
+	 * Restart all traffic to this host. Since the fc_transport block
+	 * functions (future) were not called in lpfc_offline, don't call them
+	 * here.
+	 */
+	scsi_unblock_requests(phba->host);
+	return 0;
+}
+
+int
+lpfc_offline(struct lpfc_hba * phba)
+{
+ struct lpfc_sli_ring *pring;
+ struct lpfc_sli *psli;
+ unsigned long iflag;
+ int i = 0;
+
+ if (!phba)
+ return 0;
+
+ /* Already offline: nothing to do */
+ if (phba->fc_flag & FC_OFFLINE_MODE)
+ return 0;
+
+ /*
+ * Don't call the fc_transport block api (future). The device is
+ * going offline and causing a timer to fire in the midlayer is
+ * unproductive. Just block all new requests until the driver
+ * comes back online.
+ */
+ scsi_block_requests(phba->host);
+ psli = &phba->sli;
+ pring = &psli->ring[psli->fcp_ring];
+
+ lpfc_linkdown(phba);
+
+ /* The linkdown event takes 30 seconds to timeout. */
+ /* Poll in 10ms steps (3000 * 10ms = 30s max) for the FCP ring's
+ * completion queue to drain. */
+ while (pring->txcmplq_cnt) {
+ mdelay(10);
+ if (i++ > 3000)
+ break;
+ }
+
+ /* stop all timers associated with this hba */
+ spin_lock_irqsave(phba->host->host_lock, iflag);
+ lpfc_stop_timer(phba);
+ spin_unlock_irqrestore(phba->host->host_lock, iflag);
+
+ lpfc_printf_log(phba,
+ KERN_WARNING,
+ LOG_INIT,
+ "%d:0460 Bring Adapter offline\n",
+ phba->brd_no);
+
+ /* Bring down the SLI Layer and cleanup. The HBA is offline
+ now. */
+ lpfc_sli_hba_down(phba);
+ lpfc_cleanup(phba, 1);
+ phba->fc_flag |= FC_OFFLINE_MODE;
+ return 0;
+}
+
+/******************************************************************************
+* Function name : lpfc_scsi_free
+*
+* Description : Called from fc_detach to free scsi tgt / lun resources
+*
+******************************************************************************/
+int
+lpfc_scsi_free(struct lpfc_hba * phba)
+{
+	int slot;
+
+	/* Free every target entry in the device hash and clear its slot */
+	for (slot = 0; slot < MAX_FCP_TARGET; slot++) {
+		struct lpfc_target *tgt = phba->device_queue_hash[slot];
+
+		if (tgt != NULL) {
+			phba->device_queue_hash[slot] = NULL;
+			kfree(tgt);
+		}
+	}
+	return 0;
+}
+
+static void
+lpfc_wakeup_event(struct lpfc_hba * phba, fcEVTHDR_t * ep)
+{
+	/* Clear the sleeping flag, then wake whichever wait queue
+	 * matches the registered event class.
+	 */
+	ep->e_mode &= ~E_SLEEPING_MODE;
+
+	if (ep->e_mask == FC_REG_LINK_EVENT)
+		wake_up_interruptible(&phba->linkevtwq);
+	else if (ep->e_mask == FC_REG_RSCN_EVENT)
+		wake_up_interruptible(&phba->rscnevtwq);
+	else if (ep->e_mask == FC_REG_CT_EVENT)
+		wake_up_interruptible(&phba->ctevtwq);
+}
+
+int
+lpfc_put_event(struct lpfc_hba * phba, uint32_t evcode, uint32_t evdata0,
+ void * evdata1, uint32_t evdata2, uint32_t evdata3)
+{
+ fcEVT_t *ep;
+ fcEVTHDR_t *ehp = phba->fc_evt_head;
+ int found = 0;
+ void *fstype = NULL;
+ struct lpfc_dmabuf *mp;
+ struct lpfc_sli_ct_request *ctp;
+ struct lpfc_hba_event *rec;
+ uint32_t evtype;
+
+ /* Map the HBA event code onto a registration event class */
+ switch (evcode) {
+ case HBA_EVENT_RSCN:
+ evtype = FC_REG_RSCN_EVENT;
+ break;
+ case HBA_EVENT_LINK_DOWN:
+ case HBA_EVENT_LINK_UP:
+ evtype = FC_REG_LINK_EVENT;
+ break;
+ default:
+ evtype = FC_REG_CT_EVENT;
+ }
+
+ if (evtype == FC_REG_RSCN_EVENT || evtype == FC_REG_LINK_EVENT) {
+ /* Record the event in the hbaevt ring buffer; when the ring
+ * wraps, the oldest entry is dropped and counted as missed. */
+ rec = &phba->hbaevt[phba->hba_event_put];
+ rec->fc_eventcode = evcode;
+ rec->fc_evdata1 = evdata0;
+ rec->fc_evdata2 = (uint32_t)(unsigned long)evdata1;
+ rec->fc_evdata3 = evdata2;
+ rec->fc_evdata4 = evdata3;
+
+ phba->hba_event_put++;
+ if (phba->hba_event_put >= MAX_HBAEVT)
+ phba->hba_event_put = 0;
+
+ if (phba->hba_event_put == phba->hba_event_get) {
+ phba->hba_event_missed++;
+ phba->hba_event_get++;
+ if (phba->hba_event_get >= MAX_HBAEVT)
+ phba->hba_event_get = 0;
+ }
+ }
+
+ if (evtype == FC_REG_CT_EVENT) {
+ /* For CT events, evdata1 is the DMA buffer holding the CT
+ * request; its FsType selects which waiters match. */
+ mp = (struct lpfc_dmabuf *) evdata1;
+ ctp = (struct lpfc_sli_ct_request *) mp->virt;
+ fstype = (void *)(ulong) (ctp->FsType);
+ }
+
+ /* Find the registration header for this event class / FsType */
+ while (ehp && ((ehp->e_mask != evtype) || (ehp->e_type != fstype)))
+ ehp = (fcEVTHDR_t *) ehp->e_next_header;
+
+ if (!ehp)
+ return (0);
+
+ ep = ehp->e_head;
+
+ /* Hand the event data to sleeping waiters on this header */
+ while (ep && !(found)) {
+ if (ep->evt_sleep) {
+ switch (evtype) {
+ case FC_REG_CT_EVENT:
+ if ((ep->evt_type ==
+ (void *)(ulong) FC_FSTYPE_ALL)
+ || (ep->evt_type == fstype)) {
+ found++;
+ ep->evt_data0 = evdata0; /* tag */
+ ep->evt_data1 = evdata1; /* buffer
+ ptr */
+ ep->evt_data2 = evdata2; /* count */
+ ep->evt_sleep = 0;
+ if (ehp->e_mode & E_SLEEPING_MODE) {
+ ehp->e_flag |=
+ E_GET_EVENT_ACTIVE;
+ lpfc_wakeup_event(phba, ehp);
+ }
+ /* For FC_REG_CT_EVENT just give it to
+ first one found */
+ }
+ break;
+ default:
+ found++;
+ ep->evt_data0 = evdata0;
+ ep->evt_data1 = evdata1;
+ ep->evt_data2 = evdata2;
+ ep->evt_sleep = 0;
+ if ((ehp->e_mode & E_SLEEPING_MODE)
+ && !(ehp->e_flag & E_GET_EVENT_ACTIVE)) {
+ ehp->e_flag |= E_GET_EVENT_ACTIVE;
+ lpfc_wakeup_event(phba, ehp);
+ }
+ /* For all other events, give it to every one
+ waiting */
+ break;
+ }
+ }
+ ep = ep->evt_next;
+ }
+ if (evtype == FC_REG_LINK_EVENT)
+ phba->nport_event_cnt++;
+
+ return (found);
+}
+
+int
+lpfc_stop_timer(struct lpfc_hba * phba)
+{
+	struct lpfc_sli *psli = &phba->sli;
+
+	/* Instead of a timer, this has been converted to a
+	 * deferred processing list: drain every buffer that was queued
+	 * for deferred freeing.
+	 */
+	while (!list_empty(&phba->freebufList)) {
+		struct lpfc_dmabuf *mp;
+
+		/* List is non-empty, so .next is a valid entry; the old
+		 * NULL check here was dead code.
+		 */
+		mp = (struct lpfc_dmabuf *)(phba->freebufList.next);
+		lpfc_mbuf_free(phba, mp->virt, mp->phys);
+		list_del(&mp->list);
+		kfree(mp);
+	}
+
+	/* Stop every timer associated with this HBA (psli was also
+	 * redundantly re-assigned here before).
+	 */
+	del_timer_sync(&phba->fc_estabtmo);
+	del_timer_sync(&phba->fc_disctmo);
+	del_timer_sync(&phba->fc_scantmo);
+	del_timer_sync(&phba->fc_fdmitmo);
+	del_timer_sync(&phba->els_tmofunc);
+	del_timer_sync(&psli->mbox_tmo);
+	return(1);
+}
--- /dev/null
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for *
+ * Enterprise Fibre Channel Host Bus Adapters. *
+ * Refer to the README file included with this package for *
+ * driver version and adapter support. *
+ * Copyright (C) 2004 Emulex Corporation. *
+ * www.emulex.com *
+ * *
+ * This program is free software; you can redistribute it and/or *
+ * modify it under the terms of the GNU General Public License *
+ * as published by the Free Software Foundation; either version 2 *
+ * of the License, or (at your option) any later version. *
+ * *
+ * This program is distributed in the hope that it will be useful, *
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of *
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
+ * GNU General Public License for more details, a copy of which *
+ * can be found in the file COPYING included with this package. *
+ *******************************************************************/
+
+/*
+ * $Id: lpfc_logmsg.h 1.31 2004/08/26 19:17:52EDT sf_support Exp $
+ */
+
+#ifndef _H_LPFC_LOGMSG
+#define _H_LPFC_LOGMSG
+
+#define LOG_ELS 0x1 /* ELS events */
+#define LOG_DISCOVERY 0x2 /* Link discovery events */
+#define LOG_MBOX 0x4 /* Mailbox events */
+#define LOG_INIT 0x8 /* Initialization events */
+#define LOG_LINK_EVENT 0x10 /* Link events */
+#define LOG_IP 0x20 /* IP traffic history */
+#define LOG_FCP 0x40 /* FCP traffic history */
+#define LOG_NODE 0x80 /* Node table events */
+#define LOG_MISC 0x400 /* Miscellaneous events */
+#define LOG_SLI 0x800 /* SLI events */
+#define LOG_CHK_COND 0x1000 /* FCP Check condition flag */
+#define LOG_LIBDFC 0x2000 /* Libdfc events */
+#define LOG_ALL_MSG 0xffff /* LOG all messages */
+
+#define lpfc_printf_log(phba, level, mask, fmt, arg...) \
+ { if (((mask) &(phba)->cfg_log_verbose) || (level[1] <= '3')) \
+ dev_printk(level, &((phba)->pcidev)->dev, fmt, ##arg); }
+#endif
+
--- /dev/null
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for *
+ * Enterprise Fibre Channel Host Bus Adapters. *
+ * Refer to the README file included with this package for *
+ * driver version and adapter support. *
+ * Copyright (C) 2004 Emulex Corporation. *
+ * www.emulex.com *
+ * *
+ * This program is free software; you can redistribute it and/or *
+ * modify it under the terms of the GNU General Public License *
+ * as published by the Free Software Foundation; either version 2 *
+ * of the License, or (at your option) any later version. *
+ * *
+ * This program is distributed in the hope that it will be useful, *
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of *
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
+ * GNU General Public License for more details, a copy of which *
+ * can be found in the file COPYING included with this package. *
+ *******************************************************************/
+
+/*
+ * $Id: lpfc_mbox.c 1.70 2004/11/18 17:33:04EST sf_support Exp $
+ */
+#include <linux/version.h>
+#include <linux/blkdev.h>
+#include <linux/dma-mapping.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <scsi/scsi_device.h>
+#include "lpfc_sli.h"
+#include "lpfc_disc.h"
+#include "lpfc_scsi.h"
+#include "lpfc.h"
+#include "lpfc_crtn.h"
+#include "lpfc_hw.h"
+#include "lpfc_logmsg.h"
+#include "lpfc_mem.h"
+#include "lpfc_compat.h"
+
+/**********************************************/
+/* lpfc_dump_mem    Issue a DUMP MEMORY       */
+/* mailbox command                            */
+/**********************************************/
+void
+lpfc_dump_mem(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
+{
+	MAILBOX_t *mbox = &pmb->mb;
+
+	/* Build a DUMP_MEMORY mailbox command that reads the VPD region */
+	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
+
+	mbox->mbxCommand = MBX_DUMP_MEMORY;
+	mbox->mbxOwner = OWN_HOST;
+	mbox->un.varDmp.cv = 1;
+	mbox->un.varDmp.type = DMP_NV_PARAMS;
+	mbox->un.varDmp.region_id = DMP_REGION_VPD;
+	mbox->un.varDmp.word_cnt = DMP_VPD_SIZE / sizeof (uint32_t);
+	mbox->un.varDmp.co = 0;
+	mbox->un.varDmp.resp_offset = 0;
+}
+
+/**********************************************/
+/* lpfc_read_nv Issue a READ NVPARAM */
+/* mailbox command */
+/**********************************************/
+void
+lpfc_read_nv(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
+{
+	MAILBOX_t *mbox = &pmb->mb;
+
+	/* Build a READ_NV mailbox command; every other field stays zero */
+	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
+	mbox->mbxCommand = MBX_READ_NV;
+	mbox->mbxOwner = OWN_HOST;
+}
+
+/**********************************************/
+/* lpfc_read_la Issue a READ LA */
+/* mailbox command */
+/**********************************************/
+int
+lpfc_read_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
+{
+ MAILBOX_t *mb;
+ struct lpfc_dmabuf *mp;
+ struct lpfc_sli *psli;
+
+ psli = &phba->sli;
+ mb = &pmb->mb;
+ memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
+
+ /* Get a buffer to hold the loop map */
+ if (((mp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_ATOMIC)) == 0) ||
+ ((mp->virt = lpfc_mbuf_alloc(phba, 0, &(mp->phys))) == 0)) {
+ if (mp)
+ kfree(mp);
+ mb->mbxCommand = MBX_READ_LA64;
+ /* READ_LA: no buffers */
+ lpfc_printf_log(phba,
+ KERN_WARNING,
+ LOG_MBOX,
+ "%d:0300 READ_LA: no buffers\n",
+ phba->brd_no);
+ return (1);
+ }
+ INIT_LIST_HEAD(&mp->list);
+ mb->mbxCommand = MBX_READ_LA64;
+ /* Point the command's 128-byte BDE at the loop-map buffer */
+ mb->un.varReadLA.un.lilpBde64.tus.f.bdeSize = 128;
+ mb->un.varReadLA.un.lilpBde64.addrHigh = putPaddrHigh(mp->phys);
+ mb->un.varReadLA.un.lilpBde64.addrLow = putPaddrLow(mp->phys);
+
+ /* Sync the mailbox data with its PCI memory address now. */
+ pci_dma_sync_single_for_device(phba->pcidev, mp->phys, LPFC_BPL_SIZE,
+ PCI_DMA_TODEVICE);
+
+ /* Save address for later completion and set the owner to host so that
+ * the FW knows this mailbox is available for processing.
+ */
+ pmb->context1 = (uint8_t *) mp;
+ mb->mbxOwner = OWN_HOST;
+ return (0);
+}
+
+/**********************************************/
+/* lpfc_clear_la Issue a CLEAR LA */
+/* mailbox command */
+/**********************************************/
+void
+lpfc_clear_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
+{
+	MAILBOX_t *mbox = &pmb->mb;
+
+	/* Build a CLEAR_LA mailbox command carrying the current link
+	 * event tag.
+	 */
+	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
+	mbox->un.varClearLA.eventTag = phba->fc_eventTag;
+	mbox->mbxCommand = MBX_CLEAR_LA;
+	mbox->mbxOwner = OWN_HOST;
+}
+
+/**************************************************/
+/* lpfc_config_link Issue a CONFIG LINK */
+/* mailbox command */
+/**************************************************/
+void
+lpfc_config_link(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
+{
+	MAILBOX_t *mbox = &pmb->mb;
+
+	/* Build a CONFIG_LINK mailbox command from the driver's current
+	 * identity, timeout values and coalescing configuration.
+	 */
+	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
+
+	/* SLI-2 coalescing response feature, only when a coalescing
+	 * delay has been configured.
+	 */
+	if (phba->cfg_cr_delay) {
+		mbox->un.varCfgLnk.cr = 1;
+		mbox->un.varCfgLnk.ci = 1;
+		mbox->un.varCfgLnk.cr_delay = phba->cfg_cr_delay;
+		mbox->un.varCfgLnk.cr_count = phba->cfg_cr_count;
+	}
+
+	mbox->un.varCfgLnk.myId = phba->fc_myDID;
+	mbox->un.varCfgLnk.edtov = phba->fc_edtov;
+	mbox->un.varCfgLnk.arbtov = phba->fc_arbtov;
+	mbox->un.varCfgLnk.ratov = phba->fc_ratov;
+	mbox->un.varCfgLnk.rttov = phba->fc_rttov;
+	mbox->un.varCfgLnk.altov = phba->fc_altov;
+	mbox->un.varCfgLnk.crtov = phba->fc_crtov;
+	mbox->un.varCfgLnk.citov = phba->fc_citov;
+
+	if (phba->cfg_ack0)
+		mbox->un.varCfgLnk.ack0_enable = 1;
+
+	mbox->mbxCommand = MBX_CONFIG_LINK;
+	mbox->mbxOwner = OWN_HOST;
+}
+
+/**********************************************/
+/* lpfc_init_link Issue an INIT LINK */
+/* mailbox command */
+/**********************************************/
+void
+lpfc_init_link(struct lpfc_hba * phba,
+ LPFC_MBOXQ_t * pmb, uint32_t topology, uint32_t linkspeed)
+{
+ lpfc_vpd_t *vpd;
+ struct lpfc_sli *psli;
+ MAILBOX_t *mb;
+
+ mb = &pmb->mb;
+ memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
+
+ psli = &phba->sli;
+ /* Translate the configured topology into link flags; the *_PT and
+ * *_LOOP "preferred" modes additionally set the failover flag. */
+ switch (topology) {
+ case FLAGS_TOPOLOGY_MODE_LOOP_PT:
+ mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_LOOP;
+ mb->un.varInitLnk.link_flags |= FLAGS_TOPOLOGY_FAILOVER;
+ break;
+ case FLAGS_TOPOLOGY_MODE_PT_PT:
+ mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_PT_PT;
+ break;
+ case FLAGS_TOPOLOGY_MODE_LOOP:
+ mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_LOOP;
+ break;
+ case FLAGS_TOPOLOGY_MODE_PT_LOOP:
+ mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_PT_PT;
+ mb->un.varInitLnk.link_flags |= FLAGS_TOPOLOGY_FAILOVER;
+ break;
+ }
+
+ /* NEW_FEATURE
+ * Setting up the link speed
+ */
+ /* Only firmware feature level >= 0x02 accepts an explicit link
+ * speed; otherwise force auto-negotiation. */
+ vpd = &phba->vpd;
+ if (vpd->rev.feaLevelHigh >= 0x02){
+ switch(linkspeed){
+ case LINK_SPEED_1G:
+ case LINK_SPEED_2G:
+ case LINK_SPEED_4G:
+ mb->un.varInitLnk.link_flags |=
+ FLAGS_LINK_SPEED;
+ mb->un.varInitLnk.link_speed = linkspeed;
+ break;
+ case LINK_SPEED_AUTO:
+ default:
+ mb->un.varInitLnk.link_speed =
+ LINK_SPEED_AUTO;
+ break;
+ }
+
+ }
+ else
+ mb->un.varInitLnk.link_speed = LINK_SPEED_AUTO;
+
+ mb->mbxCommand = (volatile uint8_t)MBX_INIT_LINK;
+ mb->mbxOwner = OWN_HOST;
+ mb->un.varInitLnk.fabric_AL_PA = phba->fc_pref_ALPA;
+ return;
+}
+
+/**********************************************/
+/* lpfc_read_sparam Issue a READ SPARAM */
+/* mailbox command */
+/**********************************************/
+int
+lpfc_read_sparam(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
+{
+ struct lpfc_dmabuf *mp;
+ MAILBOX_t *mb;
+ struct lpfc_sli *psli;
+
+ psli = &phba->sli;
+ mb = &pmb->mb;
+ memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
+
+ mb->mbxOwner = OWN_HOST;
+
+ /* Get a buffer to hold the HBAs Service Parameters */
+
+ if (((mp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_ATOMIC)) == 0) ||
+ ((mp->virt = lpfc_mbuf_alloc(phba, 0, &(mp->phys))) == 0)) {
+ if (mp)
+ kfree(mp);
+ mb->mbxCommand = MBX_READ_SPARM64;
+ /* READ_SPARAM: no buffers */
+ lpfc_printf_log(phba,
+ KERN_WARNING,
+ LOG_MBOX,
+ "%d:0301 READ_SPARAM: no buffers\n",
+ phba->brd_no);
+ return (1);
+ }
+ INIT_LIST_HEAD(&mp->list);
+ mb->mbxCommand = MBX_READ_SPARM64;
+ mb->un.varRdSparm.un.sp64.tus.f.bdeSize = sizeof (struct serv_parm);
+ mb->un.varRdSparm.un.sp64.addrHigh = putPaddrHigh(mp->phys);
+ mb->un.varRdSparm.un.sp64.addrLow = putPaddrLow(mp->phys);
+
+ pci_dma_sync_single_for_device(phba->pcidev, mp->phys, LPFC_BPL_SIZE,
+ PCI_DMA_TODEVICE);
+
+ /* save address for completion */
+ pmb->context1 = mp;
+
+ return (0);
+}
+
+/********************************************/
+/* lpfc_unreg_did Issue a UNREG_DID */
+/* mailbox command */
+/********************************************/
+void
+lpfc_unreg_did(struct lpfc_hba * phba, uint32_t did, LPFC_MBOXQ_t * pmb)
+{
+ MAILBOX_t *mb;
+
+ mb = &pmb->mb;
+ memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
+
+ mb->un.varUnregDID.did = did;
+
+ mb->mbxCommand = MBX_UNREG_D_ID;
+ mb->mbxOwner = OWN_HOST;
+ return;
+}
+
+/***********************************************/
+
+/* command to write slim */
+/***********************************************/
+void
+lpfc_set_slim(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb, uint32_t addr,
+ uint32_t value)
+{
+ MAILBOX_t *mb;
+
+ mb = &pmb->mb;
+ memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
+
+ /* addr = 0x090597 is AUTO ABTS disable for ELS commands */
+ /* addr = 0x052198 is DELAYED ABTS enable for ELS commands */
+
+ /*
+ * Always turn on DELAYED ABTS for ELS timeouts
+ */
+ if ((addr == 0x052198) && (value == 0))
+ value = 1;
+
+ mb->un.varWords[0] = addr;
+ mb->un.varWords[1] = value;
+
+ mb->mbxCommand = MBX_SET_SLIM;
+ mb->mbxOwner = OWN_HOST;
+ return;
+}
+
+/**********************************************/
+/* lpfc_read_nv Issue a READ CONFIG */
+/* mailbox command */
+/**********************************************/
+void
+lpfc_read_config(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
+{
+ MAILBOX_t *mb;
+
+ mb = &pmb->mb;
+ memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
+
+ mb->mbxCommand = MBX_READ_CONFIG;
+ mb->mbxOwner = OWN_HOST;
+ return;
+}
+
+/********************************************/
+/* lpfc_reg_login Issue a REG_LOGIN */
+/* mailbox command */
+/********************************************/
+int
+lpfc_reg_login(struct lpfc_hba * phba,
+ uint32_t did, uint8_t * param, LPFC_MBOXQ_t * pmb, uint32_t flag)
+{
+ uint8_t *sparam;
+ struct lpfc_dmabuf *mp;
+ MAILBOX_t *mb;
+ struct lpfc_sli *psli;
+
+ psli = &phba->sli;
+ mb = &pmb->mb;
+ memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
+
+ mb->un.varRegLogin.rpi = 0;
+ mb->un.varRegLogin.did = did;
+ mb->un.varWords[30] = flag; /* Set flag to issue action on cmpl */
+
+ mb->mbxOwner = OWN_HOST;
+
+ /* Get a buffer to hold NPorts Service Parameters */
+ if (((mp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_ATOMIC)) == 0) ||
+ ((mp->virt = lpfc_mbuf_alloc(phba, 0, &(mp->phys))) == 0)) {
+ if (mp)
+ kfree(mp);
+
+ mb->mbxCommand = MBX_REG_LOGIN64;
+ /* REG_LOGIN: no buffers */
+ lpfc_printf_log(phba,
+ KERN_WARNING,
+ LOG_MBOX,
+ "%d:0302 REG_LOGIN: no buffers Data x%x x%x\n",
+ phba->brd_no,
+ (uint32_t) did, (uint32_t) flag);
+ return (1);
+ }
+ INIT_LIST_HEAD(&mp->list);
+ sparam = mp->virt;
+
+ /* Copy param's into a new buffer */
+ memcpy(sparam, param, sizeof (struct serv_parm));
+
+ /* Sync the mailbox data with its PCI memory address now. */
+ pci_dma_sync_single_for_device(phba->pcidev, mp->phys, LPFC_BPL_SIZE,
+ PCI_DMA_TODEVICE);
+
+ /* save address for completion */
+ pmb->context1 = (uint8_t *) mp;
+
+ mb->mbxCommand = MBX_REG_LOGIN64;
+ mb->un.varRegLogin.un.sp64.tus.f.bdeSize = sizeof (struct serv_parm);
+ mb->un.varRegLogin.un.sp64.addrHigh = putPaddrHigh(mp->phys);
+ mb->un.varRegLogin.un.sp64.addrLow = putPaddrLow(mp->phys);
+
+ return (0);
+}
+
+/**********************************************/
+/* lpfc_unreg_login Issue a UNREG_LOGIN */
+/* mailbox command */
+/**********************************************/
+void
+lpfc_unreg_login(struct lpfc_hba * phba, uint32_t rpi, LPFC_MBOXQ_t * pmb)
+{
+ MAILBOX_t *mb;
+
+ mb = &pmb->mb;
+ memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
+
+ mb->un.varUnregLogin.rpi = (uint16_t) rpi;
+ mb->un.varUnregLogin.rsvd1 = 0;
+
+ mb->mbxCommand = MBX_UNREG_LOGIN;
+ mb->mbxOwner = OWN_HOST;
+ return;
+}
+
+/*
+ * Fill in the Port Control Block (PCB) ring descriptors and the host-side
+ * ring base pointers.  IOCB slots in slim2p->IOCBs are handed out
+ * sequentially: each configured ring consumes numCiocb command entries
+ * followed by numRiocb response entries (iocbCnt tracks the running
+ * offset).  Rings with either count zero are disabled entirely.
+ */
+static void
+lpfc_config_pcb_setup(struct lpfc_hba * phba)
+{
+ struct lpfc_sli *psli = &phba->sli;
+ struct lpfc_sli_ring *pring;
+ PCB_t *pcbp = &phba->slim2p->pcb;
+ LPFC_RING_INIT_t *pringinit;
+ dma_addr_t pdma_addr;
+ uint32_t offset;
+ uint32_t iocbCnt;
+ int i;
+
+ psli->MBhostaddr = (uint32_t *)&phba->slim2p->mbx;
+ pcbp->maxRing = (psli->sliinit.num_rings - 1);
+
+ iocbCnt = 0;
+ for (i = 0; i < psli->sliinit.num_rings; i++) {
+ pringinit = &psli->sliinit.ringinit[i];
+ pring = &psli->ring[i];
+ /* A ring MUST have both cmd and rsp entries defined to be
+ valid */
+ if ((pringinit->numCiocb == 0) || (pringinit->numRiocb == 0)) {
+ pcbp->rdsc[i].cmdEntries = 0;
+ pcbp->rdsc[i].rspEntries = 0;
+ pcbp->rdsc[i].cmdAddrHigh = 0;
+ pcbp->rdsc[i].rspAddrHigh = 0;
+ pcbp->rdsc[i].cmdAddrLow = 0;
+ pcbp->rdsc[i].rspAddrLow = 0;
+ pring->cmdringaddr = NULL;
+ pring->rspringaddr = NULL;
+ continue;
+ }
+ /* Command ring setup for ring */
+ pring->cmdringaddr =
+ (void *)&phba->slim2p->IOCBs[iocbCnt];
+ pcbp->rdsc[i].cmdEntries = pringinit->numCiocb;
+
+ /* Convert the host virtual address into a bus address by
+  * adding its offset within slim2p to the DMA mapping base.
+  */
+ offset = (uint8_t *)&phba->slim2p->IOCBs[iocbCnt] -
+ (uint8_t *)phba->slim2p;
+ pdma_addr = phba->slim2p_mapping + offset;
+ pcbp->rdsc[i].cmdAddrHigh = putPaddrHigh(pdma_addr);
+ pcbp->rdsc[i].cmdAddrLow = putPaddrLow(pdma_addr);
+ iocbCnt += pringinit->numCiocb;
+
+ /* Response ring setup for ring */
+ pring->rspringaddr =
+ (void *)&phba->slim2p->IOCBs[iocbCnt];
+
+ pcbp->rdsc[i].rspEntries = pringinit->numRiocb;
+ offset = (uint8_t *)&phba->slim2p->IOCBs[iocbCnt] -
+ (uint8_t *)phba->slim2p;
+ pdma_addr = phba->slim2p_mapping + offset;
+ pcbp->rdsc[i].rspAddrHigh = putPaddrHigh(pdma_addr);
+ pcbp->rdsc[i].rspAddrLow = putPaddrLow(pdma_addr);
+ iocbCnt += pringinit->numRiocb;
+ }
+}
+
+/*
+ * Prepare a READ_REV mailbox command in pmb.  The cv bit is set
+ * (presumably selecting the extended revision response format —
+ * verify against the MAILBOX_t definition in lpfc_hw.h).
+ */
+void
+lpfc_read_rev(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
+{
+ MAILBOX_t *mbox = &pmb->mb;
+
+ memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
+
+ mbox->mbxCommand = MBX_READ_REV;
+ mbox->mbxOwner = OWN_HOST;
+ mbox->un.varRdRev.cv = 1;
+}
+
+/*
+ * Prepare a CONFIG_RING mailbox command for ring <ring>.  Either a
+ * single profile is configured (if the first mask entry carries one),
+ * or a set of rctl/type match registers taken from the ring's
+ * initialization table.
+ */
+void
+lpfc_config_ring(struct lpfc_hba * phba, int ring, LPFC_MBOXQ_t * pmb)
+{
+ int i;
+ MAILBOX_t *mb = &pmb->mb;
+ struct lpfc_sli *psli;
+ LPFC_RING_INIT_t *pring;
+
+ memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
+
+ mb->un.varCfgRing.ring = ring;
+ mb->un.varCfgRing.maxOrigXchg = 0;
+ mb->un.varCfgRing.maxRespXchg = 0;
+ mb->un.varCfgRing.recvNotify = 1;
+
+ psli = &phba->sli;
+ pring = &psli->sliinit.ringinit[ring];
+ mb->un.varCfgRing.numMask = pring->num_mask;
+ mb->mbxCommand = MBX_CONFIG_RING;
+ mb->mbxOwner = OWN_HOST;
+
+ /* Is this ring configured for a specific profile */
+ if (pring->prt[0].profile) {
+ mb->un.varCfgRing.profile = pring->prt[0].profile;
+ return;
+ }
+
+ /* Otherwise we setup specific rctl / type masks for this ring */
+ for (i = 0; i < pring->num_mask; i++) {
+ mb->un.varCfgRing.rrRegs[i].rval = pring->prt[i].rctl;
+ /* ELS requests use a looser rctl mask (0xfe) so both
+  * request and reply R_CTL values match this entry.
+  */
+ if (mb->un.varCfgRing.rrRegs[i].rval != FC_ELS_REQ)
+ mb->un.varCfgRing.rrRegs[i].rmask = 0xff;
+ else
+ mb->un.varCfgRing.rrRegs[i].rmask = 0xfe;
+ mb->un.varCfgRing.rrRegs[i].tval = pring->prt[i].type;
+ mb->un.varCfgRing.rrRegs[i].tmask = 0xff;
+ }
+
+ return;
+}
+
+/*
+ * Prepare a CONFIG_PORT mailbox command and initialize the Port Control
+ * Block (PCB) in host memory:
+ *  - point the command's BDE at the PCB within the slim2p DMA region,
+ *  - fill in mailbox, host-group and port-group pointer addresses,
+ *  - configure the per-ring descriptors via lpfc_config_pcb_setup(),
+ *  - byte-swap the PCB in place for the adapter if required.
+ */
+void
+lpfc_config_port(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
+{
+ MAILBOX_t *mb = &pmb->mb;
+ dma_addr_t pdma_addr;
+ uint32_t bar_low, bar_high;
+ size_t offset;
+ HGP hgp;
+ void *to_slim;
+
+ memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
+ mb->mbxCommand = MBX_CONFIG_PORT;
+ mb->mbxOwner = OWN_HOST;
+
+ mb->un.varCfgPort.pcbLen = sizeof(PCB_t);
+ /* Bus address of the PCB = DMA base + its offset inside slim2p */
+ offset = (uint8_t *)&phba->slim2p->pcb - (uint8_t *)phba->slim2p;
+ pdma_addr = phba->slim2p_mapping + offset;
+ mb->un.varCfgPort.pcbLow = putPaddrLow(pdma_addr);
+ mb->un.varCfgPort.pcbHigh = putPaddrHigh(pdma_addr);
+
+ /* Now setup pcb */
+ phba->slim2p->pcb.type = TYPE_NATIVE_SLI2;
+ phba->slim2p->pcb.feature = FEATURE_INITIAL_SLI2;
+
+ /* Setup Mailbox pointers */
+ phba->slim2p->pcb.mailBoxSize = sizeof(MAILBOX_t);
+ offset = (uint8_t *)&phba->slim2p->mbx - (uint8_t *)phba->slim2p;
+ pdma_addr = phba->slim2p_mapping + offset;
+ phba->slim2p->pcb.mbAddrHigh = putPaddrHigh(pdma_addr);
+ phba->slim2p->pcb.mbAddrLow = putPaddrLow(pdma_addr);
+
+ /*
+ * Setup Host Group ring pointer.
+ *
+ * For efficiency reasons, the ring get/put pointers can be
+ * placed in adapter memory (SLIM) rather than in host memory.
+ * This allows firmware to avoid PCI reads/writes when updating
+ * and checking pointers.
+ *
+ * The firmware recognizes the use of SLIM memory by comparing
+ * the address of the get/put pointers structure with that of
+ * the SLIM BAR (BAR0).
+ *
+ * Caution: be sure to use the PCI config space value of BAR0/BAR1
+ * (the hardware's view of the base address), not the OS's
+ * value of pci_resource_start() as the OS value may be a cookie
+ * for ioremap/iomap.
+ */
+
+
+ pci_read_config_dword(phba->pcidev, PCI_BASE_ADDRESS_0, &bar_low);
+ pci_read_config_dword(phba->pcidev, PCI_BASE_ADDRESS_1, &bar_high);
+
+
+ /* mask off BAR0's flag bits 0 - 3 */
+ phba->slim2p->pcb.hgpAddrLow = (bar_low & PCI_BASE_ADDRESS_MEM_MASK) +
+ (SLIMOFF*sizeof(uint32_t));
+ if (bar_low & PCI_BASE_ADDRESS_MEM_TYPE_64)
+ phba->slim2p->pcb.hgpAddrHigh = bar_high;
+ else
+ phba->slim2p->pcb.hgpAddrHigh = 0;
+ /* write HGP data to SLIM at the required longword offset */
+ memset(&hgp, 0, sizeof(HGP));
+ to_slim = (uint8_t *)phba->MBslimaddr + (SLIMOFF*sizeof (uint32_t));
+ lpfc_memcpy_to_slim(to_slim, &hgp, sizeof (HGP));
+
+ /* Setup Port Group ring pointer */
+ offset = (uint8_t *)&phba->slim2p->mbx.us.s2.port -
+ (uint8_t *)phba->slim2p;
+ pdma_addr = phba->slim2p_mapping + offset;
+ phba->slim2p->pcb.pgpAddrHigh = putPaddrHigh(pdma_addr);
+ phba->slim2p->pcb.pgpAddrLow = putPaddrLow(pdma_addr);
+
+ /* Use callback routine to setp rings in the pcb */
+ lpfc_config_pcb_setup(phba);
+
+ /* special handling for LC HBAs */
+ if (lpfc_is_LC_HBA(phba->pcidev->device)) {
+ uint32_t hbainit[5];
+
+ lpfc_hba_init(phba, hbainit);
+
+ memcpy(&mb->un.varCfgPort.hbainit, hbainit, 20);
+ }
+
+ /* Swap PCB if needed */
+ /* NOTE(review): source and destination are the same buffer — this
+  * swaps the PCB in place; confirm lpfc_sli_pcimem_bcopy permits that.
+  */
+ lpfc_sli_pcimem_bcopy((uint32_t *)&phba->slim2p->pcb,
+ (uint32_t *)&phba->slim2p->pcb,
+ sizeof (PCB_t));
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "%d:0405 Service Level Interface (SLI) 2 selected\n",
+ phba->brd_no);
+}
+
+/*
+ * Append a mailbox command to the tail of the SLI mailbox queue and
+ * bump the queued-command count.  Caller handles any required locking.
+ */
+void
+lpfc_mbox_put(struct lpfc_hba * phba, LPFC_MBOXQ_t * mbq)
+{
+ struct lpfc_sli *psli = &phba->sli;
+
+ list_add_tail(&mbq->list, &psli->mboxq);
+ psli->mboxq_cnt++;
+}
+
+/*
+ * Pop the mailbox command at the head of the SLI mailbox queue.
+ * Returns NULL when the queue is empty.  Caller handles locking.
+ */
+LPFC_MBOXQ_t *
+lpfc_mbox_get(struct lpfc_hba * phba)
+{
+ struct lpfc_sli *psli = &phba->sli;
+ LPFC_MBOXQ_t *mbq;
+
+ if (list_empty(&psli->mboxq))
+ return NULL;
+
+ mbq = list_entry(psli->mboxq.next, LPFC_MBOXQ_t, list);
+ list_del_init(&mbq->list);
+ psli->mboxq_cnt--;
+ return mbq;
+}
--- /dev/null
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for *
+ * Enterprise Fibre Channel Host Bus Adapters. *
+ * Refer to the README file included with this package for *
+ * driver version and adapter support. *
+ * Copyright (C) 2004 Emulex Corporation. *
+ * www.emulex.com *
+ * *
+ * This program is free software; you can redistribute it and/or *
+ * modify it under the terms of the GNU General Public License *
+ * as published by the Free Software Foundation; either version 2 *
+ * of the License, or (at your option) any later version. *
+ * *
+ * This program is distributed in the hope that it will be useful, *
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of *
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
+ * GNU General Public License for more details, a copy of which *
+ * can be found in the file COPYING included with this package. *
+ *******************************************************************/
+
+/*
+ * $Id: lpfc_mem.c 1.69 2004/09/28 07:54:24EDT sf_support Exp $
+ */
+
+#include <linux/mempool.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <scsi/scsi_device.h>
+
+#include "lpfc_sli.h"
+#include "lpfc_disc.h"
+#include "lpfc_scsi.h"
+#include "lpfc.h"
+#include "lpfc_crtn.h"
+#include "lpfc_mem.h"
+
+/* mempool alloc callback: the element size is smuggled in via <data>. */
+static void *
+lpfc_pool_kmalloc(int gfp_flags, void *data)
+{
+ unsigned long size = (unsigned long)data;
+
+ return kmalloc(size, gfp_flags);
+}
+
+/* mempool free callback: <data> (the element size) is unused here. */
+static void
+lpfc_pool_kfree(void *obj, void *data)
+{
+ kfree(obj);
+}
+
+/*
+ * Allocate the per-HBA memory resources: two PCI DMA pools, a "safety"
+ * pool of pre-allocated mbufs used when pci_pool_alloc fails under
+ * GFP_ATOMIC, and the mempools for iocbs, scsi bufs, mailboxes,
+ * nodelist and bind list entries.
+ *
+ * Returns 0 on success, -ENOMEM on failure; everything allocated up to
+ * the failure point is released before returning.
+ */
+int
+lpfc_mem_alloc(struct lpfc_hba * phba)
+{
+ struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
+ int i;
+
+ phba->lpfc_scsi_dma_ext_pool = pci_pool_create("lpfc_scsi_dma_ext_pool",
+ phba->pcidev, LPFC_SCSI_DMA_EXT_SIZE, 8, 0);
+ if (!phba->lpfc_scsi_dma_ext_pool)
+ goto fail;
+
+ phba->lpfc_mbuf_pool = pci_pool_create("lpfc_mbuf_pool", phba->pcidev,
+ LPFC_BPL_SIZE, 8,0);
+ if (!phba->lpfc_mbuf_pool)
+ goto fail_free_dma_ext_pool;
+
+ pool->elements = kmalloc(sizeof(struct lpfc_dmabuf) *
+ LPFC_MBUF_POOL_SIZE, GFP_KERNEL);
+ /* Fix: this allocation was previously unchecked, so a failure here
+  * dereferenced a NULL pool->elements in the fill loop below.
+  */
+ if (!pool->elements)
+ goto fail_free_lpfc_mbuf_pool;
+ pool->max_count = 0;
+ pool->current_count = 0;
+ for ( i = 0; i < LPFC_MBUF_POOL_SIZE; i++) {
+ pool->elements[i].virt = pci_pool_alloc(phba->lpfc_mbuf_pool,
+ GFP_KERNEL, &pool->elements[i].phys);
+ if (!pool->elements[i].virt)
+ goto fail_free_mbuf_pool;
+ pool->max_count++;
+ pool->current_count++;
+ }
+
+ phba->iocb_mem_pool = mempool_create(LPFC_MEM_POOL_SIZE,
+ lpfc_pool_kmalloc, lpfc_pool_kfree,
+ (void *)(unsigned long)sizeof(struct lpfc_iocbq));
+ if (!phba->iocb_mem_pool)
+ goto fail_free_mbuf_pool;
+
+ phba->scsibuf_mem_pool = mempool_create(LPFC_MEM_POOL_SIZE,
+ lpfc_pool_kmalloc, lpfc_pool_kfree,
+ (void *)(unsigned long)sizeof(struct lpfc_scsi_buf));
+ if (!phba->scsibuf_mem_pool)
+ goto fail_free_iocb_pool;
+
+ phba->mbox_mem_pool = mempool_create(LPFC_MEM_POOL_SIZE,
+ lpfc_pool_kmalloc, lpfc_pool_kfree,
+ (void *)(unsigned long)sizeof(LPFC_MBOXQ_t));
+ if (!phba->mbox_mem_pool)
+ goto fail_free_scsibuf_pool;
+
+ phba->nlp_mem_pool = mempool_create(LPFC_MEM_POOL_SIZE,
+ lpfc_pool_kmalloc, lpfc_pool_kfree,
+ (void *)(unsigned long)sizeof(struct lpfc_nodelist));
+ if (!phba->nlp_mem_pool)
+ goto fail_free_mbox_pool;
+
+ phba->bind_mem_pool = mempool_create(LPFC_MEM_POOL_SIZE,
+ lpfc_pool_kmalloc, lpfc_pool_kfree,
+ (void *)(unsigned long)sizeof(struct lpfc_bindlist));
+ if (!phba->bind_mem_pool)
+ goto fail_free_nlp_pool;
+
+ return 0;
+
+ fail_free_nlp_pool:
+ mempool_destroy(phba->nlp_mem_pool);
+ fail_free_mbox_pool:
+ mempool_destroy(phba->mbox_mem_pool);
+ fail_free_scsibuf_pool:
+ mempool_destroy(phba->scsibuf_mem_pool);
+ fail_free_iocb_pool:
+ mempool_destroy(phba->iocb_mem_pool);
+ fail_free_mbuf_pool:
+ /* Fix: was "while (--i)", which leaked elements[0] and, when the
+  * very first pci_pool_alloc failed (i == 0), underflowed to -1 and
+  * freed wild addresses.  "while (i--)" frees exactly elements
+  * [0, i) and terminates correctly for i == 0.
+  */
+ while (i--)
+ pci_pool_free(phba->lpfc_mbuf_pool, pool->elements[i].virt,
+ pool->elements[i].phys);
+ kfree(pool->elements);
+ fail_free_lpfc_mbuf_pool:
+ pci_pool_destroy(phba->lpfc_mbuf_pool);
+ fail_free_dma_ext_pool:
+ pci_pool_destroy(phba->lpfc_scsi_dma_ext_pool);
+ fail:
+ return -ENOMEM;
+}
+
+/*
+ * Release everything lpfc_mem_alloc() created.  Any mailbox commands
+ * still queued (or active) are returned to the mailbox mempool first,
+ * then the safety-pool mbufs, the mempools, and finally the PCI pools
+ * are destroyed.
+ */
+void
+lpfc_mem_free(struct lpfc_hba * phba)
+{
+ struct lpfc_sli *psli = &phba->sli;
+ struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
+ LPFC_MBOXQ_t *mbox, *next_mbox;
+ int i;
+
+ /* Drain the pending mailbox queue back into its mempool */
+ list_for_each_entry_safe(mbox, next_mbox, &psli->mboxq, list) {
+ list_del(&mbox->list);
+ mempool_free(mbox, phba->mbox_mem_pool);
+ }
+
+ /* Clear the active-mailbox state before tearing down the pools */
+ psli->sliinit.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
+ if (psli->mbox_active) {
+ mempool_free(psli->mbox_active, phba->mbox_mem_pool);
+ psli->mbox_active = NULL;
+ }
+
+ /* Return the safety-pool buffers to the PCI pool before destroy */
+ for (i = 0; i < pool->current_count; i++)
+ pci_pool_free(phba->lpfc_mbuf_pool, pool->elements[i].virt,
+ pool->elements[i].phys);
+ kfree(pool->elements);
+ mempool_destroy(phba->bind_mem_pool);
+ mempool_destroy(phba->nlp_mem_pool);
+ mempool_destroy(phba->mbox_mem_pool);
+ mempool_destroy(phba->scsibuf_mem_pool);
+ mempool_destroy(phba->iocb_mem_pool);
+
+ pci_pool_destroy(phba->lpfc_scsi_dma_ext_pool);
+ pci_pool_destroy(phba->lpfc_mbuf_pool);
+}
+
+/*
+ * Allocate an mbuf from the PCI pool; on failure, MEM_PRI callers may
+ * fall back to the pre-allocated safety pool.  The buffer's bus address
+ * is returned through <handle>.  Returns NULL if nothing is available.
+ */
+void *
+lpfc_mbuf_alloc(struct lpfc_hba *phba, int mem_flags, dma_addr_t *handle)
+{
+ struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
+ void *buf;
+
+ buf = pci_pool_alloc(phba->lpfc_mbuf_pool, GFP_ATOMIC, handle);
+ if (buf)
+ return buf;
+
+ /* Pool exhausted: dip into the safety pool for priority requests */
+ if ((mem_flags & MEM_PRI) && pool->current_count) {
+ pool->current_count--;
+ buf = pool->elements[pool->current_count].virt;
+ *handle = pool->elements[pool->current_count].phys;
+ }
+ return buf;
+}
+
+/*
+ * Return an mbuf.  The safety pool is refilled first (up to max_count);
+ * only surplus buffers go back to the PCI pool.
+ */
+void
+lpfc_mbuf_free(struct lpfc_hba * phba, void *virt, dma_addr_t dma)
+{
+ struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
+
+ if (pool->current_count >= pool->max_count) {
+ pci_pool_free(phba->lpfc_mbuf_pool, virt, dma);
+ return;
+ }
+
+ pool->elements[pool->current_count].virt = virt;
+ pool->elements[pool->current_count].phys = dma;
+ pool->current_count++;
+}
--- /dev/null
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for *
+ * Enterprise Fibre Channel Host Bus Adapters. *
+ * Refer to the README file included with this package for *
+ * driver version and adapter support. *
+ * Copyright (C) 2004 Emulex Corporation. *
+ * www.emulex.com *
+ * *
+ * This program is free software; you can redistribute it and/or *
+ * modify it under the terms of the GNU General Public License *
+ * as published by the Free Software Foundation; either version 2 *
+ * of the License, or (at your option) any later version. *
+ * *
+ * This program is distributed in the hope that it will be useful, *
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of *
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
+ * GNU General Public License for more details, a copy of which *
+ * can be found in the file COPYING included with this package. *
+ *******************************************************************/
+
+/*
+ * $Id: lpfc_nportdisc.c 1.146 2004/11/18 14:53:54EST sf_support Exp $
+ */
+
+#include <linux/version.h>
+#include <linux/blkdev.h>
+#include <linux/dma-mapping.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <scsi/scsi_device.h>
+#include "lpfc_sli.h"
+#include "lpfc_disc.h"
+#include "lpfc_scsi.h"
+#include "lpfc.h"
+#include "lpfc_crtn.h"
+#include "lpfc_hw.h"
+#include "lpfc_logmsg.h"
+#include "lpfc_mem.h"
+
+extern uint8_t lpfcAlpaArray[];
+
+
+/* Called to verify a rcv'ed ADISC was intended for us. */
+static int
+lpfc_check_adisc(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
+ struct lpfc_name * nn, struct lpfc_name * pn)
+{
+ /* The ADISC is ours only if both the WWNN and WWPN match our
+  * node table entry for this node.
+  */
+ if (memcmp(nn, &ndlp->nlp_nodename, sizeof (struct lpfc_name)) ||
+ memcmp(pn, &ndlp->nlp_portname, sizeof (struct lpfc_name)))
+ return (0);
+
+ return (1);
+}
+
+
+/*
+ * Validate a remote port's service parameters against our own and the
+ * required class of service.  For each class the remote advertises, its
+ * receive data field sizes are clamped down to ours; if the required
+ * <class> is not advertised at all, the check fails (returns 0).
+ * On success the remote WWNN/WWPN are copied into ndlp and 1 is
+ * returned.  NOTE(review): the "supported version" check announced by
+ * the comment below is not actually implemented.
+ */
+int
+lpfc_check_sparm(struct lpfc_hba * phba,
+ struct lpfc_nodelist * ndlp, struct serv_parm * sp,
+ uint32_t class)
+{
+ volatile struct serv_parm *hsp = &phba->fc_sparam;
+ /* First check for supported version */
+
+ /* Next check for class validity */
+ if (sp->cls1.classValid) {
+
+ if (sp->cls1.rcvDataSizeMsb > hsp->cls1.rcvDataSizeMsb)
+ sp->cls1.rcvDataSizeMsb = hsp->cls1.rcvDataSizeMsb;
+ if (sp->cls1.rcvDataSizeLsb > hsp->cls1.rcvDataSizeLsb)
+ sp->cls1.rcvDataSizeLsb = hsp->cls1.rcvDataSizeLsb;
+ } else if (class == CLASS1) {
+ return (0);
+ }
+
+ if (sp->cls2.classValid) {
+
+ if (sp->cls2.rcvDataSizeMsb > hsp->cls2.rcvDataSizeMsb)
+ sp->cls2.rcvDataSizeMsb = hsp->cls2.rcvDataSizeMsb;
+ if (sp->cls2.rcvDataSizeLsb > hsp->cls2.rcvDataSizeLsb)
+ sp->cls2.rcvDataSizeLsb = hsp->cls2.rcvDataSizeLsb;
+ } else if (class == CLASS2) {
+ return (0);
+ }
+
+ if (sp->cls3.classValid) {
+
+ if (sp->cls3.rcvDataSizeMsb > hsp->cls3.rcvDataSizeMsb)
+ sp->cls3.rcvDataSizeMsb = hsp->cls3.rcvDataSizeMsb;
+ if (sp->cls3.rcvDataSizeLsb > hsp->cls3.rcvDataSizeLsb)
+ sp->cls3.rcvDataSizeLsb = hsp->cls3.rcvDataSizeLsb;
+ } else if (class == CLASS3) {
+ return (0);
+ }
+
+ /* Clamp the common buffer-to-buffer receive size as well */
+ if (sp->cmn.bbRcvSizeMsb > hsp->cmn.bbRcvSizeMsb)
+ sp->cmn.bbRcvSizeMsb = hsp->cmn.bbRcvSizeMsb;
+ if (sp->cmn.bbRcvSizeLsb > hsp->cmn.bbRcvSizeLsb)
+ sp->cmn.bbRcvSizeLsb = hsp->cmn.bbRcvSizeLsb;
+
+ /* If check is good, copy wwpn wwnn into ndlp */
+ memcpy(&ndlp->nlp_nodename, &sp->nodeName, sizeof (struct lpfc_name));
+ memcpy(&ndlp->nlp_portname, &sp->portName, sizeof (struct lpfc_name));
+ return (1);
+}
+
+/*
+ * Locate the ELS response payload for a completed ELS command iocb.
+ * Returns a pointer just past the first word of the response buffer
+ * (i.e. the payload after the command code), or NULL if the command's
+ * context2 was cleared (e.g. by lpfc_els_abort) — in which case a
+ * LOCAL_REJECT/SLI_ABORTED status is forced into the response iocb so
+ * callers treat it as a failure.
+ */
+static void *
+lpfc_check_elscmpl_iocb(struct lpfc_hba * phba,
+ struct lpfc_iocbq *cmdiocb,
+ struct lpfc_iocbq *rspiocb)
+{
+ struct lpfc_dmabuf *pcmd, *prsp;
+ uint32_t *lp;
+ void *ptr;
+ IOCB_t *irsp;
+
+ irsp = &rspiocb->iocb;
+ pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+
+ /* For lpfc_els_abort, context2 could be zero'ed to delay
+ * freeing associated memory till after ABTS completes.
+ */
+ if (pcmd) {
+ /* The response buffer is chained directly after the command
+  * buffer on pcmd's list.
+  */
+ prsp = (struct lpfc_dmabuf *) pcmd->list.next;
+ lp = (uint32_t *) prsp->virt;
+
+ pci_dma_sync_single_for_cpu(phba->pcidev, prsp->phys,
+ LPFC_BPL_SIZE, PCI_DMA_FROMDEVICE);
+
+ /* Skip the leading ELS command word */
+ ptr = (void *)((uint8_t *)lp + sizeof(uint32_t));
+ }
+ else {
+ /* Force ulpStatus error since we are returning NULL ptr */
+ if (!(irsp->ulpStatus)) {
+ irsp->ulpStatus = IOSTAT_LOCAL_REJECT;
+ irsp->un.ulpWord[4] = IOERR_SLI_ABORTED;
+ }
+ ptr = NULL;
+ }
+ return (ptr);
+}
+
+
+/*
+ * Free resources / clean up outstanding I/Os
+ * associated with a LPFC_NODELIST entry. This
+ * routine effectively results in a "software abort".
+ */
+static int
+lpfc_els_abort(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
+ int send_abts)
+{
+ struct lpfc_sli *psli;
+ struct lpfc_sli_ring *pring;
+ struct lpfc_iocbq *iocb, *next_iocb;
+ IOCB_t *icmd;
+
+ /* Abort outstanding I/O on NPort <nlp_DID> */
+ lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
+ "%d:0201 Abort outstanding I/O on NPort x%x "
+ "Data: x%x x%x x%x\n",
+ phba->brd_no, ndlp->nlp_DID, ndlp->nlp_flag,
+ ndlp->nlp_state, ndlp->nlp_rpi);
+
+ psli = &phba->sli;
+ pring = &psli->ring[LPFC_ELS_RING];
+
+ /* First check the txq */
+ list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
+ /* Check to see if iocb matches the nport we are looking for */
+ if ((lpfc_check_sli_ndlp(phba, pring, iocb, ndlp))) {
+ /* It matches, so deque and call compl with an error */
+ list_del(&iocb->list);
+ pring->txq_cnt--;
+ if (iocb->iocb_cmpl) {
+ icmd = &iocb->iocb;
+ icmd->ulpStatus = IOSTAT_LOCAL_REJECT;
+ icmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
+ (iocb->iocb_cmpl) (phba, iocb, iocb);
+ } else {
+ mempool_free(iocb, phba->iocb_mem_pool);
+ }
+ }
+ }
+
+ /* Everything on txcmplq will be returned by firmware
+ * with a no rpi / linkdown / abort error. For ring 0,
+ * ELS discovery, we want to get rid of it right here.
+ */
+ /* Next check the txcmplq */
+ list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
+ /* Check to see if iocb matches the nport we are looking for */
+ if ((lpfc_check_sli_ndlp (phba, pring, iocb, ndlp))) {
+ /* It matches, so deque and call compl with an error */
+ list_del(&iocb->list);
+ pring->txcmplq_cnt--;
+
+ icmd = &iocb->iocb;
+ /* If the driver is completing an ELS
+ * command early, flush it out of the firmware.
+ */
+ if (send_abts &&
+ (icmd->ulpCommand == CMD_ELS_REQUEST64_CR) &&
+ (icmd->un.elsreq64.bdl.ulpIoTag32)) {
+ lpfc_sli_issue_abort_iotag32(phba, pring, iocb);
+ }
+ if (iocb->iocb_cmpl) {
+ icmd->ulpStatus = IOSTAT_LOCAL_REJECT;
+ icmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
+ (iocb->iocb_cmpl) (phba, iocb, iocb);
+ } else {
+ mempool_free(iocb, phba->iocb_mem_pool);
+ }
+ }
+ }
+
+ /* If we are delaying issuing an ELS command, cancel it */
+ if(ndlp->nlp_flag & NLP_DELAY_TMO) {
+ ndlp->nlp_flag &= ~NLP_DELAY_TMO;
+ del_timer_sync(&ndlp->nlp_delayfunc);
+ }
+ return (0);
+}
+
+/*
+ * Handle a received PLOGI for node <ndlp>.
+ *
+ * Rejects the PLOGI (LS_RJT) if: the link is still in FLOGI and we are
+ * not in pt2pt mode; the service parameters fail lpfc_check_sparm; or
+ * a mailbox cannot be allocated.  Otherwise accepts it and queues a
+ * REG_LOGIN mailbox to run after the ACC completes.  In pt2pt mode a
+ * received PLOGI also fixes our own N_Port id and triggers CONFIG_LINK.
+ *
+ * Returns 1 when the PLOGI was ACC'd, 0 when it was rejected.
+ *
+ * NOTE(review): several error paths "goto out", a label that lives
+ * inside the final if-block — a legal but unusual jump into a compound
+ * statement that funnels all rejects through lpfc_els_rsp_reject().
+ */
+static int
+lpfc_rcv_plogi(struct lpfc_hba * phba,
+ struct lpfc_nodelist * ndlp,
+ struct lpfc_iocbq *cmdiocb)
+{
+ struct lpfc_dmabuf *pcmd;
+ uint32_t *lp;
+ IOCB_t *icmd;
+ struct serv_parm *sp;
+ LPFC_MBOXQ_t *mbox;
+ struct ls_rjt stat;
+
+ memset(&stat, 0, sizeof (struct ls_rjt));
+ if (phba->hba_state <= LPFC_FLOGI) {
+ /* Before responding to PLOGI, check for pt2pt mode.
+ * If we are pt2pt, with an outstanding FLOGI, abort
+ * the FLOGI and resend it first.
+ */
+ if (phba->fc_flag & FC_PT2PT) {
+ lpfc_els_abort_flogi(phba);
+ if(!(phba->fc_flag & FC_PT2PT_PLOGI)) {
+ /* If the other side is supposed to initiate
+ * the PLOGI anyway, just ACC it now and
+ * move on with discovery.
+ */
+ phba->fc_edtov = FF_DEF_EDTOV;
+ phba->fc_ratov = FF_DEF_RATOV;
+ /* Start discovery - this should just do
+ CLEAR_LA */
+ lpfc_disc_start(phba);
+ }
+ else {
+ lpfc_initial_flogi(phba);
+ }
+ }
+ else {
+ /* Fabric login still in progress: busy-reject */
+ stat.un.b.lsRjtRsnCode = LSRJT_LOGICAL_BSY;
+ stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
+ goto out;
+ }
+ }
+ pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+ lp = (uint32_t *) pcmd->virt;
+ /* Service parameters follow the ELS command word in the payload */
+ sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t));
+ if ((lpfc_check_sparm(phba, ndlp, sp, CLASS3) == 0)) {
+ /* Reject this request because invalid parameters */
+ stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
+ stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS;
+ goto out;
+ }
+ icmd = &cmdiocb->iocb;
+
+ /* PLOGI chkparm OK */
+ lpfc_printf_log(phba,
+ KERN_INFO,
+ LOG_ELS,
+ "%d:0114 PLOGI chkparm OK Data: x%x x%x x%x x%x\n",
+ phba->brd_no,
+ ndlp->nlp_DID, ndlp->nlp_state, ndlp->nlp_flag,
+ ndlp->nlp_rpi);
+
+ if ((phba->cfg_fcp_class == 2) &&
+ (sp->cls2.classValid)) {
+ ndlp->nlp_fcp_info |= CLASS2;
+ } else {
+ ndlp->nlp_fcp_info |= CLASS3;
+ }
+
+ /* no need to reg_login if we are already in one of these states */
+ switch(ndlp->nlp_state) {
+ case NLP_STE_REG_LOGIN_ISSUE:
+ case NLP_STE_PRLI_ISSUE:
+ case NLP_STE_UNMAPPED_NODE:
+ case NLP_STE_MAPPED_NODE:
+ lpfc_els_rsp_acc(phba, ELS_CMD_PLOGI, cmdiocb, ndlp, NULL, 0);
+ return (1);
+ }
+
+ if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC)) == 0) {
+ stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
+ stat.un.b.lsRjtRsnCodeExp = LSEXP_OUT_OF_RESOURCE;
+ goto out;
+ }
+
+ if ((phba->fc_flag & FC_PT2PT)
+ && !(phba->fc_flag & FC_PT2PT_PLOGI)) {
+ /* rcv'ed PLOGI decides what our NPortId will be */
+ phba->fc_myDID = icmd->un.rcvels.parmRo;
+ lpfc_config_link(phba, mbox);
+ if (lpfc_sli_issue_mbox
+ (phba, mbox, (MBX_NOWAIT | MBX_STOP_IOCB))
+ == MBX_NOT_FINISHED) {
+ mempool_free( mbox, phba->mbox_mem_pool);
+ stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
+ stat.un.b.lsRjtRsnCodeExp = LSEXP_OUT_OF_RESOURCE;
+ goto out;
+ }
+ /* CONFIG_LINK consumed the mailbox; get a fresh one for
+  * the REG_LOGIN below.
+  */
+ if ((mbox = mempool_alloc(phba->mbox_mem_pool,
+ GFP_ATOMIC)) == 0) {
+ stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
+ stat.un.b.lsRjtRsnCodeExp = LSEXP_OUT_OF_RESOURCE;
+ goto out;
+ }
+ lpfc_can_disctmo(phba);
+ }
+
+ if(lpfc_reg_login(phba, icmd->un.rcvels.remoteID,
+ (uint8_t *) sp, mbox, 0)) {
+ mempool_free( mbox, phba->mbox_mem_pool);
+ stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
+ stat.un.b.lsRjtRsnCodeExp = LSEXP_OUT_OF_RESOURCE;
+out:
+ lpfc_els_rsp_reject(phba, stat.un.lsRjtError, cmdiocb, ndlp);
+ return (0);
+ }
+
+ /* ACC PLOGI rsp command needs to execute first,
+ * queue this mbox command to be processed later.
+ */
+ mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
+ mbox->context2 = ndlp;
+ ndlp->nlp_flag |= NLP_ACC_REGLOGIN;
+
+ /* If there is an outstanding PLOGI issued, abort it before
+ * sending ACC rsp to PLOGI recieved.
+ */
+ if(ndlp->nlp_state == NLP_STE_PLOGI_ISSUE) {
+ /* software abort outstanding PLOGI */
+ lpfc_els_abort(phba, ndlp, 1);
+ }
+ ndlp->nlp_flag |= NLP_RCV_PLOGI;
+ lpfc_els_rsp_acc(phba, ELS_CMD_PLOGI, cmdiocb, ndlp, mbox, 0);
+ return (1);
+}
+
+/*
+ * Handle a received ADISC or PLOGI-format address discovery for <ndlp>.
+ * If the embedded WWNN/WWPN match our node table entry, the request is
+ * ACC'd (ADISC gets an ADISC ACC, otherwise a PLOGI ACC) and 1 is
+ * returned.  On mismatch the request is rejected, the node is parked on
+ * the NPR list, and a 1-second delayed PLOGI retry is armed; returns 0.
+ */
+static int
+lpfc_rcv_padisc(struct lpfc_hba * phba,
+ struct lpfc_nodelist * ndlp,
+ struct lpfc_iocbq *cmdiocb)
+{
+ struct lpfc_dmabuf *pcmd;
+ struct serv_parm *sp;
+ struct lpfc_name *pnn, *ppn;
+ struct ls_rjt stat;
+ ADISC *ap;
+ IOCB_t *icmd;
+ uint32_t *lp;
+ uint32_t cmd;
+
+ pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+ lp = (uint32_t *) pcmd->virt;
+
+ /* First payload word is the ELS command; names follow in either
+  * ADISC or service-parameter layout.
+  */
+ cmd = *lp++;
+ if (cmd == ELS_CMD_ADISC) {
+ ap = (ADISC *) lp;
+ pnn = (struct lpfc_name *) & ap->nodeName;
+ ppn = (struct lpfc_name *) & ap->portName;
+ } else {
+ sp = (struct serv_parm *) lp;
+ pnn = (struct lpfc_name *) & sp->nodeName;
+ ppn = (struct lpfc_name *) & sp->portName;
+ }
+
+ icmd = &cmdiocb->iocb;
+ if ((icmd->ulpStatus == 0) &&
+ (lpfc_check_adisc(phba, ndlp, pnn, ppn))) {
+ if (cmd == ELS_CMD_ADISC) {
+ lpfc_els_rsp_adisc_acc(phba, cmdiocb, ndlp);
+ }
+ else {
+ lpfc_els_rsp_acc(phba, ELS_CMD_PLOGI, cmdiocb, ndlp,
+ NULL, 0);
+ }
+ return (1);
+ }
+ /* Reject this request because invalid parameters */
+ stat.un.b.lsRjtRsvd0 = 0;
+ stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
+ stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS;
+ stat.un.b.vendorUnique = 0;
+ lpfc_els_rsp_reject(phba, stat.un.lsRjtError, cmdiocb, ndlp);
+
+ /* Re-PLOGI the node after a short delay to resynchronize */
+ ndlp->nlp_last_elscmd = (unsigned long)ELS_CMD_PLOGI;
+ /* 1 sec timeout */
+ mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
+
+ ndlp->nlp_flag |= NLP_DELAY_TMO;
+ ndlp->nlp_state = NLP_STE_NPR_NODE;
+ lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
+ return (0);
+}
+
+/*
+ * Handle a received LOGO for <ndlp>: ACC it once (NLP_LOGO_ACC guards
+ * against LOGO storms), arm a 1-second delayed PLOGI retry for
+ * non-fabric nodes, and park the node on the NPR list.  Returns 0.
+ */
+static int
+lpfc_rcv_logo(struct lpfc_hba * phba,
+ struct lpfc_nodelist * ndlp,
+ struct lpfc_iocbq *cmdiocb)
+{
+ /* Put ndlp on NPR list with 1 sec timeout for plogi, ACC logo */
+ /* Only call LOGO ACC for first LOGO, this avoids sending unnecessary
+ * PLOGIs during LOGO storms from a device.
+ */
+ ndlp->nlp_flag |= NLP_LOGO_ACC;
+ lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0);
+
+ if (!(ndlp->nlp_type & NLP_FABRIC)) {
+ /* Only try to re-login if this is NOT a Fabric Node */
+ ndlp->nlp_last_elscmd = (unsigned long)ELS_CMD_PLOGI;
+ mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
+ ndlp->nlp_flag |= NLP_DELAY_TMO;
+ }
+
+ ndlp->nlp_state = NLP_STE_NPR_NODE;
+ lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
+
+ ndlp->nlp_flag &= ~NLP_NPR_ADISC;
+ /* The driver has to wait until the ACC completes before it continues
+ * processing the LOGO. The action will resume in
+ * lpfc_cmpl_els_logo_acc routine. Since part of processing includes an
+ * unreg_login, the driver waits so the ACC does not get aborted.
+ */
+ return (0);
+}
+
+/*
+ * Test whether persistent binding <blp> applies to node <ndlp>,
+ * honoring the binding's seed type: by D_ID, by WWPN, or by WWNN.
+ * Returns 1 on a match, 0 otherwise.
+ */
+static int
+lpfc_binding_found(struct lpfc_bindlist * blp, struct lpfc_nodelist * ndlp)
+{
+ uint16_t bindtype = blp->nlp_bind_type;
+
+ if ((bindtype & FCP_SEED_DID) &&
+ (ndlp->nlp_DID == be32_to_cpu(blp->nlp_DID)))
+ return (1);
+
+ if ((bindtype & FCP_SEED_WWPN) &&
+ (memcmp(&ndlp->nlp_portname, &blp->nlp_portname,
+ sizeof (struct lpfc_name)) == 0))
+ return (1);
+
+ if ((bindtype & FCP_SEED_WWNN) &&
+ (memcmp(&ndlp->nlp_nodename, &blp->nlp_nodename,
+ sizeof (struct lpfc_name)) == 0))
+ return (1);
+
+ return (0);
+}
+
+/*
+ * Return 1 if any entry on the unresolved binding list already claims
+ * scsi id <sid>, 0 otherwise.
+ */
+static int
+lpfc_binding_useid(struct lpfc_hba * phba, uint32_t sid)
+{
+ struct lpfc_bindlist *bind_entry;
+
+ list_for_each_entry(bind_entry, &phba->fc_nlpbind_list, nlp_listp) {
+ if (bind_entry->nlp_sid == sid)
+ return (1);
+ }
+
+ return (0);
+}
+
+/*
+ * Return 1 if any mapped node's binding already claims scsi id <sid>,
+ * 0 otherwise.
+ */
+static int
+lpfc_mapping_useid(struct lpfc_hba * phba, uint32_t sid)
+{
+ struct lpfc_nodelist *map_node;
+
+ list_for_each_entry(map_node, &phba->fc_nlpmap_list, nlp_listp) {
+ struct lpfc_bindlist *bind_entry = map_node->nlp_listp_bind;
+
+ if (bind_entry->nlp_sid == sid)
+ return (1);
+ }
+
+ return (0);
+}
+
+static struct lpfc_bindlist *
+lpfc_create_binding(struct lpfc_hba * phba,
+ struct lpfc_nodelist * ndlp, uint16_t index,
+ uint16_t bindtype)
+{
+ struct lpfc_bindlist *blp;
+
+ if ((blp = mempool_alloc(phba->bind_mem_pool, GFP_ATOMIC))) {
+ memset(blp, 0, sizeof (struct lpfc_bindlist));
+ switch (bindtype) {
+ case FCP_SEED_WWPN:
+ blp->nlp_bind_type = FCP_SEED_WWPN;
+ break;
+ case FCP_SEED_WWNN:
+ blp->nlp_bind_type = FCP_SEED_WWNN;
+ break;
+ case FCP_SEED_DID:
+ blp->nlp_bind_type = FCP_SEED_DID;
+ break;
+ }
+ blp->nlp_sid = index;
+ blp->nlp_DID = ndlp->nlp_DID;
+ memcpy(&blp->nlp_nodename, &ndlp->nlp_nodename,
+ sizeof (struct lpfc_name));
+ memcpy(&blp->nlp_portname, &ndlp->nlp_portname,
+ sizeof (struct lpfc_name));
+
+ return (blp);
+ }
+ return NULL;
+}
+
+
/* Find or create a persistent binding (scsi id) for this node.
 *
 * Lookup order:
 *   1. an existing entry on fc_nlpbind_list matching by DID/WWPN/WWNN
 *      (the entry is removed from that list before being returned),
 *   2. for bind method 4 on a private loop, the ALPA-indexed scsi id,
 *   3. if automap is enabled, the next scsi id not already in use.
 * Returns the binding entry, or NULL if no scsi id could be assigned.
 */
static struct lpfc_bindlist *
lpfc_consistent_bind_get(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
{
	struct lpfc_bindlist *blp, *next_blp;
	uint16_t index;

	/* check binding list */
	list_for_each_entry_safe(blp, next_blp, &phba->fc_nlpbind_list,
				 nlp_listp) {
		if (lpfc_binding_found(blp, ndlp)) {

			/* take it off the binding list */
			phba->fc_bind_cnt--;
			list_del_init(&blp->nlp_listp);

			/* Reassign scsi id <sid> to NPort <nlp_DID> */
			lpfc_printf_log(phba,
					KERN_INFO,
					LOG_DISCOVERY | LOG_FCP,
					"%d:0213 Reassign scsi id x%x to "
					"NPort x%x Data: x%x x%x x%x x%x\n",
					phba->brd_no,
					blp->nlp_sid, ndlp->nlp_DID,
					blp->nlp_bind_type, ndlp->nlp_flag,
					ndlp->nlp_state, ndlp->nlp_rpi);

			return (blp);
		}
	}

	/* NOTE: if scan-down = 2 and we have private loop, then we use
	 * AlpaArray to determine sid.
	 */
	if ((phba->cfg_fcp_bind_method == 4) &&
	    ((phba->fc_flag & (FC_PUBLIC_LOOP | FC_FABRIC)) ||
	     (phba->fc_topology != TOPOLOGY_LOOP))) {
		/* Log message: ALPA based binding used on a non loop
		   topology */
		lpfc_printf_log(phba,
				KERN_WARNING,
				LOG_DISCOVERY,
				"%d:0245 ALPA based bind method used on an HBA "
				"which is in a nonloop topology Data: x%x\n",
				phba->brd_no,
				phba->fc_topology);
	}

	/* Private loop with ALPA binding: the node's position in the
	 * well-known ALPA table fixes its scsi id.
	 */
	if ((phba->cfg_fcp_bind_method == 4) &&
	    !(phba->fc_flag & (FC_PUBLIC_LOOP | FC_FABRIC)) &&
	    (phba->fc_topology == TOPOLOGY_LOOP)) {
		for (index = 0; index < FC_MAXLOOP; index++) {
			if (ndlp->nlp_DID == (uint32_t) lpfcAlpaArray[index]) {
				if ((blp =
				     lpfc_create_binding(phba, ndlp, index,
							 FCP_SEED_DID))) {
					return (blp);
				}
				goto errid;
			}
		}
	}

	if (phba->cfg_automap) {
		/* Scan forward from sid_cnt for the first scsi id not used
		 * by either a pending binding or a mapped node.
		 */
		while (1) {
			if ((lpfc_binding_useid(phba, phba->sid_cnt))
			    || (lpfc_mapping_useid (phba, phba->sid_cnt))) {

				phba->sid_cnt++;
			} else {
				if ((blp =
				     lpfc_create_binding(phba, ndlp,
							 phba->sid_cnt,
							 phba->fcp_mapping))) {
					blp->nlp_bind_type |= FCP_SEED_AUTO;

					phba->sid_cnt++;
					return (blp);
				}
				goto errid;
			}
		}
	}
	/* if automap on */
errid:
	/* Cannot assign scsi id on NPort <nlp_DID> */
	lpfc_printf_log(phba,
			KERN_INFO,
			LOG_DISCOVERY | LOG_FCP,
			"%d:0230 Cannot assign scsi ID on NPort x%x "
			"Data: x%x x%x x%x\n",
			phba->brd_no,
			ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
			ndlp->nlp_rpi);

	return NULL;
}
+
+static uint32_t
+lpfc_assign_binding(struct lpfc_hba * phba,
+ struct lpfc_nodelist * ndlp, struct lpfc_bindlist *blp)
+{
+ struct lpfc_target *targetp;
+
+ targetp = lpfc_find_target(phba, blp->nlp_sid, ndlp);
+ if(!targetp) {
+ /* Cannot assign scsi id <sid> to NPort <nlp_DID> */
+ lpfc_printf_log(phba,
+ KERN_INFO,
+ LOG_DISCOVERY | LOG_FCP,
+ "%d:0229 Cannot assign scsi id x%x to NPort x%x "
+ "Data: x%x x%x x%x\n",
+ phba->brd_no, blp->nlp_sid,
+ ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
+ ndlp->nlp_rpi);
+ return(0);
+ }
+ ndlp->nlp_sid = blp->nlp_sid;
+ ndlp->nlp_flag &= ~NLP_SEED_MASK;
+ switch ((blp->nlp_bind_type & FCP_SEED_MASK)) {
+ case FCP_SEED_WWPN:
+ ndlp->nlp_flag |= NLP_SEED_WWPN;
+ break;
+ case FCP_SEED_WWNN:
+ ndlp->nlp_flag |= NLP_SEED_WWNN;
+ break;
+ case FCP_SEED_DID:
+ ndlp->nlp_flag |= NLP_SEED_DID;
+ break;
+ }
+ if (blp->nlp_bind_type & FCP_SEED_AUTO) {
+ ndlp->nlp_flag |= NLP_AUTOMAP;
+ }
+ /* Assign scsi id <sid> to NPort <nlp_DID> */
+ lpfc_printf_log(phba,
+ KERN_INFO,
+ LOG_DISCOVERY | LOG_FCP,
+ "%d:0216 Assign scsi "
+ "id x%x to NPort x%x "
+ "Data: x%x x%x x%x x%x\n",
+ phba->brd_no,
+ ndlp->nlp_sid, ndlp->nlp_DID,
+ blp->nlp_bind_type,
+ ndlp->nlp_flag, ndlp->nlp_state,
+ ndlp->nlp_rpi);
+ return(1);
+}
+
+static uint32_t
+lpfc_disc_set_adisc(struct lpfc_hba * phba,
+ struct lpfc_nodelist * ndlp)
+{
+ /* Check config parameter use-adisc or FCP-2 */
+ if ((phba->cfg_use_adisc == 0) &&
+ !(phba->fc_flag & FC_RSCN_MODE)) {
+ return (0);
+ }
+ ndlp->nlp_flag |= NLP_NPR_ADISC;
+ return (1);
+}
+
+static uint32_t
+lpfc_disc_noop(struct lpfc_hba * phba,
+ struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
+{
+ /* This routine does nothing, just return the current state */
+ return (ndlp->nlp_state);
+}
+
+static uint32_t
+lpfc_disc_illegal(struct lpfc_hba * phba,
+ struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
+{
+ lpfc_printf_log(phba,
+ KERN_ERR,
+ LOG_DISCOVERY,
+ "%d:0253 Illegal State Transition: node x%x event x%x, "
+ "state x%x Data: x%x x%x\n",
+ phba->brd_no,
+ ndlp->nlp_DID, evt, ndlp->nlp_state, ndlp->nlp_rpi,
+ ndlp->nlp_flag);
+ return (ndlp->nlp_state);
+}
+
+/* Start of Discovery State Machine routines */
+
+static uint32_t
+lpfc_rcv_plogi_unused_node(struct lpfc_hba * phba,
+ struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
+{
+ struct lpfc_iocbq *cmdiocb;
+ struct lpfc_dmabuf *pcmd;
+
+ cmdiocb = (struct lpfc_iocbq *) arg;
+ pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+
+ pci_dma_sync_single_for_cpu(phba->pcidev, pcmd->phys,
+ LPFC_BPL_SIZE, PCI_DMA_FROMDEVICE);
+
+ if(lpfc_rcv_plogi(phba, ndlp, cmdiocb)) {
+ ndlp->nlp_state = NLP_STE_UNUSED_NODE;
+ lpfc_nlp_list(phba, ndlp, NLP_UNUSED_LIST);
+ return (ndlp->nlp_state);
+ }
+ lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
+ return (NLP_STE_FREED_NODE);
+}
+
+static uint32_t
+lpfc_rcv_els_unused_node(struct lpfc_hba * phba,
+ struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
+{
+ lpfc_issue_els_logo(phba, ndlp, 0);
+ lpfc_nlp_list(phba, ndlp, NLP_UNUSED_LIST);
+ return (ndlp->nlp_state);
+}
+
+static uint32_t
+lpfc_rcv_logo_unused_node(struct lpfc_hba * phba,
+ struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
+{
+ struct lpfc_iocbq *cmdiocb;
+ struct lpfc_dmabuf *pcmd;
+
+ cmdiocb = (struct lpfc_iocbq *) arg;
+ pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+
+ pci_dma_sync_single_for_cpu(phba->pcidev, pcmd->phys,
+ LPFC_BPL_SIZE, PCI_DMA_FROMDEVICE);
+
+ ndlp->nlp_flag |= NLP_LOGO_ACC;
+ lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0);
+ lpfc_nlp_list(phba, ndlp, NLP_UNUSED_LIST);
+
+ return (ndlp->nlp_state);
+}
+
+static uint32_t
+lpfc_cmpl_logo_unused_node(struct lpfc_hba * phba,
+ struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
+{
+ lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
+ return (NLP_STE_FREED_NODE);
+}
+
+static uint32_t
+lpfc_device_rm_unused_node(struct lpfc_hba * phba,
+ struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
+{
+ lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
+ return (NLP_STE_FREED_NODE);
+}
+
/* PLOGI received while our own PLOGI is outstanding (login collision).
 * Per collision resolution, only the side with the lower port WWN
 * accepts the incoming PLOGI; the other side rejects it (LS_RJT) and
 * lets its own PLOGI win.  State is unchanged.
 */
static uint32_t
lpfc_rcv_plogi_plogi_issue(struct lpfc_hba * phba,
			   struct lpfc_nodelist * ndlp,
			   struct lpfc_iocbq *cmdiocb, uint32_t evt)
{
	struct lpfc_dmabuf *pcmd;
	struct serv_parm *sp;
	uint32_t *lp;
	struct ls_rjt stat;
	int port_cmp;

	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
	lp = (uint32_t *) pcmd->virt;
	/* Service parameters follow the 32-bit ELS command word */
	sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t));

	/* Make the received payload visible to the CPU before comparing */
	pci_dma_sync_single_for_cpu(phba->pcidev, pcmd->phys,
				    LPFC_BPL_SIZE, PCI_DMA_FROMDEVICE);

	memset(&stat, 0, sizeof (struct ls_rjt));

	/* For a PLOGI, we only accept if our portname is less
	 * than the remote portname.
	 */
	phba->fc_stat.elsLogiCol++;
	port_cmp = memcmp(&phba->fc_portname, &sp->portName,
			  sizeof (struct lpfc_name));

	if (port_cmp >= 0) {
		/* Reject this request because the remote node will accept
		   ours */
		stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
		stat.un.b.lsRjtRsnCodeExp = LSEXP_CMD_IN_PROGRESS;
		lpfc_els_rsp_reject(phba, stat.un.lsRjtError, cmdiocb, ndlp);
	}
	else {
		lpfc_rcv_plogi(phba, ndlp, cmdiocb);
	}			/* if our portname was less */

	return (ndlp->nlp_state);
}
+
/* Unexpected ELS (LOGO or other) received while our PLOGI is outstanding:
 * abort the PLOGI, arm a 1-second delayed PLOGI retry, answer the sender
 * (ACC for LOGO, our own LOGO otherwise), and park the node on NPR.
 */
static uint32_t
lpfc_rcv_els_plogi_issue(struct lpfc_hba * phba,
			  struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb;
	struct lpfc_dmabuf *pcmd;

	cmdiocb = (struct lpfc_iocbq *) arg;
	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;

	/* Make the received payload visible to the CPU */
	pci_dma_sync_single_for_cpu(phba->pcidev, pcmd->phys,
				    LPFC_BPL_SIZE, PCI_DMA_FROMDEVICE);

	/* software abort outstanding PLOGI */
	lpfc_els_abort(phba, ndlp, 1);
	mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
	ndlp->nlp_flag |= NLP_DELAY_TMO;

	if(evt == NLP_EVT_RCV_LOGO) {
		lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0);
	}
	else {
		lpfc_issue_els_logo(phba, ndlp, 0);
	}

	/* Put ndlp in npr list set plogi timer for 1 sec */
	ndlp->nlp_last_elscmd = (unsigned long)ELS_CMD_PLOGI;
	ndlp->nlp_state = NLP_STE_NPR_NODE;
	lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);

	return (ndlp->nlp_state);
}
+
/* PLOGI completion in PLOGI_ISSUE state: validate the service parameters
 * in the ACC and, if good, issue a REG_LOGIN mailbox command (moving the
 * node to REG_LOGIN_ISSUE).  Any failure along the way — bad status, bad
 * sparms, allocation or mailbox failure — frees the node.
 */
static uint32_t
lpfc_cmpl_plogi_plogi_issue(struct lpfc_hba * phba,
			    struct lpfc_nodelist * ndlp, void *arg,
			    uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb, *rspiocb;
	struct lpfc_dmabuf *pcmd, *prsp;
	uint32_t *lp;
	IOCB_t *irsp;
	struct serv_parm *sp;
	LPFC_MBOXQ_t *mbox;

	cmdiocb = (struct lpfc_iocbq *) arg;
	rspiocb = cmdiocb->context_un.rsp_iocb;

	/* A reg_login triggered by an accepted PLOGI is already pending;
	 * keep the current state.
	 */
	if (ndlp->nlp_flag & NLP_ACC_REGLOGIN) {
		return (ndlp->nlp_state);
	}

	irsp = &rspiocb->iocb;

	if (irsp->ulpStatus == 0) {
		pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;

		/* The response payload is the next buffer on the cmd list */
		prsp = (struct lpfc_dmabuf *) pcmd->list.next;
		lp = (uint32_t *) prsp->virt;

		/* Make the response payload visible to the CPU */
		pci_dma_sync_single_for_cpu(phba->pcidev, prsp->phys,
					    LPFC_BPL_SIZE, PCI_DMA_FROMDEVICE);

		/* Service parameters follow the 32-bit ELS command word */
		sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t));
		if ((lpfc_check_sparm(phba, ndlp, sp, CLASS3))) {
			/* PLOGI chkparm OK */
			lpfc_printf_log(phba,
					KERN_INFO,
					LOG_ELS,
					"%d:0121 PLOGI chkparm OK "
					"Data: x%x x%x x%x x%x\n",
					phba->brd_no,
					ndlp->nlp_DID, ndlp->nlp_state,
					ndlp->nlp_flag, ndlp->nlp_rpi);

			/* Use class 2 only if configured AND offered */
			if ((phba->cfg_fcp_class == 2) &&
			    (sp->cls2.classValid)) {
				ndlp->nlp_fcp_info |= CLASS2;
			} else {
				ndlp->nlp_fcp_info |= CLASS3;
			}

			if ((mbox = mempool_alloc(phba->mbox_mem_pool,
						  GFP_ATOMIC))) {
				/* Drop any stale RPI before registering */
				lpfc_unreg_rpi(phba, ndlp);
				if (lpfc_reg_login
				    (phba, irsp->un.elsreq64.remoteID,
				     (uint8_t *) sp, mbox, 0) == 0) {
					/* set_slim mailbox command needs to
					 * execute first, queue this command to
					 * be processed later.
					 */
					/* Well-known DIDs get dedicated
					 * reg_login completion handlers.
					 */
					switch(ndlp->nlp_DID) {
					case NameServer_DID:
						mbox->mbox_cmpl =
							lpfc_mbx_cmpl_ns_reg_login;
						break;
					case FDMI_DID:
						mbox->mbox_cmpl =
							lpfc_mbx_cmpl_fdmi_reg_login;
						break;
					default:
						mbox->mbox_cmpl =
							lpfc_mbx_cmpl_reg_login;
					}
					mbox->context2 = ndlp;
					if (lpfc_sli_issue_mbox(phba, mbox,
						(MBX_NOWAIT | MBX_STOP_IOCB))
					    != MBX_NOT_FINISHED) {
						ndlp->nlp_state =
							NLP_STE_REG_LOGIN_ISSUE;
						lpfc_nlp_list(phba, ndlp,
							NLP_REGLOGIN_LIST);
						return (ndlp->nlp_state);
					}
					/* mailbox could not be issued */
					mempool_free(mbox, phba->mbox_mem_pool);
				} else {
					/* lpfc_reg_login setup failed */
					mempool_free(mbox, phba->mbox_mem_pool);
				}
			}
		}
	}

	/* Free this node since the driver cannot login or has the wrong
	   sparm */
	lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
	return (NLP_STE_FREED_NODE);
}
+
+static uint32_t
+lpfc_device_rm_plogi_issue(struct lpfc_hba * phba,
+ struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
+{
+ /* software abort outstanding PLOGI */
+ lpfc_els_abort(phba, ndlp, 1);
+
+ lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
+ return (NLP_STE_FREED_NODE);
+}
+
+static uint32_t
+lpfc_device_recov_plogi_issue(struct lpfc_hba * phba,
+ struct lpfc_nodelist * ndlp, void *arg,
+ uint32_t evt)
+{
+ /* software abort outstanding PLOGI */
+ lpfc_els_abort(phba, ndlp, 1);
+
+ ndlp->nlp_state = NLP_STE_NPR_NODE;
+ lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
+ ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
+
+ return (ndlp->nlp_state);
+}
+
+static uint32_t
+lpfc_rcv_plogi_adisc_issue(struct lpfc_hba * phba,
+ struct lpfc_nodelist * ndlp, void *arg,
+ uint32_t evt)
+{
+ struct lpfc_iocbq *cmdiocb;
+ struct lpfc_dmabuf *pcmd;
+
+ /* software abort outstanding ADISC */
+ lpfc_els_abort(phba, ndlp, 1);
+
+ cmdiocb = (struct lpfc_iocbq *) arg;
+ pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+
+ pci_dma_sync_single_for_cpu(phba->pcidev, pcmd->phys,
+ LPFC_BPL_SIZE, PCI_DMA_FROMDEVICE);
+
+ if(lpfc_rcv_plogi(phba, ndlp, cmdiocb)) {
+ return (ndlp->nlp_state);
+ }
+ ndlp->nlp_state = NLP_STE_PLOGI_ISSUE;
+ lpfc_nlp_list(phba, ndlp, NLP_PLOGI_LIST);
+ lpfc_issue_els_plogi(phba, ndlp, 0);
+
+ return (ndlp->nlp_state);
+}
+
+static uint32_t
+lpfc_rcv_prli_adisc_issue(struct lpfc_hba * phba,
+ struct lpfc_nodelist * ndlp, void *arg,
+ uint32_t evt)
+{
+ struct lpfc_iocbq *cmdiocb;
+ struct lpfc_dmabuf *pcmd;
+
+ cmdiocb = (struct lpfc_iocbq *) arg;
+ pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+
+ pci_dma_sync_single_for_cpu(phba->pcidev, pcmd->phys,
+ LPFC_BPL_SIZE, PCI_DMA_FROMDEVICE);
+
+ lpfc_els_rsp_prli_acc(phba, cmdiocb, ndlp);
+ return (ndlp->nlp_state);
+}
+
+static uint32_t
+lpfc_rcv_logo_adisc_issue(struct lpfc_hba * phba,
+ struct lpfc_nodelist * ndlp, void *arg,
+ uint32_t evt)
+{
+ struct lpfc_iocbq *cmdiocb;
+ struct lpfc_dmabuf *pcmd;
+
+ cmdiocb = (struct lpfc_iocbq *) arg;
+ pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+
+ pci_dma_sync_single_for_cpu(phba->pcidev, pcmd->phys,
+ LPFC_BPL_SIZE, PCI_DMA_FROMDEVICE);
+
+ /* software abort outstanding ADISC */
+ lpfc_els_abort(phba, ndlp, 0);
+
+ lpfc_rcv_logo(phba, ndlp, cmdiocb);
+ return (ndlp->nlp_state);
+}
+
+static uint32_t
+lpfc_rcv_padisc_adisc_issue(struct lpfc_hba * phba,
+ struct lpfc_nodelist * ndlp, void *arg,
+ uint32_t evt)
+{
+ struct lpfc_iocbq *cmdiocb;
+ struct lpfc_dmabuf *pcmd;
+
+ cmdiocb = (struct lpfc_iocbq *) arg;
+ pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+
+ pci_dma_sync_single_for_cpu(phba->pcidev, pcmd->phys,
+ LPFC_BPL_SIZE, PCI_DMA_FROMDEVICE);
+
+ lpfc_rcv_padisc(phba, ndlp, cmdiocb);
+ return (ndlp->nlp_state);
+}
+
+static uint32_t
+lpfc_rcv_prlo_adisc_issue(struct lpfc_hba * phba,
+ struct lpfc_nodelist * ndlp, void *arg,
+ uint32_t evt)
+{
+ struct lpfc_iocbq *cmdiocb;
+ struct lpfc_dmabuf *pcmd;
+
+ cmdiocb = (struct lpfc_iocbq *) arg;
+ pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+
+ pci_dma_sync_single_for_cpu(phba->pcidev, pcmd->phys,
+ LPFC_BPL_SIZE, PCI_DMA_FROMDEVICE);
+
+ /* Treat like rcv logo */
+ lpfc_rcv_logo(phba, ndlp, cmdiocb);
+ return (ndlp->nlp_state);
+}
+
/* ADISC completion: on failure (bad status or WWN mismatch) wipe the
 * cached names, arm a 1-second delayed PLOGI, unregister the RPI, and
 * park the node on NPR.  On success, try to give the node a scsi id
 * (mapped list), else place it on the unmapped list.
 */
static uint32_t
lpfc_cmpl_adisc_adisc_issue(struct lpfc_hba * phba,
			    struct lpfc_nodelist * ndlp, void *arg,
			    uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb, *rspiocb;
	struct lpfc_bindlist *blp;
	IOCB_t *irsp;
	ADISC *ap;

	cmdiocb = (struct lpfc_iocbq *) arg;
	rspiocb = cmdiocb->context_un.rsp_iocb;

	ap = (ADISC *)lpfc_check_elscmpl_iocb(phba, cmdiocb, rspiocb);
	irsp = &rspiocb->iocb;

	if ((irsp->ulpStatus) ||
	    (!lpfc_check_adisc(phba, ndlp, &ap->nodeName, &ap->portName))) {
		/* ADISC failed: fall back to a delayed PLOGI */
		ndlp->nlp_last_elscmd = (unsigned long)ELS_CMD_PLOGI;
		/* 1 sec timeout */
		mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
		ndlp->nlp_flag |= NLP_DELAY_TMO;

		/* Names no longer trusted; the PLOGI will refresh them */
		memset(&ndlp->nlp_nodename, 0, sizeof (struct lpfc_name));
		memset(&ndlp->nlp_portname, 0, sizeof (struct lpfc_name));

		ndlp->nlp_state = NLP_STE_NPR_NODE;
		lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
		lpfc_unreg_rpi(phba, ndlp);
		return (ndlp->nlp_state);
	}
	/* move to mapped / unmapped list accordingly */
	/* Can we assign a SCSI Id to this NPort */
	if ((blp = lpfc_consistent_bind_get(phba, ndlp))) {
		/* Next 4 lines MUST be in this order */
		if(lpfc_assign_binding(phba, ndlp, blp)) {
			ndlp->nlp_state = NLP_STE_MAPPED_NODE;
			lpfc_nlp_list(phba, ndlp, NLP_MAPPED_LIST);
			ndlp->nlp_listp_bind = blp;

			lpfc_set_failmask(phba, ndlp,
					  (LPFC_DEV_DISCOVERY_INP|LPFC_DEV_DISCONNECTED),
					  LPFC_CLR_BITMASK);

			return (ndlp->nlp_state);
		}
	}
	/* No scsi id available: node stays reachable but unmapped */
	ndlp->nlp_flag |= NLP_TGT_NO_SCSIID;
	ndlp->nlp_state = NLP_STE_UNMAPPED_NODE;
	lpfc_nlp_list(phba, ndlp, NLP_UNMAPPED_LIST);

	lpfc_set_failmask(phba, ndlp,
			  (LPFC_DEV_DISCOVERY_INP | LPFC_DEV_DISCONNECTED),
			  LPFC_CLR_BITMASK);

	return (ndlp->nlp_state);
}
+
+static uint32_t
+lpfc_device_rm_adisc_issue(struct lpfc_hba * phba,
+ struct lpfc_nodelist * ndlp, void *arg,
+ uint32_t evt)
+{
+ /* software abort outstanding ADISC */
+ lpfc_els_abort(phba, ndlp, 1);
+
+ lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
+ return (NLP_STE_FREED_NODE);
+}
+
+static uint32_t
+lpfc_device_recov_adisc_issue(struct lpfc_hba * phba,
+ struct lpfc_nodelist * ndlp, void *arg,
+ uint32_t evt)
+{
+ /* software abort outstanding ADISC */
+ lpfc_els_abort(phba, ndlp, 1);
+
+ ndlp->nlp_state = NLP_STE_NPR_NODE;
+ lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
+ ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
+
+ lpfc_disc_set_adisc(phba, ndlp);
+ return (ndlp->nlp_state);
+}
+
+static uint32_t
+lpfc_rcv_plogi_reglogin_issue(struct lpfc_hba * phba,
+ struct lpfc_nodelist * ndlp, void *arg,
+ uint32_t evt)
+{
+ struct lpfc_iocbq *cmdiocb;
+ struct lpfc_dmabuf *pcmd;
+
+ cmdiocb = (struct lpfc_iocbq *) arg;
+ pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+
+ pci_dma_sync_single_for_cpu(phba->pcidev, pcmd->phys,
+ LPFC_BPL_SIZE, PCI_DMA_FROMDEVICE);
+
+ lpfc_rcv_plogi(phba, ndlp, cmdiocb);
+ return (ndlp->nlp_state);
+}
+
+static uint32_t
+lpfc_rcv_prli_reglogin_issue(struct lpfc_hba * phba,
+ struct lpfc_nodelist * ndlp, void *arg,
+ uint32_t evt)
+{
+ struct lpfc_iocbq *cmdiocb;
+ struct lpfc_dmabuf *pcmd;
+
+ cmdiocb = (struct lpfc_iocbq *) arg;
+ pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+
+ pci_dma_sync_single_for_cpu(phba->pcidev, pcmd->phys,
+ LPFC_BPL_SIZE, PCI_DMA_FROMDEVICE);
+
+ lpfc_els_rsp_prli_acc(phba, cmdiocb, ndlp);
+ return (ndlp->nlp_state);
+}
+
+static uint32_t
+lpfc_rcv_logo_reglogin_issue(struct lpfc_hba * phba,
+ struct lpfc_nodelist * ndlp, void *arg,
+ uint32_t evt)
+{
+ struct lpfc_iocbq *cmdiocb;
+ struct lpfc_dmabuf *pcmd;
+
+ cmdiocb = (struct lpfc_iocbq *) arg;
+ pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+
+ pci_dma_sync_single_for_cpu(phba->pcidev, pcmd->phys,
+ LPFC_BPL_SIZE, PCI_DMA_FROMDEVICE);
+
+ lpfc_rcv_logo(phba, ndlp, cmdiocb);
+ return (ndlp->nlp_state);
+}
+
+static uint32_t
+lpfc_rcv_padisc_reglogin_issue(struct lpfc_hba * phba,
+ struct lpfc_nodelist * ndlp, void *arg,
+ uint32_t evt)
+{
+ struct lpfc_iocbq *cmdiocb;
+ struct lpfc_dmabuf *pcmd;
+
+ cmdiocb = (struct lpfc_iocbq *) arg;
+ pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+
+ pci_dma_sync_single_for_cpu(phba->pcidev, pcmd->phys,
+ LPFC_BPL_SIZE, PCI_DMA_FROMDEVICE);
+
+ lpfc_rcv_padisc(phba, ndlp, cmdiocb);
+ return (ndlp->nlp_state);
+}
+
+static uint32_t
+lpfc_rcv_prlo_reglogin_issue(struct lpfc_hba * phba,
+ struct lpfc_nodelist * ndlp, void *arg,
+ uint32_t evt)
+{
+ struct lpfc_iocbq *cmdiocb;
+
+ cmdiocb = (struct lpfc_iocbq *) arg;
+ lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0);
+ return (ndlp->nlp_state);
+}
+
/* REG_LOGIN mailbox completion.  On failure: LOGO the node and arm a
 * 1-second delayed PLOGI, parking the node on NPR.  On success: record
 * the new RPI (replacing any old RPI->node mapping), then issue PRLI
 * for non-fabric nodes or move fabric nodes straight to unmapped.
 */
static uint32_t
lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_hba * phba,
				  struct lpfc_nodelist * ndlp,
				  void *arg, uint32_t evt)
{
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	uint32_t did;

	pmb = (LPFC_MBOXQ_t *) arg;
	mb = &pmb->mb;
	did = mb->un.varWords[1];
	if (mb->mbxStatus) {
		/* RegLogin failed */
		lpfc_printf_log(phba,
				KERN_ERR,
				LOG_DISCOVERY,
				"%d:0246 RegLogin failed Data: x%x x%x x%x\n",
				phba->brd_no,
				did, mb->mbxStatus, phba->hba_state);

		mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
		ndlp->nlp_flag |= NLP_DELAY_TMO;

		lpfc_issue_els_logo(phba, ndlp, 0);
		/* Put ndlp in npr list set plogi timer for 1 sec */
		ndlp->nlp_last_elscmd = (unsigned long)ELS_CMD_PLOGI;
		ndlp->nlp_state = NLP_STE_NPR_NODE;
		lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
		return (ndlp->nlp_state);
	}

	/* Drop any stale RPI->node mapping before recording the new one */
	if (ndlp->nlp_rpi != 0)
		lpfc_findnode_remove_rpi(phba, ndlp->nlp_rpi);

	ndlp->nlp_rpi = mb->un.varWords[0];
	lpfc_addnode_rpi(phba, ndlp, ndlp->nlp_rpi);

	/* Only if we are not a fabric nport do we issue PRLI */
	if (!(ndlp->nlp_type & NLP_FABRIC)) {
		ndlp->nlp_state = NLP_STE_PRLI_ISSUE;
		lpfc_nlp_list(phba, ndlp, NLP_PRLI_LIST);
		lpfc_issue_els_prli(phba, ndlp, 0);
	} else {
		ndlp->nlp_state = NLP_STE_UNMAPPED_NODE;
		lpfc_nlp_list(phba, ndlp, NLP_UNMAPPED_LIST);
	}
	return (ndlp->nlp_state);
}
+
+static uint32_t
+lpfc_device_rm_reglogin_issue(struct lpfc_hba * phba,
+ struct lpfc_nodelist * ndlp, void *arg,
+ uint32_t evt)
+{
+ lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
+ return (NLP_STE_FREED_NODE);
+}
+
+static uint32_t
+lpfc_device_recov_reglogin_issue(struct lpfc_hba * phba,
+ struct lpfc_nodelist * ndlp, void *arg,
+ uint32_t evt)
+{
+ ndlp->nlp_state = NLP_STE_NPR_NODE;
+ lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
+ ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
+ return (ndlp->nlp_state);
+}
+
+static uint32_t
+lpfc_rcv_plogi_prli_issue(struct lpfc_hba * phba,
+ struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
+{
+ struct lpfc_iocbq *cmdiocb;
+ struct lpfc_dmabuf *pcmd;
+
+ cmdiocb = (struct lpfc_iocbq *) arg;
+ pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+
+ pci_dma_sync_single_for_cpu(phba->pcidev, pcmd->phys,
+ LPFC_BPL_SIZE, PCI_DMA_FROMDEVICE);
+
+ lpfc_rcv_plogi(phba, ndlp, cmdiocb);
+ return (ndlp->nlp_state);
+}
+
+static uint32_t
+lpfc_rcv_prli_prli_issue(struct lpfc_hba * phba,
+ struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
+{
+ struct lpfc_iocbq *cmdiocb;
+ struct lpfc_dmabuf *pcmd;
+
+ cmdiocb = (struct lpfc_iocbq *) arg;
+ pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+
+ pci_dma_sync_single_for_cpu(phba->pcidev, pcmd->phys,
+ LPFC_BPL_SIZE, PCI_DMA_FROMDEVICE);
+
+ lpfc_els_rsp_prli_acc(phba, cmdiocb, ndlp);
+ return (ndlp->nlp_state);
+}
+
+static uint32_t
+lpfc_rcv_logo_prli_issue(struct lpfc_hba * phba,
+ struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
+{
+ struct lpfc_iocbq *cmdiocb;
+ struct lpfc_dmabuf *pcmd;
+
+ cmdiocb = (struct lpfc_iocbq *) arg;
+ pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+
+ pci_dma_sync_single_for_cpu(phba->pcidev, pcmd->phys,
+ LPFC_BPL_SIZE, PCI_DMA_FROMDEVICE);
+
+ /* Software abort outstanding PRLI before sending acc */
+ lpfc_els_abort(phba, ndlp, 1);
+
+ lpfc_rcv_logo(phba, ndlp, cmdiocb);
+ return (ndlp->nlp_state);
+}
+
+static uint32_t
+lpfc_rcv_padisc_prli_issue(struct lpfc_hba * phba,
+ struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
+{
+ struct lpfc_iocbq *cmdiocb;
+ struct lpfc_dmabuf *pcmd;
+
+ cmdiocb = (struct lpfc_iocbq *) arg;
+ pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+
+ pci_dma_sync_single_for_cpu(phba->pcidev, pcmd->phys,
+ LPFC_BPL_SIZE, PCI_DMA_FROMDEVICE);
+
+ lpfc_rcv_padisc(phba, ndlp, cmdiocb);
+ return (ndlp->nlp_state);
+}
+
+/* This routine is envoked when we rcv a PRLO request from a nport
+ * we are logged into. We should send back a PRLO rsp setting the
+ * appropriate bits.
+ * NEXT STATE = PRLI_ISSUE
+ */
+static uint32_t
+lpfc_rcv_prlo_prli_issue(struct lpfc_hba * phba,
+ struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
+{
+ struct lpfc_iocbq *cmdiocb;
+
+ cmdiocb = (struct lpfc_iocbq *) arg;
+ lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0);
+ return (ndlp->nlp_state);
+}
+
/* PRLI completion.  A failed or non-target PRLI leaves the node on the
 * unmapped list.  On a good FCP target response, record FCP-2 support
 * and try to assign a scsi id (mapped list); otherwise unmapped.
 */
static uint32_t
lpfc_cmpl_prli_prli_issue(struct lpfc_hba * phba,
			  struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb, *rspiocb;
	IOCB_t *irsp;
	PRLI *npr;
	struct lpfc_bindlist *blp;

	cmdiocb = (struct lpfc_iocbq *) arg;
	rspiocb = cmdiocb->context_un.rsp_iocb;
	npr = (PRLI *)lpfc_check_elscmpl_iocb(phba, cmdiocb, rspiocb);

	irsp = &rspiocb->iocb;
	if (irsp->ulpStatus) {
		/* PRLI failed: node stays logged in but unmapped */
		ndlp->nlp_state = NLP_STE_UNMAPPED_NODE;
		lpfc_nlp_list(phba, ndlp, NLP_UNMAPPED_LIST);
		lpfc_set_failmask(phba, ndlp, LPFC_DEV_DISCOVERY_INP,
				  LPFC_CLR_BITMASK);
		return (ndlp->nlp_state);
	}

	/* Check out PRLI rsp */
	/* Only an executed FCP-type PRLI with the target function bit
	 * set qualifies the node as a SCSI target.
	 */
	if ((npr->acceptRspCode != PRLI_REQ_EXECUTED) ||
	    (npr->prliType != PRLI_FCP_TYPE) || (npr->targetFunc != 1)) {
		ndlp->nlp_state = NLP_STE_UNMAPPED_NODE;
		lpfc_nlp_list(phba, ndlp, NLP_UNMAPPED_LIST);
		lpfc_set_failmask(phba, ndlp,
				  (LPFC_DEV_DISCOVERY_INP | LPFC_DEV_DISCONNECTED),
				  LPFC_CLR_BITMASK);
		return (ndlp->nlp_state);
	}
	if (npr->Retry == 1) {
		/* Target supports FCP-2 error recovery */
		ndlp->nlp_fcp_info |= NLP_FCP_2_DEVICE;
	}

	/* Can we assign a SCSI Id to this NPort */
	if ((blp = lpfc_consistent_bind_get(phba, ndlp))) {
		/* Next 4 lines MUST be in this order */
		if(lpfc_assign_binding(phba, ndlp, blp)) {
			ndlp->nlp_state = NLP_STE_MAPPED_NODE;
			lpfc_nlp_list(phba, ndlp, NLP_MAPPED_LIST);
			ndlp->nlp_listp_bind = blp;

			lpfc_set_failmask(phba, ndlp,
					  (LPFC_DEV_DISCOVERY_INP|LPFC_DEV_DISCONNECTED),
					  LPFC_CLR_BITMASK);
			return (ndlp->nlp_state);
		}
	}
	/* No scsi id available: node stays reachable but unmapped */
	ndlp->nlp_flag |= NLP_TGT_NO_SCSIID;
	ndlp->nlp_state = NLP_STE_UNMAPPED_NODE;
	lpfc_nlp_list(phba, ndlp, NLP_UNMAPPED_LIST);

	lpfc_set_failmask(phba, ndlp,
			  (LPFC_DEV_DISCOVERY_INP | LPFC_DEV_DISCONNECTED),
			  LPFC_CLR_BITMASK);
	return (ndlp->nlp_state);
}
+
+/*! lpfc_device_rm_prli_issue
+ *
+ * \pre
+ * \post
+ * \param phba
+ * \param ndlp
+ * \param arg
+ * \param evt
+ * \return uint32_t
+ *
+ * \b Description:
+ * This routine is envoked when we a request to remove a nport we are in the
+ * process of PRLIing. We should software abort outstanding prli, unreg
+ * login, send a logout. We will change node state to UNUSED_NODE, put it
+ * on plogi list so it can be freed when LOGO completes.
+ *
+ */
+static uint32_t
+lpfc_device_rm_prli_issue(struct lpfc_hba * phba,
+ struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
+{
+ /* software abort outstanding PRLI */
+ lpfc_els_abort(phba, ndlp, 1);
+
+ lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
+ return (NLP_STE_FREED_NODE);
+}
+
+
+/*! lpfc_device_recov_prli_issue
+ *
+ * \pre
+ * \post
+ * \param phba
+ * \param ndlp
+ * \param arg
+ * \param evt
+ * \return uint32_t
+ *
+ * \b Description:
+ * The routine is envoked when the state of a device is unknown, like
+ * during a link down. We should remove the nodelist entry from the
+ * unmapped list, issue a UNREG_LOGIN, do a software abort of the
+ * outstanding PRLI command, then free the node entry.
+ */
+static uint32_t
+lpfc_device_recov_prli_issue(struct lpfc_hba * phba,
+ struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
+{
+ /* software abort outstanding PRLI */
+ lpfc_els_abort(phba, ndlp, 1);
+
+ ndlp->nlp_state = NLP_STE_NPR_NODE;
+ lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
+ ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
+ return (ndlp->nlp_state);
+}
+
+static uint32_t
+lpfc_rcv_plogi_unmap_node(struct lpfc_hba * phba,
+ struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
+{
+ struct lpfc_iocbq *cmdiocb;
+ struct lpfc_dmabuf *pcmd;
+
+ cmdiocb = (struct lpfc_iocbq *) arg;
+ pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+
+ pci_dma_sync_single_for_cpu(phba->pcidev, pcmd->phys,
+ LPFC_BPL_SIZE, PCI_DMA_FROMDEVICE);
+
+ lpfc_rcv_plogi(phba, ndlp, cmdiocb);
+ return (ndlp->nlp_state);
+}
+
+static uint32_t
+lpfc_rcv_prli_unmap_node(struct lpfc_hba * phba,
+ struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
+{
+ struct lpfc_iocbq *cmdiocb;
+ struct lpfc_dmabuf *pcmd;
+
+ cmdiocb = (struct lpfc_iocbq *) arg;
+ pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+
+ pci_dma_sync_single_for_cpu(phba->pcidev, pcmd->phys,
+ LPFC_BPL_SIZE, PCI_DMA_FROMDEVICE);
+
+ lpfc_els_rsp_prli_acc(phba, cmdiocb, ndlp);
+ return (ndlp->nlp_state);
+}
+
+static uint32_t
+lpfc_rcv_logo_unmap_node(struct lpfc_hba * phba,
+ struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
+{
+ struct lpfc_iocbq *cmdiocb;
+ struct lpfc_dmabuf *pcmd;
+
+ cmdiocb = (struct lpfc_iocbq *) arg;
+ pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+
+ pci_dma_sync_single_for_cpu(phba->pcidev, pcmd->phys,
+ LPFC_BPL_SIZE, PCI_DMA_FROMDEVICE);
+
+ lpfc_rcv_logo(phba, ndlp, cmdiocb);
+ return (ndlp->nlp_state);
+}
+
+static uint32_t
+lpfc_rcv_padisc_unmap_node(struct lpfc_hba * phba,
+ struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
+{
+ struct lpfc_iocbq *cmdiocb;
+ struct lpfc_dmabuf *pcmd;
+
+ cmdiocb = (struct lpfc_iocbq *) arg;
+ pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+
+ pci_dma_sync_single_for_cpu(phba->pcidev, pcmd->phys,
+ LPFC_BPL_SIZE, PCI_DMA_FROMDEVICE);
+
+ lpfc_rcv_padisc(phba, ndlp, cmdiocb);
+ return (ndlp->nlp_state);
+}
+
+static uint32_t
+lpfc_rcv_prlo_unmap_node(struct lpfc_hba * phba,
+ struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
+{
+ struct lpfc_iocbq *cmdiocb;
+ struct lpfc_dmabuf *pcmd;
+
+ cmdiocb = (struct lpfc_iocbq *) arg;
+ pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+
+ pci_dma_sync_single_for_cpu(phba->pcidev, pcmd->phys,
+ LPFC_BPL_SIZE, PCI_DMA_FROMDEVICE);
+
+ /* Treat like rcv logo */
+ lpfc_rcv_logo(phba, ndlp, cmdiocb);
+ return (ndlp->nlp_state);
+}
+
+static uint32_t
+lpfc_device_recov_unmap_node(struct lpfc_hba * phba,
+ struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
+{
+ ndlp->nlp_state = NLP_STE_NPR_NODE;
+ lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
+ ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
+ lpfc_disc_set_adisc(phba, ndlp);
+
+ return (ndlp->nlp_state);
+}
+
+static uint32_t
+lpfc_rcv_plogi_mapped_node(struct lpfc_hba * phba,
+ struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
+{
+ struct lpfc_iocbq *cmdiocb;
+ struct lpfc_dmabuf *pcmd;
+
+ cmdiocb = (struct lpfc_iocbq *) arg;
+ pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+
+ pci_dma_sync_single_for_cpu(phba->pcidev, pcmd->phys,
+ LPFC_BPL_SIZE, PCI_DMA_FROMDEVICE);
+
+ lpfc_rcv_plogi(phba, ndlp, cmdiocb);
+ return (ndlp->nlp_state);
+}
+
+static uint32_t
+lpfc_rcv_prli_mapped_node(struct lpfc_hba * phba,
+ struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
+{
+ struct lpfc_iocbq *cmdiocb;
+ struct lpfc_dmabuf *pcmd;
+
+ cmdiocb = (struct lpfc_iocbq *) arg;
+ pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+
+ pci_dma_sync_single_for_cpu(phba->pcidev, pcmd->phys,
+ LPFC_BPL_SIZE, PCI_DMA_FROMDEVICE);
+
+ lpfc_els_rsp_prli_acc(phba, cmdiocb, ndlp);
+ return (ndlp->nlp_state);
+}
+
+static uint32_t
+lpfc_rcv_logo_mapped_node(struct lpfc_hba * phba,
+ struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
+{
+ struct lpfc_iocbq *cmdiocb;
+ struct lpfc_dmabuf *pcmd;
+
+ cmdiocb = (struct lpfc_iocbq *) arg;
+ pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+
+ pci_dma_sync_single_for_cpu(phba->pcidev, pcmd->phys,
+ LPFC_BPL_SIZE, PCI_DMA_FROMDEVICE);
+
+ lpfc_rcv_logo(phba, ndlp, cmdiocb);
+ return (ndlp->nlp_state);
+}
+
+static uint32_t
+lpfc_rcv_padisc_mapped_node(struct lpfc_hba * phba,
+ struct lpfc_nodelist * ndlp, void *arg,
+ uint32_t evt)
+{
+ struct lpfc_iocbq *cmdiocb;
+ struct lpfc_dmabuf *pcmd;
+
+ cmdiocb = (struct lpfc_iocbq *) arg;
+ pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+
+ pci_dma_sync_single_for_cpu(phba->pcidev, pcmd->phys,
+ LPFC_BPL_SIZE, PCI_DMA_FROMDEVICE);
+
+ lpfc_rcv_padisc(phba, ndlp, cmdiocb);
+ return (ndlp->nlp_state);
+}
+
+static uint32_t
+lpfc_rcv_prlo_mapped_node(struct lpfc_hba * phba,
+ struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
+{
+ struct lpfc_iocbq *cmdiocb;
+ struct lpfc_dmabuf *pcmd;
+
+ cmdiocb = (struct lpfc_iocbq *) arg;
+ pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+
+ pci_dma_sync_single_for_cpu(phba->pcidev, pcmd->phys,
+ LPFC_BPL_SIZE, PCI_DMA_FROMDEVICE);
+
+ /* flush the target */
+ lpfc_sli_abort_iocb_tgt(phba,
+ &phba->sli.ring[phba->sli.fcp_ring],
+ ndlp->nlp_sid, LPFC_ABORT_ALLQ);
+
+ /* Treat like rcv logo */
+ lpfc_rcv_logo(phba, ndlp, cmdiocb);
+ return (ndlp->nlp_state);
+}
+
+static uint32_t
+lpfc_device_recov_mapped_node(struct lpfc_hba * phba,
+ struct lpfc_nodelist * ndlp, void *arg,
+ uint32_t evt)
+{
+ /* Link event on a mapped node: park it in NPR (NPort Recovery)
+  * state/list so discovery can revalidate it later.
+  */
+ ndlp->nlp_state = NLP_STE_NPR_NODE;
+ lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
+ ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
+ /* presumably decides ADISC vs PLOGI for recovery -- confirm in
+  * lpfc_disc_set_adisc() */
+ lpfc_disc_set_adisc(phba, ndlp);
+ return (ndlp->nlp_state);
+}
+
+static uint32_t
+lpfc_rcv_plogi_npr_node(struct lpfc_hba * phba,
+                        struct lpfc_nodelist * ndlp, void *arg,
+                        uint32_t evt)
+{
+        /* PLOGI received while the node is in NPort Recovery state. */
+        struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
+        struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+
+        /* Make the DMA'ed ELS payload visible to the CPU before use */
+        pci_dma_sync_single_for_cpu(phba->pcidev, pcmd->phys,
+                                    LPFC_BPL_SIZE, PCI_DMA_FROMDEVICE);
+
+        /* Ignore PLOGI if we have an outstanding LOGO */
+        if (ndlp->nlp_flag & NLP_LOGO_SND)
+                return ndlp->nlp_state;
+
+        if (lpfc_rcv_plogi(phba, ndlp, cmdiocb)) {
+                ndlp->nlp_flag &= ~(NLP_NPR_ADISC | NLP_NPR_2B_DISC);
+                return ndlp->nlp_state;
+        }
+
+        /* No retry-delay timer pending: send our own PLOGI immediately
+         * and move to PLOGI issue state.
+         */
+        if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
+                ndlp->nlp_state = NLP_STE_PLOGI_ISSUE;
+                lpfc_nlp_list(phba, ndlp, NLP_PLOGI_LIST);
+                lpfc_issue_els_plogi(phba, ndlp, 0);
+        }
+        return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_rcv_prli_npr_node(struct lpfc_hba * phba,
+                       struct lpfc_nodelist * ndlp, void *arg,
+                       uint32_t evt)
+{
+        /* PRLI received while the node is in NPort Recovery state. */
+        struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
+        struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+        struct ls_rjt stat;
+
+        /* Make the DMA'ed ELS payload visible to the CPU before use */
+        pci_dma_sync_single_for_cpu(phba->pcidev, pcmd->phys,
+                                    LPFC_BPL_SIZE, PCI_DMA_FROMDEVICE);
+
+        /* Not ready to accept a PRLI here: reject it */
+        memset(&stat, 0, sizeof (stat));
+        stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
+        stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
+        lpfc_els_rsp_reject(phba, stat.un.lsRjtError, cmdiocb, ndlp);
+
+        /* Restart recovery unless a retry-delay timer is pending:
+         * ADISC when the node was marked for it, otherwise PLOGI.
+         */
+        if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
+                if (ndlp->nlp_flag & NLP_NPR_ADISC) {
+                        ndlp->nlp_state = NLP_STE_ADISC_ISSUE;
+                        lpfc_nlp_list(phba, ndlp, NLP_ADISC_LIST);
+                        lpfc_issue_els_adisc(phba, ndlp, 0);
+                } else {
+                        ndlp->nlp_state = NLP_STE_PLOGI_ISSUE;
+                        lpfc_nlp_list(phba, ndlp, NLP_PLOGI_LIST);
+                        lpfc_issue_els_plogi(phba, ndlp, 0);
+                }
+        }
+        return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_rcv_logo_npr_node(struct lpfc_hba * phba,
+                       struct lpfc_nodelist * ndlp, void *arg,
+                       uint32_t evt)
+{
+        /* LOGO received while the node is in NPort Recovery state. */
+        struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
+        struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+
+        /* Make the DMA'ed ELS payload visible to the CPU before use */
+        pci_dma_sync_single_for_cpu(phba->pcidev, pcmd->phys,
+                                    LPFC_BPL_SIZE, PCI_DMA_FROMDEVICE);
+
+        /* Common LOGO handling; this routine leaves nlp_state untouched */
+        lpfc_rcv_logo(phba, ndlp, cmdiocb);
+        return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_rcv_padisc_npr_node(struct lpfc_hba * phba,
+                         struct lpfc_nodelist * ndlp, void *arg,
+                         uint32_t evt)
+{
+        /* ADISC/PDISC received while the node is in NPort Recovery state. */
+        struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
+        struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+
+        /* Make the DMA'ed ELS payload visible to the CPU before use */
+        pci_dma_sync_single_for_cpu(phba->pcidev, pcmd->phys,
+                                    LPFC_BPL_SIZE, PCI_DMA_FROMDEVICE);
+
+        lpfc_rcv_padisc(phba, ndlp, cmdiocb);
+
+        /* Restart recovery unless a retry-delay timer is pending:
+         * ADISC when the node was marked for it, otherwise PLOGI.
+         */
+        if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
+                if (ndlp->nlp_flag & NLP_NPR_ADISC) {
+                        ndlp->nlp_state = NLP_STE_ADISC_ISSUE;
+                        lpfc_nlp_list(phba, ndlp, NLP_ADISC_LIST);
+                        lpfc_issue_els_adisc(phba, ndlp, 0);
+                } else {
+                        ndlp->nlp_state = NLP_STE_PLOGI_ISSUE;
+                        lpfc_nlp_list(phba, ndlp, NLP_PLOGI_LIST);
+                        lpfc_issue_els_plogi(phba, ndlp, 0);
+                }
+        }
+        return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_rcv_prlo_npr_node(struct lpfc_hba * phba,
+ struct lpfc_nodelist * ndlp, void *arg,
+ uint32_t evt)
+{
+ /* PRLO received in NPR state: ACC it, then (unless a delayed PLOGI
+  * is already queued) start a fresh PLOGI to the node.
+  */
+ struct lpfc_iocbq *cmdiocb;
+ struct lpfc_dmabuf *pcmd;
+
+ cmdiocb = (struct lpfc_iocbq *) arg;
+ pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+
+ /* Make the DMA'ed ELS payload visible to the CPU before use */
+ pci_dma_sync_single_for_cpu(phba->pcidev, pcmd->phys,
+ LPFC_BPL_SIZE, PCI_DMA_FROMDEVICE);
+
+ lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0);
+
+ if(ndlp->nlp_flag & NLP_DELAY_TMO) {
+ /* A delayed PLOGI is already pending: let the timer send it */
+ if (ndlp->nlp_last_elscmd == (unsigned long)ELS_CMD_PLOGI) {
+ return (ndlp->nlp_state);
+ } else {
+ /* Some other delayed ELS is queued: cancel it and PLOGI now */
+ del_timer_sync(&ndlp->nlp_delayfunc);
+ ndlp->nlp_flag &= ~NLP_DELAY_TMO;
+ }
+ }
+
+ ndlp->nlp_state = NLP_STE_PLOGI_ISSUE;
+ lpfc_nlp_list(phba, ndlp, NLP_PLOGI_LIST);
+ lpfc_issue_els_plogi(phba, ndlp, 0);
+ return (ndlp->nlp_state);
+}
+
+static uint32_t
+lpfc_cmpl_logo_npr_node(struct lpfc_hba * phba,
+ struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
+{
+ /* LOGO completed while in NPR state: release the node's RPI
+  * (firmware login). The node's state is left unchanged.
+  */
+ lpfc_unreg_rpi(phba, ndlp);
+ return (ndlp->nlp_state);
+}
+
+static uint32_t
+lpfc_cmpl_reglogin_npr_node(struct lpfc_hba * phba,
+                            struct lpfc_nodelist * ndlp, void *arg,
+                            uint32_t evt)
+{
+        /* REG_LOGIN mailbox completed in NPR state: record the RPI
+         * the adapter assigned to this node.
+         */
+        LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg;
+        MAILBOX_t *mb = &pmb->mb;
+
+        /* Drop any stale rpi -> node mapping before saving the new rpi */
+        if (ndlp->nlp_rpi != 0)
+                lpfc_findnode_remove_rpi(phba, ndlp->nlp_rpi);
+
+        /* The new RPI arrives in word 0 of the mailbox completion */
+        ndlp->nlp_rpi = mb->un.varWords[0];
+        lpfc_addnode_rpi(phba, ndlp, ndlp->nlp_rpi);
+
+        return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_device_rm_npr_node(struct lpfc_hba * phba,
+ struct lpfc_nodelist * ndlp, void *arg,
+ uint32_t evt)
+{
+ /* Remove the node: NLP_NO_LIST takes it off all discovery lists
+  * (and presumably releases it -- confirm in lpfc_nlp_list).
+  * Returning NLP_STE_FREED_NODE tells the state machine caller not
+  * to touch ndlp again.
+  */
+ lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
+ return (NLP_STE_FREED_NODE);
+}
+
+static uint32_t
+lpfc_device_recov_npr_node(struct lpfc_hba * phba,
+ struct lpfc_nodelist * ndlp, void *arg,
+ uint32_t evt)
+{
+ /* Already in NPR state: just clear the "needs discovery" flag;
+  * state and list membership are unchanged.
+  */
+ ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
+ return (ndlp->nlp_state);
+}
+
+
+/* This next section defines the NPort Discovery State Machine */
+
+/* There are 4 different double linked lists nodelist entries can reside on.
+ * The plogi list and adisc list are used when Link Up discovery or RSCN
+ * processing is needed. Each list holds the nodes that we will send PLOGI
+ * or ADISC on. These lists will keep track of what nodes will be affected
+ * by an RSCN, or a Link Up (Typically, all nodes are affected on Link Up).
+ * The unmapped_list will contain all nodes that we have successfully logged
+ * into at the Fibre Channel level. The mapped_list will contain all nodes
+ * that are mapped FCP targets.
+ */
+/*
+ * The bind list is a list of undiscovered (potentially non-existent) nodes
+ * that we have saved binding information on. This information is used when
+ * nodes transition from the unmapped to the mapped list.
+ */
+/* For UNUSED_NODE state, the node has just been allocated.
+ * For PLOGI_ISSUE and REG_LOGIN_ISSUE, the node is on
+ * the PLOGI list. For REG_LOGIN_COMPL, the node is taken off the PLOGI list
+ * and put on the unmapped list. For ADISC processing, the node is taken off
+ * the ADISC list and placed on either the mapped or unmapped list (depending
+ * on its previous state). Once on the unmapped list, a PRLI is issued and the
+ * state changed to PRLI_ISSUE. When the PRLI completion occurs, the state is
+ * changed to UNMAPPED_NODE. If the completion indicates a mapped
+ * node, the node is taken off the unmapped list. The binding list is checked
+ * for a valid binding, or a binding is automatically assigned. If binding
+ * assignment is unsuccessful, the node is left on the unmapped list. If
+ * binding assignment is successful, the associated binding list entry (if
+ * any) is removed, and the node is placed on the mapped list.
+ */
+/*
+ * For a Link Down, all nodes on the ADISC, PLOGI, unmapped or mapped
+ * lists will receive a DEVICE_RECOVERY event. If the linkdown or nodev timers
+ * expire, all affected nodes will receive a DEVICE_RM event.
+ */
+/*
+ * For a Link Up or RSCN, all nodes will move from the mapped / unmapped lists
+ * to either the ADISC or PLOGI list. After a Nameserver query or ALPA loopmap
+ * check, additional nodes may be added or removed (via DEVICE_RM) to / from
+ * the PLOGI or ADISC lists. Once the PLOGI and ADISC lists are populated,
+ * we will first process the ADISC list. 32 entries are processed initially and
+ * ADISC is initiated for each one. Completions / Events for each node are
+ * funneled through the state machine. As each node finishes ADISC processing,
+ * it starts ADISC for any nodes waiting for ADISC processing. If no nodes are
+ * waiting, and the ADISC list count is identically 0, then we are done. For
+ * Link Up discovery, since all nodes on the PLOGI list are UNREG_LOGIN'ed, we
+ * can issue a CLEAR_LA and reenable Link Events. Next we will process the PLOGI
+ * list. 32 entries are processed initially and PLOGI is initiated for each one.
+ * Completions / Events for each node are funneled through the state machine.
+ * As each node finishes PLOGI processing, it starts PLOGI for any nodes waiting
+ * for PLOGI processing. If no nodes are waiting, and the PLOGI list count is
+ * identically 0, then we are done. We have now completed discovery / RSCN
+ * handling. Upon completion, ALL nodes should be on either the mapped or
+ * unmapped lists.
+ */
+
+/* Jump table of DSM action routines, indexed by
+ * (current state * NLP_EVT_MAX_EVENT) + event -- see
+ * lpfc_disc_state_machine() below. Each state contributes one entry per
+ * event, in event-number order; lpfc_disc_illegal / lpfc_disc_noop fill
+ * the (state, event) combinations that carry no action.
+ */
+static void *lpfc_disc_action[NLP_STE_MAX_STATE * NLP_EVT_MAX_EVENT] = {
+ /* Action routine Event Current State */
+ (void *)lpfc_rcv_plogi_unused_node, /* RCV_PLOGI UNUSED_NODE */
+ (void *)lpfc_rcv_els_unused_node, /* RCV_PRLI */
+ (void *)lpfc_rcv_logo_unused_node, /* RCV_LOGO */
+ (void *)lpfc_rcv_els_unused_node, /* RCV_ADISC */
+ (void *)lpfc_rcv_els_unused_node, /* RCV_PDISC */
+ (void *)lpfc_rcv_els_unused_node, /* RCV_PRLO */
+ (void *)lpfc_disc_illegal, /* CMPL_PLOGI */
+ (void *)lpfc_disc_illegal, /* CMPL_PRLI */
+ (void *)lpfc_cmpl_logo_unused_node, /* CMPL_LOGO */
+ (void *)lpfc_disc_illegal, /* CMPL_ADISC */
+ (void *)lpfc_disc_illegal, /* CMPL_REG_LOGIN */
+ (void *)lpfc_device_rm_unused_node, /* DEVICE_RM */
+ (void *)lpfc_disc_illegal, /* DEVICE_RECOVERY */
+
+ (void *)lpfc_rcv_plogi_plogi_issue, /* RCV_PLOGI PLOGI_ISSUE */
+ (void *)lpfc_rcv_els_plogi_issue, /* RCV_PRLI */
+ (void *)lpfc_rcv_els_plogi_issue, /* RCV_LOGO */
+ (void *)lpfc_rcv_els_plogi_issue, /* RCV_ADISC */
+ (void *)lpfc_rcv_els_plogi_issue, /* RCV_PDISC */
+ (void *)lpfc_rcv_els_plogi_issue, /* RCV_PRLO */
+ (void *)lpfc_cmpl_plogi_plogi_issue, /* CMPL_PLOGI */
+ (void *)lpfc_disc_illegal, /* CMPL_PRLI */
+ (void *)lpfc_disc_illegal, /* CMPL_LOGO */
+ (void *)lpfc_disc_illegal, /* CMPL_ADISC */
+ (void *)lpfc_disc_illegal, /* CMPL_REG_LOGIN */
+ (void *)lpfc_device_rm_plogi_issue, /* DEVICE_RM */
+ (void *)lpfc_device_recov_plogi_issue, /* DEVICE_RECOVERY */
+
+ (void *)lpfc_rcv_plogi_adisc_issue, /* RCV_PLOGI ADISC_ISSUE */
+ (void *)lpfc_rcv_prli_adisc_issue, /* RCV_PRLI */
+ (void *)lpfc_rcv_logo_adisc_issue, /* RCV_LOGO */
+ (void *)lpfc_rcv_padisc_adisc_issue, /* RCV_ADISC */
+ (void *)lpfc_rcv_padisc_adisc_issue, /* RCV_PDISC */
+ (void *)lpfc_rcv_prlo_adisc_issue, /* RCV_PRLO */
+ (void *)lpfc_disc_illegal, /* CMPL_PLOGI */
+ (void *)lpfc_disc_illegal, /* CMPL_PRLI */
+ (void *)lpfc_disc_illegal, /* CMPL_LOGO */
+ (void *)lpfc_cmpl_adisc_adisc_issue, /* CMPL_ADISC */
+ (void *)lpfc_disc_illegal, /* CMPL_REG_LOGIN */
+ (void *)lpfc_device_rm_adisc_issue, /* DEVICE_RM */
+ (void *)lpfc_device_recov_adisc_issue, /* DEVICE_RECOVERY */
+
+ (void *)lpfc_rcv_plogi_reglogin_issue, /* RCV_PLOGI REG_LOGIN_ISSUE */
+ (void *)lpfc_rcv_prli_reglogin_issue, /* RCV_PRLI */
+ (void *)lpfc_rcv_logo_reglogin_issue, /* RCV_LOGO */
+ (void *)lpfc_rcv_padisc_reglogin_issue, /* RCV_ADISC */
+ (void *)lpfc_rcv_padisc_reglogin_issue, /* RCV_PDISC */
+ (void *)lpfc_rcv_prlo_reglogin_issue, /* RCV_PRLO */
+ (void *)lpfc_disc_illegal, /* CMPL_PLOGI */
+ (void *)lpfc_disc_illegal, /* CMPL_PRLI */
+ (void *)lpfc_disc_illegal, /* CMPL_LOGO */
+ (void *)lpfc_disc_illegal, /* CMPL_ADISC */
+ (void *)lpfc_cmpl_reglogin_reglogin_issue,/* CMPL_REG_LOGIN */
+ (void *)lpfc_device_rm_reglogin_issue, /* DEVICE_RM */
+ (void *)lpfc_device_recov_reglogin_issue,/* DEVICE_RECOVERY */
+
+ (void *)lpfc_rcv_plogi_prli_issue, /* RCV_PLOGI PRLI_ISSUE */
+ (void *)lpfc_rcv_prli_prli_issue, /* RCV_PRLI */
+ (void *)lpfc_rcv_logo_prli_issue, /* RCV_LOGO */
+ (void *)lpfc_rcv_padisc_prli_issue, /* RCV_ADISC */
+ (void *)lpfc_rcv_padisc_prli_issue, /* RCV_PDISC */
+ (void *)lpfc_rcv_prlo_prli_issue, /* RCV_PRLO */
+ (void *)lpfc_disc_illegal, /* CMPL_PLOGI */
+ (void *)lpfc_cmpl_prli_prli_issue, /* CMPL_PRLI */
+ (void *)lpfc_disc_illegal, /* CMPL_LOGO */
+ (void *)lpfc_disc_illegal, /* CMPL_ADISC */
+ (void *)lpfc_disc_illegal, /* CMPL_REG_LOGIN */
+ (void *)lpfc_device_rm_prli_issue, /* DEVICE_RM */
+ (void *)lpfc_device_recov_prli_issue, /* DEVICE_RECOVERY */
+
+ (void *)lpfc_rcv_plogi_unmap_node, /* RCV_PLOGI UNMAPPED_NODE */
+ (void *)lpfc_rcv_prli_unmap_node, /* RCV_PRLI */
+ (void *)lpfc_rcv_logo_unmap_node, /* RCV_LOGO */
+ (void *)lpfc_rcv_padisc_unmap_node, /* RCV_ADISC */
+ (void *)lpfc_rcv_padisc_unmap_node, /* RCV_PDISC */
+ (void *)lpfc_rcv_prlo_unmap_node, /* RCV_PRLO */
+ (void *)lpfc_disc_illegal, /* CMPL_PLOGI */
+ (void *)lpfc_disc_illegal, /* CMPL_PRLI */
+ (void *)lpfc_disc_illegal, /* CMPL_LOGO */
+ (void *)lpfc_disc_illegal, /* CMPL_ADISC */
+ (void *)lpfc_disc_illegal, /* CMPL_REG_LOGIN */
+ (void *)lpfc_disc_illegal, /* DEVICE_RM */
+ (void *)lpfc_device_recov_unmap_node, /* DEVICE_RECOVERY */
+
+ (void *)lpfc_rcv_plogi_mapped_node, /* RCV_PLOGI MAPPED_NODE */
+ (void *)lpfc_rcv_prli_mapped_node, /* RCV_PRLI */
+ (void *)lpfc_rcv_logo_mapped_node, /* RCV_LOGO */
+ (void *)lpfc_rcv_padisc_mapped_node, /* RCV_ADISC */
+ (void *)lpfc_rcv_padisc_mapped_node, /* RCV_PDISC */
+ (void *)lpfc_rcv_prlo_mapped_node, /* RCV_PRLO */
+ (void *)lpfc_disc_illegal, /* CMPL_PLOGI */
+ (void *)lpfc_disc_illegal, /* CMPL_PRLI */
+ (void *)lpfc_disc_illegal, /* CMPL_LOGO */
+ (void *)lpfc_disc_illegal, /* CMPL_ADISC */
+ (void *)lpfc_disc_illegal, /* CMPL_REG_LOGIN */
+ (void *)lpfc_disc_illegal, /* DEVICE_RM */
+ (void *)lpfc_device_recov_mapped_node, /* DEVICE_RECOVERY */
+
+ (void *)lpfc_rcv_plogi_npr_node, /* RCV_PLOGI NPR_NODE */
+ (void *)lpfc_rcv_prli_npr_node, /* RCV_PRLI */
+ (void *)lpfc_rcv_logo_npr_node, /* RCV_LOGO */
+ (void *)lpfc_rcv_padisc_npr_node, /* RCV_ADISC */
+ (void *)lpfc_rcv_padisc_npr_node, /* RCV_PDISC */
+ (void *)lpfc_rcv_prlo_npr_node, /* RCV_PRLO */
+ (void *)lpfc_disc_noop, /* CMPL_PLOGI */
+ (void *)lpfc_disc_noop, /* CMPL_PRLI */
+ (void *)lpfc_cmpl_logo_npr_node, /* CMPL_LOGO */
+ (void *)lpfc_disc_noop, /* CMPL_ADISC */
+ (void *)lpfc_cmpl_reglogin_npr_node, /* CMPL_REG_LOGIN */
+ (void *)lpfc_device_rm_npr_node, /* DEVICE_RM */
+ (void *)lpfc_device_recov_npr_node, /* DEVICE_RECOVERY */
+};
+
+/*
+ * Entry point of the NPort discovery state machine.
+ *
+ * Looks up the action routine for (current state, event) in
+ * lpfc_disc_action[], invokes it, and stores the state it returns into
+ * the node. Returns the resulting state, or NLP_STE_FREED_NODE when the
+ * node has been (or is about to be) freed -- callers must not touch
+ * ndlp after seeing that value.
+ *
+ * nlp_disc_refcnt is held across the call so that a removal requested
+ * while the DSM is running (NLP_DELAY_REMOVE) is deferred until the
+ * machine has finished with the node.
+ */
+int
+lpfc_disc_state_machine(struct lpfc_hba * phba,
+ struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
+{
+ uint32_t cur_state, rc;
+ uint32_t(*func) (struct lpfc_hba *, struct lpfc_nodelist *, void *,
+ uint32_t);
+
+ ndlp->nlp_disc_refcnt++;
+ cur_state = ndlp->nlp_state;
+
+ /* DSM in event <evt> on NPort <nlp_DID> in state <cur_state> */
+ lpfc_printf_log(phba,
+ KERN_INFO,
+ LOG_DISCOVERY,
+ "%d:0211 DSM in event x%x on NPort x%x in state %d "
+ "Data: x%x\n",
+ phba->brd_no,
+ evt, ndlp->nlp_DID, cur_state, ndlp->nlp_flag);
+
+ /* Table is laid out as NLP_EVT_MAX_EVENT entries per state */
+ func = (uint32_t(*)(struct lpfc_hba *, struct lpfc_nodelist *, void *,
+ uint32_t))
+ lpfc_disc_action[(cur_state * NLP_EVT_MAX_EVENT) + evt];
+ rc = (func) (phba, ndlp, arg, evt);
+
+ /* DSM out state <rc> on NPort <nlp_DID> */
+ lpfc_printf_log(phba,
+ KERN_INFO,
+ LOG_DISCOVERY,
+ "%d:0212 DSM out state %d on NPort x%x Data: x%x\n",
+ phba->brd_no,
+ rc, ndlp->nlp_DID, ndlp->nlp_flag);
+
+ ndlp->nlp_disc_refcnt--;
+
+ /* Check to see if ndlp removal is deferred */
+ if ((ndlp->nlp_disc_refcnt == 0)
+ && (ndlp->nlp_flag & NLP_DELAY_REMOVE)) {
+
+ ndlp->nlp_flag &= ~NLP_DELAY_REMOVE;
+ lpfc_nlp_remove(phba, ndlp);
+ return (NLP_STE_FREED_NODE);
+ }
+ /* Action routine freed the node: do not write nlp_state */
+ if (rc == NLP_STE_FREED_NODE)
+ return (NLP_STE_FREED_NODE);
+ ndlp->nlp_state = rc;
+ return (rc);
+}
--- /dev/null
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for *
+ * Enterprise Fibre Channel Host Bus Adapters. *
+ * Refer to the README file included with this package for *
+ * driver version and adapter support. *
+ * Copyright (C) 2004 Emulex Corporation. *
+ * www.emulex.com *
+ * *
+ * This program is free software; you can redistribute it and/or *
+ * modify it under the terms of the GNU General Public License *
+ * as published by the Free Software Foundation; either version 2 *
+ * of the License, or (at your option) any later version. *
+ * *
+ * This program is distributed in the hope that it will be useful, *
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of *
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
+ * GNU General Public License for more details, a copy of which *
+ * can be found in the file COPYING included with this package. *
+ *******************************************************************/
+
+/*
+ * $Id: lpfc_scsi.h 1.68 2004/11/10 11:40:43EST sf_support Exp $
+ */
+
+#ifndef _H_LPFC_SCSI
+#define _H_LPFC_SCSI
+
+#include "lpfc_disc.h"
+#include "lpfc_mem.h"
+#include "lpfc_sli.h"
+
+struct lpfc_hba;
+
+
+/* Per-SCSI-target state kept by the driver; pnode links back to the
+ * FC nodelist entry for the remote port.
+ */
+struct lpfc_target {
+ struct lpfc_nodelist *pnode; /* Pointer to the node structure. */
+ uint16_t scsi_id; /* SCSI target id */
+ /* Counters -- names suggest queued / completed / errored commands
+  * and slave (device) attaches; confirm at the update sites. */
+ uint32_t qcmdcnt;
+ uint32_t iodonecnt;
+ uint32_t errorcnt;
+ uint32_t slavecnt;
+#if defined(FC_TRANS_VER1) || defined(FC_TRANS_265_BLKPATCH)
+ uint16_t blocked; /* target temporarily blocked from I/O */
+#endif
+#ifdef FC_TRANS_VER1
+ struct scsi_target *starget; /* Pointer to midlayer target
+ structure. */
+#endif
+#if defined(FC_TRANS_265_BLKPATCH)
+ struct timer_list dev_loss_timer; /* device-loss timeout timer */
+#endif
+};
+
+/* Per-command tracking structure tying a midlayer scsi_cmnd to the
+ * driver's DMA resources and the IOCB used to issue it.
+ */
+struct lpfc_scsi_buf {
+ struct scsi_cmnd *pCmd; /* midlayer command being serviced */
+ struct lpfc_hba *scsi_hba; /* owning HBA */
+ struct lpfc_target *target; /* destination target */
+
+ uint32_t timeout; /* command timeout value */
+
+ uint16_t status; /* From IOCB Word 7- ulpStatus */
+ uint32_t result; /* From IOCB Word 4. */
+
+ uint32_t seg_cnt; /* Number of scatter-gather segments returned by
+ * dma_map_sg. The driver needs this for calls
+ * to dma_unmap_sg. */
+ dma_addr_t nonsg_phys; /* Non scatter-gather physical address. */
+
+ /* dma_ext has both virt, phys to dma-able buffer
+ * which contains fcp_cmd, fcp_rsp and scatter gather list for up to
+ * 68 (LPFC_SCSI_BPL_SIZE) BDE entries,
+ * xfer length, cdb, data direction....
+ */
+ struct lpfc_dmabuf dma_ext;
+ struct fcp_cmnd *fcp_cmnd; /* points into dma_ext */
+ struct fcp_rsp *fcp_rsp; /* points into dma_ext */
+ struct ulp_bde64 *fcp_bpl; /* buffer pointer list in dma_ext */
+
+ /* cur_iocbq has phys of the dma-able buffer.
+ * Iotag is in here
+ */
+ struct lpfc_iocbq cur_iocbq;
+};
+
+#define LPFC_SCSI_INITIAL_BPL_SIZE 4 /* Number of scsi buf BDEs in fcp_bpl */
+
+/* Bytes in the per-command DMA extension (fcp_cmnd + fcp_rsp + BPL);
+ * see struct lpfc_scsi_buf::dma_ext above. */
+#define LPFC_SCSI_DMA_EXT_SIZE 264
+/* Bytes in a buffer pointer list buffer -- used as the DMA sync length
+ * for received ELS payloads. */
+#define LPFC_BPL_SIZE 1024
+
+/* MDAC (RAID controller) direct-command opcode -- TODO confirm usage */
+#define MDAC_DIRECT_CMD 0x22
+
+#endif /* _H_LPFC_SCSI */
--- /dev/null
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for *
+ * Enterprise Fibre Channel Host Bus Adapters. *
+ * Refer to the README file included with this package for *
+ * driver version and adapter support. *
+ * Copyright (C) 2004 Emulex Corporation. *
+ * www.emulex.com *
+ * *
+ * This program is free software; you can redistribute it and/or *
+ * modify it under the terms of the GNU General Public License *
+ * as published by the Free Software Foundation; either version 2 *
+ * of the License, or (at your option) any later version. *
+ * *
+ * This program is distributed in the hope that it will be useful, *
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of *
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
+ * GNU General Public License for more details, a copy of which *
+ * can be found in the file COPYING included with this package. *
+ *******************************************************************/
+
+/*
+ * $Id: lpfc_sli.c 1.178 2004/11/23 16:57:11EST sf_support Exp $
+ */
+
+#include <linux/version.h>
+#include <linux/blkdev.h>
+#include <linux/dma-mapping.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+
+#include "lpfc_sli.h"
+#include "lpfc_disc.h"
+#include "lpfc_scsi.h"
+#include "lpfc.h"
+#include "lpfc_crtn.h"
+#include "lpfc_hw.h"
+#include "lpfc_logmsg.h"
+#include "lpfc_mem.h"
+#include "lpfc_compat.h"
+
+/* Whether to reset the HBA during SLI initialization; presumably a
+ * compile-time debug knob (no runtime override visible here -- confirm).
+ */
+static int lpfc_sli_reset_on_init = 1;
+
+/*
+ * Define macro to log: Mailbox command x%x cannot issue Data
+ * This allows multiple uses of lpfc_msgBlk0311
+ * w/o perturbing log msg utility.
+ *
+ * NOTE(review): the expansion already ends with a ';', so call sites
+ * must not depend on supplying their own -- consider a
+ * do { ... } while (0) wrapper if call sites ever change.
+*/
+#define LOG_MBOX_CANNOT_ISSUE_DATA( phba, mb, psli, flag) \
+ lpfc_printf_log(phba, \
+ KERN_INFO, \
+ LOG_MBOX | LOG_SLI, \
+ "%d:0311 Mailbox command x%x cannot issue " \
+ "Data: x%x x%x x%x\n", \
+ phba->brd_no, \
+ mb->mbxCommand, \
+ phba->hba_state, \
+ psli->sliinit.sli_flag, \
+ flag);
+
+
+/* This will save a huge switch to determine if the IOCB cmd
+ * is unsolicited or solicited.
+ */
+#define LPFC_UNKNOWN_IOCB 0
+#define LPFC_UNSOL_IOCB 1
+#define LPFC_SOL_IOCB 2
+#define LPFC_ABORT_IOCB 3
+/* Classification table indexed by IOCB command code (ulpCommand). */
+static uint8_t lpfc_sli_iocb_cmd_type[CMD_MAX_IOCB_CMD] = {
+ LPFC_UNKNOWN_IOCB, /* 0x00 */
+ LPFC_UNSOL_IOCB, /* CMD_RCV_SEQUENCE_CX 0x01 */
+ LPFC_SOL_IOCB, /* CMD_XMIT_SEQUENCE_CR 0x02 */
+ LPFC_SOL_IOCB, /* CMD_XMIT_SEQUENCE_CX 0x03 */
+ LPFC_SOL_IOCB, /* CMD_XMIT_BCAST_CN 0x04 */
+ LPFC_SOL_IOCB, /* CMD_XMIT_BCAST_CX 0x05 */
+ LPFC_UNKNOWN_IOCB, /* CMD_QUE_RING_BUF_CN 0x06 */
+ LPFC_UNKNOWN_IOCB, /* CMD_QUE_XRI_BUF_CX 0x07 */
+ LPFC_UNKNOWN_IOCB, /* CMD_IOCB_CONTINUE_CN 0x08 */
+ LPFC_UNKNOWN_IOCB, /* CMD_RET_XRI_BUF_CX 0x09 */
+ LPFC_SOL_IOCB, /* CMD_ELS_REQUEST_CR 0x0A */
+ LPFC_SOL_IOCB, /* CMD_ELS_REQUEST_CX 0x0B */
+ LPFC_UNKNOWN_IOCB, /* 0x0C */
+ LPFC_UNSOL_IOCB, /* CMD_RCV_ELS_REQ_CX 0x0D */
+ LPFC_ABORT_IOCB, /* CMD_ABORT_XRI_CN 0x0E */
+ LPFC_ABORT_IOCB, /* CMD_ABORT_XRI_CX 0x0F */
+ LPFC_ABORT_IOCB, /* CMD_CLOSE_XRI_CR 0x10 */
+ LPFC_ABORT_IOCB, /* CMD_CLOSE_XRI_CX 0x11 */
+ LPFC_SOL_IOCB, /* CMD_CREATE_XRI_CR 0x12 */
+ LPFC_SOL_IOCB, /* CMD_CREATE_XRI_CX 0x13 */
+ LPFC_SOL_IOCB, /* CMD_GET_RPI_CN 0x14 */
+ LPFC_SOL_IOCB, /* CMD_XMIT_ELS_RSP_CX 0x15 */
+ LPFC_SOL_IOCB, /* CMD_GET_RPI_CR 0x16 */
+ LPFC_ABORT_IOCB, /* CMD_XRI_ABORTED_CX 0x17 */
+ LPFC_SOL_IOCB, /* CMD_FCP_IWRITE_CR 0x18 */
+ LPFC_SOL_IOCB, /* CMD_FCP_IWRITE_CX 0x19 */
+ LPFC_SOL_IOCB, /* CMD_FCP_IREAD_CR 0x1A */
+ LPFC_SOL_IOCB, /* CMD_FCP_IREAD_CX 0x1B */
+ LPFC_SOL_IOCB, /* CMD_FCP_ICMND_CR 0x1C */
+ LPFC_SOL_IOCB, /* CMD_FCP_ICMND_CX 0x1D */
+ LPFC_UNKNOWN_IOCB, /* 0x1E */
+ LPFC_SOL_IOCB, /* CMD_FCP_TSEND_CX 0x1F */
+ LPFC_SOL_IOCB, /* CMD_ADAPTER_MSG 0x20 */
+ LPFC_SOL_IOCB, /* CMD_FCP_TRECEIVE_CX 0x21 */
+ LPFC_SOL_IOCB, /* CMD_ADAPTER_DUMP 0x22 */
+ LPFC_SOL_IOCB, /* CMD_FCP_TRSP_CX 0x23 */
+ /* 0x24 - 0x2F */
+ LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB,
+ LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB,
+ LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB,
+ LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB,
+ /* 0x30 */
+ LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB,
+ LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB,
+ LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB,
+ LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB,
+ LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB,
+ LPFC_UNKNOWN_IOCB,
+ /* 0x40 */
+ LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB,
+ LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB,
+ LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB,
+ LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB,
+ LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB,
+ LPFC_UNKNOWN_IOCB,
+ /* 0x50 */
+ LPFC_SOL_IOCB,
+ LPFC_SOL_IOCB,
+ LPFC_UNKNOWN_IOCB,
+ LPFC_SOL_IOCB,
+ LPFC_SOL_IOCB,
+ LPFC_UNSOL_IOCB,
+ LPFC_UNSOL_IOCB,
+ LPFC_SOL_IOCB,
+ LPFC_SOL_IOCB,
+
+ LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB,
+ LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB,
+ LPFC_UNKNOWN_IOCB,
+ /* 0x60 */
+ LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB,
+ LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB,
+ LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB,
+ LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB,
+ LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB,
+ LPFC_UNKNOWN_IOCB,
+ /* 0x70 */
+ LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB,
+ LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB,
+ LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB,
+ LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB,
+ LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB,
+ LPFC_UNKNOWN_IOCB,
+ /* 0x80 */
+ LPFC_UNKNOWN_IOCB,
+ LPFC_UNSOL_IOCB, /* CMD_RCV_SEQUENCE64_CX 0x81 */
+ LPFC_SOL_IOCB, /* CMD_XMIT_SEQUENCE64_CR 0x82 */
+ LPFC_SOL_IOCB, /* CMD_XMIT_SEQUENCE64_CX 0x83 */
+ LPFC_SOL_IOCB, /* CMD_XMIT_BCAST64_CN 0x84 */
+ LPFC_SOL_IOCB, /* CMD_XMIT_BCAST64_CX 0x85 */
+ LPFC_UNKNOWN_IOCB, /* CMD_QUE_RING_BUF64_CN 0x86 */
+ LPFC_UNKNOWN_IOCB, /* CMD_QUE_XRI_BUF64_CX 0x87 */
+ LPFC_UNKNOWN_IOCB, /* CMD_IOCB_CONTINUE64_CN 0x88 */
+ LPFC_UNKNOWN_IOCB, /* CMD_RET_XRI_BUF64_CX 0x89 */
+ LPFC_SOL_IOCB, /* CMD_ELS_REQUEST64_CR 0x8A */
+ LPFC_SOL_IOCB, /* CMD_ELS_REQUEST64_CX 0x8B */
+ LPFC_ABORT_IOCB, /* CMD_ABORT_MXRI64_CN 0x8C */
+ LPFC_UNSOL_IOCB, /* CMD_RCV_ELS_REQ64_CX 0x8D */
+ /* 0x8E - 0x94 */
+ LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB,
+ LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB,
+ LPFC_UNKNOWN_IOCB,
+ LPFC_SOL_IOCB, /* CMD_XMIT_ELS_RSP64_CX 0x95 */
+ LPFC_UNKNOWN_IOCB, /* 0x96 */
+ LPFC_UNKNOWN_IOCB, /* 0x97 */
+ LPFC_SOL_IOCB, /* CMD_FCP_IWRITE64_CR 0x98 */
+ LPFC_SOL_IOCB, /* CMD_FCP_IWRITE64_CX 0x99 */
+ LPFC_SOL_IOCB, /* CMD_FCP_IREAD64_CR 0x9A */
+ LPFC_SOL_IOCB, /* CMD_FCP_IREAD64_CX 0x9B */
+ LPFC_SOL_IOCB, /* CMD_FCP_ICMND64_CR 0x9C */
+ LPFC_SOL_IOCB, /* CMD_FCP_ICMND64_CX 0x9D */
+ LPFC_UNKNOWN_IOCB, /* 0x9E */
+ LPFC_SOL_IOCB, /* CMD_FCP_TSEND64_CX 0x9F */
+ LPFC_UNKNOWN_IOCB, /* 0xA0 */
+ LPFC_SOL_IOCB, /* CMD_FCP_TRECEIVE64_CX 0xA1 */
+ LPFC_UNKNOWN_IOCB, /* 0xA2 */
+ LPFC_SOL_IOCB, /* CMD_FCP_TRSP64_CX 0xA3 */
+ /* 0xA4 - 0xC1 */
+ LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB,
+ LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB,
+ LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB,
+ LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB,
+ LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB,
+ LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB,
+ LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB,
+ LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB,
+ LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB,
+ LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB,
+ LPFC_SOL_IOCB, /* CMD_GEN_REQUEST64_CR 0xC2 */
+ LPFC_SOL_IOCB, /* CMD_GEN_REQUEST64_CX 0xC3 */
+ /* 0xC4 - 0xCF */
+ LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB,
+ LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB,
+ LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB,
+ LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB,
+
+ LPFC_SOL_IOCB,
+ LPFC_SOL_IOCB, /* CMD_SENDTEXT_CR 0xD1 */
+ LPFC_SOL_IOCB, /* CMD_SENDTEXT_CX 0xD2 */
+ LPFC_SOL_IOCB, /* CMD_RCV_LOGIN 0xD3 */
+ LPFC_SOL_IOCB, /* CMD_ACCEPT_LOGIN 0xD4 */
+ LPFC_SOL_IOCB, /* CMD_REJECT_LOGIN 0xD5 */
+ LPFC_UNSOL_IOCB,
+ /* 0xD7 - 0xDF */
+ LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB,
+ LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB,
+ LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB,
+ /* 0xE0 */
+ LPFC_UNSOL_IOCB,
+ LPFC_SOL_IOCB,
+ LPFC_SOL_IOCB,
+ LPFC_SOL_IOCB,
+ LPFC_SOL_IOCB,
+ LPFC_UNSOL_IOCB
+};
+
+
+/*
+ * Issue a CONFIG_RING mailbox command for every SLI ring.
+ * Returns 0 on success, -ENOMEM if no mailbox buffer could be
+ * allocated, -ENXIO if the adapter rejects a CONFIG_RING; on any
+ * failure hba_state is set to LPFC_HBA_ERROR.
+ */
+static int
+lpfc_sli_ring_map(struct lpfc_hba * phba)
+{
+        struct lpfc_sli *psli = &phba->sli;
+        LPFC_MBOXQ_t *pmb;
+        MAILBOX_t *pmbox;
+        int i;
+
+        /* One mailbox buffer is reused for all the CONFIG_RING commands */
+        pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC);
+        if (pmb == NULL) {
+                phba->hba_state = LPFC_HBA_ERROR;
+                return -ENOMEM;
+        }
+        pmbox = &pmb->mb;
+
+        for (i = 0; i < psli->sliinit.num_rings; i++) {
+                phba->hba_state = LPFC_INIT_MBX_CMDS;
+                lpfc_config_ring(phba, i, pmb);
+                if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) == MBX_SUCCESS)
+                        continue;
+
+                /* Adapter failed to init, mbxCmd <cmd> CFG_RING,
+                   mbxStatus <status>, ring <num> */
+                lpfc_printf_log(phba,
+                                KERN_ERR,
+                                LOG_INIT,
+                                "%d:0446 Adapter failed to init, "
+                                "mbxCmd x%x CFG_RING, mbxStatus x%x, "
+                                "ring %d\n",
+                                phba->brd_no,
+                                pmbox->mbxCommand,
+                                pmbox->mbxStatus,
+                                i);
+                phba->hba_state = LPFC_HBA_ERROR;
+                mempool_free(pmb, phba->mbox_mem_pool);
+                return -ENXIO;
+        }
+
+        mempool_free(pmb, phba->mbox_mem_pool);
+        return 0;
+}
+
+/*
+ * Queue an issued IOCB on the ring's txcmplq (commands awaiting
+ * completion) and, when the ring has a fast_lookup table, index the
+ * IOCB by its iotag for O(1) completion matching. Always returns 0.
+ */
+static int
+lpfc_sli_ringtxcmpl_put(struct lpfc_hba * phba,
+ struct lpfc_sli_ring * pring, struct lpfc_iocbq * piocb)
+{
+ uint16_t iotag;
+
+ list_add_tail(&piocb->list, &pring->txcmplq);
+ pring->txcmplq_cnt++;
+
+ if (pring->fast_lookup) {
+ /* Setup fast lookup based on iotag for completion */
+ iotag = piocb->iocb.ulpIoTag;
+ if (iotag && (iotag
+ < phba->sli.sliinit.ringinit[pring->ringno].fast_iotag))
+ *(pring->fast_lookup + iotag) = piocb;
+ else {
+
+ /* Cmd ring <ringno> put: iotag <iotag> greater than
+ configured max <fast_iotag> wd0 <icmd> */
+ lpfc_printf_log(phba,
+ KERN_ERR,
+ LOG_SLI,
+ "%d:0316 Cmd ring %d put: iotag x%x "
+ "greater then configured max x%x "
+ "wd0 x%x\n",
+ phba->brd_no,
+ pring->ringno, iotag, phba->sli.sliinit
+ .ringinit[pring->ringno].fast_iotag,
+ *(((uint32_t *)(&piocb->iocb)) + 7));
+ }
+ }
+ return (0);
+}
+
+/*
+ * Dequeue and return the first IOCB waiting on the ring's txq, or NULL
+ * when the queue is empty. txq_cnt is decremented for a dequeued entry.
+ *
+ * Note: the open-coded empty check compares the first link against the
+ * list head itself, as the original did; the unused local 'dlp' (a dead
+ * store) has been removed.
+ */
+static struct lpfc_iocbq *
+lpfc_sli_ringtx_get(struct lpfc_hba * phba, struct lpfc_sli_ring * pring)
+{
+        struct lpfc_iocbq *cmd_iocb = NULL;
+        struct lpfc_iocbq *next_iocb;
+
+        next_iocb = (struct lpfc_iocbq *) pring->txq.next;
+        if (next_iocb != (struct lpfc_iocbq *) & pring->txq) {
+                /* If the first ptr is not equal to the list header,
+                 * deque the IOCBQ_t and return it.
+                 */
+                cmd_iocb = next_iocb;
+                list_del(&cmd_iocb->list);
+                pring->txq_cnt--;
+        }
+        return (cmd_iocb);
+}
+
+/*
+ * Return a pointer to the next free command IOCB slot on the ring, or
+ * NULL if the ring is full. On a corrupt port get-index the board is
+ * marked LPFC_HBA_ERROR and error-attention handling is invoked.
+ */
+static IOCB_t *
+lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
+{
+ /* Port Group Pointer block in host SLIM holds the port's get index */
+ MAILBOX_t *mbox = (MAILBOX_t *)phba->sli.MBhostaddr;
+ PGP *pgp = (PGP *)&mbox->us.s2.port[pring->ringno];
+ uint32_t max_cmd_idx =
+ phba->sli.sliinit.ringinit[pring->ringno].numCiocb;
+ IOCB_t *iocb = NULL;
+
+ /* Advance next_cmdidx (wrapping at numCiocb) only when it still
+  * equals cmdidx -- note the && short-circuit: the increment happens
+  * only in that case.
+  */
+ if((pring->next_cmdidx == pring->cmdidx) &&
+ (++pring->next_cmdidx >= max_cmd_idx))
+ pring->next_cmdidx = 0;
+
+ /* Ring looks full: refresh our cached copy of the port's get index */
+ if (unlikely(pring->local_getidx == pring->next_cmdidx)) {
+
+ pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);
+
+ /* A get index beyond the ring size means the port state is
+  * corrupt: declare a board error.
+  */
+ if (unlikely(pring->local_getidx >= max_cmd_idx)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "%d:0315 Ring %d issue: portCmdGet %d "
+ "is bigger then cmd ring %d\n",
+ phba->brd_no, pring->ringno,
+ pring->local_getidx, max_cmd_idx);
+
+ phba->hba_state = LPFC_HBA_ERROR;
+ lpfc_handle_eratt(phba, HS_FFER3);
+
+ return NULL;
+ }
+
+ /* Still full after the refresh */
+ if (pring->local_getidx == pring->next_cmdidx)
+ return NULL;
+ }
+
+ iocb = IOCB_ENTRY(pring->cmdringaddr, pring->cmdidx);
+
+ return iocb;
+}
+
+/*
+ * Copy nextiocb into the ring slot 'iocb', queue it for completion
+ * tracking (or free it when no completion handler is set), and advance
+ * the ring's command index, notifying the HBA through SLIM.
+ */
+static void
+lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+ IOCB_t *iocb, struct lpfc_iocbq *nextiocb)
+{
+ struct lpfc_sli *psli = &phba->sli;
+ int ringno = pring->ringno;
+
+ /*
+ * Allocate and set up an iotag.
+ * NOTE(review): the tag is always drawn from the FCP ring
+ * (psli->fcp_ring) even when submitting on a different ring --
+ * confirm this is intended rather than using 'pring'.
+ */
+ nextiocb->iocb.ulpIoTag =
+ lpfc_sli_next_iotag(phba, &psli->ring[psli->fcp_ring]);
+
+ /*
+ * Issue iocb command to adapter
+ */
+ lpfc_sli_pcimem_bcopy((uint32_t *)&nextiocb->iocb,
+ (uint32_t *)(iocb), sizeof (IOCB_t));
+ /* Order the slot copy before the index update / doorbell below */
+ wmb();
+ psli->slistat.iocbCmd[ringno]++;
+
+ /*
+ * If there is no completion routine to call, we can release the
+ * IOCB buffer back right now. For IOCBs, like QUE_RING_BUF,
+ * that have no rsp ring completion, iocb_cmpl MUST be NULL.
+ */
+ if (nextiocb->iocb_cmpl)
+ lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb);
+ else
+ mempool_free(nextiocb, phba->iocb_mem_pool);
+
+ /*
+ * Let the HBA know what IOCB slot will be the next one the
+ * driver will put a command into.
+ */
+ pring->cmdidx = pring->next_cmdidx;
+ writeb(pring->cmdidx,
+ (u8 *)phba->MBslimaddr + (SLIMOFF + (ringno * 2)) * 4);
+
+ return;
+}
+
+static void
+lpfc_sli_update_full_ring(struct lpfc_hba * phba,
+ struct lpfc_sli_ring *pring)
+{
+ int ringno = pring->ringno;
+
+ pring->flag |= LPFC_CALL_RING_AVAILABLE;
+
+ wmb();
+
+ /*
+ * Set ring 'ringno' to SET R0CE_REQ in Chip Att register.
+ * The HBA will tell us when an IOCB entry is available.
+ */
+ writel((CA_R0ATT|CA_R0CE_REQ) << (ringno*4), phba->CAregaddr);
+ readl(phba->CAregaddr); /* flush */
+
+ phba->sli.slistat.iocbCmdFull[ringno]++;
+}
+
+static void
+lpfc_sli_update_ring(struct lpfc_hba * phba,
+ struct lpfc_sli_ring *pring)
+{
+ int ringno = pring->ringno;
+
+ /*
+ * Tell the HBA that there is work to do in this ring.
+ */
+ wmb();
+ writel(CA_R0ATT << (ringno * 4), phba->CAregaddr);
+ readl(phba->CAregaddr); /* flush */
+}
+
static void
lpfc_sli_resume_iocb(struct lpfc_hba * phba, struct lpfc_sli_ring * pring)
{
	struct lpfc_sli *psli = &phba->sli;
	IOCB_t *iocb;
	struct lpfc_iocbq *nextiocb;

	/*
	 * Drain the ring's transmit queue into the command ring.
	 *
	 * Check to see if:
	 * (a) there is anything on the txq to send
	 * (b) link is up
	 * (c) link attention events can be processed (fcp ring only)
	 * (d) IOCB processing is not blocked by the outstanding mbox command.
	 */
	if (pring->txq_cnt &&
	    (phba->hba_state > LPFC_LINK_DOWN) &&
	    (pring->ringno != psli->fcp_ring ||
	     psli->sliinit.sli_flag & LPFC_PROCESS_LA) &&
	    !(pring->flag & LPFC_STOP_IOCB_MBX)) {

		/* Submit while both a free ring slot and a queued iocb exist */
		while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
		       (nextiocb = lpfc_sli_ringtx_get(phba, pring)))
			lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);

		/*
		 * iocb != NULL means the loop stopped because the txq
		 * drained: just ring the doorbell.  iocb == NULL means the
		 * command ring is full: arm the ring-available interrupt.
		 */
		if (iocb)
			lpfc_sli_update_ring(phba, pring);
		else
			lpfc_sli_update_full_ring(phba, pring);
	}

	return;
}
+
+/* lpfc_sli_turn_on_ring is only called by lpfc_sli_handle_mb_event below */
+static void
+lpfc_sli_turn_on_ring(struct lpfc_hba * phba, int ringno)
+{
+ struct lpfc_sli *psli;
+ struct lpfc_sli_ring *pring;
+ PGP *pgp;
+
+ psli = &phba->sli;
+ pring = &psli->ring[ringno];
+ pgp = (PGP *) & (((MAILBOX_t *)psli->MBhostaddr)->us.s2.port[ringno]);
+
+ /* If the ring is active, flag it */
+ if (psli->ring[ringno].cmdringaddr) {
+ if (psli->ring[ringno].flag & LPFC_STOP_IOCB_MBX) {
+ psli->ring[ringno].flag &= ~LPFC_STOP_IOCB_MBX;
+ /*
+ * Force update of the local copy of cmdGetInx
+ */
+ pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);
+ lpfc_sli_resume_iocb(phba, pring);
+ }
+ }
+}
+
+static int
+lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
+{
+ uint8_t ret;
+
+ switch (mbxCommand) {
+ case MBX_LOAD_SM:
+ case MBX_READ_NV:
+ case MBX_WRITE_NV:
+ case MBX_RUN_BIU_DIAG:
+ case MBX_INIT_LINK:
+ case MBX_DOWN_LINK:
+ case MBX_CONFIG_LINK:
+ case MBX_CONFIG_RING:
+ case MBX_RESET_RING:
+ case MBX_READ_CONFIG:
+ case MBX_READ_RCONFIG:
+ case MBX_READ_SPARM:
+ case MBX_READ_STATUS:
+ case MBX_READ_RPI:
+ case MBX_READ_XRI:
+ case MBX_READ_REV:
+ case MBX_READ_LNK_STAT:
+ case MBX_REG_LOGIN:
+ case MBX_UNREG_LOGIN:
+ case MBX_READ_LA:
+ case MBX_CLEAR_LA:
+ case MBX_DUMP_MEMORY:
+ case MBX_DUMP_CONTEXT:
+ case MBX_RUN_DIAGS:
+ case MBX_RESTART:
+ case MBX_UPDATE_CFG:
+ case MBX_DOWN_LOAD:
+ case MBX_DEL_LD_ENTRY:
+ case MBX_RUN_PROGRAM:
+ case MBX_SET_MASK:
+ case MBX_SET_SLIM:
+ case MBX_UNREG_D_ID:
+ case MBX_CONFIG_FARP:
+ case MBX_LOAD_AREA:
+ case MBX_RUN_BIU_DIAG64:
+ case MBX_CONFIG_PORT:
+ case MBX_READ_SPARM64:
+ case MBX_READ_RPI64:
+ case MBX_REG_LOGIN64:
+ case MBX_READ_LA64:
+ case MBX_FLASH_WR_ULA:
+ case MBX_SET_DEBUG:
+ case MBX_LOAD_EXP_ROM:
+ ret = mbxCommand;
+ break;
+ default:
+ ret = MBX_SHUTDOWN;
+ break;
+ }
+ return (ret);
+}
/*
 * Process a mailbox completion event: reclaim the active mailbox command,
 * dispatch its completion, then kick off any queued mailbox commands and
 * re-enable iocb processing on the rings.  Returns 0 on success, 1 when
 * called outside SLI-2 mode or on a stray mailbox interrupt.
 */
static int
lpfc_sli_handle_mb_event(struct lpfc_hba * phba)
{
	MAILBOX_t *mbox;
	MAILBOX_t *pmbox;
	LPFC_MBOXQ_t *pmb;
	struct lpfc_dmabuf *mp;
	struct lpfc_sli *psli;
	int i;
	unsigned long iflag;
	uint32_t process_next;


	psli = &phba->sli;
	/* We should only get here if we are in SLI2 mode */
	if (!(psli->sliinit.sli_flag & LPFC_SLI2_ACTIVE)) {
		return (1);
	}

	spin_lock_irqsave(phba->host->host_lock, iflag);

	psli->slistat.mboxEvent++;

	/* Get a Mailbox buffer to setup mailbox commands for callback */
	if ((pmb = psli->mbox_active)) {
		pmbox = &pmb->mb;
		mbox = (MAILBOX_t *) psli->MBhostaddr;

		/* First check out the status word (word 0 holds mbxOwner) */
		lpfc_sli_pcimem_bcopy((uint32_t *) mbox, (uint32_t *) pmbox,
				      sizeof (uint32_t));

		/* Sanity check to ensure the host owns the mailbox */
		if (pmbox->mbxOwner != OWN_HOST) {
			/* Lets try for a while: poll word 0 until the HBA
			 * hands the mailbox back to the host */
			for (i = 0; i < 10240; i++) {
				/* First copy command data */
				lpfc_sli_pcimem_bcopy((uint32_t *) mbox,
						      (uint32_t *) pmbox,
						      sizeof (uint32_t));
				if (pmbox->mbxOwner == OWN_HOST)
					goto mbout;
			}
			/* Stray Mailbox Interrupt, mbxCommand <cmd> mbxStatus
			   <status> */
			lpfc_printf_log(phba,
					KERN_ERR,
					LOG_MBOX | LOG_SLI,
					"%d:0304 Stray Mailbox Interrupt "
					"mbxCommand x%x mbxStatus x%x\n",
					phba->brd_no,
					pmbox->mbxCommand,
					pmbox->mbxStatus);

			psli->sliinit.sli_flag |= LPFC_SLI_MBOX_ACTIVE;
			spin_unlock_irqrestore(phba->host->host_lock, iflag);
			return (1);
		}

	      mbout:
		/* Mailbox completed: the completion timeout no longer applies */
		del_timer_sync(&psli->mbox_tmo);

		/*
		 * It is a fatal error if unknown mbox command completion.
		 */
		if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) ==
		    MBX_SHUTDOWN) {

			/* Unknown mailbox command compl */
			lpfc_printf_log(phba,
					KERN_ERR,
					LOG_MBOX | LOG_SLI,
					"%d:0323 Unknown Mailbox command %x Cmpl\n",
					phba->brd_no,
					pmbox->mbxCommand);
			phba->hba_state = LPFC_HBA_ERROR;
			spin_unlock_irqrestore(phba->host->host_lock, iflag);
			lpfc_handle_eratt(phba, HS_FFER3);
			return (0);
		}

		psli->mbox_active = NULL;
		if (pmbox->mbxStatus) {
			psli->slistat.mboxStatErr++;
			if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) {
				/* Mbox cmd cmpl error - RETRYing: reissue the
				 * same command once resources free up */
				lpfc_printf_log(phba,
					KERN_INFO,
					LOG_MBOX | LOG_SLI,
					"%d:0305 Mbox cmd cmpl error - "
					"RETRYing Data: x%x x%x x%x x%x\n",
					phba->brd_no,
					pmbox->mbxCommand,
					pmbox->mbxStatus,
					pmbox->un.varWords[0],
					phba->hba_state);
				pmbox->mbxStatus = 0;
				pmbox->mbxOwner = OWN_HOST;
				psli->sliinit.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
				if (lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT)
				    == MBX_SUCCESS) {
					spin_unlock_irqrestore(
						       phba->host->host_lock,
						       iflag);
					return (0);
				}
			}
		}

		/* Mailbox cmd <cmd> Cmpl <cmpl> */
		lpfc_printf_log(phba,
				KERN_INFO,
				LOG_MBOX | LOG_SLI,
				"%d:0307 Mailbox cmd x%x Cmpl x%p "
				"Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x\n",
				phba->brd_no,
				pmbox->mbxCommand,
				pmb->mbox_cmpl,
				*((uint32_t *) pmbox),
				pmbox->un.varWords[0],
				pmbox->un.varWords[1],
				pmbox->un.varWords[2],
				pmbox->un.varWords[3],
				pmbox->un.varWords[4],
				pmbox->un.varWords[5],
				pmbox->un.varWords[6],
				pmbox->un.varWords[7]);

		if (pmb->mbox_cmpl) {
			/* Copy entire mbox completion over buffer */
			lpfc_sli_pcimem_bcopy((uint32_t *) mbox,
					      (uint32_t *) pmbox,
					      (sizeof (uint32_t) *
					       (MAILBOX_CMD_WSIZE)));
			/* All mbox cmpls are posted to discovery tasklet */
			lpfc_discq_post_event(phba, pmb, NULL,
					      LPFC_EVT_MBOX);
		} else {
			/* No completion routine: free any attached dma buffer
			 * and the mailbox itself right here */
			mp = (struct lpfc_dmabuf *) (pmb->context1);
			if (mp) {
				lpfc_mbuf_free(phba, mp->virt, mp->phys);
				kfree(mp);
			}
			mempool_free( pmb, phba->mbox_mem_pool);
		}
	}


	do {
		process_next = 0;	/* by default don't loop */
		psli->sliinit.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;

		/* Process next mailbox command if there is one */
		if ((pmb = lpfc_mbox_get(phba))) {
			if (lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT) ==
			    MBX_NOT_FINISHED) {
				/* Issue failed: release resources and try the
				 * next queued command */
				mp = (struct lpfc_dmabuf *) (pmb->context1);
				if (mp) {
					lpfc_mbuf_free(phba, mp->virt,
						       mp->phys);
					kfree(mp);
				}
				mempool_free( pmb, phba->mbox_mem_pool);
				process_next = 1;
				continue;	/* loop back */
			}
		} else {
			/* Turn on IOCB processing */
			for (i = 0; i < psli->sliinit.num_rings; i++) {
				lpfc_sli_turn_on_ring(phba, i);
			}

			/* Free any lpfc_dmabuf's waiting for mbox cmd cmpls */
			while (!list_empty(&phba->freebufList)) {
				struct lpfc_dmabuf *mp;

				mp = (struct lpfc_dmabuf *)
					(phba->freebufList.next);
				if (mp) {
					lpfc_mbuf_free(phba, mp->virt,
						       mp->phys);
					list_del(&mp->list);
					kfree(mp);
				}
			}
		}

	} while (process_next);

	spin_unlock_irqrestore(phba->host->host_lock, iflag);
	return (0);
}
+static int
+lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+ struct lpfc_iocbq *saveq)
+{
+ struct lpfc_sli * psli;
+ IOCB_t * irsp;
+ LPFC_RING_INIT_t * pringinit;
+ WORD5 * w5p;
+ uint32_t Rctl, Type;
+ uint32_t match, ringno, i;
+
+ psli = &phba->sli;
+ match = 0;
+ ringno = pring->ringno;
+ irsp = &(saveq->iocb);
+ if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX)
+ || (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX)) {
+ Rctl = FC_ELS_REQ;
+ Type = FC_ELS_DATA;
+ } else {
+ w5p =
+ (WORD5 *) & (saveq->iocb.un.
+ ulpWord[5]);
+ Rctl = w5p->hcsw.Rctl;
+ Type = w5p->hcsw.Type;
+ }
+ /* unSolicited Responses */
+ pringinit = &psli->sliinit.ringinit[ringno];
+ if (pringinit->prt[0].profile) {
+ /* If this ring has a profile set, just
+ send it to prt[0] */
+ /* All unsol iocbs for LPFC_ELS_RING
+ * are posted to discovery tasklet.
+ */
+ if (ringno == LPFC_ELS_RING) {
+ lpfc_discq_post_event(phba, (void *)&pringinit->prt[0],
+ (void *)saveq, LPFC_EVT_UNSOL_IOCB);
+ }
+ else {
+ (pringinit->prt[0].
+ lpfc_sli_rcv_unsol_event) (phba, pring, saveq);
+ }
+ match = 1;
+ } else {
+ /* We must search, based on rctl / type
+ for the right routine */
+ for (i = 0; i < pringinit->num_mask;
+ i++) {
+ if ((pringinit->prt[i].rctl ==
+ Rctl)
+ && (pringinit->prt[i].
+ type == Type)) {
+ /* All unsol iocbs for LPFC_ELS_RING
+ * are posted to discovery tasklet.
+ */
+ if (ringno == LPFC_ELS_RING) {
+ lpfc_discq_post_event(phba,
+ (void *)&pringinit->prt[i],
+ (void *)saveq, LPFC_EVT_UNSOL_IOCB);
+ }
+ else {
+ (pringinit->prt[i].
+ lpfc_sli_rcv_unsol_event)
+ (phba, pring, saveq);
+ }
+ match = 1;
+ break;
+ }
+ }
+ }
+ if (match == 0) {
+ /* Unexpected Rctl / Type received */
+ /* Ring <ringno> handler: unexpected
+ Rctl <Rctl> Type <Type> received */
+ lpfc_printf_log(phba,
+ KERN_WARNING,
+ LOG_SLI,
+ "%d:0313 Ring %d handler: unexpected Rctl x%x "
+ "Type x%x received \n",
+ phba->brd_no,
+ ringno,
+ Rctl,
+ Type);
+ }
+ return(1);
+}
+static struct lpfc_iocbq *
+lpfc_search_txcmpl(struct lpfc_sli_ring * pring, struct lpfc_iocbq * prspiocb)
+{
+ IOCB_t *icmd = NULL;
+ IOCB_t *irsp = NULL;
+ struct lpfc_iocbq *cmd_iocb;
+ struct lpfc_iocbq *iocb, *next_iocb;
+ uint16_t iotag;
+
+ irsp = &prspiocb->iocb;
+ iotag = irsp->ulpIoTag;
+ cmd_iocb = NULL;
+
+ /* Search through txcmpl from the begining */
+ list_for_each_entry_safe(iocb, next_iocb, &(pring->txcmplq), list) {
+ icmd = &iocb->iocb;
+ if (iotag == icmd->ulpIoTag) {
+ /* Found a match. */
+ cmd_iocb = iocb;
+ list_del(&iocb->list);
+ pring->txcmplq_cnt--;
+ break;
+ }
+ }
+
+ return (cmd_iocb);
+}
/*
 * Retrieve (and dequeue) the command iocb matching a response's iotag.
 * When the ring has a fast_lookup table and srch == 0, the iotag indexes
 * the table directly; otherwise (or when the table misses) fall back to
 * a linear search of txcmplq.  Returns NULL if no match is found.
 */
static struct lpfc_iocbq *
lpfc_sli_ringtxcmpl_get(struct lpfc_hba * phba,
			struct lpfc_sli_ring * pring,
			struct lpfc_iocbq * prspiocb, uint32_t srch)
{
	struct list_head *dlp;
	IOCB_t *irsp = NULL;
	struct lpfc_iocbq *cmd_iocb;
	struct lpfc_sli *psli;
	uint16_t iotag;


	dlp = &pring->txcmplq;

	if (pring->fast_lookup && (srch == 0)) {
		/*
		 * Use fast lookup based on iotag for completion
		 */
		psli = &phba->sli;
		irsp = &prspiocb->iocb;
		iotag = irsp->ulpIoTag;
		if (iotag < psli->sliinit.ringinit[pring->ringno].fast_iotag) {
			/* Claim the slot and clear it so the iotag cannot be
			 * completed twice through this path. */
			cmd_iocb = *(pring->fast_lookup + iotag);
			*(pring->fast_lookup + iotag) = NULL;
			if (cmd_iocb) {
				list_del(&cmd_iocb->list);
				pring->txcmplq_cnt--;
				return cmd_iocb;
			}
			/* Empty slot: fall through to the linear search. */
		} else {
			/*
			 * Rsp ring <ringno> get: iotag <iotag> greater then
			 * configured max <fast_iotag> wd0 <irsp>
			 */
			lpfc_printf_log(phba,
					KERN_ERR,
					LOG_SLI,
					"%d:0317 Rsp ring %d get: iotag x%x "
					"greater then configured max x%x "
					"wd0 x%x\n",
					phba->brd_no,
					pring->ringno, iotag,
					psli->sliinit.ringinit[pring->ringno]
					.fast_iotag,
					*(((uint32_t *) irsp) + 7));
		}
	}

	/* Slow path: scan txcmplq for the matching iotag. */
	cmd_iocb = lpfc_search_txcmpl(pring, prspiocb);

	return cmd_iocb;
}
+
/*
 * Handle a solicited iocb completion: match it to the originating command
 * on txcmplq (by iotag) and invoke/queue its completion routine.  Returns
 * 1 normally; 0 when the completed command was issued with LPFC_IO_POLL
 * (tells the polling caller its iocb finished).
 */
static int
lpfc_sli_process_sol_iocb(struct lpfc_hba * phba, struct lpfc_sli_ring * pring,
			  struct lpfc_iocbq *saveq)
{
	struct lpfc_iocbq * cmdiocbp;
	int ringno, rc;
	unsigned long iflag;

	rc = 1;
	ringno = pring->ringno;
	/* Solicited Responses */
	/* Based on the iotag field, get the cmd IOCB
	   from the txcmplq */
	spin_lock_irqsave(phba->host->host_lock, iflag);
	if ((cmdiocbp =
	     lpfc_sli_ringtxcmpl_get(phba, pring, saveq,
				     0))) {
		/* Call the specified completion
		   routine */
		if (cmdiocbp->iocb_cmpl) {
			/* All iocb cmpls for LPFC_ELS_RING
			 * are posted to discovery tasklet.
			 */
			if (ringno == LPFC_ELS_RING) {
				lpfc_discq_post_event(phba, (void *)cmdiocbp,
					(void *)saveq, LPFC_EVT_SOL_IOCB);
			}
			else {
				if (cmdiocbp->iocb_flag & LPFC_IO_POLL) {
					rc = 0;
				}

				/* Drop the lock while the completion routine
				 * runs; it may sleep or re-enter the SLI
				 * layer.  Reacquire before returning. */
				spin_unlock_irqrestore(phba->host->host_lock,
						       iflag);
				(cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
				spin_lock_irqsave(phba->host->host_lock, iflag);
			}
		} else {
			/* No completion routine: just release the command. */
			mempool_free( cmdiocbp, phba->iocb_mem_pool);
		}
	} else {
		/* Could not find the initiating command
		 * based of the response iotag.
		 * This is expected on ELS ring because of lpfc_els_abort().
		 */
		if (ringno != LPFC_ELS_RING) {
			/* Ring <ringno> handler: unexpected
			   completion IoTag <IoTag> */
			lpfc_printf_log(phba,
				KERN_WARNING,
				LOG_SLI,
				"%d:0322 Ring %d handler: unexpected "
				"completion IoTag x%x Data: x%x x%x x%x x%x\n",
				phba->brd_no,
				ringno,
				saveq->iocb.ulpIoTag,
				saveq->iocb.ulpStatus,
				saveq->iocb.un.ulpWord[4],
				saveq->iocb.ulpCommand,
				saveq->iocb.ulpContext);
		}
	}
	spin_unlock_irqrestore(phba->host->host_lock, iflag);
	return(rc);
}
/*
 * Main response-ring service routine.  Copies response iocbs out of the
 * SLI-2 response ring, chains continuation entries until ulpLe is set,
 * then dispatches each completed command as solicited, unsolicited,
 * abort, or unknown.  Also acknowledges ring attention conditions and
 * restarts command submission if the ring had gone full.  Returns the rc
 * from the last solicited completion processed (1 by default).
 */
static int
lpfc_sli_handle_ring_event(struct lpfc_hba * phba,
			   struct lpfc_sli_ring * pring, uint32_t mask)
{
	struct lpfc_sli * psli;
	IOCB_t * entry;
	IOCB_t * irsp;
	struct lpfc_iocbq * rspiocbp, *next_iocb;
	struct lpfc_iocbq * cmdiocbp;
	struct lpfc_iocbq * saveq;
	HGP * hgp;
	PGP * pgp;
	MAILBOX_t * mbox;
	uint32_t status, free_saveq;
	uint32_t portRspPut, portRspMax;
	int ringno, loopcnt, rc;
	uint8_t type;
	unsigned long iflag;
	void *to_slim;

	psli = &phba->sli;
	ringno = pring->ringno;
	irsp = NULL;
	rc = 1;

	spin_lock_irqsave(phba->host->host_lock, iflag);
	psli->slistat.iocbEvent[ringno]++;

	/* At this point we assume SLI-2 */
	mbox = (MAILBOX_t *) psli->MBhostaddr;
	pgp = (PGP *) & mbox->us.s2.port[ringno];
	hgp = (HGP *) & mbox->us.s2.host[ringno];

	/* portRspMax is the number of rsp ring entries for this specific
	   ring. */
	portRspMax = psli->sliinit.ringinit[ringno].numRiocb;

	rspiocbp = NULL;
	loopcnt = 0;

	/* Gather iocb entries off response ring.
	 * rspidx is the IOCB index of the next IOCB that the driver
	 * is going to process.
	 */
	entry = IOCB_ENTRY(pring->rspringaddr, pring->rspidx);
	portRspPut = le32_to_cpu(pgp->rspPutInx);

	if (portRspPut >= portRspMax) {

		/* Ring <ringno> handler: portRspPut <portRspPut> is bigger then
		   rsp ring <portRspMax> */
		lpfc_printf_log(phba,
				KERN_ERR,
				LOG_SLI,
				"%d:0312 Ring %d handler: portRspPut %d "
				"is bigger then rsp ring %d\n",
				phba->brd_no,
				ringno, portRspPut, portRspMax);
		/*
		 * Treat it as adapter hardware error.
		 */
		phba->hba_state = LPFC_HBA_ERROR;
		spin_unlock_irqrestore(phba->host->host_lock, iflag);
		lpfc_handle_eratt(phba, HS_FFER3);
		return (1);
	}

	/* Ensure ring entry reads are not speculated past the index read. */
	rmb();

	/* Get the next available response iocb.
	 * rspidx is the IOCB index of the next IOCB that the driver
	 * is going to process.
	 */
	while (pring->rspidx != portRspPut) {
		/* get an iocb buffer to copy entry into */
		if ((rspiocbp = mempool_alloc(phba->iocb_mem_pool,
					      GFP_ATOMIC)) == 0) {
			break;
		}

		lpfc_sli_pcimem_bcopy((uint32_t *) entry,
				      (uint32_t *) & rspiocbp->iocb,
				      sizeof (IOCB_t));
		irsp = &rspiocbp->iocb;

		/* bump iocb available response index (wrap at ring size) */
		if (++pring->rspidx >= portRspMax) {
			pring->rspidx = 0;
		}

		/* Let the HBA know what IOCB slot will be the next one the
		 * driver will read a response from.
		 */
		to_slim = (uint8_t *) phba->MBslimaddr +
			(SLIMOFF + (ringno * 2) + 1) * 4;
		writeb( pring->rspidx, to_slim);

		/* chain all iocb entries until LE is set */
		if (list_empty(&(pring->iocb_continueq))) {
			list_add(&rspiocbp->list, &(pring->iocb_continueq));
		} else {
			list_add_tail(&rspiocbp->list,
				      &(pring->iocb_continueq));
		}
		pring->iocb_continueq_cnt++;

		/*
		 * When the ulpLe field is set, the entire Command has been
		 * received. Start by getting a pointer to the first iocb entry
		 * in the chain.
		 */
		if (irsp->ulpLe) {
			/*
			 * By default, the driver expects to free all resources
			 * associated with this iocb completion.
			 */
			free_saveq = 1;
			saveq = list_entry(pring->iocb_continueq.next,
					   struct lpfc_iocbq, list);
			irsp = &(saveq->iocb);
			list_del_init(&pring->iocb_continueq);
			pring->iocb_continueq_cnt = 0;

			psli->slistat.iocbRsp[ringno]++;

			if(irsp->ulpStatus) {
				/* Rsp ring <ringno> error: IOCB */
				lpfc_printf_log(phba,
					KERN_WARNING,
					LOG_SLI,
					"%d:0324 Rsp Ring %d error: IOCB Data: "
					"x%x x%x x%x x%x x%x x%x x%x x%x\n",
					phba->brd_no,
					ringno,
					irsp->un.ulpWord[0],
					irsp->un.ulpWord[1],
					irsp->un.ulpWord[2],
					irsp->un.ulpWord[3],
					irsp->un.ulpWord[4],
					irsp->un.ulpWord[5],
					*(((uint32_t *) irsp) + 6),
					*(((uint32_t *) irsp) + 7));
			}

			/* Determine if IOCB command is a solicited or
			   unsolicited event */
			type =
			    lpfc_sli_iocb_cmd_type[(irsp->
						    ulpCommand &
						    CMD_IOCB_MASK)];
			if (type == LPFC_SOL_IOCB) {
				/* Drop the lock across the dispatch; the
				 * handler takes it itself. */
				spin_unlock_irqrestore(phba->host->host_lock,
						       iflag);
				rc = lpfc_sli_process_sol_iocb(phba, pring,
					saveq);
				spin_lock_irqsave(phba->host->host_lock, iflag);
				/*
				 * If this solicited completion is an ELS
				 * command, don't free the resources now because
				 * the discovery tasklet does later.
				 */
				if (pring->ringno == LPFC_ELS_RING)
					free_saveq = 0;
				else
					free_saveq = 1;

			} else if (type == LPFC_UNSOL_IOCB) {
				spin_unlock_irqrestore(phba->host->host_lock,
						       iflag);
				rc = lpfc_sli_process_unsol_iocb(phba, pring,
					saveq);
				spin_lock_irqsave(phba->host->host_lock, iflag);

				/*
				 * If this unsolicited completion is an ELS
				 * command, don't free the resources now because
				 * the discovery tasklet does later.
				 */
				if (pring->ringno == LPFC_ELS_RING)
					free_saveq = 0;
				else
					free_saveq = 1;

			} else if (type == LPFC_ABORT_IOCB) {
				/* Solicited ABORT Responses */
				/* Based on the iotag field, get the cmd IOCB
				   from the txcmplq.  XRI_ABORTED_CX carries no
				   matching command iocb. */
				if ((irsp->ulpCommand != CMD_XRI_ABORTED_CX) &&
				    ((cmdiocbp =
				      lpfc_sli_ringtxcmpl_get(phba, pring,
							      saveq, 0)))) {
					/* Call the specified completion
					   routine */
					if (cmdiocbp->iocb_cmpl) {
						spin_unlock_irqrestore(
						       phba->host->host_lock,
						       iflag);
						(cmdiocbp->iocb_cmpl) (phba,
							     cmdiocbp, saveq);
						spin_lock_irqsave(
							  phba->host->host_lock,
							  iflag);
					} else {
						mempool_free(cmdiocbp,
						     phba->iocb_mem_pool);
					}
				}
			} else if (type == LPFC_UNKNOWN_IOCB) {
				if (irsp->ulpCommand == CMD_ADAPTER_MSG) {

					char adaptermsg[LPFC_MAX_ADPTMSG];

					memset(adaptermsg, 0,
					       LPFC_MAX_ADPTMSG);
					memcpy(&adaptermsg[0], (uint8_t *) irsp,
					       MAX_MSG_DATA);
					dev_warn(&((phba->pcidev)->dev),
						 "lpfc%d: %s",
						 phba->brd_no, adaptermsg);
				} else {
					/* Unknown IOCB command */
					lpfc_printf_log(phba,
						KERN_ERR,
						LOG_SLI,
						"%d:0321 Unknown IOCB command "
						"Data: x%x x%x x%x x%x\n",
						phba->brd_no,
						irsp->ulpCommand,
						irsp->ulpStatus,
						irsp->ulpIoTag,
						irsp->ulpContext);
				}
			}

			if (free_saveq) {
				/*
				 * Free up iocb buffer chain for command just
				 * processed
				 */
				if (!list_empty(&pring->iocb_continueq)) {
					list_for_each_entry_safe(rspiocbp,
						 next_iocb,
						 &pring->iocb_continueq, list) {
						list_del_init(&rspiocbp->list);
						mempool_free(rspiocbp,
							     phba->iocb_mem_pool);
					}
				}
				mempool_free( saveq, phba->iocb_mem_pool);
			}
		}

		/* Entire Command has been received */
		entry = IOCB_ENTRY(pring->rspringaddr, pring->rspidx);

		/* If the port response put pointer has not been updated, sync
		 * the pgp->rspPutInx in the MAILBOX_t and fetch the new port
		 * response put pointer.
		 */
		if (pring->rspidx == portRspPut) {
			portRspPut = le32_to_cpu(pgp->rspPutInx);
		}
	}			/* while (pring->rspidx != portRspPut) */

	if ((rspiocbp != 0) && (mask & HA_R0RE_REQ)) {
		/* At least one response entry has been freed */
		psli->slistat.iocbRspFull[ringno]++;
		/* SET RxRE_RSP in Chip Att register */
		status = ((CA_R0ATT | CA_R0RE_RSP) << (ringno * 4));
		writel(status, phba->CAregaddr);
		readl(phba->CAregaddr); /* flush */
	}
	if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
		/* Ring had been full and the HBA just told us a command slot
		 * opened: resume submission. */
		pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
		psli->slistat.iocbCmdEmpty[ringno]++;
		/*
		 * Force update of the local copy of cmdGetInx
		 */
		pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);
		lpfc_sli_resume_iocb(phba, pring);

		if ((psli->sliinit.ringinit[ringno].lpfc_sli_cmd_available))
			(psli->sliinit.ringinit[ringno].
			 lpfc_sli_cmd_available) (phba, pring);

	}

	spin_unlock_irqrestore(phba->host->host_lock, iflag);
	return (rc);
}
+
+static uint32_t
+lpfc_intr_prep(struct lpfc_hba * phba)
+{
+ uint32_t ha_copy;
+
+ /* Ignore all interrupts during initialization. */
+ if (phba->hba_state < LPFC_LINK_DOWN)
+ return (0);
+
+ /* Read host attention register to determine interrupt source */
+ ha_copy = readl(phba->HAregaddr);
+
+ /* Clear Attention Sources, except ERATT (to preserve status) & LATT
+ * (ha_copy & ~(HA_ERATT | HA_LATT));
+ */
+ writel((ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr);
+ readl(phba->HAregaddr); /* flush */
+ return (ha_copy);
+} /* lpfc_intr_prep */
+
/*
 * Top-level SLI interrupt handler.  Reads and acks the Host Attention
 * register, then dispatches error-attention, mailbox, link-attention,
 * and per-ring events in that order.  Returns 1 when the HBA was not
 * interrupting (so the caller can decline the IRQ), 0 otherwise.
 */
int
lpfc_sli_intr(struct lpfc_hba * phba)
{
	struct lpfc_sli *psli;
	struct lpfc_sli_ring *pring;
	uint32_t ha_copy, status;
	int i;

	psli = &phba->sli;
	psli->slistat.sliIntr++;

	/*
	 * Call the HBA to see if it is interrupting. If not, don't claim
	 * the interrupt
	 */
	ha_copy = lpfc_intr_prep(phba);
	if (!ha_copy) {
		return (1);
	}

	if (ha_copy & HA_ERATT) {
		/*
		 * There was a link/board error. Read the status register to
		 * retrieve the error event and process it.
		 */
		psli->slistat.errAttnEvent++;
		status = readl(phba->HSregaddr);

		/* Clear Chip error bit */
		writel(HA_ERATT, phba->HAregaddr);
		readl(phba->HAregaddr); /* flush */

		lpfc_handle_eratt(phba, status);
		return (0);
	}

	if (ha_copy & HA_MBATT) {
		/* There was a Mailbox event. */
		lpfc_sli_handle_mb_event(phba);
	}

	if (ha_copy & HA_LATT) {
		/*
		 * There was a link attention event. Provided the driver is in
		 * a state to handle link events, handle this event.
		 */
		if (psli->sliinit.sli_flag & LPFC_PROCESS_LA) {
			lpfc_handle_latt(phba);
		}
	}

	/* Process all events on each ring.  HA_RXATT/HA_RXMASK are the
	 * low nibble; ha_copy is shifted 4 bits per iteration so ring i's
	 * attention bits land in the low nibble. */
	for (i = 0; i < psli->sliinit.num_rings; i++) {
		pring = &psli->ring[i];
		if ((ha_copy & HA_RXATT)
		    || (pring->flag & LPFC_DEFERRED_RING_EVENT)) {
			if (pring->flag & LPFC_STOP_IOCB_MASK) {
				/* Ring is blocked: remember the event for
				 * later instead of servicing it now. */
				pring->flag |= LPFC_DEFERRED_RING_EVENT;
			} else {
				lpfc_sli_handle_ring_event(phba, pring,
							   (ha_copy &
							    HA_RXMASK));
				pring->flag &= ~LPFC_DEFERRED_RING_EVENT;
			}
		}
		ha_copy = (ha_copy >> 4);
	}

	return (0);
}
+
/*
 * Abort every iocb on a ring.  Everything on txq is failed immediately
 * with IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED.  For txcmplq:
 *  - LPFC_SLI_ABORT_IMED: complete each command right away (no ABTS),
 *  - otherwise: issue an ABORT/CLOSE_XRI iocb per command and let the
 *    response ring remove them when the HBA acknowledges the abort.
 * Returns the number of commands that could not be aborted.
 */
static int
lpfc_sli_abort_iocb_ring(struct lpfc_hba * phba, struct lpfc_sli_ring * pring,
			 uint32_t flag)
{
	struct lpfc_sli *psli;
	struct lpfc_iocbq *iocb, *next_iocb;
	struct lpfc_iocbq *abtsiocbp;
	IOCB_t *icmd = NULL, *cmd = NULL;
	int errcnt;
	uint16_t iotag;

	psli = &phba->sli;
	errcnt = 0;

	/* Error everything on txq and txcmplq
	 * First do the txq.
	 */
	list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
		list_del_init(&iocb->list);
		if (iocb->iocb_cmpl) {
			/* Fail the command back to its owner. */
			icmd = &iocb->iocb;
			icmd->ulpStatus = IOSTAT_LOCAL_REJECT;
			icmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
			(iocb->iocb_cmpl) (phba, iocb, iocb);
		} else {
			mempool_free( iocb, phba->iocb_mem_pool);
		}
	}

	pring->txq_cnt = 0;
	INIT_LIST_HEAD(&(pring->txq));

	/* Next issue ABTS for everything on the txcmplq */
	list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
		cmd = &iocb->iocb;

		if (flag == LPFC_SLI_ABORT_IMED) {
			/*
			 * Immediate abort of IOCB: clear fast_lookup entry,
			 * if any, dequeue and call compl
			 */
			iotag = cmd->ulpIoTag;
			if (pring->fast_lookup &&
			    iotag &&
			    (iotag <
			     psli->sliinit.ringinit[pring->ringno].fast_iotag))
				*(pring->fast_lookup + iotag) = NULL;

			list_del_init(&iocb->list);
			pring->txcmplq_cnt--;

			if (iocb->iocb_cmpl) {
				cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
				cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
				(iocb->iocb_cmpl) (phba, iocb, iocb);
			} else {
				mempool_free( iocb, phba->iocb_mem_pool);
			}
			continue;
		}

		/* issue ABTS for this IOCB based on iotag */

		if ((abtsiocbp = mempool_alloc(phba->iocb_mem_pool,
					       GFP_ATOMIC)) == 0) {
			errcnt++;
			continue;
		}
		memset(abtsiocbp, 0, sizeof (struct lpfc_iocbq));
		icmd = &abtsiocbp->iocb;

		icmd->un.acxri.abortType = ABORT_TYPE_ABTS;
		icmd->un.acxri.abortContextTag = cmd->ulpContext;
		icmd->un.acxri.abortIoTag = cmd->ulpIoTag;

		icmd->ulpLe = 1;
		icmd->ulpClass = cmd->ulpClass;
		/* ABORT_XRI sends an ABTS on the wire and needs the link up;
		 * CLOSE_XRI just closes the exchange locally. */
		if (phba->hba_state >= LPFC_LINK_UP) {
			icmd->ulpCommand = CMD_ABORT_XRI_CN;
		} else {
			icmd->ulpCommand = CMD_CLOSE_XRI_CN;

		}

		if (lpfc_sli_issue_iocb
		    (phba, pring, abtsiocbp, 0) == IOCB_ERROR) {
			mempool_free(abtsiocbp, phba->iocb_mem_pool);
			errcnt++;
			continue;
		}
		/* The rsp ring completion will remove IOCB from txcmplq when
		 * abort is read by HBA.
		 */
	}

	if (flag == LPFC_SLI_ABORT_IMED) {
		INIT_LIST_HEAD(&(pring->txcmplq));
		pring->txcmplq_cnt = 0;
	}

	return (errcnt);
}
+
/*
 * Hard-reset the HBA: write MBX_RESTART into real SLIM, toggle INITFF in
 * the Host Control register (with parity/SERR checking disabled around
 * the reset), reinitialize the driver's ring indices, free preposted ELS
 * buffers, and immediately abort everything outstanding on every ring.
 * Always returns 0.
 */
int
lpfc_sli_brdreset(struct lpfc_hba * phba)
{
	MAILBOX_t *swpmb;
	struct lpfc_sli *psli;
	struct lpfc_sli_ring *pring;
	uint16_t cfg_value, skip_post;
	volatile uint32_t word0;
	int i;
	void *to_slim;
	struct lpfc_dmabuf *mp, *next_mp;

	psli = &phba->sli;

	/* A board reset must use REAL SLIM. */
	psli->sliinit.sli_flag &= ~LPFC_SLI2_ACTIVE;

	/* Build a one-word RESTART mailbox command on the stack */
	word0 = 0;
	swpmb = (MAILBOX_t *) & word0;
	swpmb->mbxCommand = MBX_RESTART;
	swpmb->mbxHc = 1;

	to_slim = phba->MBslimaddr;
	writel(*(uint32_t *) swpmb, to_slim);
	readl(to_slim); /* flush */

	/* Only skip post after fc_ffinit is completed */
	if (phba->hba_state) {
		skip_post = 1;
		word0 = 1;	/* This is really setting up word1 */
	} else {
		skip_post = 0;
		word0 = 0;	/* This is really setting up word1 */
	}
	to_slim = (uint8_t *) phba->MBslimaddr + sizeof (uint32_t);
	writel(*(uint32_t *) swpmb, to_slim);
	readl(to_slim); /* flush */

	/* Reset HBA */
	lpfc_printf_log(phba,
			KERN_INFO,
			LOG_SLI,
			"%d:0325 Reset HBA Data: x%x x%x\n",
			phba->brd_no,
			phba->hba_state,
			psli->sliinit.sli_flag);

	/* Turn off SERR, PERR in PCI cmd register */
	phba->hba_state = LPFC_INIT_START;

	/* perform board reset */
	phba->fc_eventTag = 0;
	phba->fc_myDID = 0;
	phba->fc_prevDID = 0;

	/* Turn off parity checking and serr during the physical reset */
	pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value);
	pci_write_config_word(phba->pcidev, PCI_COMMAND,
			      (cfg_value &
			       ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));

	/* Now toggle INITFF bit in the Host Control Register */
	writel(HC_INITFF, phba->HCregaddr);
	mdelay(1);
	readl(phba->HCregaddr); /* flush */
	writel(0, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */

	/* Restore PCI cmd register */

	pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
	phba->hba_state = LPFC_INIT_START;

	/* Initialize relevant SLI info */
	for (i = 0; i < psli->sliinit.num_rings; i++) {
		pring = &psli->ring[i];
		pring->flag = 0;
		pring->rspidx = 0;
		pring->next_cmdidx = 0;
		pring->local_getidx = 0;
		pring->cmdidx = 0;
		pring->missbufcnt = 0;
	}

	/* Give the board time to settle; longer when POST will run */
	if (skip_post) {
		mdelay(100);
	} else {
		mdelay(2000);
	}

	/* Cleanup preposted buffers on the ELS ring */
	pring = &psli->ring[LPFC_ELS_RING];
	list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
		list_del(&mp->list);
		pring->postbufq_cnt--;
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
	}

	/* Immediately fail everything still outstanding on every ring */
	for (i = 0; i < psli->sliinit.num_rings; i++) {
		pring = &psli->ring[i];
		lpfc_sli_abort_iocb_ring(phba, pring, LPFC_SLI_ABORT_IMED);
	}

	return (0);
}
+
+static void
+lpfc_setup_slim_access(struct lpfc_hba *phba)
+{
+ phba->MBslimaddr = phba->slim_memmap_p;
+ phba->HAregaddr = (uint32_t *) (phba->ctrl_regs_memmap_p) +
+ HA_REG_OFFSET;
+ phba->HCregaddr = (uint32_t *) (phba->ctrl_regs_memmap_p) +
+ HC_REG_OFFSET;
+ phba->CAregaddr = (uint32_t *) (phba->ctrl_regs_memmap_p) +
+ CA_REG_OFFSET;
+ phba->HSregaddr = (uint32_t *) (phba->ctrl_regs_memmap_p) +
+ HS_REG_OFFSET;
+ return;
+}
+
/*
 * Bring the HBA from power-on/reset to a configured SLI-2 state:
 * optionally pre-reset the board, poll the Host Status register until
 * the chip reports ready (resetting once more mid-way if needed), then
 * issue CONFIG_PORT and the post-config mailbox initialization.  The
 * READ_REV/CONFIG_PORT steps each get one retry via a board reset
 * (read_rev_reset).  Returns 0 on success or a negative errno.
 */
int
lpfc_sli_hba_setup(struct lpfc_hba * phba)
{
	struct lpfc_sli *psli;
	LPFC_MBOXQ_t *pmb;
	int read_rev_reset, i, rc;
	uint32_t status;

	psli = &phba->sli;

	/* Setup SLI interface for HBA register and HBA SLIM access */
	lpfc_setup_slim_access(phba);

	/* Set board state to initialization started */
	phba->hba_state = LPFC_INIT_START;
	read_rev_reset = 0;

	/* On some platforms/OS's, the driver can't rely on the state the
	 * adapter may be in. For this reason, the driver is allowed to reset
	 * the HBA before initialization.
	 */
	if (lpfc_sli_reset_on_init) {
		phba->hba_state = 0;	/* Don't skip post */
		lpfc_sli_brdreset(phba);
		phba->hba_state = LPFC_INIT_START;

		/* Sleep for 2.5 sec */
		msleep(2500);
	}

top:
	/* Read the HBA Host Status Register */
	status = readl(phba->HSregaddr);

	/* Check status register to see what current state is */
	i = 0;
	while ((status & (HS_FFRDY | HS_MBRDY)) != (HS_FFRDY | HS_MBRDY)) {

		/* Check every 100ms for 5 retries, then every 500ms for 5, then
		 * every 2.5 sec for 5, then reset board and every 2.5 sec for
		 * 4.
		 */
		if (i++ >= 20) {
			/* Adapter failed to init, timeout, status reg
			   <status> */
			lpfc_printf_log(phba,
					KERN_ERR,
					LOG_INIT,
					"%d:0436 Adapter failed to init, "
					"timeout, status reg x%x\n",
					phba->brd_no,
					status);
			phba->hba_state = LPFC_HBA_ERROR;
			return -ETIMEDOUT;
		}

		/* Check to see if any errors occurred during init */
		if (status & HS_FFERM) {
			/* ERROR: During chipset initialization */
			/* Adapter failed to init, chipset, status reg
			   <status> */
			lpfc_printf_log(phba,
					KERN_ERR,
					LOG_INIT,
					"%d:0437 Adapter failed to init, "
					"chipset, status reg x%x\n",
					phba->brd_no,
					status);
			phba->hba_state = LPFC_HBA_ERROR;
			return -EIO;
		}

		/* Back-off schedule described above */
		if (i <= 5) {
			msleep(10);
		} else if (i <= 10) {
			msleep(500);
		} else {
			msleep(2500);
		}

		if (i == 15) {
			/* Mid-poll reset attempt before the final retries */
			phba->hba_state = 0;	/* Don't skip post */
			lpfc_sli_brdreset(phba);
			phba->hba_state = LPFC_INIT_START;
		}
		/* Read the HBA Host Status Register */
		status = readl(phba->HSregaddr);
	}

	/* Check to see if any errors occurred during init */
	if (status & HS_FFERM) {
		/* ERROR: During chipset initialization */
		/* Adapter failed to init, chipset, status reg <status> */
		lpfc_printf_log(phba,
				KERN_ERR,
				LOG_INIT,
				"%d:0438 Adapter failed to init, chipset, "
				"status reg x%x\n",
				phba->brd_no,
				status);
		phba->hba_state = LPFC_HBA_ERROR;
		return -EIO;
	}

	/* Clear all interrupt enable conditions */
	writel(0, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */

	/* setup host attn register */
	writel(0xffffffff, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */

	/* Get a Mailbox buffer to setup mailbox commands for HBA
	   initialization */
	if ((pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
						  GFP_ATOMIC)) == 0) {
		phba->hba_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	/* Call pre CONFIG_PORT mailbox command initialization. A value of 0
	 * means the call was successful. Any other nonzero value is a failure,
	 * but if ERESTART is returned, the driver may reset the HBA and try
	 * again.
	 */
	if ((rc = lpfc_config_port_prep(phba))) {
		if ((rc == -ERESTART) && (read_rev_reset == 0)) {
			mempool_free( pmb, phba->mbox_mem_pool);
			phba->hba_state = 0;	/* Don't skip post */
			lpfc_sli_brdreset(phba);
			phba->hba_state = LPFC_INIT_START;
			msleep(500);
			read_rev_reset = 1;
			goto top;
		}
		phba->hba_state = LPFC_HBA_ERROR;
		mempool_free( pmb, phba->mbox_mem_pool);
		return -ENXIO;
	}

	/* Setup and issue mailbox CONFIG_PORT command */
	phba->hba_state = LPFC_INIT_MBX_CMDS;
	lpfc_config_port(phba, pmb);
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
		/* Adapter failed to init, mbxCmd <cmd> CONFIG_PORT,
		   mbxStatus <status> */
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"%d:0442 Adapter failed to init, mbxCmd x%x "
				"CONFIG_PORT, mbxStatus x%x Data: x%x\n",
				phba->brd_no, pmb->mb.mbxCommand,
				pmb->mb.mbxStatus, 0);

		/* This clause gives the config_port call is given multiple
		   chances to succeed. */
		if (read_rev_reset == 0) {
			mempool_free( pmb, phba->mbox_mem_pool);
			phba->hba_state = 0;	/* Don't skip post */
			lpfc_sli_brdreset(phba);
			phba->hba_state = LPFC_INIT_START;
			msleep(2500);
			read_rev_reset = 1;
			goto top;
		}

		psli->sliinit.sli_flag &= ~LPFC_SLI2_ACTIVE;
		phba->hba_state = LPFC_HBA_ERROR;
		mempool_free( pmb, phba->mbox_mem_pool);
		return -ENXIO;
	}

	if ((rc = lpfc_sli_ring_map(phba))) {
		phba->hba_state = LPFC_HBA_ERROR;
		mempool_free( pmb, phba->mbox_mem_pool);
		return -ENXIO;
	}
	psli->sliinit.sli_flag |= LPFC_PROCESS_LA;

	/* Call post CONFIG_PORT mailbox command initialization. */
	if ((rc = lpfc_config_port_post(phba))) {
		phba->hba_state = LPFC_HBA_ERROR;
		mempool_free( pmb, phba->mbox_mem_pool);
		return -ENXIO;
	}
	mempool_free( pmb, phba->mbox_mem_pool);
	return 0;
}
+
+
+
+
+
+
+
+/*
+ * lpfc_mbox_abort - fail the active mailbox command and flush the mbox queue.
+ *
+ * Completes the currently active mailbox command (if any) with
+ * MBX_NOT_FINISHED, then drains every queued-but-not-issued mailbox
+ * command the same way. For commands without a completion handler the
+ * attached DMA buffer (context1) and the command itself are freed here.
+ * NOTE(review): callers appear to hold host_lock (see lpfc_mbox_timeout);
+ * confirm before adding new call sites.
+ */
+static void
+lpfc_mbox_abort(struct lpfc_hba * phba)
+{
+ struct lpfc_sli *psli;
+ LPFC_MBOXQ_t *pmbox;
+ MAILBOX_t *mb;
+ struct lpfc_dmabuf *mp;
+
+ psli = &phba->sli;
+
+ if (psli->mbox_active) {
+ /* Stop the timeout timer before tearing down the active cmd */
+ del_timer_sync(&psli->mbox_tmo);
+ pmbox = psli->mbox_active;
+ mb = &pmbox->mb;
+ psli->mbox_active = NULL;
+ if (pmbox->mbox_cmpl) {
+ /* Let the owner observe the failure and clean up */
+ mb->mbxStatus = MBX_NOT_FINISHED;
+ (pmbox->mbox_cmpl) (phba, pmbox);
+ } else {
+ /* No completion handler: free resources ourselves */
+ mp = (struct lpfc_dmabuf *) (pmbox->context1);
+ if (mp) {
+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ kfree(mp);
+ }
+ mempool_free( pmbox, phba->mbox_mem_pool);
+ }
+ psli->sliinit.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
+ }
+
+ /* Abort all the non active mailbox commands. */
+ pmbox = lpfc_mbox_get(phba);
+ while (pmbox) {
+ mb = &pmbox->mb;
+ if (pmbox->mbox_cmpl) {
+ mb->mbxStatus = MBX_NOT_FINISHED;
+ (pmbox->mbox_cmpl) (phba, pmbox);
+ } else {
+ mp = (struct lpfc_dmabuf *) (pmbox->context1);
+ if (mp) {
+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ kfree(mp);
+ }
+ mempool_free( pmbox, phba->mbox_mem_pool);
+ }
+ pmbox = lpfc_mbox_get(phba);
+ }
+ return;
+}
+/*! lpfc_mbox_timeout
+ *
+ * \pre
+ * \post
+ * \param hba Pointer to per struct lpfc_hba structure
+ * \param l1 Pointer to the driver's mailbox queue.
+ * \return
+ * void
+ *
+ * \b Description:
+ *
+ * This routine handles mailbox timeout events at timer interrupt context.
+ */
+void
+lpfc_mbox_timeout(unsigned long ptr)
+{
+ struct lpfc_hba *phba;
+ struct lpfc_sli *psli;
+ LPFC_MBOXQ_t *pmbox;
+ MAILBOX_t *mb;
+ struct lpfc_dmabuf *mp;
+ unsigned long iflag;
+
+ /* Timer callback: ptr is the struct lpfc_hba this timer belongs to */
+ phba = (struct lpfc_hba *)ptr;
+ psli = &phba->sli;
+ spin_lock_irqsave(phba->host->host_lock, iflag);
+
+ /* NOTE(review): pmbox is dereferenced for the log message before the
+ * NULL-equivalent check below; if mbox_active can be NULL when this
+ * timer fires, this is a potential NULL dereference — confirm. */
+ pmbox = psli->mbox_active;
+ mb = &pmbox->mb;
+
+ /* Mbox cmd <mbxCommand> timeout */
+ lpfc_printf_log(phba,
+ KERN_ERR,
+ LOG_MBOX | LOG_SLI,
+ "%d:0310 Mailbox command x%x timeout Data: x%x x%x x%p\n",
+ phba->brd_no,
+ mb->mbxCommand,
+ phba->hba_state,
+ psli->sliinit.sli_flag,
+ psli->mbox_active);
+
+ if (psli->mbox_active == pmbox) {
+ /* Fail the timed-out command with MBX_NOT_FINISHED */
+ psli->mbox_active = NULL;
+ if (pmbox->mbox_cmpl) {
+ mb->mbxStatus = MBX_NOT_FINISHED;
+ (pmbox->mbox_cmpl) (phba, pmbox);
+ } else {
+ /* No handler: free DMA buffer and command here */
+ mp = (struct lpfc_dmabuf *) (pmbox->context1);
+ if (mp) {
+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ kfree(mp);
+ }
+ mempool_free( pmbox, phba->mbox_mem_pool);
+ }
+ psli->sliinit.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
+ }
+
+ /* Flush any still-queued mailbox commands as well */
+ lpfc_mbox_abort(phba);
+ spin_unlock_irqrestore(phba->host->host_lock, iflag);
+ return;
+}
+
+
+/*
+ * lpfc_sli_issue_mbox - issue a mailbox command to the HBA.
+ *
+ * @phba:  HBA being driven.
+ * @pmbox: mailbox command to issue (pmbox->mb holds the MAILBOX_t).
+ * @flag:  MBX_POLL to busy-wait for completion, MBX_NOWAIT to return
+ *         immediately; may be OR'd with MBX_STOP_IOCB to pause IOCB rings.
+ *
+ * Returns MBX_SUCCESS/mbxStatus (poll mode), MBX_BUSY when another mailbox
+ * command is active and this one was queued, or MBX_NOT_FINISHED on error.
+ *
+ * In SLI2 mode the command is copied to host SLIM (shared memory); otherwise
+ * it is written directly to the HBA's SLIM registers. NOTE(review): the
+ * host_lock is taken only for MBX_POLL; non-poll callers presumably already
+ * hold it — confirm against call sites.
+ */
+int
+lpfc_sli_issue_mbox(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmbox, uint32_t flag)
+{
+ MAILBOX_t *mbox;
+ MAILBOX_t *mb;
+ struct lpfc_sli *psli;
+ uint32_t status, evtctr;
+ uint32_t ha_copy;
+ int i;
+ unsigned long drvr_flag = 0;
+ volatile uint32_t word0, ldata;
+ void *to_slim;
+
+ psli = &phba->sli;
+ if (flag & MBX_POLL) {
+ spin_lock_irqsave(phba->host->host_lock, drvr_flag);
+ }
+
+ mb = &pmbox->mb;
+ status = MBX_SUCCESS;
+
+ if (psli->sliinit.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
+ /* Polling for a mbox command when another one is already active
+ * is not allowed in SLI. Also, the driver must have established
+ * SLI2 mode to queue and process multiple mbox commands.
+ */
+
+ if (flag & MBX_POLL) {
+ spin_unlock_irqrestore(phba->host->host_lock,
+ drvr_flag);
+
+ /* Mbox command <mbxCommand> cannot issue */
+ LOG_MBOX_CANNOT_ISSUE_DATA( phba, mb, psli, flag)
+ return (MBX_NOT_FINISHED);
+ }
+
+ if (!(psli->sliinit.sli_flag & LPFC_SLI2_ACTIVE)) {
+
+ /* Mbox command <mbxCommand> cannot issue */
+ LOG_MBOX_CANNOT_ISSUE_DATA( phba, mb, psli, flag)
+ return (MBX_NOT_FINISHED);
+ }
+
+ /* Handle STOP IOCB processing flag. This is only meaningful
+ * if we are not polling for mbox completion.
+ */
+ if (flag & MBX_STOP_IOCB) {
+ flag &= ~MBX_STOP_IOCB;
+ /* Now flag each ring */
+ for (i = 0; i < psli->sliinit.num_rings; i++) {
+ /* If the ring is active, flag it */
+ if (psli->ring[i].cmdringaddr) {
+ psli->ring[i].flag |=
+ LPFC_STOP_IOCB_MBX;
+ }
+ }
+ }
+
+ /* Another mailbox command is still being processed, queue this
+ * command to be processed later.
+ */
+ lpfc_mbox_put(phba, pmbox);
+
+ /* Mbox cmd issue - BUSY */
+ lpfc_printf_log(phba,
+ KERN_INFO,
+ LOG_MBOX | LOG_SLI,
+ "%d:0308 Mbox cmd issue - BUSY Data: x%x x%x x%x x%x\n",
+ phba->brd_no,
+ mb->mbxCommand,
+ phba->hba_state,
+ psli->sliinit.sli_flag,
+ flag);
+
+ psli->slistat.mboxBusy++;
+ if (flag == MBX_POLL) {
+ spin_unlock_irqrestore(phba->host->host_lock,
+ drvr_flag);
+ }
+ return (MBX_BUSY);
+ }
+
+ /* Handle STOP IOCB processing flag. This is only meaningful
+ * if we are not polling for mbox completion.
+ */
+ if (flag & MBX_STOP_IOCB) {
+ flag &= ~MBX_STOP_IOCB;
+ if (flag == MBX_NOWAIT) {
+ /* Now flag each ring */
+ for (i = 0; i < psli->sliinit.num_rings; i++) {
+ /* If the ring is active, flag it */
+ if (psli->ring[i].cmdringaddr) {
+ psli->ring[i].flag |=
+ LPFC_STOP_IOCB_MBX;
+ }
+ }
+ }
+ }
+
+ psli->sliinit.sli_flag |= LPFC_SLI_MBOX_ACTIVE;
+
+ /* If we are not polling, we MUST be in SLI2 mode */
+ if (flag != MBX_POLL) {
+ if (!(psli->sliinit.sli_flag & LPFC_SLI2_ACTIVE)) {
+ psli->sliinit.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
+
+ /* Mbox command <mbxCommand> cannot issue */
+ LOG_MBOX_CANNOT_ISSUE_DATA( phba, mb, psli, flag);
+ return (MBX_NOT_FINISHED);
+ }
+ /* timeout active mbox command */
+ mod_timer(&psli->mbox_tmo, jiffies + HZ * LPFC_MBOX_TMO);
+ }
+
+ /* Mailbox cmd <cmd> issue */
+ lpfc_printf_log(phba,
+ KERN_INFO,
+ LOG_MBOX | LOG_SLI,
+ "%d:0309 Mailbox cmd x%x issue Data: x%x x%x x%x\n",
+ phba->brd_no,
+ mb->mbxCommand,
+ phba->hba_state,
+ psli->sliinit.sli_flag,
+ flag);
+
+ psli->slistat.mboxCmd++;
+ evtctr = psli->slistat.mboxEvent;
+
+ /* next set own bit for the adapter and copy over command word */
+ mb->mbxOwner = OWN_CHIP;
+
+ if (psli->sliinit.sli_flag & LPFC_SLI2_ACTIVE) {
+
+ /* First copy command data to host SLIM area */
+ mbox = (MAILBOX_t *) psli->MBhostaddr;
+ lpfc_sli_pcimem_bcopy((uint32_t *) mb, (uint32_t *) mbox,
+ (sizeof (uint32_t) *
+ (MAILBOX_CMD_WSIZE)));
+
+ /* Make the mailbox visible to the device before ringing it */
+ pci_dma_sync_single_for_device(phba->pcidev,
+ phba->slim2p_mapping,
+ sizeof (MAILBOX_t),
+ PCI_DMA_TODEVICE);
+ } else {
+ if (mb->mbxCommand == MBX_CONFIG_PORT) {
+ /* copy command data into host mbox for cmpl */
+ mbox = (MAILBOX_t *) psli->MBhostaddr;
+ lpfc_sli_pcimem_bcopy((uint32_t *) mb,
+ (uint32_t *) mbox,
+ (sizeof (uint32_t) *
+ (MAILBOX_CMD_WSIZE)));
+ }
+
+ /* First copy mbox command data to HBA SLIM, skip past first
+ word */
+ to_slim = (uint8_t *) phba->MBslimaddr + sizeof (uint32_t);
+ lpfc_memcpy_to_slim(to_slim, (void *)&mb->un.varWords[0],
+ (MAILBOX_CMD_WSIZE - 1) * sizeof (uint32_t));
+
+ /* Next copy over first word, with mbxOwner set. Writing word0
+ * last hands ownership to the chip only after the payload is
+ * fully in SLIM. */
+ ldata = *((volatile uint32_t *)mb);
+ to_slim = phba->MBslimaddr;
+ writel(ldata, to_slim);
+ readl(to_slim); /* flush */
+
+ if (mb->mbxCommand == MBX_CONFIG_PORT) {
+ /* switch over to host mailbox */
+ psli->sliinit.sli_flag |= LPFC_SLI2_ACTIVE;
+ }
+ }
+
+ /* Order all prior writes before ringing the attention doorbell */
+ wmb();
+ /* interrupt board to doit right away */
+ writel(CA_MBATT, phba->CAregaddr);
+ readl(phba->CAregaddr); /* flush */
+
+ switch (flag) {
+ case MBX_NOWAIT:
+ /* Don't wait for it to finish, just return */
+ psli->mbox_active = pmbox;
+ break;
+
+ case MBX_POLL:
+ i = 0;
+ psli->mbox_active = NULL;
+ if (psli->sliinit.sli_flag & LPFC_SLI2_ACTIVE) {
+ /* First read mbox status word */
+ mbox = (MAILBOX_t *) psli->MBhostaddr;
+ word0 = *((volatile uint32_t *)mbox);
+ word0 = le32_to_cpu(word0);
+ } else {
+ /* First read mbox status word */
+ word0 = readl(phba->MBslimaddr);
+ }
+
+ /* Read the HBA Host Attention Register */
+ ha_copy = readl(phba->HAregaddr);
+
+ /* Wait for command to complete: chip clears OWN_CHIP in word0
+ * and raises HA_MBATT. Delay grows with each iteration (mdelay(i))
+ * up to 100 iterations before giving up. */
+ while (((word0 & OWN_CHIP) == OWN_CHIP)
+ || !(ha_copy & HA_MBATT)) {
+ if (i++ >= 100) {
+ psli->sliinit.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
+ spin_unlock_irqrestore(phba->host->host_lock,
+ drvr_flag);
+ return (MBX_NOT_FINISHED);
+ }
+
+ /* Check if we took a mbox interrupt while we were
+ polling */
+ if (((word0 & OWN_CHIP) != OWN_CHIP)
+ && (evtctr != psli->slistat.mboxEvent))
+ break;
+
+ spin_unlock_irqrestore(phba->host->host_lock,
+ drvr_flag);
+
+ /* Can be in interrupt context, do not sleep */
+ /* (or might be called with interrupts disabled) */
+ mdelay(i);
+
+ spin_lock_irqsave(phba->host->host_lock, drvr_flag);
+
+ if (psli->sliinit.sli_flag & LPFC_SLI2_ACTIVE) {
+ /* First copy command data */
+ mbox = (MAILBOX_t *) psli->MBhostaddr;
+ word0 = *((volatile uint32_t *)mbox);
+ word0 = le32_to_cpu(word0);
+ if (mb->mbxCommand == MBX_CONFIG_PORT) {
+ MAILBOX_t *slimmb;
+ volatile uint32_t slimword0;
+ /* Check real SLIM for any errors */
+ slimword0 = readl(phba->MBslimaddr);
+ slimmb = (MAILBOX_t *) & slimword0;
+ if (((slimword0 & OWN_CHIP) != OWN_CHIP)
+ && slimmb->mbxStatus) {
+ /* CONFIG_PORT failed: drop back
+ * out of SLI2 mode */
+ psli->sliinit.sli_flag &=
+ ~LPFC_SLI2_ACTIVE;
+ word0 = slimword0;
+ }
+ }
+ } else {
+ /* First copy command data */
+ word0 = readl(phba->MBslimaddr);
+ }
+ /* Read the HBA Host Attention Register */
+ ha_copy = readl(phba->HAregaddr);
+ }
+
+ if (psli->sliinit.sli_flag & LPFC_SLI2_ACTIVE) {
+ /* First copy command data */
+ mbox = (MAILBOX_t *) psli->MBhostaddr;
+ /* copy results back to user */
+ lpfc_sli_pcimem_bcopy((uint32_t *) mbox,
+ (uint32_t *) mb,
+ (sizeof (uint32_t) *
+ MAILBOX_CMD_WSIZE));
+ } else {
+ /* First copy command data */
+ lpfc_memcpy_from_slim((void *)mb,
+ phba->MBslimaddr,
+ sizeof (uint32_t) * (MAILBOX_CMD_WSIZE));
+ }
+
+ /* Acknowledge the mailbox attention condition */
+ writel(HA_MBATT, phba->HAregaddr);
+ readl(phba->HAregaddr); /* flush */
+
+ psli->sliinit.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
+ status = mb->mbxStatus;
+ }
+
+ if (flag == MBX_POLL) {
+ spin_unlock_irqrestore(phba->host->host_lock, drvr_flag);
+ }
+ return (status);
+}
+
+/*
+ * lpfc_sli_ringtx_put - queue an iocb on the ring's transmit queue.
+ * The iocb is appended to pring->txq for later submission; always
+ * returns 0.
+ */
+static int
+lpfc_sli_ringtx_put(struct lpfc_hba * phba, struct lpfc_sli_ring * pring,
+ struct lpfc_iocbq * piocb)
+{
+ /* Insert the caller's iocb in the txq tail for later processing. */
+ list_add_tail(&piocb->list, &pring->txq);
+ pring->txq_cnt++;
+ return (0);
+}
+
+/*
+ * lpfc_sli_next_iocb - pick the next iocb to submit to the ring.
+ *
+ * Previously-queued iocbs on the txq take precedence over the caller's
+ * new iocb (*piocb). When the caller's iocb is consumed, *piocb is set
+ * to NULL so the caller knows ownership was transferred. Returns NULL
+ * when both the txq and *piocb are exhausted.
+ */
+static struct lpfc_iocbq *
+lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+ struct lpfc_iocbq ** piocb)
+{
+ struct lpfc_iocbq * nextiocb;
+
+ nextiocb = lpfc_sli_ringtx_get(phba, pring);
+ if (!nextiocb) {
+ nextiocb = *piocb;
+ *piocb = NULL;
+ }
+
+ return nextiocb;
+}
+
+/*
+ * lpfc_sli_issue_iocb - submit an iocb command to an SLI ring.
+ *
+ * @phba:  HBA being driven.
+ * @pring: ring to submit on.
+ * @piocb: command to issue.
+ * @flag:  SLI_IOCB_HIGH_PRIORITY bypasses the txq; SLI_IOCB_RET_IOCB
+ *         makes a busy condition return IOCB_BUSY instead of queueing.
+ *
+ * Returns IOCB_SUCCESS (submitted or queued), IOCB_BUSY (caller keeps
+ * the iocb), or IOCB_ERROR (bad link state). Drains as much of the txq
+ * as the ring has free slots for while submitting.
+ */
+int
+lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+ struct lpfc_iocbq *piocb, uint32_t flag)
+{
+ struct lpfc_sli *psli = &phba->sli;
+ int ringno = pring->ringno;
+ struct lpfc_iocbq *nextiocb;
+ IOCB_t *iocb;
+
+ /*
+ * We should never get an IOCB if we are in a < LINK_DOWN state
+ */
+ if (unlikely(phba->hba_state < LPFC_LINK_DOWN))
+ return IOCB_ERROR;
+
+ /*
+ * Check to see if we are blocking IOCB processing because of a
+ * outstanding mbox command.
+ */
+ if (unlikely(pring->flag & LPFC_STOP_IOCB_MBX))
+ goto iocb_busy;
+
+ if (unlikely(phba->hba_state == LPFC_LINK_DOWN)) {
+ /*
+ * Only CREATE_XRI, CLOSE_XRI, ABORT_XRI, and QUE_RING_BUF
+ * can be issued if the link is not up.
+ */
+ switch (piocb->iocb.ulpCommand) {
+ case CMD_QUE_RING_BUF_CN:
+ case CMD_QUE_RING_BUF64_CN:
+ case CMD_CLOSE_XRI_CN:
+ case CMD_ABORT_XRI_CN:
+ /*
+ * For IOCBs, like QUE_RING_BUF, that have no rsp ring
+ * completion, iocb_cmpl MUST be 0.
+ */
+ if (piocb->iocb_cmpl)
+ piocb->iocb_cmpl = NULL;
+ /*FALLTHROUGH*/
+ case CMD_CREATE_XRI_CR:
+ break;
+ default:
+ goto iocb_busy;
+ }
+
+ /*
+ * For FCP commands, we must be in a state where we can process link
+ * attention events.
+ */
+ } else if (unlikely(pring->ringno == psli->fcp_ring &&
+ !(psli->sliinit.sli_flag & LPFC_PROCESS_LA)))
+ goto iocb_busy;
+
+ /*
+ * Check to see if this is a high priority command.
+ * If so bypass tx queue processing.
+ */
+ if (unlikely((flag & SLI_IOCB_HIGH_PRIORITY) &&
+ (iocb = lpfc_sli_next_iocb_slot(phba, pring)))) {
+ lpfc_sli_submit_iocb(phba, pring, iocb, piocb);
+ piocb = NULL;
+ }
+
+ /* Submit queued iocbs (plus the caller's) while ring slots remain */
+ while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
+ (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb)))
+ lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
+
+ /* iocb != NULL means the ring still has room; otherwise it is full */
+ if (iocb)
+ lpfc_sli_update_ring(phba, pring);
+ else
+ lpfc_sli_update_full_ring(phba, pring);
+
+ /* piocb == NULL means the caller's iocb was consumed above */
+ if (!piocb)
+ return IOCB_SUCCESS;
+
+ goto out_busy;
+
+ iocb_busy:
+ psli->slistat.iocbCmdDelay[ringno]++;
+
+ out_busy:
+
+ if (!(flag & SLI_IOCB_RET_IOCB)) {
+ /* Default policy: park the iocb on the txq for later */
+ lpfc_sli_ringtx_put(phba, pring, piocb);
+ return IOCB_SUCCESS;
+ }
+
+ return IOCB_BUSY;
+}
+
+/*
+ * lpfc_sli_queue_setup - initialize queues and lookup tables for all rings.
+ *
+ * Resets per-ring indices, initializes the txq/txcmplq/iocb_continueq/
+ * postbufq list heads, and allocates the zeroed fast_lookup iotag table
+ * for rings configured with fast_iotag. Returns 1 on success, 0 when a
+ * fast_lookup allocation fails (earlier allocations are not rolled back
+ * here).
+ */
+int
+lpfc_sli_queue_setup(struct lpfc_hba * phba)
+{
+ struct lpfc_sli *psli;
+ struct lpfc_sli_ring *pring;
+ int i, cnt;
+
+ psli = &phba->sli;
+ INIT_LIST_HEAD(&psli->mboxq);
+ /* Initialize list headers for txq and txcmplq as double linked lists */
+ for (i = 0; i < psli->sliinit.num_rings; i++) {
+ pring = &psli->ring[i];
+ pring->ringno = i;
+ pring->next_cmdidx = 0;
+ pring->local_getidx = 0;
+ pring->cmdidx = 0;
+ INIT_LIST_HEAD(&pring->txq);
+ INIT_LIST_HEAD(&pring->txcmplq);
+ INIT_LIST_HEAD(&pring->iocb_continueq);
+ INIT_LIST_HEAD(&pring->postbufq);
+ cnt = psli->sliinit.ringinit[i].fast_iotag;
+ if (cnt) {
+ pring->fast_lookup =
+ kmalloc(cnt * sizeof (struct lpfc_iocbq *),
+ GFP_KERNEL);
+ if (pring->fast_lookup == 0) {
+ return (0);
+ }
+ /* Table must start empty: iotag slots are NULL */
+ memset((char *)pring->fast_lookup, 0,
+ cnt * sizeof (struct lpfc_iocbq *));
+ }
+ }
+ return (1);
+}
+
+/*
+ * lpfc_sli_hba_down - take the HBA offline and release SLI resources.
+ *
+ * Fails every iocb still sitting on each ring's txq (completing with
+ * IOSTAT_LOCAL_REJECT/IOERR_SLI_DOWN or freeing it), frees the per-ring
+ * fast_lookup tables, returns the active and queued mailbox commands,
+ * and finally resets the board unless it is already in an error state.
+ * Always returns 1.
+ */
+int
+lpfc_sli_hba_down(struct lpfc_hba * phba)
+{
+ struct lpfc_sli *psli;
+ struct lpfc_sli_ring *pring;
+ LPFC_MBOXQ_t *pmb;
+ struct lpfc_dmabuf *mp;
+ struct lpfc_iocbq *iocb, *next_iocb;
+ IOCB_t *icmd = NULL;
+ int i;
+
+ psli = &phba->sli;
+ lpfc_hba_down_prep(phba);
+
+ for (i = 0; i < psli->sliinit.num_rings; i++) {
+ pring = &psli->ring[i];
+ pring->flag |= LPFC_DEFERRED_RING_EVENT;
+
+ /*
+ * Error everything on the txq since these iocbs have not been
+ * given to the FW yet.
+ */
+ pring->txq_cnt = 0;
+
+ list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
+ list_del_init(&iocb->list);
+ if (iocb->iocb_cmpl) {
+ /* Complete back to the owner as "SLI down" */
+ icmd = &iocb->iocb;
+ icmd->ulpStatus = IOSTAT_LOCAL_REJECT;
+ icmd->un.ulpWord[4] = IOERR_SLI_DOWN;
+ (iocb->iocb_cmpl) (phba, iocb, iocb);
+ } else {
+ mempool_free( iocb, phba->iocb_mem_pool);
+ }
+ }
+
+ INIT_LIST_HEAD(&(pring->txq));
+
+ if (pring->fast_lookup) {
+ kfree(pring->fast_lookup);
+ pring->fast_lookup = NULL;
+ }
+
+ }
+
+ /* Return any active mbox cmds */
+ del_timer_sync(&psli->mbox_tmo);
+ if ((psli->mbox_active)) {
+ pmb = psli->mbox_active;
+ mp = (struct lpfc_dmabuf *) (pmb->context1);
+ if (mp) {
+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ kfree(mp);
+ }
+ /* NOTE(review): freed without invoking mbox_cmpl, unlike
+ * lpfc_mbox_abort — confirm this is intentional on teardown */
+ mempool_free(psli->mbox_active, phba->mbox_mem_pool);
+ }
+ psli->sliinit.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
+ psli->mbox_active = NULL;
+
+ /* Return any pending mbox cmds */
+ while ((pmb = lpfc_mbox_get(phba)) != NULL) {
+ mp = (struct lpfc_dmabuf *) (pmb->context1);
+ if (mp) {
+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ kfree(mp);
+ }
+ mempool_free(pmb, phba->mbox_mem_pool);
+ }
+
+ INIT_LIST_HEAD(&psli->mboxq);
+
+ /*
+ * Provided the hba is not in an error state, reset it. It is not
+ * capable of IO anymore.
+ */
+ if (phba->hba_state != LPFC_HBA_ERROR) {
+ phba->hba_state = LPFC_INIT_START;
+ lpfc_sli_brdreset(phba);
+ }
+
+ return 1;
+}
+
+/*
+ * lpfc_sli_pcimem_bcopy - copy cnt BYTES of 32-bit words from src to dest,
+ * byte-swapping each word from little-endian (HBA/PCI order) to CPU order.
+ * cnt is a byte count; the loop steps sizeof(uint32_t) per word, so cnt
+ * is expected to be a multiple of 4.
+ */
+void
+lpfc_sli_pcimem_bcopy(uint32_t * src, uint32_t * dest, uint32_t cnt)
+{
+ uint32_t ldata;
+ int i;
+
+ for (i = 0; i < (int)cnt; i += sizeof (uint32_t)) {
+ ldata = *src++;
+ ldata = le32_to_cpu(ldata);
+ *dest++ = ldata;
+ }
+}
+
+/*
+ * lpfc_sli_ringpostbuf_put - remember a DMA buffer posted to the HBA.
+ * Appends mp to the ring's postbufq so lpfc_sli_ringpostbuf_get() can
+ * later translate the buffer's physical address back to this descriptor.
+ * Always returns 0.
+ */
+int
+lpfc_sli_ringpostbuf_put(struct lpfc_hba * phba, struct lpfc_sli_ring * pring,
+ struct lpfc_dmabuf * mp)
+{
+ /* Stick struct lpfc_dmabuf at end of postbufq so driver can look it up
+ later */
+ list_add_tail(&mp->list, &pring->postbufq);
+
+ pring->postbufq_cnt++;
+ return 0;
+}
+
+
+/*
+ * lpfc_sli_ringpostbuf_get - find and unlink the posted buffer matching
+ * a physical address.
+ *
+ * Searches the ring's postbufq for a descriptor whose phys matches,
+ * removes it from the queue, syncs the buffer for CPU access and returns
+ * it. Returns NULL (after logging an error) when no match is found.
+ */
+struct lpfc_dmabuf *
+lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+ dma_addr_t phys)
+{
+ struct lpfc_dmabuf *mp, *next_mp;
+ struct list_head *slp = &pring->postbufq;
+
+ /* Search postbufq, from the beginning, looking for a match on phys */
+ list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
+ if (mp->phys == phys) {
+ list_del_init(&mp->list);
+ pring->postbufq_cnt--;
+ /* HBA wrote this buffer; sync before the CPU reads it */
+ pci_dma_sync_single_for_cpu(phba->pcidev, mp->phys,
+ LPFC_BPL_SIZE, PCI_DMA_FROMDEVICE);
+ return mp;
+ }
+ }
+
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "%d:0410 Cannot find virtual addr for mapped buf on "
+ "ring %d Data x%llx x%p x%p x%x\n",
+ phba->brd_no, pring->ringno, (unsigned long long)phys,
+ slp->next, slp->prev, pring->postbufq_cnt);
+ return NULL;
+}
+
+/*
+ * lpfc_sli_next_iotag - allocate the next I/O tag for a ring.
+ *
+ * Without a fast_lookup table the counter simply wraps within
+ * [1, iotag_max). With a table, scans circularly for a free (NULL) slot
+ * in [1, fast_iotag). Returns the tag, or 0 when every fast slot is in
+ * use (tag 0 is never handed out, so it doubles as the failure value).
+ */
+uint32_t
+lpfc_sli_next_iotag(struct lpfc_hba * phba, struct lpfc_sli_ring * pring)
+{
+ LPFC_RING_INIT_t *pringinit;
+ struct lpfc_sli *psli;
+ uint32_t search_start;
+
+ psli = &phba->sli;
+ pringinit = &psli->sliinit.ringinit[pring->ringno];
+
+ if (pring->fast_lookup == NULL) {
+ /* No table: tags recycle without a free-slot check */
+ pringinit->iotag_ctr++;
+ if (pringinit->iotag_ctr >= pringinit->iotag_max)
+ pringinit->iotag_ctr = 1;
+ return pringinit->iotag_ctr;
+ }
+
+ /* Remember where the scan began so we stop after one full lap */
+ search_start = pringinit->iotag_ctr;
+
+ do {
+ pringinit->iotag_ctr++;
+ if (pringinit->iotag_ctr >= pringinit->fast_iotag)
+ pringinit->iotag_ctr = 1;
+
+ if(*(pring->fast_lookup + pringinit->iotag_ctr) == NULL)
+ return pringinit->iotag_ctr;
+
+ } while (pringinit->iotag_ctr != search_start);
+
+ /*
+ * Outstanding I/O count for ring <ringno> is at max <fast_iotag>
+ */
+ lpfc_printf_log(phba,
+ KERN_ERR,
+ LOG_SLI,
+ "%d:0318 Outstanding I/O count for ring %d is at max x%x\n",
+ phba->brd_no,
+ pring->ringno,
+ psli->sliinit.ringinit[pring->ringno].fast_iotag);
+ return (0);
+}
+
+/*
+ * lpfc_sli_abort_elsreq_cmpl - completion handler for an ABORT of an
+ * ELS_REQUEST64 iocb.
+ *
+ * Frees the resources inherited from the aborted command once the abort
+ * itself completes: the command buffer (context2), the response buffer
+ * chained on context2's list, and the BPL (context3), then returns the
+ * abort iocb to its mempool. rspiocb is unused.
+ */
+static void
+lpfc_sli_abort_elsreq_cmpl(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
+ struct lpfc_iocbq * rspiocb)
+{
+ struct lpfc_dmabuf *buf_ptr, *buf_ptr1;
+ /* Free the resources associated with the ELS_REQUEST64 IOCB the driver
+ * just aborted.
+ * In this case, context2 = cmd, context2->next = rsp, context3 = bpl
+ */
+ if (cmdiocb->context2) {
+ buf_ptr1 = (struct lpfc_dmabuf *) cmdiocb->context2;
+
+ /* Free the response IOCB before completing the abort
+ command. */
+ if (!list_empty(&buf_ptr1->list)) {
+
+ buf_ptr = list_entry(buf_ptr1->list.next,
+ struct lpfc_dmabuf, list);
+
+ list_del(&buf_ptr->list);
+ lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
+ kfree(buf_ptr);
+ }
+ lpfc_mbuf_free(phba, buf_ptr1->virt, buf_ptr1->phys);
+ kfree(buf_ptr1);
+ }
+
+ if (cmdiocb->context3) {
+ buf_ptr = (struct lpfc_dmabuf *) cmdiocb->context3;
+ lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
+ kfree(buf_ptr);
+ }
+ mempool_free( cmdiocb, phba->iocb_mem_pool);
+ return;
+}
+
+/*
+ * lpfc_sli_issue_abort_iotag32 - issue an ABORT_MXRI64 for an outstanding
+ * iocb, matched by its 32-bit I/O tag.
+ *
+ * Only CMD_ELS_REQUEST64_CR commands are supported; any other command
+ * type is rejected. Returns 1 when the abort was issued, 0 on allocation
+ * failure, unsupported command, or issue failure.
+ */
+int
+lpfc_sli_issue_abort_iotag32(struct lpfc_hba * phba,
+ struct lpfc_sli_ring * pring,
+ struct lpfc_iocbq * cmdiocb)
+{
+ struct lpfc_sli *psli;
+ struct lpfc_iocbq *abtsiocbp;
+ IOCB_t *icmd = NULL;
+ IOCB_t *iabt = NULL;
+ uint32_t iotag32;
+
+ psli = &phba->sli;
+
+ /* issue ABTS for this IOCB based on iotag */
+ if ((abtsiocbp = mempool_alloc(phba->iocb_mem_pool, GFP_ATOMIC)) == 0) {
+ return (0);
+ }
+ memset(abtsiocbp, 0, sizeof (struct lpfc_iocbq));
+ iabt = &abtsiocbp->iocb;
+
+ icmd = &cmdiocb->iocb;
+ switch (icmd->ulpCommand) {
+ case CMD_ELS_REQUEST64_CR:
+ iotag32 = icmd->un.elsreq64.bdl.ulpIoTag32;
+ /* Even though we abort the ELS command, the firmware may access
+ * the BPL or other resources before it processes our
+ * ABORT_MXRI64. Thus we must delay reusing the cmdiocb
+ * resources till the actual abort request completes.
+ */
+ abtsiocbp->context1 = (void *)((unsigned long)icmd->ulpCommand);
+ /* Transfer buffer ownership to the abort iocb; they are freed
+ * in lpfc_sli_abort_elsreq_cmpl when the abort completes */
+ abtsiocbp->context2 = cmdiocb->context2;
+ abtsiocbp->context3 = cmdiocb->context3;
+ cmdiocb->context2 = NULL;
+ cmdiocb->context3 = NULL;
+ abtsiocbp->iocb_cmpl = lpfc_sli_abort_elsreq_cmpl;
+ break;
+ default:
+ mempool_free( abtsiocbp, phba->iocb_mem_pool);
+ return (0);
+ }
+
+ iabt->un.amxri.abortType = ABORT_TYPE_ABTS;
+ iabt->un.amxri.iotag32 = iotag32;
+
+ iabt->ulpLe = 1;
+ iabt->ulpClass = CLASS3;
+ iabt->ulpCommand = CMD_ABORT_MXRI64_CN;
+
+ if (lpfc_sli_issue_iocb(phba, pring, abtsiocbp, 0) == IOCB_ERROR) {
+ mempool_free( abtsiocbp, phba->iocb_mem_pool);
+ return (0);
+ }
+
+ return (1);
+}
+
+/*
+ * lpfc_sli_abort_iocb_ctx - abort all iocbs on a ring that match a given
+ * ulpContext.
+ *
+ * Matching iocbs still on the txq are completed locally with
+ * IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED (or freed when they have no
+ * completion handler). Matching iocbs already handed to the firmware
+ * (txcmplq) get an ABORT_XRI (link up) or CLOSE_XRI (link down) issued;
+ * the response ring completion removes them later. Returns the number
+ * of txcmplq entries that could not be aborted.
+ */
+int
+lpfc_sli_abort_iocb_ctx(struct lpfc_hba * phba, struct lpfc_sli_ring * pring,
+ uint32_t ctx)
+{
+ struct lpfc_sli *psli;
+ struct lpfc_iocbq *iocb, *next_iocb;
+ struct lpfc_iocbq *abtsiocbp;
+ IOCB_t *icmd = NULL, *cmd = NULL;
+ int errcnt;
+
+ psli = &phba->sli;
+ errcnt = 0;
+
+ /* Error matching iocb on txq or txcmplq
+ * First check the txq.
+ */
+ list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
+ cmd = &iocb->iocb;
+ if (cmd->ulpContext != ctx) {
+ continue;
+ }
+
+ list_del_init(&iocb->list);
+ pring->txq_cnt--;
+ if (iocb->iocb_cmpl) {
+ icmd = &iocb->iocb;
+ icmd->ulpStatus = IOSTAT_LOCAL_REJECT;
+ icmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
+ (iocb->iocb_cmpl) (phba, iocb, iocb);
+ } else {
+ mempool_free( iocb, phba->iocb_mem_pool);
+ }
+ }
+
+ /* Next check the txcmplq */
+ list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
+ cmd = &iocb->iocb;
+ if (cmd->ulpContext != ctx) {
+ continue;
+ }
+
+ /* issue ABTS for this IOCB based on iotag */
+ if ((abtsiocbp = mempool_alloc(phba->iocb_mem_pool,
+ GFP_ATOMIC)) == 0) {
+ errcnt++;
+ continue;
+ }
+ memset(abtsiocbp, 0, sizeof (struct lpfc_iocbq));
+ icmd = &abtsiocbp->iocb;
+
+ icmd->un.acxri.abortType = ABORT_TYPE_ABTS;
+ icmd->un.acxri.abortContextTag = cmd->ulpContext;
+ icmd->un.acxri.abortIoTag = cmd->ulpIoTag;
+
+ icmd->ulpLe = 1;
+ icmd->ulpClass = cmd->ulpClass;
+ /* ABTS needs a live link; otherwise close the exchange locally */
+ if (phba->hba_state >= LPFC_LINK_UP) {
+ icmd->ulpCommand = CMD_ABORT_XRI_CN;
+ } else {
+ icmd->ulpCommand = CMD_CLOSE_XRI_CN;
+ }
+
+ if (lpfc_sli_issue_iocb(phba, pring, abtsiocbp, 0) ==
+ IOCB_ERROR) {
+ mempool_free( abtsiocbp, phba->iocb_mem_pool);
+ errcnt++;
+ continue;
+ }
+ /* The rsp ring completion will remove IOCB from txcmplq when
+ * abort is read by HBA.
+ */
+ }
+ return (errcnt);
+}
+
+/*
+ * lpfc_sli_sum_iocb_host - count outstanding FCP iocbs on a ring.
+ *
+ * Walks both the txq (not yet issued) and txcmplq (issued, awaiting
+ * completion) and counts every FCP command (ICMND64/IWRITE64/IREAD64)
+ * that has a struct lpfc_scsi_buf attached as context1. Returns the
+ * total count.
+ */
+int
+lpfc_sli_sum_iocb_host(struct lpfc_hba * phba,
+ struct lpfc_sli_ring * pring)
+{
+ struct lpfc_sli *psli;
+ struct lpfc_iocbq *iocb, *next_iocb;
+ IOCB_t *cmd = NULL;
+ struct lpfc_scsi_buf *lpfc_cmd;
+ int sum;
+
+ psli = &phba->sli;
+ sum = 0;
+
+ /* Error matching iocb on txq or txcmplq
+ * First check the txq.
+ */
+ list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
+ cmd = &iocb->iocb;
+
+ /* Must be a FCP command */
+ if ((cmd->ulpCommand != CMD_FCP_ICMND64_CR) &&
+ (cmd->ulpCommand != CMD_FCP_IWRITE64_CR) &&
+ (cmd->ulpCommand != CMD_FCP_IREAD64_CR)) {
+ continue;
+ }
+
+ /* context1 MUST be a struct lpfc_scsi_buf */
+ lpfc_cmd = (struct lpfc_scsi_buf *) (iocb->context1);
+ if (lpfc_cmd == 0) {
+ continue;
+ }
+ sum++;
+ }
+
+ /* Next check the txcmplq */
+ list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
+ cmd = &iocb->iocb;
+
+ /* Must be a FCP command */
+ if ((cmd->ulpCommand != CMD_FCP_ICMND64_CR) &&
+ (cmd->ulpCommand != CMD_FCP_IWRITE64_CR) &&
+ (cmd->ulpCommand != CMD_FCP_IREAD64_CR)) {
+ continue;
+ }
+
+ /* context1 MUST be a struct lpfc_scsi_buf */
+ lpfc_cmd = (struct lpfc_scsi_buf *) (iocb->context1);
+ if (lpfc_cmd == 0) {
+ continue;
+ }
+ sum++;
+ }
+ return (sum);
+}
+
+/*
+ * lpfc_sli_abort_iocb_host - abort all outstanding FCP iocbs on a ring.
+ *
+ * @flag: LPFC_ABORT_TXQ and/or LPFC_ABORT_TXCMPLQ select which queues
+ *        to process.
+ *
+ * txq entries are completed locally with IOSTAT_LOCAL_REJECT/
+ * IOERR_SLI_ABORTED (or freed); txcmplq entries get an ABORT_XRI
+ * (link up) or CLOSE_XRI (link down) issued to the firmware. Only FCP
+ * commands with a struct lpfc_scsi_buf in context1 are touched.
+ * Returns the number of txcmplq entries that could not be aborted.
+ */
+int
+lpfc_sli_abort_iocb_host(struct lpfc_hba * phba,
+ struct lpfc_sli_ring * pring, int flag)
+{
+ struct lpfc_sli *psli;
+ struct lpfc_iocbq *iocb, *next_iocb;
+ struct lpfc_iocbq *abtsiocbp;
+ IOCB_t *icmd = NULL, *cmd = NULL;
+ struct lpfc_scsi_buf *lpfc_cmd;
+ int errcnt;
+
+ psli = &phba->sli;
+ errcnt = 0;
+
+ /* Error matching iocb on txq or txcmplq
+ * First check the txq.
+ */
+ if(flag & LPFC_ABORT_TXQ) {
+ list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
+ cmd = &iocb->iocb;
+
+ /* Must be a FCP command */
+ if ((cmd->ulpCommand != CMD_FCP_ICMND64_CR) &&
+ (cmd->ulpCommand != CMD_FCP_IWRITE64_CR) &&
+ (cmd->ulpCommand != CMD_FCP_IREAD64_CR)) {
+ continue;
+ }
+
+ /* context1 MUST be a struct lpfc_scsi_buf */
+ lpfc_cmd = (struct lpfc_scsi_buf *) (iocb->context1);
+ if (lpfc_cmd == 0) {
+ continue;
+ }
+
+ list_del_init(&iocb->list);
+ pring->txq_cnt--;
+ if (iocb->iocb_cmpl) {
+ icmd = &iocb->iocb;
+ icmd->ulpStatus = IOSTAT_LOCAL_REJECT;
+ icmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
+ (iocb->iocb_cmpl) (phba, iocb, iocb);
+ } else {
+ mempool_free( iocb, phba->iocb_mem_pool);
+ }
+ }
+ }
+
+ if(flag & LPFC_ABORT_TXCMPLQ) {
+ /* Next check the txcmplq */
+ list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq,
+ list) {
+ cmd = &iocb->iocb;
+
+ /* Must be a FCP command */
+ if ((cmd->ulpCommand != CMD_FCP_ICMND64_CR) &&
+ (cmd->ulpCommand != CMD_FCP_IWRITE64_CR) &&
+ (cmd->ulpCommand != CMD_FCP_IREAD64_CR)) {
+ continue;
+ }
+
+ /* context1 MUST be a struct lpfc_scsi_buf */
+ lpfc_cmd = (struct lpfc_scsi_buf *) (iocb->context1);
+ if (lpfc_cmd == 0) {
+ continue;
+ }
+
+ /* issue ABTS for this IOCB based on iotag */
+ if ((abtsiocbp = mempool_alloc(phba->iocb_mem_pool,
+ GFP_ATOMIC)) == 0) {
+ errcnt++;
+ continue;
+ }
+ memset(abtsiocbp, 0, sizeof (struct lpfc_iocbq));
+ icmd = &abtsiocbp->iocb;
+
+ icmd->un.acxri.abortType = ABORT_TYPE_ABTS;
+ icmd->un.acxri.abortContextTag = cmd->ulpContext;
+ icmd->un.acxri.abortIoTag = cmd->ulpIoTag;
+
+ icmd->ulpLe = 1;
+ icmd->ulpClass = cmd->ulpClass;
+ if (phba->hba_state >= LPFC_LINK_UP) {
+ icmd->ulpCommand = CMD_ABORT_XRI_CN;
+ } else {
+ icmd->ulpCommand = CMD_CLOSE_XRI_CN;
+ }
+
+ if (lpfc_sli_issue_iocb(phba, pring, abtsiocbp, 0) ==
+ IOCB_ERROR) {
+ mempool_free( abtsiocbp, phba->iocb_mem_pool);
+ errcnt++;
+ continue;
+ }
+ /* The rsp ring completion will remove IOCB from
+ * txcmplq when abort is read by HBA.
+ */
+ }
+ }
+ return (errcnt);
+}
+
+/*
+ * lpfc_sli_sum_iocb_lun - count outstanding FCP iocbs for one target/LUN.
+ *
+ * Same walk as lpfc_sli_sum_iocb_host, additionally filtering on the
+ * SCSI command's device id == scsi_target and lun == scsi_lun. Returns
+ * the total count across txq and txcmplq.
+ */
+int
+lpfc_sli_sum_iocb_lun(struct lpfc_hba * phba,
+ struct lpfc_sli_ring * pring,
+ uint16_t scsi_target, uint64_t scsi_lun)
+{
+ struct lpfc_sli *psli;
+ struct lpfc_iocbq *iocb, *next_iocb;
+ IOCB_t *cmd = NULL;
+ struct lpfc_scsi_buf *lpfc_cmd;
+ int sum;
+
+ psli = &phba->sli;
+ sum = 0;
+
+ /* Error matching iocb on txq or txcmplq
+ * First check the txq.
+ */
+ list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
+ cmd = &iocb->iocb;
+
+ /* Must be a FCP command */
+ if ((cmd->ulpCommand != CMD_FCP_ICMND64_CR) &&
+ (cmd->ulpCommand != CMD_FCP_IWRITE64_CR) &&
+ (cmd->ulpCommand != CMD_FCP_IREAD64_CR)) {
+ continue;
+ }
+
+ /* context1 MUST be a struct lpfc_scsi_buf */
+ lpfc_cmd = (struct lpfc_scsi_buf *) (iocb->context1);
+ if ((lpfc_cmd == 0) ||
+ (lpfc_cmd->pCmd->device->id != scsi_target) ||
+ (lpfc_cmd->pCmd->device->lun != scsi_lun)) {
+ continue;
+ }
+ sum++;
+ }
+
+ /* Next check the txcmplq */
+ list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
+ cmd = &iocb->iocb;
+
+ /* Must be a FCP command */
+ if ((cmd->ulpCommand != CMD_FCP_ICMND64_CR) &&
+ (cmd->ulpCommand != CMD_FCP_IWRITE64_CR) &&
+ (cmd->ulpCommand != CMD_FCP_IREAD64_CR)) {
+ continue;
+ }
+
+ /* context1 MUST be a struct lpfc_scsi_buf */
+ lpfc_cmd = (struct lpfc_scsi_buf *) (iocb->context1);
+ if ((lpfc_cmd == 0) ||
+ (lpfc_cmd->pCmd->device->id != scsi_target) ||
+ (lpfc_cmd->pCmd->device->lun != scsi_lun)) {
+ continue;
+ }
+
+ sum++;
+ }
+ return (sum);
+}
+
+/*
+ * lpfc_sli_abort_iocb_lun - abort outstanding FCP iocbs for one target/LUN.
+ *
+ * @flag: LPFC_ABORT_TXQ and/or LPFC_ABORT_TXCMPLQ select which queues
+ *        to process.
+ *
+ * Same behavior as lpfc_sli_abort_iocb_host but restricted to iocbs
+ * whose SCSI command matches scsi_target and scsi_lun: txq entries are
+ * completed locally with IOERR_SLI_ABORTED (or freed), txcmplq entries
+ * get an ABORT_XRI/CLOSE_XRI issued. Returns the number of txcmplq
+ * entries that could not be aborted.
+ */
+int
+lpfc_sli_abort_iocb_lun(struct lpfc_hba * phba,
+ struct lpfc_sli_ring * pring,
+ uint16_t scsi_target, uint64_t scsi_lun, int flag)
+{
+ struct lpfc_sli *psli;
+ struct lpfc_iocbq *iocb, *next_iocb;
+ struct lpfc_iocbq *abtsiocbp;
+ IOCB_t *icmd = NULL, *cmd = NULL;
+ struct lpfc_scsi_buf *lpfc_cmd;
+ int errcnt;
+
+ psli = &phba->sli;
+ errcnt = 0;
+
+ /* Error matching iocb on txq or txcmplq
+ * First check the txq.
+ */
+ if(flag & LPFC_ABORT_TXQ) {
+ list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
+ cmd = &iocb->iocb;
+
+ /* Must be a FCP command */
+ if ((cmd->ulpCommand != CMD_FCP_ICMND64_CR) &&
+ (cmd->ulpCommand != CMD_FCP_IWRITE64_CR) &&
+ (cmd->ulpCommand != CMD_FCP_IREAD64_CR)) {
+ continue;
+ }
+
+ /* context1 MUST be a struct lpfc_scsi_buf */
+ lpfc_cmd = (struct lpfc_scsi_buf *) (iocb->context1);
+ if ((lpfc_cmd == 0) ||
+ (lpfc_cmd->pCmd->device->id != scsi_target) ||
+ (lpfc_cmd->pCmd->device->lun != scsi_lun)) {
+ continue;
+ }
+
+ list_del_init(&iocb->list);
+ pring->txq_cnt--;
+ if (iocb->iocb_cmpl) {
+ icmd = &iocb->iocb;
+ icmd->ulpStatus = IOSTAT_LOCAL_REJECT;
+ icmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
+ (iocb->iocb_cmpl) (phba, iocb, iocb);
+ } else {
+ mempool_free( iocb, phba->iocb_mem_pool);
+ }
+ }
+ }
+
+ if(flag & LPFC_ABORT_TXCMPLQ) {
+ /* Next check the txcmplq */
+ list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq,
+ list) {
+ cmd = &iocb->iocb;
+
+ /* Must be a FCP command */
+ if ((cmd->ulpCommand != CMD_FCP_ICMND64_CR) &&
+ (cmd->ulpCommand != CMD_FCP_IWRITE64_CR) &&
+ (cmd->ulpCommand != CMD_FCP_IREAD64_CR)) {
+ continue;
+ }
+
+ /* context1 MUST be a struct lpfc_scsi_buf */
+ lpfc_cmd = (struct lpfc_scsi_buf *) (iocb->context1);
+ if ((lpfc_cmd == 0) ||
+ (lpfc_cmd->pCmd->device->id != scsi_target) ||
+ (lpfc_cmd->pCmd->device->lun != scsi_lun)) {
+ continue;
+ }
+
+ /* issue ABTS for this IOCB based on iotag */
+ if ((abtsiocbp = mempool_alloc(phba->iocb_mem_pool,
+ GFP_ATOMIC)) == 0) {
+ errcnt++;
+ continue;
+ }
+ memset(abtsiocbp, 0, sizeof (struct lpfc_iocbq));
+ icmd = &abtsiocbp->iocb;
+
+ icmd->un.acxri.abortType = ABORT_TYPE_ABTS;
+ icmd->un.acxri.abortContextTag = cmd->ulpContext;
+ icmd->un.acxri.abortIoTag = cmd->ulpIoTag;
+
+ icmd->ulpLe = 1;
+ icmd->ulpClass = cmd->ulpClass;
+ if (phba->hba_state >= LPFC_LINK_UP) {
+ icmd->ulpCommand = CMD_ABORT_XRI_CN;
+ } else {
+ icmd->ulpCommand = CMD_CLOSE_XRI_CN;
+ }
+
+ if (lpfc_sli_issue_iocb(phba, pring, abtsiocbp, 0) ==
+ IOCB_ERROR) {
+ mempool_free( abtsiocbp, phba->iocb_mem_pool);
+ errcnt++;
+ continue;
+ }
+ /* The rsp ring completion will remove IOCB from
+ * txcmplq when abort is read by HBA.
+ */
+ }
+ }
+ return (errcnt);
+}
+
+int
+lpfc_sli_abort_iocb_tgt(struct lpfc_hba * phba,
+ struct lpfc_sli_ring * pring,
+ uint16_t scsi_target, int flag)
+{
+ struct lpfc_sli *psli;
+ struct lpfc_iocbq *iocb, *next_iocb;
+ struct lpfc_iocbq *abtsiocbp;
+ IOCB_t *icmd = NULL, *cmd = NULL;
+ struct lpfc_scsi_buf *lpfc_cmd;
+ int errcnt;
+
+ psli = &phba->sli;
+ errcnt = 0;
+
+ /* Error matching iocb on txq or txcmplq
+ * First check the txq.
+ */
+ if(flag & LPFC_ABORT_TXQ) {
+ list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
+ cmd = &iocb->iocb;
+
+ /* Must be a FCP command */
+ if ((cmd->ulpCommand != CMD_FCP_ICMND64_CR) &&
+ (cmd->ulpCommand != CMD_FCP_IWRITE64_CR) &&
+ (cmd->ulpCommand != CMD_FCP_IREAD64_CR)) {
+ continue;
+ }
+
+ /* context1 MUST be a struct lpfc_scsi_buf */
+ lpfc_cmd = (struct lpfc_scsi_buf *) (iocb->context1);
+ if ((lpfc_cmd == 0)
+ || (lpfc_cmd->pCmd->device->id != scsi_target)) {
+ continue;
+ }
+
+ list_del_init(&iocb->list);
+ pring->txq_cnt--;
+ if (iocb->iocb_cmpl) {
+ icmd = &iocb->iocb;
+ icmd->ulpStatus = IOSTAT_LOCAL_REJECT;
+ icmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
+ (iocb->iocb_cmpl) (phba, iocb, iocb);
+ } else {
+ mempool_free( iocb, phba->iocb_mem_pool);
+ }
+ }
+ }
+
+ if(flag & LPFC_ABORT_TXCMPLQ) {
+ /* Next check the txcmplq */
+ list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq,
+ list) {
+ cmd = &iocb->iocb;
+
+ /* Must be a FCP command */
+ if ((cmd->ulpCommand != CMD_FCP_ICMND64_CR) &&
+ (cmd->ulpCommand != CMD_FCP_IWRITE64_CR) &&
+ (cmd->ulpCommand != CMD_FCP_IREAD64_CR)) {
+ continue;
+ }
+
+ /* context1 MUST be a struct lpfc_scsi_buf */
+ lpfc_cmd = (struct lpfc_scsi_buf *) (iocb->context1);
+ if ((lpfc_cmd == 0)
+ || (lpfc_cmd->pCmd->device->id != scsi_target)) {
+ continue;
+ }
+
+ /* issue ABTS for this IOCB based on iotag */
+ if ((abtsiocbp = mempool_alloc(phba->iocb_mem_pool,
+ GFP_ATOMIC)) == 0) {
+ errcnt++;
+ continue;
+ }
+ memset(abtsiocbp, 0, sizeof (struct lpfc_iocbq));
+ icmd = &abtsiocbp->iocb;
+
+ icmd->un.acxri.abortType = ABORT_TYPE_ABTS;
+ icmd->un.acxri.abortContextTag = cmd->ulpContext;
+ icmd->un.acxri.abortIoTag = cmd->ulpIoTag;
+
+ icmd->ulpLe = 1;
+ icmd->ulpClass = cmd->ulpClass;
+ if (phba->hba_state >= LPFC_LINK_UP) {
+ icmd->ulpCommand = CMD_ABORT_XRI_CN;
+ } else {
+ icmd->ulpCommand = CMD_CLOSE_XRI_CN;
+ }
+
+ if (lpfc_sli_issue_iocb(phba, pring, abtsiocbp, 0) ==
+ IOCB_ERROR) {
+ mempool_free( abtsiocbp, phba->iocb_mem_pool);
+ errcnt++;
+ continue;
+ }
+ /* The rsp ring completion will remove IOCB from
+ * txcmplq when abort is read by HBA.
+ */
+ }
+ }
+ return (errcnt);
+}
+
+
+
+void
+lpfc_sli_wake_iocb_high_priority(struct lpfc_hba * phba,
+ struct lpfc_iocbq * queue1,
+ struct lpfc_iocbq * queue2)
+{
+ if (queue1->context2 && queue2)
+ memcpy(queue1->context2, queue2, sizeof (struct lpfc_iocbq));
+
+ /* The waiter is looking for LPFC_IO_HIPRI bit to be set
+ as a signal to wake up */
+ queue1->iocb_flag |= LPFC_IO_HIPRI;
+ return;
+}
+
/*
 * Issue an iocb at high priority and busy-poll for its completion instead
 * of sleeping on a wait queue.
 *
 * @phba:      driver instance
 * @pring:     ring to issue the command on
 * @piocb:     command iocb; context_un and (if prspiocbq) context2 must be
 *             unused by the caller
 * @flag:      issue flags; SLI_IOCB_HIGH_PRIORITY is OR'd in here
 * @prspiocbq: optional buffer that receives a copy of the response iocb
 * @timeout:   scsi command timeout, in seconds
 *
 * Returns IOCB_SUCCESS once the completion handler has run (it sets
 * LPFC_IO_HIPRI via lpfc_sli_wake_iocb_high_priority), IOCB_ERROR on
 * issue failure or poll timeout.
 */
int
lpfc_sli_issue_iocb_wait_high_priority(struct lpfc_hba * phba,
				       struct lpfc_sli_ring * pring,
				       struct lpfc_iocbq * piocb,
				       uint32_t flag,
				       struct lpfc_iocbq * prspiocbq,
				       uint32_t timeout)
{
	int j, delay_time, retval = IOCB_ERROR;

	/* The caller must leave context1 empty. */
	if (piocb->context_un.hipri_wait_queue != 0) {
		return IOCB_ERROR;
	}

	/*
	 * If the caller has provided a response iocbq buffer, context2 must
	 * be NULL or its an error.
	 */
	if (prspiocbq && piocb->context2) {
		return IOCB_ERROR;
	}

	piocb->context2 = prspiocbq;

	/* Setup callback routine and issue the command. */
	piocb->iocb_cmpl = lpfc_sli_wake_iocb_high_priority;
	retval = lpfc_sli_issue_iocb(phba, pring, piocb,
					flag | SLI_IOCB_HIGH_PRIORITY);
	if (retval != IOCB_SUCCESS) {
		piocb->context2 = NULL;
		return IOCB_ERROR;
	}

	/*
	 * This high-priority iocb was sent out-of-band. Poll for its
	 * completion rather than wait for a signal. Note that the host_lock
	 * is held by the midlayer and must be released here to allow the
	 * interrupt handlers to complete the IO and signal this routine via
	 * the iocb_flag.
	 * Also, the delay_time is computed to be one second longer than
	 * the scsi command timeout to give the FW time to abort on
	 * timeout rather than the driver just giving up. Typically,
	 * the midlayer does not specify a time for this command so the
	 * driver is free to enforce its own timeout.
	 */

	/* (timeout + 1) seconds spread over the 64 polling iterations
	 * below: ((timeout + 1) * 1000) ms / 64, done as a shift. */
	delay_time = ((timeout + 1) * 1000) >> 6;
	retval = IOCB_ERROR;
	spin_unlock_irq(phba->host->host_lock);
	for (j = 0; j < 64; j++) {
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,6)
		mdelay(delay_time);
#else
		msleep(delay_time);
#endif
		/* LPFC_IO_HIPRI set means the completion handler ran. */
		if (piocb->iocb_flag & LPFC_IO_HIPRI) {
			piocb->iocb_flag &= ~LPFC_IO_HIPRI;
			retval = IOCB_SUCCESS;
			break;
		}
	}

	spin_lock_irq(phba->host->host_lock);
	piocb->context2 = NULL;
	return retval;
}
+static void
+lpfc_sli_wake_mbox_wait(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
+{
+ wait_queue_head_t *pdone_q;
+
+ /*
+ * If pdone_q is empty, the driver thread gave up waiting and
+ * continued running.
+ */
+ pdone_q = (wait_queue_head_t *) pmboxq->context1;
+ if (pdone_q)
+ wake_up_interruptible(pdone_q);
+ return;
+}
+
+int
+lpfc_sli_issue_mbox_wait(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq,
+ uint32_t timeout)
+{
+ DECLARE_WAIT_QUEUE_HEAD(done_q);
+ DECLARE_WAITQUEUE(wq_entry, current);
+ uint32_t timeleft = 0;
+ int retval;
+
+ /* The caller must leave context1 empty. */
+ if (pmboxq->context1 != 0) {
+ return (MBX_NOT_FINISHED);
+ }
+
+ /* setup wake call as IOCB callback */
+ pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
+ /* setup context field to pass wait_queue pointer to wake function */
+ pmboxq->context1 = &done_q;
+
+ /* start to sleep before we wait, to avoid races */
+ set_current_state(TASK_INTERRUPTIBLE);
+ add_wait_queue(&done_q, &wq_entry);
+
+ /* now issue the command */
+ spin_lock_irq(phba->host->host_lock);
+ retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
+ spin_unlock_irq(phba->host->host_lock);
+
+ if (retval == MBX_BUSY || retval == MBX_SUCCESS) {
+ timeleft = schedule_timeout(timeout * HZ);
+ pmboxq->context1 = NULL;
+ /* if schedule_timeout returns 0, we timed out and were not
+ woken up */
+ if (timeleft == 0) {
+ retval = MBX_TIMEOUT;
+ } else {
+ retval = MBX_SUCCESS;
+ }
+ }
+
+
+ set_current_state(TASK_RUNNING);
+ remove_wait_queue(&done_q, &wq_entry);
+ return retval;
+}
+
+static void
+lpfc_sli_wake_iocb_wait(struct lpfc_hba * phba,
+ struct lpfc_iocbq * queue1, struct lpfc_iocbq * queue2)
+{
+ wait_queue_head_t *pdone_q;
+
+ queue1->iocb_flag |= LPFC_IO_WAIT;
+ if (queue1->context2 && queue2)
+ memcpy(queue1->context2, queue2, sizeof (struct lpfc_iocbq));
+
+ /*
+ * If pdone_q is empty, the waiter gave up and returned and this
+ * call has nothing to do.
+ */
+ pdone_q = queue1->context_un.hipri_wait_queue;
+ if (pdone_q) {
+ wake_up(pdone_q);
+ }
+
+ return;
+}
+
+int
+lpfc_sli_issue_iocb_wait(struct lpfc_hba * phba,
+ struct lpfc_sli_ring * pring,
+ struct lpfc_iocbq * piocb,
+ struct lpfc_iocbq * prspiocbq, uint32_t timeout)
+{
+ DECLARE_WAIT_QUEUE_HEAD(done_q);
+ DECLARE_WAITQUEUE(wq_entry, current);
+ uint32_t timeleft = 0;
+ int retval;
+
+ /* The caller must leave context1 empty for the driver. */
+ if (piocb->context_un.hipri_wait_queue != 0)
+ return (IOCB_ERROR);
+
+ /* If the caller has provided a response iocbq buffer, then context2
+ * is NULL or its an error.
+ */
+ if (prspiocbq) {
+ if (piocb->context2)
+ return (IOCB_ERROR);
+ piocb->context2 = prspiocbq;
+ }
+
+ /* setup wake call as IOCB callback */
+ piocb->iocb_cmpl = lpfc_sli_wake_iocb_wait;
+ /* setup context field to pass wait_queue pointer to wake function */
+ piocb->context_un.hipri_wait_queue = &done_q;
+
+ /* start to sleep before we wait, to avoid races */
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ add_wait_queue(&done_q, &wq_entry);
+
+ /* now issue the command */
+ retval = lpfc_sli_issue_iocb(phba, pring, piocb, 0);
+ if (retval == IOCB_SUCCESS) {
+ /* Give up thread time and wait for the iocb to complete or for
+ * the alloted time to expire.
+ */
+ timeleft = schedule_timeout(timeout * HZ);
+
+ piocb->context_un.hipri_wait_queue = NULL;
+ piocb->iocb_cmpl = NULL;
+ if (piocb->context2 == prspiocbq)
+ piocb->context2 = NULL;
+
+ /*
+ * Catch the error cases. A timeleft of zero is an error since
+ * the iocb should have completed. The iocb_flag not have value
+ * LPFC_IO_WAIT is also an error since the wakeup callback sets
+ * this flag when it runs. Handle each.
+ */
+ if (timeleft == 0) {
+ printk(KERN_WARNING "lpfc driver detected iocb "
+ "Timeout!\n");
+ retval = IOCB_TIMEDOUT;
+ } else if (!(piocb->iocb_flag & LPFC_IO_WAIT)) {
+ printk(KERN_WARNING "lpfc driver detected iocb "
+ "flag = 0x%X\n", piocb->iocb_flag);
+ retval = IOCB_TIMEDOUT;
+ }
+ }
+
+ remove_wait_queue(&done_q, &wq_entry);
+ set_current_state(TASK_RUNNING);
+ piocb->context2 = NULL;
+ return retval;
+}
+
+irqreturn_t
+lpfc_intr_handler(int irq, void *dev_id, struct pt_regs * regs)
+{
+ struct lpfc_hba *phba;
+ int intr_status;
+
+ /*
+ * Get the driver's phba structure from the dev_id and
+ * assume the HBA is not interrupting.
+ */
+ phba = (struct lpfc_hba *) dev_id;
+
+ if (phba) {
+ /* Call SLI to handle the interrupt event. */
+ intr_status = lpfc_sli_intr(phba);
+ if (intr_status == 0)
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+
+} /* lpfc_intr_handler */
--- /dev/null
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for *
+ * Enterprise Fibre Channel Host Bus Adapters. *
+ * Refer to the README file included with this package for *
+ * driver version and adapter support. *
+ * Copyright (C) 2004 Emulex Corporation. *
+ * www.emulex.com *
+ * *
+ * This program is free software; you can redistribute it and/or *
+ * modify it under the terms of the GNU General Public License *
+ * as published by the Free Software Foundation; either version 2 *
+ * of the License, or (at your option) any later version. *
+ * *
+ * This program is distributed in the hope that it will be useful, *
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of *
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
+ * GNU General Public License for more details, a copy of which *
+ * can be found in the file COPYING included with this package. *
+ *******************************************************************/
+
+/*
+ * $Id: lpfc_sli.h 1.36 2004/11/23 16:56:58EST sf_support Exp $
+ */
+
+#ifndef _H_LPFC_SLI
+#define _H_LPFC_SLI
+
+#include "lpfc_hw.h"
+
+/* forward declaration for LPFC_IOCB_t's use */
+struct lpfc_hba;
+
/* This structure is used to handle IOCB requests / responses */
struct lpfc_iocbq {
	/* lpfc_iocbqs are used in double linked lists */
	struct list_head list;
	IOCB_t iocb;		/* IOCB cmd */
	uint8_t retry;		/* retry counter for IOCB cmd - if needed */
	uint8_t iocb_flag;	/* bitwise OR of the LPFC_IO_* flags below */
#define LPFC_IO_POLL	1	/* Polling mode iocb */
#define LPFC_IO_LIBDFC	2	/* libdfc iocb */
#define LPFC_IO_WAIT	4	/* set by the wake callback to tell a
				   sleeping issuer the iocb completed */
#define LPFC_IO_HIPRI	8	/* High Priority Queue signal flag */

	uint8_t abort_count;
	uint8_t rsvd2;
	uint32_t drvrTimeout;	/* driver timeout in seconds */
	void *context1;		/* caller context information */
	void *context2;		/* caller context information */
	void *context3;		/* caller context information */
	union {
		wait_queue_head_t *hipri_wait_queue; /* High Priority Queue wait
							queue */
		struct lpfc_iocbq *rsp_iocb;
		struct lpfcMboxq *mbox;
	} context_un;

	/* completion callback: (phba, command iocb, response iocb) */
	void (*iocb_cmpl) (struct lpfc_hba *, struct lpfc_iocbq *,
			   struct lpfc_iocbq *);

};

#define SLI_IOCB_RET_IOCB	1	/* Return IOCB if cmd ring full */
#define SLI_IOCB_HIGH_PRIORITY	2	/* High priority command */

/* Return codes from the lpfc_sli_issue_iocb* routines */
#define IOCB_SUCCESS		0
#define IOCB_BUSY		1
#define IOCB_ERROR		2
#define IOCB_TIMEDOUT		3
+
/* Mailbox command queue entry; wraps one MAILBOX_t for submission */
typedef struct lpfcMboxq {
	/* MBOXQs are used in single linked lists */
	struct list_head list;	/* ptr to next mailbox command */
	MAILBOX_t mb;		/* Mailbox cmd */
	void *context1;		/* caller context information */
	void *context2;		/* caller context information */

	/* completion callback invoked when the mailbox command finishes */
	void (*mbox_cmpl) (struct lpfc_hba *, struct lpfcMboxq *);

} LPFC_MBOXQ_t;

/* Flag values for lpfc_sli_issue_mbox() */
#define MBX_POLL	1	/* poll mailbox till command done, then
				   return */
#define MBX_NOWAIT	2	/* issue command then return immediately */
#define MBX_STOP_IOCB	4	/* Stop iocb processing till mbox cmds
				   complete */

#define LPFC_MAX_RING_MASK	4	/* max num of rctl/type masks allowed per
					   ring */
#define LPFC_MAX_RING		4	/* max num of SLI rings used by driver */
+
/* Structure used to hold SLI ring information */
struct lpfc_sli_ring {
	uint16_t flag;		/* ring flags */
#define LPFC_DEFERRED_RING_EVENT 0x001	/* Deferred processing a ring event */
#define LPFC_CALL_RING_AVAILABLE 0x002	/* indicates cmd was full */
#define LPFC_STOP_IOCB_MBX	0x010	/* Stop processing IOCB cmds mbox */
#define LPFC_STOP_IOCB_EVENT	0x020	/* Stop processing IOCB cmds event */
#define LPFC_STOP_IOCB_MASK	0x030	/* Stop processing IOCB cmds mask */
	uint16_t abtsiotag;	/* tracks next iotag to use for ABTS */

	uint32_t local_getidx;	/* last available cmd index (from cmdGetInx) */
	uint32_t next_cmdidx;	/* next_cmd index */
	uint8_t rsvd;		/* reserved / padding */
	uint8_t ringno;		/* ring number */
	uint8_t rspidx;		/* current index in response ring */
	uint8_t cmdidx;		/* current index in command ring */
	struct lpfc_iocbq ** fast_lookup;	/* array of IOCB ptrs indexed by
						   iotag */
	/* txq: iocbs queued in the driver, not yet handed to the HBA;
	 * aborts complete these locally (see lpfc_sli_abort_iocb_*). */
	struct list_head txq;
	uint16_t txq_cnt;	/* current length of queue */
	uint16_t txq_max;	/* max length */
	/* txcmplq: iocbs issued to the HBA, awaiting response-ring
	 * completion. */
	struct list_head txcmplq;
	uint16_t txcmplq_cnt;	/* current length of queue */
	uint16_t txcmplq_max;	/* max length */
	volatile uint32_t *cmdringaddr;	/* virtual address for cmd rings */
	volatile uint32_t *rspringaddr;	/* virtual address for rsp rings */
	uint32_t missbufcnt;	/* keep track of buffers to post */
	struct list_head postbufq;	/* posted buffer queue */
	uint16_t postbufq_cnt;	/* current length of queue */
	uint16_t postbufq_max;	/* max length */
	struct list_head iocb_continueq;	/* continuation iocb chain */
	uint16_t iocb_continueq_cnt;	/* current length of queue */
	uint16_t iocb_continueq_max;	/* max length */
};

/* One rctl/type mask entry: routes matching unsolicited events to a
 * handler. */
typedef struct {
	uint8_t profile;	/* profile associated with ring */
	uint8_t rctl;	/* rctl / type pair configured for ring */
	uint8_t type;	/* rctl / type pair configured for ring */
	uint8_t rsvd;
	/* rcv'd unsol event */
	void (*lpfc_sli_rcv_unsol_event) (struct lpfc_hba *,
					  struct lpfc_sli_ring *,
					  struct lpfc_iocbq *);
} LPFC_RING_MASK_t;

/* Structure used for configuring rings to a specific profile or rctl / type */
typedef struct {
	LPFC_RING_MASK_t prt[LPFC_MAX_RING_MASK];
	uint32_t num_mask;	/* number of mask entries in prt array */
	uint32_t iotag_ctr;	/* keeps track of the next iotag to use */
	uint32_t iotag_max;	/* max iotag value to use */
	uint32_t fast_iotag;	/* max fastlookup based iotag */
	uint16_t numCiocb;	/* number of command iocb's per ring */
	uint16_t numRiocb;	/* number of rsp iocb's per ring */
	/* cmd ring available */
	void (*lpfc_sli_cmd_available) (struct lpfc_hba *,
					struct lpfc_sli_ring *);
} LPFC_RING_INIT_t;

/* Top-level per-adapter ring initialization info */
typedef struct {
	LPFC_RING_INIT_t ringinit[LPFC_MAX_RING]; /* ring initialization info */
	uint32_t num_rings;
	uint32_t sli_flag;
} LPFC_SLI_INIT_t;

/* Structure used to hold SLI statistical counters and info */
typedef struct {
	uint64_t iocbEvent[LPFC_MAX_RING];	/* IOCB event counters */
	uint64_t iocbCmd[LPFC_MAX_RING];	/* IOCB cmd issued */
	uint64_t iocbRsp[LPFC_MAX_RING];	/* IOCB rsp received */
	uint64_t iocbCmdDelay[LPFC_MAX_RING];	/* IOCB cmd ring delay */
	uint64_t iocbCmdFull[LPFC_MAX_RING];	/* IOCB cmd ring full */
	uint64_t iocbCmdEmpty[LPFC_MAX_RING];	/* IOCB cmd ring is now empty */
	uint64_t iocbRspFull[LPFC_MAX_RING];	/* IOCB rsp ring full */
	uint64_t mboxStatErr;	/* Mbox cmds completed status error */
	uint64_t mboxCmd;	/* Mailbox commands issued */
	uint64_t sliIntr;	/* Count of Host Attention interrupts */
	uint32_t errAttnEvent;	/* Error Attn event counters */
	uint32_t linkEvent;	/* Link event counters */
	uint32_t mboxEvent;	/* Mailbox event counters */
	uint32_t mboxBusy;	/* Mailbox cmd busy */
} LPFC_SLI_STAT_t;
+
/* Structure used to hold SLI information */
struct lpfc_sli {
	LPFC_SLI_INIT_t sliinit;	/* initialization info */
	/* Additional sli_flags */
#define LPFC_SLI_MBOX_ACTIVE	0x100	/* HBA mailbox is currently active */
#define LPFC_SLI2_ACTIVE	0x200	/* SLI2 overlay in firmware is active */
#define LPFC_PROCESS_LA		0x400	/* Able to process link attention */

	struct lpfc_sli_ring ring[LPFC_MAX_RING];
	int fcp_ring;	/* ring used for FCP initiator commands */
	int next_ring;

	int ip_ring;	/* ring used for IP network drv cmds */

	LPFC_SLI_STAT_t slistat;	/* SLI statistical info */
	struct list_head mboxq;	/* queue of pending mailbox commands */
	uint16_t mboxq_cnt;	/* current length of queue */
	uint16_t mboxq_max;	/* max length */
	LPFC_MBOXQ_t *mbox_active;	/* active mboxq information */

	struct timer_list mbox_tmo;	/* Hold clk to timeout active mbox
					   cmd */

	volatile uint32_t *MBhostaddr;	/* virtual address for mbox cmds */
};

/* Given a pointer to the start of the ring, and the slot number of
 * the desired iocb entry, calc a pointer to that entry.
 * (assume iocb entry size is 32 bytes, or 8 words)
 */
#define IOCB_ENTRY(ring,slot) ((IOCB_t *)(((char *)(ring)) + ((slot) * 32)))

#define LPFC_SLI_ABORT_IMED	0	/* Immediate abort of IOCB, deque and
					   call compl routine immediately. */
#define LPFC_MBOX_TMO		30	/* Sec tmo for outstanding mbox
					   command */

/* Flags for aborting I/Os on tx and txcmpl queues */
#define LPFC_ABORT_TXQ		1	/* Abort I/Os on txq */
#define LPFC_ABORT_TXCMPLQ	2	/* Abort I/Os on txcmplq */
#define LPFC_ABORT_ALLQ		3	/* Abort I/Os both txq and txcmplq */

#endif				/* _H_LPFC_SLI */
--- /dev/null
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for *
+ * Enterprise Fibre Channel Host Bus Adapters. *
+ * Refer to the README file included with this package for *
+ * driver version and adapter support. *
+ * Copyright (C) 2004 Emulex Corporation. *
+ * www.emulex.com *
+ * *
+ * This program is free software; you can redistribute it and/or *
+ * modify it under the terms of the GNU General Public License *
+ * as published by the Free Software Foundation; either version 2 *
+ * of the License, or (at your option) any later version. *
+ * *
+ * This program is distributed in the hope that it will be useful, *
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of *
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
+ * GNU General Public License for more details, a copy of which *
+ * can be found in the file COPYING included with this package. *
+ *******************************************************************/
+
+/*
+ * $Id: lpfc_version.h 1.36 2004/12/07 14:51:53EST sf_support Exp $
+ */
+
+#ifndef _H_LPFC_VERSION
+#define _H_LPFC_VERSION
+
+#define LPFC_DRIVER_VERSION "8.0.16"
+
+#define LPFC_DRIVER_NAME "lpfc"
+
+#define LPFC_MODULE_DESC "Emulex LightPulse Fibre Channel SCSI driver " \
+ LPFC_DRIVER_VERSION
+
+#define DFC_API_VERSION "0.0.0"
+
+#endif
--- /dev/null
+/*
+ * sata_uli.c - ULi Electronics SATA
+ *
+ * The contents of this file are subject to the Open
+ * Software License version 1.1 that can be found at
+ * http://www.opensource.org/licenses/osl-1.1.txt and is included herein
+ * by reference.
+ *
+ * Alternatively, the contents of this file may be used under the terms
+ * of the GNU General Public License version 2 (the "GPL") as distributed
+ * in the kernel source COPYING file, in which case the provisions of
+ * the GPL are applicable instead of the above. If you wish to allow
+ * the use of your version of this file only under the terms of the
+ * GPL and not to allow others to use your version of this file under
+ * the OSL, indicate your decision by deleting the provisions above and
+ * replace them with the notice and other provisions required by the GPL.
+ * If you do not delete the provisions above, a recipient may use your
+ * version of this file under either the OSL or the GPL.
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include "scsi.h"
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+
+#define DRV_NAME "sata_uli"
+#define DRV_VERSION "0.11"
+
/* Board indices (stored in pci_device_id.driver_data) and chip register
 * layout constants. */
enum {
	uli_5289 = 0,
	uli_5287 = 1,

	/* PCI configuration registers */
	ULI_SCR_BASE = 0x90, /* sata0 phy SCR registers */
	ULI_SATA1_OFS = 0x10, /* offset from sata0->sata1 phy regs */

};

static int uli_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
static u32 uli_scr_read (struct ata_port *ap, unsigned int sc_reg);
static void uli_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);

/* Supported devices: ULi M5289 (2 ports) and M5287 (4 ports). */
static struct pci_device_id uli_pci_tbl[] = {
	{ PCI_VENDOR_ID_AL, 0x5289, PCI_ANY_ID, PCI_ANY_ID, 0, 0, uli_5289 },
	{ PCI_VENDOR_ID_AL, 0x5287, PCI_ANY_ID, PCI_ANY_ID, 0, 0, uli_5287 },
	{ }	/* terminate list */
};
+
+
/* PCI driver glue; removal is handled entirely by the libata helper. */
static struct pci_driver uli_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= uli_pci_tbl,
	.probe			= uli_init_one,
	.remove			= ata_pci_remove_one,
};

/* SCSI host template: everything is delegated to libata defaults. */
static Scsi_Host_Template uli_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.eh_strategy_handler	= ata_scsi_error,
	.can_queue		= ATA_DEF_QUEUE,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= LIBATA_MAX_PRD,
	.max_sectors		= ATA_MAX_SECTORS,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= ATA_SHT_USE_CLUSTERING,
	.proc_name		= DRV_NAME,
	.dma_boundary		= ATA_DMA_BOUNDARY,
	.slave_configure	= ata_scsi_slave_config,
	.bios_param		= ata_std_bios_param,
};

/* Port operations: standard taskfile/BMDMA helpers plus the chip-specific
 * SCR accessors (the SCRs live in PCI config space on this chip). */
static struct ata_port_operations uli_ops = {
	.port_disable		= ata_port_disable,

	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.phy_reset		= sata_phy_reset,

	.bmdma_setup		= ata_bmdma_setup,
	.bmdma_start		= ata_bmdma_start,
	.qc_prep		= ata_qc_prep,
	.qc_issue		= ata_qc_issue_prot,

	.eng_timeout		= ata_eng_timeout,

	.irq_handler		= ata_interrupt,
	.irq_clear		= ata_bmdma_irq_clear,

	.scr_read		= uli_scr_read,
	.scr_write		= uli_scr_write,

	.port_start		= ata_port_start,
	.port_stop		= ata_port_stop,
};

static struct ata_port_info uli_port_info = {
	.sht		= &uli_sht,
	.host_flags	= ATA_FLAG_SATA | ATA_FLAG_SATA_RESET |
			  ATA_FLAG_NO_LEGACY,
	.pio_mask	= 0x03,		/* support pio mode 4 (FIXME) */
	.udma_mask	= 0x7f,		/* support udma mode 6 */
	.port_ops	= &uli_ops,
};
+
+
+MODULE_AUTHOR("Peer Chen");
+MODULE_DESCRIPTION("low-level driver for ULi Electronics SATA controller");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, uli_pci_tbl);
+MODULE_VERSION(DRV_VERSION);
+
+static unsigned int get_scr_cfg_addr(unsigned int port_no, unsigned int sc_reg)
+{
+ unsigned int addr = ULI_SCR_BASE + (4 * sc_reg);
+
+ switch (port_no) {
+ case 0:
+ break;
+ case 1:
+ addr += ULI_SATA1_OFS;
+ break;
+ case 2:
+ addr += ULI_SATA1_OFS*4;
+ break;
+ case 3:
+ addr += ULI_SATA1_OFS*5;
+ break;
+ default:
+ BUG();
+ break;
+ }
+ return addr;
+}
+
+static u32 uli_scr_cfg_read (struct ata_port *ap, unsigned int sc_reg)
+{
+ unsigned int cfg_addr = get_scr_cfg_addr(ap->port_no, sc_reg);
+ u32 val;
+
+ pci_read_config_dword(ap->host_set->pdev, cfg_addr, &val);
+ return val;
+}
+
+static void uli_scr_cfg_write (struct ata_port *ap, unsigned int scr, u32 val)
+{
+ unsigned int cfg_addr = get_scr_cfg_addr(ap->port_no, scr);
+
+ pci_write_config_dword(ap->host_set->pdev, cfg_addr, val);
+}
+
+static u32 uli_scr_read (struct ata_port *ap, unsigned int sc_reg)
+{
+ if (sc_reg > SCR_CONTROL)
+ return 0xffffffffU;
+
+ return uli_scr_cfg_read(ap, sc_reg);
+}
+
+static void uli_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
+{
+ if (sc_reg > SCR_CONTROL) //SCR_CONTROL=2, SCR_ERROR=1, SCR_STATUS=0
+ return;
+
+ uli_scr_cfg_write(ap, sc_reg, val);
+}
+
+/* move to PCI layer, integrate w/ MSI stuff */
+static void pci_enable_intx(struct pci_dev *pdev)
+{
+ u16 pci_command;
+
+ pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
+ if (pci_command & PCI_COMMAND_INTX_DISABLE) {
+ pci_command &= ~PCI_COMMAND_INTX_DISABLE;
+ pci_write_config_word(pdev, PCI_COMMAND, pci_command);
+ }
+}
+
/*
 * Probe one ULi SATA controller: enable the device, claim its BARs, set
 * 32-bit DMA masks, and register the ports with libata.  The 4-port 5287
 * exposes ports 2/3 at fixed offsets inside the BARs of ports 0/1 and so
 * needs manual address setup beyond what ata_pci_init_native_mode does.
 */
static int uli_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct ata_probe_ent *probe_ent;
	struct ata_port_info *ppi;
	int rc;
	unsigned int board_idx = (unsigned int) ent->driver_data;

	rc = pci_enable_device(pdev);
	if (rc)
		return rc;

	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc)
		goto err_out;

	rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
	if (rc)
		goto err_out_regions;
	rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
	if (rc)
		goto err_out_regions;

	ppi = &uli_port_info;
	probe_ent = ata_pci_init_native_mode(pdev, &ppi);
	if (!probe_ent) {
		rc = -ENOMEM;
		goto err_out_regions;
	}

	switch (board_idx) {
	case uli_5287:
		probe_ent->n_ports = 4;

		/* Port 2 lives at +8 in port 0's cmd BAR, +4 in its ctl
		 * BAR, and at +16 in the shared BMDMA BAR 4. */
		probe_ent->port[2].cmd_addr = pci_resource_start(pdev, 0) + 8;
		probe_ent->port[2].altstatus_addr =
		probe_ent->port[2].ctl_addr =
			(pci_resource_start(pdev, 1) | ATA_PCI_CTL_OFS) + 4;
		probe_ent->port[2].bmdma_addr = pci_resource_start(pdev, 4) + 16;

		/* Port 3 mirrors the layout inside port 1's BARs. */
		probe_ent->port[3].cmd_addr = pci_resource_start(pdev, 2) + 8;
		probe_ent->port[3].altstatus_addr =
		probe_ent->port[3].ctl_addr =
			(pci_resource_start(pdev, 3) | ATA_PCI_CTL_OFS) + 4;
		probe_ent->port[3].bmdma_addr = pci_resource_start(pdev, 4) + 24;

		ata_std_ports(&probe_ent->port[2]);
		ata_std_ports(&probe_ent->port[3]);
		break;

	case uli_5289:
		/* do nothing; ata_pci_init_native_mode did it all */
		break;

	default:
		BUG();
		break;
	}

	pci_set_master(pdev);
	pci_enable_intx(pdev);

	/* FIXME: check ata_device_add return value */
	ata_device_add(probe_ent);
	kfree(probe_ent);

	return 0;

err_out_regions:
	pci_release_regions(pdev);

err_out:
	pci_disable_device(pdev);
	return rc;

}
+
+static int __init uli_init(void)
+{
+ return pci_module_init(&uli_pci_driver);
+}
+
+static void __exit uli_exit(void)
+{
+ pci_unregister_driver(&uli_pci_driver);
+}
+
+
+module_init(uli_init);
+module_exit(uli_exit);
--- /dev/null
+#
+# Makefile for the rest of the USB drivers
+# (the ones that don't fit into any other categories)
+#
+
+obj-$(CONFIG_USB_ATM) += usb_atm.o
+obj-$(CONFIG_USB_SPEEDTOUCH) += speedtch.o
--- /dev/null
+/******************************************************************************
+ * speedtch.c - Alcatel SpeedTouch USB xDSL modem driver
+ *
+ * Copyright (C) 2001, Alcatel
+ * Copyright (C) 2003, Duncan Sands
+ * Copyright (C) 2004, David Woodhouse
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ ******************************************************************************/
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/gfp.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/timer.h>
+#include <linux/errno.h>
+#include <linux/proc_fs.h>
+#include <linux/slab.h>
+#include <linux/wait.h>
+#include <linux/list.h>
+#include <asm/processor.h>
+#include <asm/uaccess.h>
+#include <linux/smp_lock.h>
+#include <linux/interrupt.h>
+#include <linux/atm.h>
+#include <linux/atmdev.h>
+#include <linux/crc32.h>
+#include <linux/init.h>
+#include <linux/firmware.h>
+
+#include "usb_atm.h"
+
+/*
+#define DEBUG
+#define VERBOSE_DEBUG
+*/
+
+#if !defined (DEBUG) && defined (CONFIG_USB_DEBUG)
+# define DEBUG
+#endif
+
+#include <linux/usb.h>
+
+#if defined(CONFIG_FW_LOADER) || defined(CONFIG_FW_LOADER_MODULE)
+# define USE_FW_LOADER
+#endif
+
+#ifdef VERBOSE_DEBUG
+static int udsl_print_packet(const unsigned char *data, int len);
+#define PACKETDEBUG(arg...) udsl_print_packet (arg)
+#define vdbg(arg...) dbg (arg)
+#else
+#define PACKETDEBUG(arg...)
+#define vdbg(arg...)
+#endif
+
+#define DRIVER_AUTHOR "Johan Verrept, Duncan Sands <duncan.sands@free.fr>"
+#define DRIVER_VERSION "1.8"
+#define DRIVER_DESC "Alcatel SpeedTouch USB driver version " DRIVER_VERSION
+
+static const char speedtch_driver_name[] = "speedtch";
+
+#define SPEEDTOUCH_VENDORID 0x06b9
+#define SPEEDTOUCH_PRODUCTID 0x4061
+
+/* Timeout in jiffies */
+#define CTRL_TIMEOUT (2*HZ)
+#define DATA_TIMEOUT (2*HZ)
+
+#define OFFSET_7 0 /* size 1 */
+#define OFFSET_b 1 /* size 8 */
+#define OFFSET_d 9 /* size 4 */
+#define OFFSET_e 13 /* size 1 */
+#define OFFSET_f 14 /* size 1 */
+#define TOTAL 15
+
+#define SIZE_7 1
+#define SIZE_b 8
+#define SIZE_d 4
+#define SIZE_e 1
+#define SIZE_f 1
+
+static int dl_512_first = 0;
+static int sw_buffering = 0;
+
+module_param(dl_512_first, bool, 0444);
+MODULE_PARM_DESC(dl_512_first, "Read 512 bytes before sending firmware");
+
+module_param(sw_buffering, uint, 0444);
+MODULE_PARM_DESC(sw_buffering, "Enable software buffering");
+
+#define UDSL_IOCTL_LINE_UP 1
+#define UDSL_IOCTL_LINE_DOWN 2
+
+#define SPEEDTCH_ENDPOINT_INT 0x81
+#define SPEEDTCH_ENDPOINT_DATA 0x07
+#define SPEEDTCH_ENDPOINT_FIRMWARE 0x05
+
+#define hex2int(c) ( (c >= '0') && (c <= '9') ? (c - '0') : ((c & 0xf) + 9) )
+
+static struct usb_device_id speedtch_usb_ids[] = {
+ {USB_DEVICE(SPEEDTOUCH_VENDORID, SPEEDTOUCH_PRODUCTID)},
+ {}
+};
+
+MODULE_DEVICE_TABLE(usb, speedtch_usb_ids);
+
/* Per-modem state; embeds the generic usb_atm instance data. */
struct speedtch_instance_data {
	struct udsl_instance_data u;

	/* Status */
	struct urb *int_urb;		/* interrupt-endpoint URB; NULL when
					   the endpoint could not be claimed
					   (we fall back to timer polling) */
	unsigned char int_data[16];	/* buffer for interrupt endpoint data */
	struct work_struct poll_work;	/* deferred status-poll work item */
	struct timer_list poll_timer;	/* periodic status polling timer */
};
/* USB */

static int speedtch_usb_probe(struct usb_interface *intf,
			      const struct usb_device_id *id);
static void speedtch_usb_disconnect(struct usb_interface *intf);
static int speedtch_usb_ioctl(struct usb_interface *intf, unsigned int code,
			      void *user_data);
static void speedtch_handle_int(struct urb *urb, struct pt_regs *regs);
static void speedtch_poll_status(struct speedtch_instance_data *instance);

/* USB driver registration: probe/disconnect plus the ioctl hook used for
 * the UDSL_IOCTL_LINE_* requests. */
static struct usb_driver speedtch_usb_driver = {
	.owner		= THIS_MODULE,
	.name		= speedtch_driver_name,
	.probe		= speedtch_usb_probe,
	.disconnect	= speedtch_usb_disconnect,
	.ioctl		= speedtch_usb_ioctl,
	.id_table	= speedtch_usb_ids,
};
+
+/***************
+** firmware **
+***************/
+
/*
 * Called when the firmware load attempt finishes.  On success (@got_it
 * non-zero) switch to alt setting 1, try to claim interface 0 for the
 * interrupt endpoint, start status polling, and mark the instance
 * UDSL_LOADED_FIRMWARE; on failure mark it UDSL_NO_FIRMWARE.  Always wakes
 * any threads sleeping on firmware_waiters.
 */
static void speedtch_got_firmware(struct speedtch_instance_data *instance,
				  int got_it)
{
	int err;
	struct usb_interface *intf;

	down(&instance->u.serialize);	/* vs self, speedtch_firmware_start */
	/* Another path may have finished the load already. */
	if (instance->u.status == UDSL_LOADED_FIRMWARE)
		goto out;
	if (!got_it) {
		instance->u.status = UDSL_NO_FIRMWARE;
		goto out;
	}
	if ((err = usb_set_interface(instance->u.usb_dev, 1, 1)) < 0) {
		dbg("speedtch_got_firmware: usb_set_interface returned %d!", err);
		instance->u.status = UDSL_NO_FIRMWARE;
		goto out;
	}

	/* Set up interrupt endpoint */
	intf = usb_ifnum_to_if(instance->u.usb_dev, 0);
	if (intf && !usb_driver_claim_interface(&speedtch_usb_driver, intf, NULL)) {

		instance->int_urb = usb_alloc_urb(0, GFP_KERNEL);
		if (instance->int_urb) {

			usb_fill_int_urb(instance->int_urb, instance->u.usb_dev,
					 usb_rcvintpipe(instance->u.usb_dev, SPEEDTCH_ENDPOINT_INT),
					 instance->int_data,
					 sizeof(instance->int_data),
					 speedtch_handle_int, instance, 50);
			err = usb_submit_urb(instance->int_urb, GFP_KERNEL);
			if (err) {
				/* Doesn't matter; we'll poll anyway */
				dbg("speedtch_got_firmware: Submission of interrupt URB failed %d", err);
				usb_free_urb(instance->int_urb);
				instance->int_urb = NULL;
				usb_driver_release_interface(&speedtch_usb_driver, intf);
			}
		}
	}
	/* Start status polling */
	mod_timer(&instance->poll_timer, jiffies + (1 * HZ));

	instance->u.status = UDSL_LOADED_FIRMWARE;
	tasklet_schedule(&instance->u.receive_tasklet);
 out:
	up(&instance->u.serialize);
	wake_up_interruptible(&instance->u.firmware_waiters);
}
+
+/*
+ * Enable or disable the modem's software buffering via a vendor control
+ * request (bRequest 0x32, bmRequestType 0x40, wValue = state).
+ *
+ * @instance: driver instance whose USB device is addressed
+ * @state:    non-zero to enable buffering, zero to disable
+ *
+ * Returns 0 on success or the negative usb_control_msg() error code.
+ */
+static int speedtch_set_swbuff(struct speedtch_instance_data *instance,
+ int state)
+{
+ struct usb_device *dev = instance->u.usb_dev;
+ int ret;
+
+ ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
+ 0x32, 0x40, state ? 0x01 : 0x00,
+ 0x00, NULL, 0, 100);
+ if (ret < 0) {
+ /* Fix: the original printk() carried no log level; tag it so
+ it is filtered like the other warnings in this driver. */
+ printk(KERN_WARNING
+ "Warning: %sabling SW buffering: usb_control_msg returned %d\n",
+ state ? "En" : "Dis", ret);
+ return ret;
+ }
+
+ dbg("speedtch_set_swbuff: %sbled SW buffering", state ? "En" : "Dis");
+ return 0;
+}
+
+/*
+ * Replay the four vendor control writes (URBs 147-150 in the Windows
+ * driver trace).  Their meaning is unknown ("magic spell"); a failure
+ * of any step is only logged, never fatal.
+ */
+static void speedtch_test_sequence(struct speedtch_instance_data *instance)
+{
+ static const struct {
+ unsigned char value; /* wValue of the request */
+ unsigned char len; /* payload length */
+ unsigned char data[3]; /* payload bytes */
+ const char *name; /* trace name, for diagnostics */
+ } seq[] = {
+ { 0x0b, 2, { 0x1c, 0x50 }, "URB147" },
+ { 0x02, 2, { 0x32, 0x00 }, "URB148" },
+ { 0x03, 3, { 0x01, 0x00, 0x01 }, "URB149" },
+ { 0x04, 3, { 0x01, 0x00, 0x01 }, "URB150" },
+ };
+ struct usb_device *dev = instance->u.usb_dev;
+ unsigned char buf[10];
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < sizeof(seq) / sizeof(seq[0]); i++) {
+ /* stage the payload in a writable stack buffer, as the
+ original code did */
+ memcpy(buf, seq[i].data, seq[i].len);
+ ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
+ 0x01, 0x40, seq[i].value, 0x00,
+ buf, seq[i].len, 100);
+ if (ret < 0)
+ printk(KERN_WARNING "%s failed on %s: %d\n",
+ __func__, seq[i].name, ret);
+ }
+}
+
+/*
+ * Prod the modem into starting ADSL line synchronisation (vendor read,
+ * bRequest 0x12, wValue 0x04); the two returned bytes are only logged.
+ * Returns 0 on success or the negative usb_control_msg() error.
+ */
+static int speedtch_start_synchro(struct speedtch_instance_data *instance)
+{
+ struct usb_device *usb_dev = instance->u.usb_dev;
+ unsigned char reply[2];
+ int ret;
+
+ ret = usb_control_msg(usb_dev, usb_rcvctrlpipe(usb_dev, 0),
+ 0x12, 0xc0, 0x04, 0x00,
+ reply, sizeof(reply), CTRL_TIMEOUT);
+
+ if (ret < 0) {
+ printk(KERN_WARNING "SpeedTouch: Failed to start ADSL synchronisation: %d\n", ret);
+ return ret;
+ }
+
+ dbg("speedtch_start_synchro: modem prodded. %d Bytes returned: %02x %02x", ret, reply[0], reply[1]);
+ return 0;
+}
+
+/*
+ * Interrupt-endpoint completion handler.  The modem announces line state
+ * with 6-byte magic packets; anything else is hex-dumped.  In every
+ * non-fatal case a status poll is queued so the ATM signal state is
+ * refreshed, and the URB is resubmitted unless disconnect has started.
+ * NOTE(review): "const static" is deprecated specifier order; prefer
+ * "static const".
+ */
+static void speedtch_handle_int(struct urb *urb, struct pt_regs *regs)
+{
+ struct speedtch_instance_data *instance = urb->context;
+ unsigned int count = urb->actual_length;
+ int ret;
+
+ /* The magic interrupt for "up state" */
+ const static unsigned char up_int[6] = { 0xa1, 0x00, 0x01, 0x00, 0x00, 0x00 };
+ /* The magic interrupt for "down state" */
+ const static unsigned char down_int[6] = { 0xa1, 0x00, 0x00, 0x00, 0x00, 0x00 };
+
+ switch (urb->status) {
+ case 0:
+ /* success */
+ break;
+ case -ECONNRESET:
+ case -ENOENT:
+ case -ESHUTDOWN:
+ /* this urb is terminated; clean up */
+ dbg("%s - urb shutting down with status: %d", __func__, urb->status);
+ return;
+ default:
+ dbg("%s - nonzero urb status received: %d", __func__, urb->status);
+ goto exit;
+ }
+
+ if (count < 6) {
+ dbg("%s - int packet too short", __func__);
+ goto exit;
+ }
+
+ if (!memcmp(up_int, instance->int_data, 6)) {
+ /* line is up: interrupts now drive us, stop the slow poll */
+ del_timer(&instance->poll_timer);
+ printk(KERN_NOTICE "DSL line goes up\n");
+ } else if (!memcmp(down_int, instance->int_data, 6)) {
+ printk(KERN_NOTICE "DSL line goes down\n");
+ } else {
+ int i;
+
+ printk(KERN_DEBUG "Unknown interrupt packet of %d bytes:", count);
+ for (i = 0; i < count; i++)
+ printk(" %02x", instance->int_data[i]);
+ printk("\n");
+ }
+ /* refresh the ATM signal state once, whatever the packet said */
+ schedule_work(&instance->poll_work);
+
+ exit:
+ /* pairs with the wmb() in speedtch_usb_disconnect(): a NULL
+ int_urb means teardown has begun and we must not resubmit */
+ rmb();
+ if (!instance->int_urb)
+ return;
+
+ ret = usb_submit_urb(urb, GFP_ATOMIC);
+ if (ret)
+ err("%s - usb_submit_urb failed with result %d", __func__, ret);
+}
+
+/*
+ * Read the modem's five status blocks into @buf (TOTAL bytes, zeroed
+ * first).  Each block is fetched with a vendor control read and lands
+ * at its own fixed offset.  Returns 0 on success or the first negative
+ * usb_control_msg() error.
+ */
+static int speedtch_get_status(struct speedtch_instance_data *instance,
+ unsigned char *buf)
+{
+ /* the five reads, in the order the protocol expects them */
+ static const struct {
+ unsigned char request; /* bRequest */
+ unsigned char value; /* wValue */
+ int offset; /* destination offset in buf */
+ int size; /* bytes to read */
+ const char *name; /* for the failure message */
+ } msgs[] = {
+ { 0x12, 0x07, OFFSET_7, SIZE_7, "7" },
+ { 0x12, 0x0b, OFFSET_b, SIZE_b, "B" },
+ { 0x12, 0x0d, OFFSET_d, SIZE_d, "D" },
+ { 0x01, 0x0e, OFFSET_e, SIZE_e, "E" },
+ { 0x01, 0x0f, OFFSET_f, SIZE_f, "F" },
+ };
+ struct usb_device *dev = instance->u.usb_dev;
+ unsigned int i;
+ int ret;
+
+ memset(buf, 0, TOTAL);
+
+ for (i = 0; i < sizeof(msgs) / sizeof(msgs[0]); i++) {
+ ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
+ msgs[i].request, 0xc0, msgs[i].value,
+ 0x00, buf + msgs[i].offset, msgs[i].size,
+ CTRL_TIMEOUT);
+ if (ret < 0) {
+ dbg("MSG %s failed", msgs[i].name);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Work-queue handler (also called directly from probe): fetch the status
+ * blocks and map the line-state byte at OFFSET_7 onto the ATM PHY signal,
+ * logging each transition exactly once.
+ */
+static void speedtch_poll_status(struct speedtch_instance_data *instance)
+{
+ unsigned char buf[TOTAL];
+ int ret;
+
+ ret = speedtch_get_status(instance, buf);
+ if (ret) {
+ printk(KERN_WARNING
+ "SpeedTouch: Error %d fetching device status\n", ret);
+ return;
+ }
+
+ dbg("Line state %02x", buf[OFFSET_7]);
+
+ switch (buf[OFFSET_7]) {
+ case 0:
+ if (instance->u.atm_dev->signal != ATM_PHY_SIG_LOST) {
+ instance->u.atm_dev->signal = ATM_PHY_SIG_LOST;
+ printk(KERN_NOTICE "ADSL line is down\n");
+ }
+ break;
+
+ case 0x08:
+ if (instance->u.atm_dev->signal != ATM_PHY_SIG_UNKNOWN) {
+ instance->u.atm_dev->signal = ATM_PHY_SIG_UNKNOWN;
+ printk(KERN_NOTICE "ADSL line is blocked?\n");
+ }
+ break;
+
+ case 0x10:
+ /* NOTE(review): synchronising shares ATM_PHY_SIG_LOST with
+ "down", so no message is printed if we were already down -
+ confirm this is intended */
+ if (instance->u.atm_dev->signal != ATM_PHY_SIG_LOST) {
+ instance->u.atm_dev->signal = ATM_PHY_SIG_LOST;
+ printk(KERN_NOTICE "ADSL line is synchronising\n");
+ }
+ break;
+
+ case 0x20:
+ if (instance->u.atm_dev->signal != ATM_PHY_SIG_FOUND) {
+ /* rates are little-endian 32-bit values at OFFSET_b */
+ int down_speed = buf[OFFSET_b] | (buf[OFFSET_b + 1] << 8)
+ | (buf[OFFSET_b + 2] << 16) | (buf[OFFSET_b + 3] << 24);
+ int up_speed = buf[OFFSET_b + 4] | (buf[OFFSET_b + 5] << 8)
+ | (buf[OFFSET_b + 6] << 16) | (buf[OFFSET_b + 7] << 24);
+
+ /* some firmware revisions apparently report the rates
+ shifted up by 16 bits - undo that (TODO confirm) */
+ if (!(down_speed & 0x0000ffff) &&
+ !(up_speed & 0x0000ffff)) {
+ down_speed >>= 16;
+ up_speed >>= 16;
+ }
+ /* kbit/s -> cells/s: 424 = bits in a 53-byte cell */
+ instance->u.atm_dev->link_rate = down_speed * 1000 / 424;
+ instance->u.atm_dev->signal = ATM_PHY_SIG_FOUND;
+
+ printk(KERN_NOTICE
+ "ADSL line is up (%d Kib/s down | %d Kib/s up)\n",
+ down_speed, up_speed);
+ }
+ break;
+
+ default:
+ if (instance->u.atm_dev->signal != ATM_PHY_SIG_UNKNOWN) {
+ instance->u.atm_dev->signal = ATM_PHY_SIG_UNKNOWN;
+ printk(KERN_NOTICE "Unknown line state %02x\n", buf[OFFSET_7]);
+ }
+ break;
+ }
+}
+
+/*
+ * Poll-timer callback: queue one status poll on the shared workqueue
+ * and re-arm the timer for five seconds from now.
+ */
+static void speedtch_timer_poll(unsigned long data)
+{
+ struct speedtch_instance_data *instance =
+ (struct speedtch_instance_data *)data;
+
+ schedule_work(&instance->poll_work);
+ mod_timer(&instance->poll_timer, jiffies + 5 * HZ);
+}
+
+#ifdef USE_FW_LOADER
+/*
+ * Upload the two-stage firmware over the bulk firmware endpoint,
+ * mirroring the URB sequence of the vendor driver trace (the URB
+ * numbers in the comments refer to that trace).  Runs in the firmware
+ * kernel thread, so sleeping is allowed.  Always reports the outcome
+ * through speedtch_got_firmware().
+ */
+static void speedtch_upload_firmware(struct speedtch_instance_data *instance,
+ const struct firmware *fw1,
+ const struct firmware *fw2)
+{
+ unsigned char *buffer;
+ struct usb_device *usb_dev = instance->u.usb_dev;
+ struct usb_interface *intf;
+ int actual_length, ret;
+ int offset;
+
+ dbg("speedtch_upload_firmware");
+
+ if (!(intf = usb_ifnum_to_if(usb_dev, 2))) {
+ dbg("speedtch_upload_firmware: interface not found!");
+ goto fail;
+ }
+
+ /* one page used as a bounce buffer for every bulk transfer */
+ if (!(buffer = (unsigned char *)__get_free_page(GFP_KERNEL))) {
+ dbg("speedtch_upload_firmware: no memory for buffer!");
+ goto fail;
+ }
+
+ /* A user-space firmware loader may already have claimed interface #2 */
+ if ((ret =
+ usb_driver_claim_interface(&speedtch_usb_driver, intf, NULL)) < 0) {
+ dbg("speedtch_upload_firmware: interface in use (%d)!", ret);
+ goto fail_free;
+ }
+
+ /* URB 7 */
+ if (dl_512_first) { /* some modems need a read before writing the firmware */
+ ret = usb_bulk_msg(usb_dev, usb_rcvbulkpipe(usb_dev, SPEEDTCH_ENDPOINT_FIRMWARE),
+ buffer, 0x200, &actual_length, 2 * HZ);
+
+ if (ret < 0 && ret != -ETIMEDOUT)
+ dbg("speedtch_upload_firmware: read BLOCK0 from modem failed (%d)!", ret);
+ else
+ dbg("speedtch_upload_firmware: BLOCK0 downloaded (%d bytes)", ret);
+ }
+
+ /* URB 8 : both leds are static green */
+ for (offset = 0; offset < fw1->size; offset += PAGE_SIZE) {
+ int thislen = min_t(int, PAGE_SIZE, fw1->size - offset);
+ memcpy(buffer, fw1->data + offset, thislen);
+
+ ret = usb_bulk_msg(usb_dev, usb_sndbulkpipe(usb_dev, SPEEDTCH_ENDPOINT_FIRMWARE),
+ buffer, thislen, &actual_length, DATA_TIMEOUT);
+
+ if (ret < 0) {
+ dbg("speedtch_upload_firmware: write BLOCK1 to modem failed (%d)!", ret);
+ goto fail_release;
+ }
+ /* NOTE(review): prints the total size (with %d for a size_t)
+ on every page, not the bytes sent so far */
+ dbg("speedtch_upload_firmware: BLOCK1 uploaded (%d bytes)", fw1->size);
+ }
+
+ /* USB led blinking green, ADSL led off */
+
+ /* URB 11 */
+ ret = usb_bulk_msg(usb_dev, usb_rcvbulkpipe(usb_dev, SPEEDTCH_ENDPOINT_FIRMWARE),
+ buffer, 0x200, &actual_length, DATA_TIMEOUT);
+
+ if (ret < 0) {
+ dbg("speedtch_upload_firmware: read BLOCK2 from modem failed (%d)!", ret);
+ goto fail_release;
+ }
+ dbg("speedtch_upload_firmware: BLOCK2 downloaded (%d bytes)", actual_length);
+
+ /* URBs 12 to 139 - USB led blinking green, ADSL led off */
+ for (offset = 0; offset < fw2->size; offset += PAGE_SIZE) {
+ int thislen = min_t(int, PAGE_SIZE, fw2->size - offset);
+ memcpy(buffer, fw2->data + offset, thislen);
+
+ ret = usb_bulk_msg(usb_dev, usb_sndbulkpipe(usb_dev, SPEEDTCH_ENDPOINT_FIRMWARE),
+ buffer, thislen, &actual_length, DATA_TIMEOUT);
+
+ if (ret < 0) {
+ dbg("speedtch_upload_firmware: write BLOCK3 to modem failed (%d)!", ret);
+ goto fail_release;
+ }
+ }
+ dbg("speedtch_upload_firmware: BLOCK3 uploaded (%d bytes)", fw2->size);
+
+ /* USB led static green, ADSL led static red */
+
+ /* URB 142 */
+ ret = usb_bulk_msg(usb_dev, usb_rcvbulkpipe(usb_dev, SPEEDTCH_ENDPOINT_FIRMWARE),
+ buffer, 0x200, &actual_length, DATA_TIMEOUT);
+
+ if (ret < 0) {
+ dbg("speedtch_upload_firmware: read BLOCK4 from modem failed (%d)!", ret);
+ goto fail_release;
+ }
+
+ /* success */
+ dbg("speedtch_upload_firmware: BLOCK4 downloaded (%d bytes)", actual_length);
+
+ /* Delay to allow firmware to start up. We can do this here
+ because we're in our own kernel thread anyway. */
+ msleep(1000);
+
+ /* Enable software buffering, if requested */
+ if (sw_buffering)
+ speedtch_set_swbuff(instance, 1);
+
+ /* Magic spell; don't ask us what this does */
+ speedtch_test_sequence(instance);
+
+ /* Start modem synchronisation */
+ if (speedtch_start_synchro(instance))
+ dbg("speedtch_start_synchro: failed");
+
+ speedtch_got_firmware(instance, 1);
+
+ free_page((unsigned long)buffer);
+ return;
+
+ fail_release:
+ /* Only release interface #2 if uploading failed; we don't release it
+ if we succeeded. This prevents the userspace tools from trying to
+ load the firmware themselves */
+ usb_driver_release_interface(&speedtch_usb_driver, intf);
+ fail_free:
+ free_page((unsigned long)buffer);
+ fail:
+ speedtch_got_firmware(instance, 0);
+}
+
+/*
+ * Locate the stage-@phase firmware image, trying progressively less
+ * specific names: speedtch-N.bin.MAJOR.MINOR, then speedtch-N.bin.MAJOR,
+ * then plain speedtch-N.bin.  On success *fw_p holds the image (caller
+ * must release_firmware() it) and 0 is returned; otherwise -ENOENT.
+ * NOTE(review): bcdDevice is used without le16_to_cpu() - confirm the
+ * descriptor fields are already CPU-endian in this kernel version.
+ */
+static int speedtch_find_firmware(struct speedtch_instance_data
+ *instance, int phase,
+ const struct firmware **fw_p)
+{
+ char buf[24];
+ const u16 bcdDevice = instance->u.usb_dev->descriptor.bcdDevice;
+ const u8 major_revision = bcdDevice >> 8;
+ const u8 minor_revision = bcdDevice & 0xff;
+
+ sprintf(buf, "speedtch-%d.bin.%x.%02x", phase, major_revision, minor_revision);
+ dbg("speedtch_find_firmware: looking for %s", buf);
+
+ if (request_firmware(fw_p, buf, &instance->u.usb_dev->dev)) {
+ sprintf(buf, "speedtch-%d.bin.%x", phase, major_revision);
+ dbg("speedtch_find_firmware: looking for %s", buf);
+
+ if (request_firmware(fw_p, buf, &instance->u.usb_dev->dev)) {
+ sprintf(buf, "speedtch-%d.bin", phase);
+ dbg("speedtch_find_firmware: looking for %s", buf);
+
+ if (request_firmware(fw_p, buf, &instance->u.usb_dev->dev)) {
+ dev_warn(&instance->u.usb_dev->dev, "no stage %d firmware found!", phase);
+ return -ENOENT;
+ }
+ }
+ }
+
+ dev_info(&instance->u.usb_dev->dev, "found stage %d firmware %s\n", phase, buf);
+
+ return 0;
+}
+
+/*
+ * Kernel-thread entry point: fetch both firmware stages with
+ * request_firmware() and upload them.  The unconditional
+ * speedtch_got_firmware(instance, 0) at the end is a no-op after a
+ * successful upload (status is already UDSL_LOADED_FIRMWARE) but resets
+ * a failed attempt back to UDSL_NO_FIRMWARE so a later retry can work,
+ * e.g. when the firmware lives on a not-yet-mounted /usr.  Drops the
+ * module and instance references taken by speedtch_firmware_start().
+ */
+static int speedtch_load_firmware(void *arg)
+{
+ const struct firmware *fw1, *fw2;
+ struct speedtch_instance_data *instance = arg;
+
+ BUG_ON(!instance);
+
+ daemonize("firmware/speedtch");
+
+ if (!speedtch_find_firmware(instance, 1, &fw1)) {
+ if (!speedtch_find_firmware(instance, 2, &fw2)) {
+ speedtch_upload_firmware(instance, fw1, fw2);
+ release_firmware(fw2);
+ }
+ release_firmware(fw1);
+ }
+
+ /* In case we failed, set state back to NO_FIRMWARE so that
+ another later attempt may work. Otherwise, we never actually
+ manage to recover if, for example, the firmware is on /usr and
+ we look for it too early. */
+ speedtch_got_firmware(instance, 0);
+
+ module_put(THIS_MODULE);
+ udsl_put_instance(&instance->u);
+ return 0;
+}
+#endif /* USE_FW_LOADER */
+
+/*
+ * Kick off firmware loading exactly once: any status at or beyond
+ * UDSL_LOADING_FIRMWARE means it is already done or in progress.  With
+ * the in-kernel loader the work happens in a kernel thread that holds
+ * module and instance references; without it, or if the thread cannot
+ * be spawned, failure is recorded immediately and the userspace
+ * modem_run tool is relied upon instead.
+ */
+static void speedtch_firmware_start(struct speedtch_instance_data *instance)
+{
+#ifdef USE_FW_LOADER
+ int ret;
+#endif
+
+ dbg("speedtch_firmware_start");
+
+ down(&instance->u.serialize); /* vs self, speedtch_got_firmware */
+
+ if (instance->u.status >= UDSL_LOADING_FIRMWARE) {
+ up(&instance->u.serialize);
+ return;
+ }
+
+ instance->u.status = UDSL_LOADING_FIRMWARE;
+ up(&instance->u.serialize);
+
+#ifdef USE_FW_LOADER
+ /* references dropped by speedtch_load_firmware() when it exits */
+ udsl_get_instance(&instance->u);
+ try_module_get(THIS_MODULE);
+
+ ret = kernel_thread(speedtch_load_firmware, instance,
+ CLONE_FS | CLONE_FILES);
+
+ if (ret >= 0)
+ return; /* OK */
+
+ dbg("speedtch_firmware_start: kernel_thread failed (%d)!", ret);
+
+ module_put(THIS_MODULE);
+ udsl_put_instance(&instance->u);
+ /* Just pretend it never happened... hope modem_run happens */
+#endif /* USE_FW_LOADER */
+
+ speedtch_got_firmware(instance, 0);
+}
+
+/*
+ * usb_atm callback: start firmware loading if needed, then sleep until
+ * the load finishes.  Returns 0 when the firmware ended up loaded,
+ * -EAGAIN when it did not, -ERESTARTSYS if the wait was interrupted.
+ */
+static int speedtch_firmware_wait(struct udsl_instance_data *instance)
+{
+ int ret;
+
+ speedtch_firmware_start((void *)instance);
+
+ ret = wait_event_interruptible(instance->firmware_waiters,
+ instance->status != UDSL_LOADING_FIRMWARE);
+ if (ret < 0)
+ return -ERESTARTSYS;
+
+ if (instance->status == UDSL_LOADED_FIRMWARE)
+ return 0;
+
+ return -EAGAIN;
+}
+
+/**********
+** USB **
+**********/
+
+/*
+ * Driver ioctl: lets userspace (e.g. modem_run) report line state.
+ * UDSL_IOCTL_LINE_UP also finalises the firmware state machine.
+ */
+static int speedtch_usb_ioctl(struct usb_interface *intf, unsigned int code,
+ void *user_data)
+{
+ struct speedtch_instance_data *instance = usb_get_intfdata(intf);
+
+ dbg("speedtch_usb_ioctl entered");
+
+ if (!instance) {
+ dbg("speedtch_usb_ioctl: NULL instance!");
+ return -ENODEV;
+ }
+
+ if (code == UDSL_IOCTL_LINE_UP) {
+ instance->u.atm_dev->signal = ATM_PHY_SIG_FOUND;
+ speedtch_got_firmware(instance, 1);
+ return (instance->u.status == UDSL_LOADED_FIRMWARE) ? 0 : -EIO;
+ }
+
+ if (code == UDSL_IOCTL_LINE_DOWN) {
+ instance->u.atm_dev->signal = ATM_PHY_SIG_LOST;
+ return 0;
+ }
+
+ return -ENOTTY;
+}
+
+/*
+ * Probe: accept only interface 1 of a SpeedTouch modem, allocate and
+ * zero the per-device state, register with the usb_atm core, read the
+ * MAC address from the serial-number string, and either note that
+ * firmware is already running or start loading it.
+ */
+static int speedtch_usb_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
+{
+ struct usb_device *dev = interface_to_usbdev(intf);
+ int ifnum = intf->altsetting->desc.bInterfaceNumber;
+ struct speedtch_instance_data *instance;
+ unsigned char mac_str[13];
+ int ret, i;
+ char buf7[SIZE_7];
+
+ dbg("speedtch_usb_probe: trying device with vendor=0x%x, product=0x%x, ifnum %d", dev->descriptor.idVendor, dev->descriptor.idProduct, ifnum);
+
+ if ((dev->descriptor.bDeviceClass != USB_CLASS_VENDOR_SPEC) ||
+ (dev->descriptor.idVendor != SPEEDTOUCH_VENDORID) ||
+ (dev->descriptor.idProduct != SPEEDTOUCH_PRODUCTID) || (ifnum != 1))
+ return -ENODEV;
+
+ dbg("speedtch_usb_probe: device accepted");
+
+ /* instance init */
+ instance = kmalloc(sizeof(*instance), GFP_KERNEL);
+ if (!instance) {
+ dbg("speedtch_usb_probe: no memory for instance data!");
+ return -ENOMEM;
+ }
+
+ memset(instance, 0, sizeof(struct speedtch_instance_data));
+
+ if ((ret = usb_set_interface(dev, 0, 0)) < 0)
+ goto fail;
+
+ if ((ret = usb_set_interface(dev, 2, 0)) < 0)
+ goto fail;
+
+ instance->u.data_endpoint = SPEEDTCH_ENDPOINT_DATA;
+ instance->u.firmware_wait = speedtch_firmware_wait;
+ instance->u.driver_name = speedtch_driver_name;
+
+ ret = udsl_instance_setup(dev, &instance->u);
+ if (ret)
+ goto fail;
+
+ init_timer(&instance->poll_timer);
+ instance->poll_timer.function = speedtch_timer_poll;
+ instance->poll_timer.data = (unsigned long)instance;
+
+ INIT_WORK(&instance->poll_work, (void *)speedtch_poll_status, instance);
+
+ /* set MAC address, it is stored in the serial number */
+ memset(instance->u.atm_dev->esi, 0, sizeof(instance->u.atm_dev->esi));
+ if (usb_string(dev, dev->descriptor.iSerialNumber, mac_str, sizeof(mac_str)) == 12) {
+ for (i = 0; i < 6; i++)
+ instance->u.atm_dev->esi[i] =
+ (hex2int(mac_str[i * 2]) * 16) + (hex2int(mac_str[i * 2 + 1]));
+ }
+
+ /* First check whether the modem already seems to be alive */
+ ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
+ 0x12, 0xc0, 0x07, 0x00, buf7, SIZE_7, HZ / 2);
+
+ if (ret == SIZE_7) {
+ dbg("firmware appears to be already loaded");
+ speedtch_got_firmware(instance, 1);
+ speedtch_poll_status(instance);
+ } else {
+ speedtch_firmware_start(instance);
+ }
+
+ usb_set_intfdata(intf, instance);
+
+ return 0;
+
+ fail:
+ kfree(instance);
+
+ /* Fix: propagate the real error. The old code returned -ENOMEM
+ here even when usb_set_interface()/udsl_instance_setup() had
+ failed for an entirely different reason. */
+ return ret;
+}
+
+/*
+ * Disconnect: stop the interrupt URB (clearing int_urb first and issuing
+ * a wmb() so the completion handler's rmb()-guarded check sees NULL and
+ * does not resubmit), stop the poll timer and flush queued poll work,
+ * then hand the remaining teardown to the usb_atm core.
+ */
+static void speedtch_usb_disconnect(struct usb_interface *intf)
+{
+ struct speedtch_instance_data *instance = usb_get_intfdata(intf);
+
+ dbg("speedtch_usb_disconnect entered");
+
+ if (!instance) {
+ dbg("speedtch_usb_disconnect: NULL instance!");
+ return;
+ }
+
+ if (instance->int_urb) {
+ struct urb *int_urb = instance->int_urb;
+ instance->int_urb = NULL;
+ wmb();
+ usb_unlink_urb(int_urb);
+ usb_free_urb(int_urb);
+ }
+
+ /* spoil the magic packet so a racing completion cannot mistake
+ stale data for a line event - presumably; TODO confirm */
+ instance->int_data[0] = 1;
+ del_timer_sync(&instance->poll_timer);
+ wmb();
+ flush_scheduled_work();
+
+ udsl_instance_disconnect(&instance->u);
+
+ /* clean up */
+ usb_set_intfdata(intf, NULL);
+ udsl_put_instance(&instance->u);
+}
+
+/***********
+** init **
+***********/
+
+/* Module init: register the USB driver; devices then bind via probe */
+static int __init speedtch_usb_init(void)
+{
+ dbg("speedtch_usb_init: driver version " DRIVER_VERSION);
+
+ return usb_register(&speedtch_usb_driver);
+}
+
+/* Module exit: unregister, which disconnects any bound interfaces */
+static void __exit speedtch_usb_cleanup(void)
+{
+ dbg("speedtch_usb_cleanup entered");
+
+ usb_deregister(&speedtch_usb_driver);
+}
+
+module_init(speedtch_usb_init);
+module_exit(speedtch_usb_cleanup);
+
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRIVER_VERSION);
--- /dev/null
+/******************************************************************************
+ * usb_atm.c - Generic USB xDSL driver core
+ *
+ * Copyright (C) 2001, Alcatel
+ * Copyright (C) 2003, Duncan Sands, SolNegro, Josep Comas
+ * Copyright (C) 2004, David Woodhouse
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ ******************************************************************************/
+
+/*
+ * Written by Johan Verrept, maintained by Duncan Sands (duncan.sands@free.fr)
+ *
+ * 1.7+: - See the check-in logs
+ *
+ * 1.6: - No longer opens a connection if the firmware is not loaded
+ * - Added support for the speedtouch 330
+ * - Removed the limit on the number of devices
+ * - Module now autoloads on device plugin
+ * - Merged relevant parts of sarlib
+ * - Replaced the kernel thread with a tasklet
+ * - New packet transmission code
+ * - Changed proc file contents
+ * - Fixed all known SMP races
+ * - Many fixes and cleanups
+ * - Various fixes by Oliver Neukum (oliver@neukum.name)
+ *
+ * 1.5A: - Version for inclusion in 2.5 series kernel
+ * - Modifications by Richard Purdie (rpurdie@rpsys.net)
+ * - made compatible with kernel 2.5.6 onwards by changing
+ * udsl_usb_send_data_context->urb to a pointer and adding code
+ * to alloc and free it
+ * - remove_wait_queue() added to udsl_atm_processqueue_thread()
+ *
+ * 1.5: - fixed memory leak when atmsar_decode_aal5 returned NULL.
+ * (reported by stephen.robinson@zen.co.uk)
+ *
+ * 1.4: - changed the spin_lock() under interrupt to spin_lock_irqsave()
+ * - unlink all active send urbs of a vcc that is being closed.
+ *
+ * 1.3.1: - added the version number
+ *
+ * 1.3: - Added multiple send urb support
+ * - fixed memory leak and vcc->tx_inuse starvation bug
+ * when not enough memory left in vcc.
+ *
+ * 1.2: - Fixed race condition in udsl_usb_send_data()
+ * 1.1: - Turned off packet debugging
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/timer.h>
+#include <linux/errno.h>
+#include <linux/proc_fs.h>
+#include <linux/slab.h>
+#include <linux/wait.h>
+#include <linux/list.h>
+#include <asm/uaccess.h>
+#include <linux/smp_lock.h>
+#include <linux/interrupt.h>
+#include <linux/atm.h>
+#include <linux/atmdev.h>
+#include <linux/crc32.h>
+#include <linux/init.h>
+#include <linux/firmware.h>
+
+#include "usb_atm.h"
+
+/*
+#define DEBUG
+#define VERBOSE_DEBUG
+*/
+
+#if !defined (DEBUG) && defined (CONFIG_USB_DEBUG)
+# define DEBUG
+#endif
+
+#include <linux/usb.h>
+
+/* UDSL_ASSERT: hard BUG in debug builds, noisy warning otherwise */
+#ifdef DEBUG
+#define UDSL_ASSERT(x) BUG_ON(!(x))
+#else
+#define UDSL_ASSERT(x) do { if (!(x)) warn("failed assertion '" #x "' at line %d", __LINE__); } while(0)
+#endif
+
+/* Packet dump / verbose tracing: compiled out unless VERBOSE_DEBUG */
+#ifdef VERBOSE_DEBUG
+static int udsl_print_packet(const unsigned char *data, int len);
+#define PACKETDEBUG(arg...) udsl_print_packet (arg)
+#define vdbg(arg...) dbg (arg)
+#else
+#define PACKETDEBUG(arg...)
+#define vdbg(arg...)
+#endif
+
+#define DRIVER_AUTHOR "Johan Verrept, Duncan Sands <duncan.sands@free.fr>"
+#define DRIVER_VERSION "1.8"
+#define DRIVER_DESC "Alcatel SpeedTouch USB driver version " DRIVER_VERSION
+
+static unsigned int num_rcv_urbs = UDSL_DEFAULT_RCV_URBS;
+static unsigned int num_snd_urbs = UDSL_DEFAULT_SND_URBS;
+static unsigned int num_rcv_bufs = UDSL_DEFAULT_RCV_BUFS;
+static unsigned int num_snd_bufs = UDSL_DEFAULT_SND_BUFS;
+static unsigned int rcv_buf_size = UDSL_DEFAULT_RCV_BUF_SIZE;
+static unsigned int snd_buf_size = UDSL_DEFAULT_SND_BUF_SIZE;
+
+module_param(num_rcv_urbs, uint, 0444);
+MODULE_PARM_DESC(num_rcv_urbs,
+ "Number of urbs used for reception (range: 0-"
+ __MODULE_STRING(UDSL_MAX_RCV_URBS) ", default: "
+ __MODULE_STRING(UDSL_DEFAULT_RCV_URBS) ")");
+
+module_param(num_snd_urbs, uint, 0444);
+MODULE_PARM_DESC(num_snd_urbs,
+ "Number of urbs used for transmission (range: 0-"
+ __MODULE_STRING(UDSL_MAX_SND_URBS) ", default: "
+ __MODULE_STRING(UDSL_DEFAULT_SND_URBS) ")");
+
+module_param(num_rcv_bufs, uint, 0444);
+MODULE_PARM_DESC(num_rcv_bufs,
+ "Number of buffers used for reception (range: 0-"
+ __MODULE_STRING(UDSL_MAX_RCV_BUFS) ", default: "
+ __MODULE_STRING(UDSL_DEFAULT_RCV_BUFS) ")");
+
+module_param(num_snd_bufs, uint, 0444);
+MODULE_PARM_DESC(num_snd_bufs,
+ "Number of buffers used for transmission (range: 0-"
+ __MODULE_STRING(UDSL_MAX_SND_BUFS) ", default: "
+ __MODULE_STRING(UDSL_DEFAULT_SND_BUFS) ")");
+
+module_param(rcv_buf_size, uint, 0444);
+MODULE_PARM_DESC(rcv_buf_size,
+ "Size of the buffers used for reception (range: 0-"
+ __MODULE_STRING(UDSL_MAX_RCV_BUF_SIZE) ", default: "
+ __MODULE_STRING(UDSL_DEFAULT_RCV_BUF_SIZE) ")");
+
+module_param(snd_buf_size, uint, 0444);
+MODULE_PARM_DESC(snd_buf_size,
+ "Size of the buffers used for transmission (range: 0-"
+ __MODULE_STRING(UDSL_MAX_SND_BUF_SIZE) ", default: "
+ __MODULE_STRING(UDSL_DEFAULT_SND_BUF_SIZE) ")");
+
+/* ATM */
+
+/* Forward declarations for the ATM device operations implemented below */
+static void udsl_atm_dev_close(struct atm_dev *dev);
+static int udsl_atm_open(struct atm_vcc *vcc);
+static void udsl_atm_close(struct atm_vcc *vcc);
+static int udsl_atm_ioctl(struct atm_dev *dev, unsigned int cmd, void __user * arg);
+static int udsl_atm_send(struct atm_vcc *vcc, struct sk_buff *skb);
+static int udsl_atm_proc_read(struct atm_dev *atm_dev, loff_t * pos, char *page);
+
+/* atmdev_ops vtable handed to the ATM stack for each registered device */
+static struct atmdev_ops udsl_atm_devops = {
+ .dev_close = udsl_atm_dev_close,
+ .open = udsl_atm_open,
+ .close = udsl_atm_close,
+ .ioctl = udsl_atm_ioctl,
+ .send = udsl_atm_send,
+ .proc_read = udsl_atm_proc_read,
+ .owner = THIS_MODULE,
+};
+
+/***********
+** misc **
+***********/
+
+/*
+ * Release an skb back to its VCC: use the VCC's pop callback when one
+ * is installed, otherwise just free the buffer.
+ */
+static inline void udsl_pop(struct atm_vcc *vcc, struct sk_buff *skb)
+{
+ if (!vcc->pop) {
+ dev_kfree_skb(skb);
+ return;
+ }
+
+ vcc->pop(vcc, skb);
+}
+
+/*************
+** decode **
+*************/
+
+/*
+ * Linear search of the instance's VCC list for the entry matching the
+ * given vpi/vci pair; returns NULL when no such channel is open.
+ */
+static inline struct udsl_vcc_data *udsl_find_vcc(struct udsl_instance_data *instance,
+ short vpi, int vci)
+{
+ struct udsl_vcc_data *vcc_data;
+
+ list_for_each_entry(vcc_data, &instance->vcc_list, list) {
+ if (vcc_data->vpi == vpi && vcc_data->vci == vci)
+ return vcc_data;
+ }
+
+ return NULL;
+}
+
+/*
+ * Split a receive buffer into ATM cells, append each cell's payload to
+ * the owning VCC's reassembly buffer (sarb), and on the end-of-PDU cell
+ * validate and deliver the completed AAL5 packet to the ATM stack.
+ * @source holds @howmany cells spaced ATM_CELL_SIZE + rcv_padding bytes
+ * apart.  Runs in the receive tasklet.  A one-entry VCC cache avoids
+ * re-searching the list for runs of cells on the same channel.
+ */
+static void udsl_extract_cells(struct udsl_instance_data *instance,
+ unsigned char *source, unsigned int howmany)
+{
+ struct udsl_vcc_data *cached_vcc = NULL;
+ struct atm_vcc *vcc;
+ struct sk_buff *sarb;
+ struct udsl_vcc_data *vcc_data;
+ int cached_vci = 0;
+ unsigned int i;
+ int pti;
+ int vci;
+ short cached_vpi = 0;
+ short vpi;
+
+ for (i = 0; i < howmany;
+ i++, source += ATM_CELL_SIZE + instance->rcv_padding) {
+ /* unpack VPI/VCI from the UNI cell header (bytes 0-3);
+ pti here is just the AAL5 end-of-PDU flag (bit 1 of byte 3) */
+ vpi = ((source[0] & 0x0f) << 4) | (source[1] >> 4);
+ vci = ((source[1] & 0x0f) << 12) | (source[2] << 4) | (source[3] >> 4);
+ pti = (source[3] & 0x2) != 0;
+
+ vdbg("udsl_extract_cells: vpi %hd, vci %d, pti %d", vpi, vci, pti);
+
+ if (cached_vcc && (vci == cached_vci) && (vpi == cached_vpi))
+ vcc_data = cached_vcc;
+ else if ((vcc_data = udsl_find_vcc(instance, vpi, vci))) {
+ cached_vcc = vcc_data;
+ cached_vpi = vpi;
+ cached_vci = vci;
+ } else {
+ /* cell for a channel nobody opened - drop it */
+ dbg("udsl_extract_cells: unknown vpi/vci (%hd/%d)!", vpi, vci);
+ continue;
+ }
+
+ vcc = vcc_data->vcc;
+ sarb = vcc_data->sarb;
+
+ if (sarb->tail + ATM_CELL_PAYLOAD > sarb->end) {
+ dbg("udsl_extract_cells: buffer overrun (sarb->len %u, vcc: 0x%p)!", sarb->len, vcc);
+ /* discard cells already received */
+ skb_trim(sarb, 0);
+ }
+
+ memcpy(sarb->tail, source + ATM_CELL_HEADER, ATM_CELL_PAYLOAD);
+ __skb_put(sarb, ATM_CELL_PAYLOAD);
+
+ if (pti) {
+ /* last cell of the PDU: the AAL5 trailer sits at the
+ end of this cell's payload */
+ struct sk_buff *skb;
+ unsigned int length;
+ unsigned int pdu_length;
+
+ /* 16-bit PDU length, 6..5 bytes from the cell end */
+ length = (source[ATM_CELL_SIZE - 6] << 8) + source[ATM_CELL_SIZE - 5];
+
+ /* guard against overflow */
+ if (length > ATM_MAX_AAL5_PDU) {
+ dbg("udsl_extract_cells: bogus length %u (vcc: 0x%p)!", length, vcc);
+ atomic_inc(&vcc->stats->rx_err);
+ goto out;
+ }
+
+ pdu_length = UDSL_NUM_CELLS(length) * ATM_CELL_PAYLOAD;
+
+ if (sarb->len < pdu_length) {
+ dbg("udsl_extract_cells: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!", pdu_length, sarb->len, vcc);
+ atomic_inc(&vcc->stats->rx_err);
+ goto out;
+ }
+
+ /* 0xc704dd7b is the CRC-32 residue of a valid AAL5 PDU */
+ if (crc32_be(~0, sarb->tail - pdu_length, pdu_length) != 0xc704dd7b) {
+ dbg("udsl_extract_cells: packet failed crc check (vcc: 0x%p)!", vcc);
+ atomic_inc(&vcc->stats->rx_err);
+ goto out;
+ }
+
+ vdbg("udsl_extract_cells: got packet (length: %u, pdu_length: %u, vcc: 0x%p)", length, pdu_length, vcc);
+
+ if (!(skb = dev_alloc_skb(length))) {
+ dbg("udsl_extract_cells: no memory for skb (length: %u)!", length);
+ atomic_inc(&vcc->stats->rx_drop);
+ goto out;
+ }
+
+ vdbg("udsl_extract_cells: allocated new sk_buff (skb: 0x%p, skb->truesize: %u)", skb, skb->truesize);
+
+ if (!atm_charge(vcc, skb->truesize)) {
+ dbg("udsl_extract_cells: failed atm_charge (skb->truesize: %u)!", skb->truesize);
+ dev_kfree_skb(skb);
+ goto out; /* atm_charge increments rx_drop */
+ }
+
+ memcpy(skb->data, sarb->tail - pdu_length, length);
+ __skb_put(skb, length);
+
+ vdbg("udsl_extract_cells: sending skb 0x%p, skb->len %u, skb->truesize %u", skb, skb->len, skb->truesize);
+
+ PACKETDEBUG(skb->data, skb->len);
+
+ vcc->push(vcc, skb);
+
+ atomic_inc(&vcc->stats->rx);
+ out:
+ /* PDU consumed (or rejected): restart reassembly */
+ skb_trim(sarb, 0);
+ }
+ }
+}
+
+/*************
+** encode **
+*************/
+
+/* one cell's worth of zeroes, used only as CRC input for the padding */
+static const unsigned char zeros[ATM_CELL_PAYLOAD];
+
+/*
+ * Precompute everything udsl_write_cells() needs for @skb: the 5-byte
+ * cell header (VPI/VCI packed per the UNI layout), the cell counts, the
+ * amount of zero padding, and the 8-byte AAL5 trailer including its
+ * CRC-32 over data + padding + the first four trailer bytes.
+ */
+static void udsl_groom_skb(struct atm_vcc *vcc, struct sk_buff *skb)
+{
+ struct udsl_control *ctrl = UDSL_SKB(skb);
+ unsigned int zero_padding;
+ u32 crc;
+
+ ctrl->atm_data.vcc = vcc;
+ ctrl->cell_header[0] = vcc->vpi >> 4;
+ ctrl->cell_header[1] = (vcc->vpi << 4) | (vcc->vci >> 12);
+ ctrl->cell_header[2] = vcc->vci >> 4;
+ ctrl->cell_header[3] = vcc->vci << 4;
+ /* fixed 5th header byte - presumably a HEC value the modem
+ accepts (or recomputes); TODO confirm */
+ ctrl->cell_header[4] = 0xec;
+
+ /* num_cells counts all cells of the PDU incl. trailer space;
+ num_entire counts cells filled entirely with payload */
+ ctrl->num_cells = UDSL_NUM_CELLS(skb->len);
+ ctrl->num_entire = skb->len / ATM_CELL_PAYLOAD;
+
+ zero_padding = ctrl->num_cells * ATM_CELL_PAYLOAD - skb->len - ATM_AAL5_TRAILER;
+
+ /* if the trailer needs a cell of its own, that cell's padding is
+ accounted for separately in udsl_write_cells() */
+ if (ctrl->num_entire + 1 < ctrl->num_cells)
+ ctrl->pdu_padding = zero_padding - (ATM_CELL_PAYLOAD - ATM_AAL5_TRAILER);
+ else
+ ctrl->pdu_padding = zero_padding;
+
+ ctrl->aal5_trailer[0] = 0; /* UU = 0 */
+ ctrl->aal5_trailer[1] = 0; /* CPI = 0 */
+ ctrl->aal5_trailer[2] = skb->len >> 8;
+ ctrl->aal5_trailer[3] = skb->len;
+
+ crc = crc32_be(~0, skb->data, skb->len);
+ crc = crc32_be(crc, zeros, zero_padding);
+ crc = crc32_be(crc, ctrl->aal5_trailer, 4);
+ crc = ~crc;
+
+ ctrl->aal5_trailer[4] = crc >> 24;
+ ctrl->aal5_trailer[5] = crc >> 16;
+ ctrl->aal5_trailer[6] = crc >> 8;
+ ctrl->aal5_trailer[7] = crc;
+}
+
+/*
+ * Serialize up to @howmany cells of @skb into *target_p, prepending the
+ * precomputed cell header (and appending per-cell padding when the
+ * hardware needs it) to each.  Emits, in order: whole payload cells, the
+ * partial cell holding the data tail plus pdu_padding, an all-padding
+ * cell when the trailer does not fit, and finally the AAL5 trailer with
+ * the PTI end-of-PDU bit set in the last cell.  May be called repeatedly
+ * for one skb; ctrl->num_cells/num_entire/pdu_padding carry the state
+ * between calls.  Returns the number of cells written and advances
+ * *target_p.
+ */
+static unsigned int udsl_write_cells(struct udsl_instance_data *instance,
+ unsigned int howmany, struct sk_buff *skb,
+ unsigned char **target_p)
+{
+ struct udsl_control *ctrl = UDSL_SKB(skb);
+ unsigned char *target = *target_p;
+ unsigned int nc, ne, i;
+
+ vdbg("udsl_write_cells: howmany=%u, skb->len=%d, num_cells=%u, num_entire=%u, pdu_padding=%u", howmany, skb->len, ctrl->num_cells, ctrl->num_entire, ctrl->pdu_padding);
+
+ nc = ctrl->num_cells; /* remember, to report how many we wrote */
+ ne = min(howmany, ctrl->num_entire);
+
+ /* 1: cells completely filled with payload */
+ for (i = 0; i < ne; i++) {
+ memcpy(target, ctrl->cell_header, ATM_CELL_HEADER);
+ target += ATM_CELL_HEADER;
+ memcpy(target, skb->data, ATM_CELL_PAYLOAD);
+ target += ATM_CELL_PAYLOAD;
+ if (instance->snd_padding) {
+ memset(target, 0, instance->snd_padding);
+ target += instance->snd_padding;
+ }
+ __skb_pull(skb, ATM_CELL_PAYLOAD);
+ }
+
+ ctrl->num_entire -= ne;
+
+ if (!(ctrl->num_cells -= ne) || !(howmany -= ne))
+ goto out; /* done, or out of room in this buffer */
+
+ /* 2: the cell carrying the remaining payload bytes + padding */
+ if (instance->snd_padding) {
+ memset(target, 0, instance->snd_padding);
+ target += instance->snd_padding;
+ }
+ memcpy(target, ctrl->cell_header, ATM_CELL_HEADER);
+ target += ATM_CELL_HEADER;
+ memcpy(target, skb->data, skb->len);
+ target += skb->len;
+ __skb_pull(skb, skb->len);
+ memset(target, 0, ctrl->pdu_padding);
+ target += ctrl->pdu_padding;
+
+ if (--ctrl->num_cells) {
+ if (!--howmany) {
+ /* trailer cell doesn't fit: on resume, pad a full
+ cell's worth before the trailer */
+ ctrl->pdu_padding = ATM_CELL_PAYLOAD - ATM_AAL5_TRAILER;
+ goto out;
+ }
+
+ /* 3: an extra all-padding cell so the trailer ends the PDU */
+ memcpy(target, ctrl->cell_header, ATM_CELL_HEADER);
+ target += ATM_CELL_HEADER;
+ memset(target, 0, ATM_CELL_PAYLOAD - ATM_AAL5_TRAILER);
+ target += ATM_CELL_PAYLOAD - ATM_AAL5_TRAILER;
+
+ --ctrl->num_cells;
+ UDSL_ASSERT(!ctrl->num_cells);
+ }
+
+ /* 4: the AAL5 trailer, then flag the last cell as end-of-PDU */
+ memcpy(target, ctrl->aal5_trailer, ATM_AAL5_TRAILER);
+ target += ATM_AAL5_TRAILER;
+ /* set pti bit in last cell */
+ *(target + 3 - ATM_CELL_SIZE) |= 0x2;
+ if (instance->snd_padding) {
+ memset(target, 0, instance->snd_padding);
+ target += instance->snd_padding;
+ }
+ out:
+ *target_p = target;
+ return nc - ctrl->num_cells;
+}
+
+/**************
+** receive **
+**************/
+
+/*
+ * Bulk-read completion: record how many whole cells arrived, return the
+ * receiver to the spare list and queue its buffer for the receive
+ * tasklet.  The tasklet is only scheduled on a clean completion; on
+ * error the buffer is still queued (with however many cells it holds)
+ * for the tasklet's next run.  May execute outside interrupt context,
+ * hence the irqsave locking.
+ */
+static void udsl_complete_receive(struct urb *urb, struct pt_regs *regs)
+{
+ struct udsl_receive_buffer *buf;
+ struct udsl_instance_data *instance;
+ struct udsl_receiver *rcv;
+ unsigned long flags;
+
+ if (!urb || !(rcv = urb->context)) {
+ dbg("udsl_complete_receive: bad urb!");
+ return;
+ }
+
+ instance = rcv->instance;
+ buf = rcv->buffer;
+
+ buf->filled_cells = urb->actual_length / (ATM_CELL_SIZE + instance->rcv_padding);
+
+ vdbg("udsl_complete_receive: urb 0x%p, status %d, actual_length %d, filled_cells %u, rcv 0x%p, buf 0x%p", urb, urb->status, urb->actual_length, buf->filled_cells, rcv, buf);
+
+ UDSL_ASSERT(buf->filled_cells <= rcv_buf_size);
+
+ /* may not be in_interrupt() */
+ spin_lock_irqsave(&instance->receive_lock, flags);
+ list_add(&rcv->list, &instance->spare_receivers);
+ list_add_tail(&buf->list, &instance->filled_receive_buffers);
+ if (likely(!urb->status))
+ tasklet_schedule(&instance->receive_tasklet);
+ spin_unlock_irqrestore(&instance->receive_lock, flags);
+}
+
+/*
+ * Receive tasklet.  Two duties, repeated until no progress can be made:
+ * 1) pair every spare buffer with a spare receiver and submit a bulk-in urb;
+ * 2) pull one filled buffer off the list, feed it to udsl_extract_cells()
+ *    and return it to the spare pool.
+ * Only spare_receivers/filled_receive_buffers are shared with the urb
+ * completion handler and therefore touched under receive_lock; the spare
+ * buffer list is private to this tasklet.
+ */
+static void udsl_process_receive(unsigned long data)
+{
+ struct udsl_receive_buffer *buf;
+ struct udsl_instance_data *instance = (struct udsl_instance_data *)data;
+ struct udsl_receiver *rcv;
+ int err;
+
+ made_progress:
+ while (!list_empty(&instance->spare_receive_buffers)) {
+ spin_lock_irq(&instance->receive_lock);
+ if (list_empty(&instance->spare_receivers)) {
+ spin_unlock_irq(&instance->receive_lock);
+ break;
+ }
+ rcv = list_entry(instance->spare_receivers.next,
+ struct udsl_receiver, list);
+ list_del(&rcv->list);
+ spin_unlock_irq(&instance->receive_lock);
+
+ buf = list_entry(instance->spare_receive_buffers.next,
+ struct udsl_receive_buffer, list);
+ list_del(&buf->list);
+
+ rcv->buffer = buf;
+
+ usb_fill_bulk_urb(rcv->urb, instance->usb_dev,
+ usb_rcvbulkpipe(instance->usb_dev, instance->data_endpoint),
+ buf->base,
+ rcv_buf_size * (ATM_CELL_SIZE + instance->rcv_padding),
+ udsl_complete_receive, rcv);
+
+ vdbg("udsl_process_receive: sending urb 0x%p, rcv 0x%p, buf 0x%p",
+ rcv->urb, rcv, buf);
+
+ if ((err = usb_submit_urb(rcv->urb, GFP_ATOMIC)) < 0) {
+ dbg("udsl_process_receive: urb submission failed (%d)!", err);
+ /* submission failed: put both halves back and stop resubmitting */
+ list_add(&buf->list, &instance->spare_receive_buffers);
+ spin_lock_irq(&instance->receive_lock);
+ list_add(&rcv->list, &instance->spare_receivers);
+ spin_unlock_irq(&instance->receive_lock);
+ break;
+ }
+ }
+
+ spin_lock_irq(&instance->receive_lock);
+ if (list_empty(&instance->filled_receive_buffers)) {
+ spin_unlock_irq(&instance->receive_lock);
+ return; /* done - no more buffers */
+ }
+ buf = list_entry(instance->filled_receive_buffers.next,
+ struct udsl_receive_buffer, list);
+ list_del(&buf->list);
+ spin_unlock_irq(&instance->receive_lock);
+
+ vdbg("udsl_process_receive: processing buf 0x%p", buf);
+ udsl_extract_cells(instance, buf->base, buf->filled_cells);
+ list_add(&buf->list, &instance->spare_receive_buffers);
+ /* processing freed a buffer, so urb submission may succeed now */
+ goto made_progress;
+}
+
+/***********
+** send **
+***********/
+
+/*
+ * Completion handler for a bulk send urb.  Returns the sender and its
+ * buffer to the spare pools and reschedules the send tasklet, all under
+ * send_lock with irqsave (completion may not be in_interrupt()).
+ */
+static void udsl_complete_send(struct urb *urb, struct pt_regs *regs)
+{
+ struct udsl_instance_data *instance;
+ struct udsl_sender *snd;
+ unsigned long flags;
+
+ if (!urb || !(snd = urb->context) || !(instance = snd->instance)) {
+ dbg("udsl_complete_send: bad urb!");
+ return;
+ }
+
+ vdbg("udsl_complete_send: urb 0x%p, status %d, snd 0x%p, buf 0x%p", urb,
+ urb->status, snd, snd->buffer);
+
+ /* may not be in_interrupt() */
+ spin_lock_irqsave(&instance->send_lock, flags);
+ list_add(&snd->list, &instance->spare_senders);
+ list_add(&snd->buffer->list, &instance->spare_send_buffers);
+ /* unconditionally kick the tasklet: a sender and a buffer are now free */
+ tasklet_schedule(&instance->send_tasklet);
+ spin_unlock_irqrestore(&instance->send_lock, flags);
+}
+
+/*
+ * Send tasklet.  Loops between two activities until neither can progress:
+ * 1) submit filled buffers (falling back to the partially filled
+ *    current_buffer) on spare senders;
+ * 2) take the current skb (or dequeue the next one) and pack its cells into
+ *    the current buffer via udsl_write_cells().
+ * send_lock protects the sender/buffer lists shared with the completion
+ * handler; sndqueue, current_skb and current_buffer are only touched here.
+ */
+static void udsl_process_send(unsigned long data)
+{
+ struct udsl_send_buffer *buf;
+ struct udsl_instance_data *instance = (struct udsl_instance_data *)data;
+ struct sk_buff *skb;
+ struct udsl_sender *snd;
+ int err;
+ unsigned int num_written;
+
+ made_progress:
+ spin_lock_irq(&instance->send_lock);
+ while (!list_empty(&instance->spare_senders)) {
+ if (!list_empty(&instance->filled_send_buffers)) {
+ /* prefer fully filled buffers */
+ buf = list_entry(instance->filled_send_buffers.next,
+ struct udsl_send_buffer, list);
+ list_del(&buf->list);
+ } else if ((buf = instance->current_buffer)) {
+ /* flush the partially filled buffer */
+ instance->current_buffer = NULL;
+ } else /* all buffers empty */
+ break;
+
+ snd = list_entry(instance->spare_senders.next,
+ struct udsl_sender, list);
+ list_del(&snd->list);
+ spin_unlock_irq(&instance->send_lock);
+
+ snd->buffer = buf;
+ usb_fill_bulk_urb(snd->urb, instance->usb_dev,
+ usb_sndbulkpipe(instance->usb_dev, instance->data_endpoint),
+ buf->base,
+ (snd_buf_size - buf->free_cells) * (ATM_CELL_SIZE + instance->snd_padding),
+ udsl_complete_send, snd);
+
+ vdbg("udsl_process_send: submitting urb 0x%p (%d cells), snd 0x%p, buf 0x%p",
+ snd->urb, snd_buf_size - buf->free_cells, snd, buf);
+
+ if ((err = usb_submit_urb(snd->urb, GFP_ATOMIC)) < 0) {
+ dbg("udsl_process_send: urb submission failed (%d)!", err);
+ /* requeue both; the buffer keeps its data for a retry */
+ spin_lock_irq(&instance->send_lock);
+ list_add(&snd->list, &instance->spare_senders);
+ spin_unlock_irq(&instance->send_lock);
+ list_add(&buf->list, &instance->filled_send_buffers);
+ return; /* bail out */
+ }
+
+ spin_lock_irq(&instance->send_lock);
+ } /* while */
+ spin_unlock_irq(&instance->send_lock);
+
+ if (!instance->current_skb)
+ instance->current_skb = skb_dequeue(&instance->sndqueue);
+ if (!instance->current_skb)
+ return; /* done - no more skbs */
+
+ skb = instance->current_skb;
+
+ if (!(buf = instance->current_buffer)) {
+ /* start a fresh buffer from the spare pool */
+ spin_lock_irq(&instance->send_lock);
+ if (list_empty(&instance->spare_send_buffers)) {
+ instance->current_buffer = NULL;
+ spin_unlock_irq(&instance->send_lock);
+ return; /* done - no more buffers */
+ }
+ buf = list_entry(instance->spare_send_buffers.next,
+ struct udsl_send_buffer, list);
+ list_del(&buf->list);
+ spin_unlock_irq(&instance->send_lock);
+
+ buf->free_start = buf->base;
+ buf->free_cells = snd_buf_size;
+
+ instance->current_buffer = buf;
+ }
+
+ num_written = udsl_write_cells(instance, buf->free_cells, skb, &buf->free_start);
+
+ vdbg("udsl_process_send: wrote %u cells from skb 0x%p to buffer 0x%p",
+ num_written, skb, buf);
+
+ if (!(buf->free_cells -= num_written)) {
+ /* buffer is full - queue it for submission */
+ list_add_tail(&buf->list, &instance->filled_send_buffers);
+ instance->current_buffer = NULL;
+ }
+
+ /* NOTE(review): this vdbg reads buf after it may have been queued above;
+ looks benign since this tasklet is the only consumer of that list -
+ confirm */
+ vdbg("udsl_process_send: buffer contains %d cells, %d left",
+ snd_buf_size - buf->free_cells, buf->free_cells);
+
+ if (!UDSL_SKB(skb)->num_cells) {
+ /* skb completely consumed - release it and account the tx */
+ struct atm_vcc *vcc = UDSL_SKB(skb)->atm_data.vcc;
+
+ udsl_pop(vcc, skb);
+ instance->current_skb = NULL;
+
+ atomic_inc(&vcc->stats->tx);
+ }
+
+ goto made_progress;
+}
+
+/*
+ * Drop every queued (not yet transmitted) skb belonging to @vcc.
+ * Walks sndqueue under its lock using a saved-next pointer so unlinking is
+ * safe, then disables the send tasklet to safely discard current_skb if it
+ * belongs to this vcc.  skbs already handed to USB are not touched.
+ */
+static void udsl_cancel_send(struct udsl_instance_data *instance,
+ struct atm_vcc *vcc)
+{
+ struct sk_buff *skb, *n;
+
+ dbg("udsl_cancel_send entered");
+ spin_lock_irq(&instance->sndqueue.lock);
+ for (skb = instance->sndqueue.next, n = skb->next;
+ skb != (struct sk_buff *)&instance->sndqueue;
+ skb = n, n = skb->next)
+ if (UDSL_SKB(skb)->atm_data.vcc == vcc) {
+ dbg("udsl_cancel_send: popping skb 0x%p", skb);
+ __skb_unlink(skb, &instance->sndqueue);
+ udsl_pop(vcc, skb);
+ }
+ spin_unlock_irq(&instance->sndqueue.lock);
+
+ /* the tasklet owns current_skb, so stop it while we steal the field */
+ tasklet_disable(&instance->send_tasklet);
+ if ((skb = instance->current_skb) && (UDSL_SKB(skb)->atm_data.vcc == vcc)) {
+ dbg("udsl_cancel_send: popping current skb (0x%p)", skb);
+ instance->current_skb = NULL;
+ udsl_pop(vcc, skb);
+ }
+ tasklet_enable(&instance->send_tasklet);
+ dbg("udsl_cancel_send done");
+}
+
+/*
+ * ATM-layer transmit entry point.  Validates the skb (AAL5 only, bounded
+ * PDU size), queues it on sndqueue and kicks the send tasklet.  On any
+ * validation failure the skb is popped and an error is returned.
+ */
+static int udsl_atm_send(struct atm_vcc *vcc, struct sk_buff *skb)
+{
+	struct udsl_instance_data *instance = vcc->dev->dev_data;
+	int err = 0;
+
+	vdbg("udsl_atm_send called (skb 0x%p, len %u)", skb, skb->len);
+
+	if (!instance) {
+		dbg("udsl_atm_send: NULL data!");
+		err = -ENODEV;
+	} else if (vcc->qos.aal != ATM_AAL5) {
+		dbg("udsl_atm_send: unsupported ATM type %d!", vcc->qos.aal);
+		err = -EINVAL;
+	} else if (skb->len > ATM_MAX_AAL5_PDU) {
+		dbg("udsl_atm_send: packet too long (%d vs %d)!", skb->len,
+		    ATM_MAX_AAL5_PDU);
+		err = -EINVAL;
+	}
+
+	if (err) {
+		/* caller expects us to consume the skb even on failure */
+		udsl_pop(vcc, skb);
+		return err;
+	}
+
+	PACKETDEBUG(skb->data, skb->len);
+
+	udsl_groom_skb(vcc, skb);
+	skb_queue_tail(&instance->sndqueue, skb);
+	tasklet_schedule(&instance->send_tasklet);
+
+	return 0;
+}
+
+/********************
+** bean counting **
+********************/
+
+/*
+ * Final teardown, invoked by kref_put when the last reference drops.
+ * Kills both tasklets before freeing the instance so neither can run on
+ * freed memory, then releases the usb_device reference taken at setup.
+ */
+static void udsl_destroy_instance(struct kref *kref)
+{
+ struct udsl_instance_data *instance =
+ container_of(kref, struct udsl_instance_data, refcount);
+
+ tasklet_kill(&instance->receive_tasklet);
+ tasklet_kill(&instance->send_tasklet);
+ usb_put_dev(instance->usb_dev);
+ kfree(instance);
+}
+
+/* Take a reference on @instance. */
+void udsl_get_instance(struct udsl_instance_data *instance)
+{
+ kref_get(&instance->refcount);
+}
+
+/* Drop a reference; the last put runs udsl_destroy_instance(). */
+void udsl_put_instance(struct udsl_instance_data *instance)
+{
+ kref_put(&instance->refcount, udsl_destroy_instance);
+}
+
+/**********
+** ATM **
+**********/
+
+/*
+ * ATM device close: sever the device's back-pointer first, then drop the
+ * reference the ATM side held (taken in udsl_instance_setup).
+ */
+static void udsl_atm_dev_close(struct atm_dev *dev)
+{
+ struct udsl_instance_data *instance = dev->dev_data;
+
+ dev->dev_data = NULL;
+ udsl_put_instance(instance);
+}
+
+/*
+ * /proc read callback.  Emits one line per call, selected by *pos:
+ * 0 = description, 1 = MAC, 2 = AAL5 stats, 3 = line/firmware state.
+ * Returns the number of bytes written into @page, 0 when exhausted.
+ */
+static int udsl_atm_proc_read(struct atm_dev *atm_dev, loff_t * pos, char *page)
+{
+	struct udsl_instance_data *instance = atm_dev->dev_data;
+
+	if (!instance) {
+		dbg("udsl_atm_proc_read: NULL instance!");
+		return -ENODEV;
+	}
+
+	switch (*pos) {
+	case 0:
+		return sprintf(page, "%s\n", instance->description);
+
+	case 1:
+		return sprintf(page, "MAC: %02x:%02x:%02x:%02x:%02x:%02x\n",
+			       atm_dev->esi[0], atm_dev->esi[1],
+			       atm_dev->esi[2], atm_dev->esi[3],
+			       atm_dev->esi[4], atm_dev->esi[5]);
+
+	case 2:
+		return sprintf(page,
+			       "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
+			       atomic_read(&atm_dev->stats.aal5.tx),
+			       atomic_read(&atm_dev->stats.aal5.tx_err),
+			       atomic_read(&atm_dev->stats.aal5.rx),
+			       atomic_read(&atm_dev->stats.aal5.rx_err),
+			       atomic_read(&atm_dev->stats.aal5.rx_drop));
+
+	case 3:
+		switch (atm_dev->signal) {
+		case ATM_PHY_SIG_FOUND:
+			sprintf(page, "Line up");
+			break;
+		case ATM_PHY_SIG_LOST:
+			sprintf(page, "Line down");
+			break;
+		default:
+			sprintf(page, "Line state unknown");
+			break;
+		}
+
+		if (instance->usb_dev->state == USB_STATE_NOTATTACHED)
+			strcat(page, ", disconnected\n");
+		else if (instance->status == UDSL_LOADED_FIRMWARE)
+			strcat(page, ", firmware loaded\n");
+		else if (instance->status == UDSL_LOADING_FIRMWARE)
+			strcat(page, ", firmware loading\n");
+		else
+			strcat(page, ", no firmware\n");
+
+		return strlen(page);
+
+	default:
+		return 0;
+	}
+}
+
+/*
+ * Open an AAL5 vcc: validate QoS, optionally wait for firmware, then under
+ * the 'serialize' semaphore allocate the per-vcc data plus its reassembly
+ * skb and publish it on vcc_list (with the receive tasklet disabled, since
+ * the tasklet walks that list).
+ */
+static int udsl_atm_open(struct atm_vcc *vcc)
+{
+ struct udsl_instance_data *instance = vcc->dev->dev_data;
+ struct udsl_vcc_data *new;
+ unsigned int max_pdu;
+ int vci = vcc->vci;
+ short vpi = vcc->vpi;
+ int err;
+
+ dbg("udsl_atm_open: vpi %hd, vci %d", vpi, vci);
+
+ if (!instance) {
+ dbg("udsl_atm_open: NULL data!");
+ return -ENODEV;
+ }
+
+ /* only support AAL5 */
+ if ((vcc->qos.aal != ATM_AAL5) || (vcc->qos.rxtp.max_sdu < 0)
+ || (vcc->qos.rxtp.max_sdu > ATM_MAX_AAL5_PDU)) {
+ dbg("udsl_atm_open: unsupported ATM type %d!", vcc->qos.aal);
+ return -EINVAL;
+ }
+
+ /* hardware driver may require firmware before traffic can flow */
+ if (instance->firmware_wait &&
+ (err = instance->firmware_wait(instance)) < 0) {
+ dbg("udsl_atm_open: firmware not loaded (%d)!", err);
+ return err;
+ }
+
+ down(&instance->serialize); /* vs self, udsl_atm_close */
+
+ if (udsl_find_vcc(instance, vpi, vci)) {
+ dbg("udsl_atm_open: %hd/%d already in use!", vpi, vci);
+ up(&instance->serialize);
+ return -EADDRINUSE;
+ }
+
+ if (!(new = kmalloc(sizeof(struct udsl_vcc_data), GFP_KERNEL))) {
+ dbg("udsl_atm_open: no memory for vcc_data!");
+ up(&instance->serialize);
+ return -ENOMEM;
+ }
+
+ memset(new, 0, sizeof(struct udsl_vcc_data));
+ new->vcc = vcc;
+ new->vpi = vpi;
+ new->vci = vci;
+
+ /* udsl_extract_cells requires at least one cell */
+ max_pdu = max(1, UDSL_NUM_CELLS(vcc->qos.rxtp.max_sdu)) * ATM_CELL_PAYLOAD;
+ if (!(new->sarb = alloc_skb(max_pdu, GFP_KERNEL))) {
+ dbg("udsl_atm_open: no memory for SAR buffer!");
+ kfree(new);
+ up(&instance->serialize);
+ return -ENOMEM;
+ }
+
+ vcc->dev_data = new;
+
+ /* the receive tasklet iterates vcc_list - keep it off while we add */
+ tasklet_disable(&instance->receive_tasklet);
+ list_add(&new->list, &instance->vcc_list);
+ tasklet_enable(&instance->receive_tasklet);
+
+ set_bit(ATM_VF_ADDR, &vcc->flags);
+ set_bit(ATM_VF_PARTIAL, &vcc->flags);
+ set_bit(ATM_VF_READY, &vcc->flags);
+
+ up(&instance->serialize);
+
+ tasklet_schedule(&instance->receive_tasklet);
+
+ dbg("udsl_atm_open: allocated vcc data 0x%p (max_pdu: %u)", new, max_pdu);
+
+ return 0;
+}
+
+/*
+ * Close a vcc: cancel its queued tx skbs, then under 'serialize' unlink it
+ * from vcc_list (tasklet disabled while unlinking), free the reassembly
+ * skb and per-vcc data, and reset the vcc address/flag state.
+ */
+static void udsl_atm_close(struct atm_vcc *vcc)
+{
+ struct udsl_instance_data *instance = vcc->dev->dev_data;
+ struct udsl_vcc_data *vcc_data = vcc->dev_data;
+
+ dbg("udsl_atm_close called");
+
+ if (!instance || !vcc_data) {
+ dbg("udsl_atm_close: NULL data!");
+ return;
+ }
+
+ dbg("udsl_atm_close: deallocating vcc 0x%p with vpi %d vci %d",
+ vcc_data, vcc_data->vpi, vcc_data->vci);
+
+ udsl_cancel_send(instance, vcc);
+
+ down(&instance->serialize); /* vs self, udsl_atm_open */
+
+ /* keep the receive tasklet from walking the list mid-removal */
+ tasklet_disable(&instance->receive_tasklet);
+ list_del(&vcc_data->list);
+ tasklet_enable(&instance->receive_tasklet);
+
+ kfree_skb(vcc_data->sarb);
+ vcc_data->sarb = NULL;
+
+ kfree(vcc_data);
+ vcc->dev_data = NULL;
+
+ vcc->vpi = ATM_VPI_UNSPEC;
+ vcc->vci = ATM_VCI_UNSPEC;
+ clear_bit(ATM_VF_READY, &vcc->flags);
+ clear_bit(ATM_VF_PARTIAL, &vcc->flags);
+ clear_bit(ATM_VF_ADDR, &vcc->flags);
+
+ up(&instance->serialize);
+
+ dbg("udsl_atm_close successful");
+}
+
+/*
+ * ATM device ioctl.  Only ATM_QUERYLOOP is handled (no loopback modes
+ * supported); everything else is refused with -ENOIOCTLCMD.
+ */
+static int udsl_atm_ioctl(struct atm_dev *dev, unsigned int cmd,
+			  void __user * arg)
+{
+	if (cmd != ATM_QUERYLOOP)
+		return -ENOIOCTLCMD;
+
+	return put_user(ATM_LM_NONE, (int __user *)arg) ? -EFAULT : 0;
+}
+
+/**********
+** USB **
+**********/
+
+/*
+ * Common initialisation for a newly probed device.  Sets up refcounting,
+ * locks, lists and tasklets, allocates the receive/send urbs and buffers,
+ * registers the ATM device and builds a human-readable description.
+ * Returns 0 on success, -ENOMEM on any allocation failure (the 'fail'
+ * path frees whatever was allocated; kfree/usb_free_urb accept NULL).
+ */
+int udsl_instance_setup(struct usb_device *dev,
+ struct udsl_instance_data *instance)
+{
+ char *buf;
+ int i, length;
+
+ kref_init(&instance->refcount); /* one for USB */
+ udsl_get_instance(instance); /* one for ATM */
+
+ init_MUTEX(&instance->serialize);
+
+ instance->usb_dev = dev;
+
+ INIT_LIST_HEAD(&instance->vcc_list);
+
+ instance->status = UDSL_NO_FIRMWARE;
+ init_waitqueue_head(&instance->firmware_waiters);
+
+ spin_lock_init(&instance->receive_lock);
+ INIT_LIST_HEAD(&instance->spare_receivers);
+ INIT_LIST_HEAD(&instance->filled_receive_buffers);
+
+ tasklet_init(&instance->receive_tasklet, udsl_process_receive, (unsigned long)instance);
+ INIT_LIST_HEAD(&instance->spare_receive_buffers);
+
+ skb_queue_head_init(&instance->sndqueue);
+
+ spin_lock_init(&instance->send_lock);
+ INIT_LIST_HEAD(&instance->spare_senders);
+ INIT_LIST_HEAD(&instance->spare_send_buffers);
+
+ tasklet_init(&instance->send_tasklet, udsl_process_send,
+ (unsigned long)instance);
+ INIT_LIST_HEAD(&instance->filled_send_buffers);
+
+ /* receive init */
+ for (i = 0; i < num_rcv_urbs; i++) {
+ struct udsl_receiver *rcv = &(instance->receivers[i]);
+
+ if (!(rcv->urb = usb_alloc_urb(0, GFP_KERNEL))) {
+ dbg("udsl_usb_probe: no memory for receive urb %d!", i);
+ goto fail;
+ }
+
+ rcv->instance = instance;
+
+ list_add(&rcv->list, &instance->spare_receivers);
+ }
+
+ for (i = 0; i < num_rcv_bufs; i++) {
+ struct udsl_receive_buffer *buf =
+ &(instance->receive_buffers[i]);
+
+ /* each buffer holds rcv_buf_size padded cells */
+ buf->base = kmalloc(rcv_buf_size * (ATM_CELL_SIZE + instance->rcv_padding),
+ GFP_KERNEL);
+ if (!buf->base) {
+ dbg("udsl_usb_probe: no memory for receive buffer %d!", i);
+ goto fail;
+ }
+
+ list_add(&buf->list, &instance->spare_receive_buffers);
+ }
+
+ /* send init */
+ for (i = 0; i < num_snd_urbs; i++) {
+ struct udsl_sender *snd = &(instance->senders[i]);
+
+ if (!(snd->urb = usb_alloc_urb(0, GFP_KERNEL))) {
+ dbg("udsl_usb_probe: no memory for send urb %d!", i);
+ goto fail;
+ }
+
+ snd->instance = instance;
+
+ list_add(&snd->list, &instance->spare_senders);
+ }
+
+ for (i = 0; i < num_snd_bufs; i++) {
+ struct udsl_send_buffer *buf = &(instance->send_buffers[i]);
+
+ buf->base = kmalloc(snd_buf_size * (ATM_CELL_SIZE + instance->snd_padding),
+ GFP_KERNEL);
+ if (!buf->base) {
+ dbg("udsl_usb_probe: no memory for send buffer %d!", i);
+ goto fail;
+ }
+
+ list_add(&buf->list, &instance->spare_send_buffers);
+ }
+
+ /* ATM init */
+ instance->atm_dev = atm_dev_register(instance->driver_name,
+ &udsl_atm_devops, -1, NULL);
+ if (!instance->atm_dev) {
+ dbg("udsl_usb_probe: failed to register ATM device!");
+ goto fail;
+ }
+
+ instance->atm_dev->ci_range.vpi_bits = ATM_CI_MAX;
+ instance->atm_dev->ci_range.vci_bits = ATM_CI_MAX;
+ instance->atm_dev->signal = ATM_PHY_SIG_UNKNOWN;
+
+ /* temp init ATM device, set to 128kbit */
+ instance->atm_dev->link_rate = 128 * 1000 / 424;
+
+ /* device description: "<product> (<usb path>)", built incrementally;
+ NOTE(review): if usb_string fails we jump to finish with the
+ description left as-is - presumably the caller zeroed the instance,
+ confirm */
+ buf = instance->description;
+ length = sizeof(instance->description);
+
+ if ((i = usb_string(dev, dev->descriptor.iProduct, buf, length)) < 0)
+ goto finish;
+
+ buf += i;
+ length -= i;
+
+ i = scnprintf(buf, length, " (");
+ buf += i;
+ length -= i;
+
+ if (length <= 0 || (i = usb_make_path(dev, buf, length)) < 0)
+ goto finish;
+
+ buf += i;
+ length -= i;
+
+ snprintf(buf, length, ")");
+
+ finish:
+ /* ready for ATM callbacks */
+ wmb();
+ instance->atm_dev->dev_data = instance;
+
+ usb_get_dev(dev);
+
+ return 0;
+
+ fail:
+ for (i = 0; i < num_snd_bufs; i++)
+ kfree(instance->send_buffers[i].base);
+
+ for (i = 0; i < num_snd_urbs; i++)
+ usb_free_urb(instance->senders[i].urb);
+
+ for (i = 0; i < num_rcv_bufs; i++)
+ kfree(instance->receive_buffers[i].base);
+
+ for (i = 0; i < num_rcv_urbs; i++)
+ usb_free_urb(instance->receivers[i].urb);
+
+ return -ENOMEM;
+}
+
+/*
+ * Device disconnect: with each tasklet disabled, kill its urbs and reset
+ * its lists (no lock needed - the urbs are dead and the tasklet stopped),
+ * then free urbs and buffers and shut down the ATM device.
+ */
+void udsl_instance_disconnect(struct udsl_instance_data *instance)
+{
+ int i;
+
+ dbg("udsl_instance_disconnect entered");
+
+ if (!instance) {
+ dbg("udsl_instance_disconnect: NULL instance!");
+ return;
+ }
+
+ /* receive finalize */
+ tasklet_disable(&instance->receive_tasklet);
+
+ for (i = 0; i < num_rcv_urbs; i++)
+ usb_kill_urb(instance->receivers[i].urb);
+
+ /* no need to take the spinlock */
+ INIT_LIST_HEAD(&instance->filled_receive_buffers);
+ INIT_LIST_HEAD(&instance->spare_receive_buffers);
+
+ tasklet_enable(&instance->receive_tasklet);
+
+ for (i = 0; i < num_rcv_urbs; i++)
+ usb_free_urb(instance->receivers[i].urb);
+
+ for (i = 0; i < num_rcv_bufs; i++)
+ kfree(instance->receive_buffers[i].base);
+
+ /* send finalize */
+ tasklet_disable(&instance->send_tasklet);
+
+ for (i = 0; i < num_snd_urbs; i++)
+ usb_kill_urb(instance->senders[i].urb);
+
+ /* no need to take the spinlock */
+ INIT_LIST_HEAD(&instance->spare_senders);
+ INIT_LIST_HEAD(&instance->spare_send_buffers);
+ instance->current_buffer = NULL;
+
+ tasklet_enable(&instance->send_tasklet);
+
+ for (i = 0; i < num_snd_urbs; i++)
+ usb_free_urb(instance->senders[i].urb);
+
+ for (i = 0; i < num_snd_bufs; i++)
+ kfree(instance->send_buffers[i].base);
+
+ /* ATM finalize */
+ shutdown_atm_dev(instance->atm_dev);
+}
+
+/* entry points exported for the hardware-specific driver modules */
+EXPORT_SYMBOL_GPL(udsl_get_instance);
+EXPORT_SYMBOL_GPL(udsl_put_instance);
+EXPORT_SYMBOL_GPL(udsl_instance_setup);
+EXPORT_SYMBOL_GPL(udsl_instance_disconnect);
+
+/***********
+** init **
+***********/
+
+/*
+ * Module init: verify that udsl_control fits in skb->cb (it is stored
+ * there via UDSL_SKB) and that all module parameters are within bounds.
+ */
+static int __init udsl_usb_init(void)
+{
+	dbg("udsl_usb_init: driver version " DRIVER_VERSION);
+
+	if (sizeof(struct udsl_control) > sizeof(((struct sk_buff *) 0)->cb)) {
+		printk(KERN_ERR __FILE__ ": unusable with this kernel!\n");
+		return -EIO;
+	}
+
+	if (num_rcv_urbs > UDSL_MAX_RCV_URBS)
+		return -EINVAL;
+	if (num_snd_urbs > UDSL_MAX_SND_URBS)
+		return -EINVAL;
+	if (num_rcv_bufs > UDSL_MAX_RCV_BUFS)
+		return -EINVAL;
+	if (num_snd_bufs > UDSL_MAX_SND_BUFS)
+		return -EINVAL;
+	if (rcv_buf_size > UDSL_MAX_RCV_BUF_SIZE)
+		return -EINVAL;
+	if (snd_buf_size > UDSL_MAX_SND_BUF_SIZE)
+		return -EINVAL;
+
+	return 0;
+}
+
+/* Module exit: nothing to undo - per-device state is torn down on
+ disconnect, and init allocates nothing. */
+static void __exit udsl_usb_exit(void)
+{
+}
+
+module_init(udsl_usb_init);
+module_exit(udsl_usb_exit);
+
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRIVER_VERSION);
+
+/************
+** debug **
+************/
+
+#ifdef VERBOSE_DEBUG
+/*
+ * Dump @len bytes of @data via dbg(), 16 hex bytes per line prefixed with
+ * the decimal offset.  Returns the number of bytes printed (== len).
+ *
+ * Fixed: the original did sprintf(buffer, "%s %2.2x", buffer, ...), i.e.
+ * source and destination overlap, which is undefined behavior (C99
+ * 7.19.6.6 - sprintf's destination is restrict-qualified).  Use a cursor
+ * pointer instead, as pwc-style hexdump helpers do.
+ */
+static int udsl_print_packet(const unsigned char *data, int len)
+{
+	char buffer[256];
+	char *p;
+	int i = 0, j = 0;
+
+	for (i = 0; i < len;) {
+		p = buffer;
+		p += sprintf(p, "%.3d :", i);
+		for (j = 0; (j < 16) && (i < len); j++, i++)
+			p += sprintf(p, " %2.2x", data[i]);
+		dbg("%s", buffer);
+	}
+	return i;
+}
+#endif
--- /dev/null
+/******************************************************************************
+ * usb_atm.h - Generic USB xDSL driver core
+ *
+ * Copyright (C) 2001, Alcatel
+ * Copyright (C) 2003, Duncan Sands, SolNegro, Josep Comas
+ * Copyright (C) 2004, David Woodhouse
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ ******************************************************************************/
+
+#include <linux/list.h>
+#include <linux/usb.h>
+#include <linux/kref.h>
+#include <linux/atm.h>
+#include <linux/atmdev.h>
+#include <asm/semaphore.h>
+
+/* Upper bounds and defaults for the module parameters (urb/buffer counts
+ and buffer sizes); checked against in udsl_usb_init(). */
+#define UDSL_MAX_RCV_URBS 4
+#define UDSL_MAX_SND_URBS 4
+#define UDSL_MAX_RCV_BUFS 8
+#define UDSL_MAX_SND_BUFS 8
+#define UDSL_MAX_RCV_BUF_SIZE 1024 /* ATM cells */
+#define UDSL_MAX_SND_BUF_SIZE 1024 /* ATM cells */
+#define UDSL_DEFAULT_RCV_URBS 2
+#define UDSL_DEFAULT_SND_URBS 2
+#define UDSL_DEFAULT_RCV_BUFS 4
+#define UDSL_DEFAULT_SND_BUFS 4
+#define UDSL_DEFAULT_RCV_BUF_SIZE 64 /* ATM cells */
+#define UDSL_DEFAULT_SND_BUF_SIZE 64 /* ATM cells */
+
+/* header bytes of an ATM cell; cells needed for an AAL5 PDU incl. trailer */
+#define ATM_CELL_HEADER (ATM_CELL_SIZE - ATM_CELL_PAYLOAD)
+#define UDSL_NUM_CELLS(x) (((x) + ATM_AAL5_TRAILER + ATM_CELL_PAYLOAD - 1) / ATM_CELL_PAYLOAD)
+
+/* receive */
+
+/* one bulk-in data buffer; filled_cells is set by the urb completion */
+struct udsl_receive_buffer {
+ struct list_head list;
+ unsigned char *base;
+ unsigned int filled_cells;
+};
+
+/* one receive urb, paired with a buffer while in flight */
+struct udsl_receiver {
+ struct list_head list;
+ struct udsl_receive_buffer *buffer;
+ struct urb *urb;
+ struct udsl_instance_data *instance;
+};
+
+/* per-open-vcc state, linked on instance->vcc_list */
+struct udsl_vcc_data {
+ /* vpi/vci lookup */
+ struct list_head list;
+ short vpi;
+ int vci;
+ struct atm_vcc *vcc;
+
+ /* raw cell reassembly */
+ struct sk_buff *sarb;
+};
+
+/* send */
+
+/* one bulk-out data buffer; free_start/free_cells track the fill point */
+struct udsl_send_buffer {
+ struct list_head list;
+ unsigned char *base;
+ unsigned char *free_start;
+ unsigned int free_cells;
+};
+
+/* one send urb, paired with a buffer while in flight */
+struct udsl_sender {
+ struct list_head list;
+ struct udsl_send_buffer *buffer;
+ struct urb *urb;
+ struct udsl_instance_data *instance;
+};
+
+/* per-skb tx state stored in skb->cb (size checked in udsl_usb_init) */
+struct udsl_control {
+ struct atm_skb_data atm_data;
+ unsigned int num_cells;
+ unsigned int num_entire;
+ unsigned int pdu_padding;
+ unsigned char cell_header[ATM_CELL_HEADER];
+ unsigned char aal5_trailer[ATM_AAL5_TRAILER];
+};
+
+#define UDSL_SKB(x) ((struct udsl_control *)(x)->cb)
+/* main driver data */
+
+/* firmware-load progress, reported in /proc and gated by firmware_wait */
+enum udsl_status {
+ UDSL_NO_FIRMWARE,
+ UDSL_LOADING_FIRMWARE,
+ UDSL_LOADED_FIRMWARE
+};
+
+/* all state for one device; refcounted (USB + ATM each hold a ref) */
+struct udsl_instance_data {
+ struct kref refcount;
+ struct semaphore serialize; /* guards open vs close */
+
+ /* USB device part */
+ struct usb_device *usb_dev;
+ char description[64];
+ int data_endpoint;
+ int snd_padding; /* extra bytes per tx cell, device-specific */
+ int rcv_padding; /* extra bytes per rx cell, device-specific */
+ const char *driver_name;
+
+ /* ATM device part */
+ struct atm_dev *atm_dev;
+ struct list_head vcc_list; /* walked by the receive tasklet */
+
+ /* firmware */
+ int (*firmware_wait) (struct udsl_instance_data *);
+ enum udsl_status status;
+ wait_queue_head_t firmware_waiters;
+
+ /* receive */
+ struct udsl_receiver receivers[UDSL_MAX_RCV_URBS];
+ struct udsl_receive_buffer receive_buffers[UDSL_MAX_RCV_BUFS];
+
+ /* shared with the receive urb completion handler */
+ spinlock_t receive_lock;
+ struct list_head spare_receivers;
+ struct list_head filled_receive_buffers;
+
+ /* owned by the receive tasklet */
+ struct tasklet_struct receive_tasklet;
+ struct list_head spare_receive_buffers;
+
+ /* send */
+ struct udsl_sender senders[UDSL_MAX_SND_URBS];
+ struct udsl_send_buffer send_buffers[UDSL_MAX_SND_BUFS];
+
+ struct sk_buff_head sndqueue;
+
+ /* shared with the send urb completion handler */
+ spinlock_t send_lock;
+ struct list_head spare_senders;
+ struct list_head spare_send_buffers;
+
+ /* owned by the send tasklet */
+ struct tasklet_struct send_tasklet;
+ struct sk_buff *current_skb; /* being emptied */
+ struct udsl_send_buffer *current_buffer; /* being filled */
+ struct list_head filled_send_buffers;
+};
+
+extern int udsl_instance_setup(struct usb_device *dev,
+ struct udsl_instance_data *instance);
+extern void udsl_instance_disconnect(struct udsl_instance_data *instance);
+extern void udsl_get_instance(struct udsl_instance_data *instance);
+extern void udsl_put_instance(struct udsl_instance_data *instance);
--- /dev/null
+# Invoked by kbuild (KERNELRELEASE set): just declare the module objects.
+ifneq ($(KERNELRELEASE),)
+
+pwc-objs := pwc-if.o pwc-misc.o pwc-ctrl.o pwc-uncompress.o pwc-dec1.o pwc-dec23.o pwc-kiara.o pwc-timon.o
+
+obj-m += pwc.o
+
+# Invoked directly from the command line: re-enter via the kernel build tree.
+else
+
+KDIR := /lib/modules/$(shell uname -r)/build
+PWD := $(shell pwd)
+
+default:
+ $(MAKE) -C $(KDIR) SUBDIRS=$(PWD) modules
+
+endif
+
+# Remove all build products (defined in both invocation modes).
+clean:
+ rm -f *.[oas] .*.flags *.ko .*.cmd .*.d .*.tmp *.mod.c
+ rm -rf .tmp_versions
+
--- /dev/null
+/* Driver for Philips webcam
+ Functions that send various control messages to the webcam, including
+ video modes.
+ (C) 1999-2003 Nemosoft Unv.
+ (C) 2004 Luc Saillard (luc@saillard.org)
+
+ NOTE: this version of pwc is an unofficial (modified) release of pwc & pcwx
+ driver and thus may have bugs that are not present in the original version.
+ Please send bug reports and support requests to <luc@saillard.org>.
+ The decompression routines have been implemented by reverse-engineering the
+ Nemosoft binary pwcx module. Caveat emptor.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*/
+
+/*
+ Changes
+ 2001/08/03 Alvarado Added methods for changing white balance and
+ red/green gains
+ */
+
+/* Control functions for the cam; brightness, contrast, video mode, etc. */
+
+#ifdef __KERNEL__
+#include <asm/uaccess.h>
+#endif
+#include <asm/errno.h>
+#include <linux/version.h>
+
+#include "pwc.h"
+#include "pwc-ioctl.h"
+#include "pwc-uncompress.h"
+#include "pwc-kiara.h"
+#include "pwc-timon.h"
+#include "pwc-dec1.h"
+#include "pwc-dec23.h"
+
+/* Vendor-specific USB control request codes and selector values, sent to
+ the camera with usb_control_msg() (USB_TYPE_VENDOR) via the
+ Send/RecvControlMsg macros below. */
+
+/* Request types: video */
+#define SET_LUM_CTL 0x01
+#define GET_LUM_CTL 0x02
+#define SET_CHROM_CTL 0x03
+#define GET_CHROM_CTL 0x04
+#define SET_STATUS_CTL 0x05
+#define GET_STATUS_CTL 0x06
+#define SET_EP_STREAM_CTL 0x07
+#define GET_EP_STREAM_CTL 0x08
+#define SET_MPT_CTL 0x0D
+#define GET_MPT_CTL 0x0E
+
+/* Selectors for the Luminance controls [GS]ET_LUM_CTL */
+#define AGC_MODE_FORMATTER 0x2000
+#define PRESET_AGC_FORMATTER 0x2100
+#define SHUTTER_MODE_FORMATTER 0x2200
+#define PRESET_SHUTTER_FORMATTER 0x2300
+#define PRESET_CONTOUR_FORMATTER 0x2400
+#define AUTO_CONTOUR_FORMATTER 0x2500
+#define BACK_LIGHT_COMPENSATION_FORMATTER 0x2600
+#define CONTRAST_FORMATTER 0x2700
+#define DYNAMIC_NOISE_CONTROL_FORMATTER 0x2800
+#define FLICKERLESS_MODE_FORMATTER 0x2900
+#define AE_CONTROL_SPEED 0x2A00
+#define BRIGHTNESS_FORMATTER 0x2B00
+#define GAMMA_FORMATTER 0x2C00
+
+/* Selectors for the Chrominance controls [GS]ET_CHROM_CTL */
+#define WB_MODE_FORMATTER 0x1000
+#define AWB_CONTROL_SPEED_FORMATTER 0x1100
+#define AWB_CONTROL_DELAY_FORMATTER 0x1200
+#define PRESET_MANUAL_RED_GAIN_FORMATTER 0x1300
+#define PRESET_MANUAL_BLUE_GAIN_FORMATTER 0x1400
+#define COLOUR_MODE_FORMATTER 0x1500
+#define SATURATION_MODE_FORMATTER1 0x1600
+#define SATURATION_MODE_FORMATTER2 0x1700
+
+/* Selectors for the Status controls [GS]ET_STATUS_CTL */
+#define SAVE_USER_DEFAULTS_FORMATTER 0x0200
+#define RESTORE_USER_DEFAULTS_FORMATTER 0x0300
+#define RESTORE_FACTORY_DEFAULTS_FORMATTER 0x0400
+#define READ_AGC_FORMATTER 0x0500
+#define READ_SHUTTER_FORMATTER 0x0600
+#define READ_RED_GAIN_FORMATTER 0x0700
+#define READ_BLUE_GAIN_FORMATTER 0x0800
+#define SENSOR_TYPE_FORMATTER1 0x0C00
+#define READ_RAW_Y_MEAN_FORMATTER 0x3100
+#define SET_POWER_SAVE_MODE_FORMATTER 0x3200
+#define MIRROR_IMAGE_FORMATTER 0x3300
+#define LED_FORMATTER 0x3400
+#define SENSOR_TYPE_FORMATTER2 0x3700
+
+/* Formatters for the Video Endpoint controls [GS]ET_EP_STREAM_CTL */
+#define VIDEO_OUTPUT_CONTROL_FORMATTER 0x0100
+
+/* Formatters for the motorized pan & tilt [GS]ET_MPT_CTL */
+#define PT_RELATIVE_CONTROL_FORMATTER 0x01
+#define PT_RESET_CONTROL_FORMATTER 0x02
+#define PT_STATUS_FORMATTER 0x03
+
+/* Human-readable names for the image sizes, indexed by the PSZ_* values
+ (PSZ_MAX entries, subQCIF .. VGA). */
+static char *size2name[PSZ_MAX] =
+{
+ "subQCIF",
+ "QSIF",
+ "QCIF",
+ "SIF",
+ "CIF",
+ "VGA",
+};
+
+/********/
+
+/* Entries for the Nala (645/646) camera; the Nala doesn't have compression
+ preferences, so you either get compressed or non-compressed streams.
+
+ An alternate value of 0 means this mode is not available at all.
+ */
+
+/* One Nala video mode: USB alternate setting, compression flag and the
+ 3-byte command precomputed for SET_EP_STREAM_CTL (alternate == 0 means
+ the size/framerate combination is unavailable). */
+struct Nala_table_entry {
+ char alternate; /* USB alternate setting */
+ int compressed; /* Compressed yes/no */
+
+ unsigned char mode[3]; /* precomputed mode table */
+};
+
+/* mode table indexed by [image size][framerate slot]; data generated
+ into pwc-nala.h */
+static struct Nala_table_entry Nala_table[PSZ_MAX][8] =
+{
+#include "pwc-nala.h"
+};
+
+
+/****************************************************************************/
+
+
+/* Convenience wrappers around usb_control_msg for vendor requests.
+ NOTE: these expand in place and rely on locals named 'pdev' and 'buf'
+ existing in the calling function; '&buf' assumes buf is an array, so
+ the pointer value equals buf itself. */
+#define SendControlMsg(request, value, buflen) \
+ usb_control_msg(pdev->udev, usb_sndctrlpipe(pdev->udev, 0), \
+ request, \
+ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, \
+ value, \
+ pdev->vcinterface, \
+ &buf, buflen, HZ / 2)
+
+#define RecvControlMsg(request, value, buflen) \
+ usb_control_msg(pdev->udev, usb_rcvctrlpipe(pdev->udev, 0), \
+ request, \
+ USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, \
+ value, \
+ pdev->vcinterface, \
+ &buf, buflen, HZ / 2)
+
+
+#if PWC_DEBUG
+/*
+ * Debug helper: dump @len bytes at @p via Debug(), 16 hex bytes per line.
+ */
+void pwc_hexdump(void *p, int len)
+{
+	unsigned char *bytes;
+	char line[100];
+	char *out;
+	int i;
+
+	bytes = (unsigned char *)p;
+	out = line;
+	*out = '\0';
+	Debug("Doing hexdump @ %p, %d bytes.\n", p, len);
+	for (i = 0; i < len; i++) {
+		out += sprintf(out, "%02X ", bytes[i]);
+		if ((i & 0xF) == 0xF) {
+			/* line complete - flush and restart the buffer */
+			Debug("%s\n", line);
+			out = line;
+			*out = '\0';
+		}
+	}
+	if ((i & 0xF) != 0)
+		Debug("%s\n", line);
+}
+#endif
+
+/*
+ * Send a precomputed video-mode command block to endpoint @index using the
+ * SET_EP_STREAM_CTL vendor request.  Returns the usb_control_msg result.
+ */
+static inline int send_video_command(struct usb_device *udev, int index, void *buf, int buflen)
+{
+ return usb_control_msg(udev,
+ usb_sndctrlpipe(udev, 0),
+ SET_EP_STREAM_CTL,
+ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ VIDEO_OUTPUT_CONTROL_FORMATTER,
+ index,
+ buf, buflen, HZ);
+}
+
+
+
+/*
+ * Configure a Nala (645/646) camera for @size/@frames.  Maps the requested
+ * framerate to the nearest supported one, looks up the precomputed mode,
+ * sends it to the camera and records the resulting frame geometry in @pdev.
+ * Returns 0, -EINVAL for unsupported combinations, -ENOENT for
+ * compressed-only modes, or the usb_control_msg error.
+ */
+static inline int set_video_mode_Nala(struct pwc_device *pdev, int size, int frames)
+{
+ unsigned char buf[3];
+ int ret, fps;
+ struct Nala_table_entry *pEntry;
+ int frames2frames[31] =
+ { /* closest match of framerate */
+ 0, 0, 0, 0, 4, /* 0-4 */
+ 5, 5, 7, 7, 10, /* 5-9 */
+ 10, 10, 12, 12, 15, /* 10-14 */
+ 15, 15, 15, 20, 20, /* 15-19 */
+ 20, 20, 20, 24, 24, /* 20-24 */
+ 24, 24, 24, 24, 24, /* 25-29 */
+ 24 /* 30 */
+ };
+ int frames2table[31] =
+ { 0, 0, 0, 0, 0, /* 0-4 */
+ 1, 1, 1, 2, 2, /* 5-9 */
+ 3, 3, 4, 4, 4, /* 10-14 */
+ 5, 5, 5, 5, 5, /* 15-19 */
+ 6, 6, 6, 6, 7, /* 20-24 */
+ 7, 7, 7, 7, 7, /* 25-29 */
+ 7 /* 30 */
+ };
+
+ /* NOTE(review): both tables cover indices up to 30, but frames > 25 is
+ rejected here - confirm whether 26-30 fps should be accepted */
+ if (size < 0 || size > PSZ_CIF || frames < 4 || frames > 25)
+ return -EINVAL;
+ frames = frames2frames[frames];
+ fps = frames2table[frames];
+ pEntry = &Nala_table[size][fps];
+ if (pEntry->alternate == 0)
+ return -EINVAL;
+
+ if (pEntry->compressed)
+ return -ENOENT; /* Not supported. */
+
+ memcpy(buf, pEntry->mode, 3);
+ ret = send_video_command(pdev->udev, pdev->vendpoint, buf, 3);
+ if (ret < 0) {
+ Debug("Failed to send video command... %d\n", ret);
+ return ret;
+ }
+ /* NOTE(review): unreachable - pEntry->compressed already returned
+ -ENOENT above; dead code unless that early return is removed */
+ if (pEntry->compressed && pdev->vpalette != VIDEO_PALETTE_RAW)
+ {
+ switch(pdev->type) {
+ case 645:
+ case 646:
+ pwc_dec1_init(pdev->type, pdev->release, buf, pdev->decompress_data);
+ break;
+
+ case 675:
+ case 680:
+ case 690:
+ case 720:
+ case 730:
+ case 740:
+ case 750:
+ pwc_dec23_init(pdev->type, pdev->release, buf, pdev->decompress_data);
+ break;
+ }
+ }
+
+ pdev->cmd_len = 3;
+ memcpy(pdev->cmd_buf, buf, 3);
+
+ /* Set various parameters */
+ pdev->vframes = frames;
+ pdev->vsize = size;
+ pdev->valternate = pEntry->alternate;
+ pdev->image = pwc_image_sizes[size];
+ pdev->frame_size = (pdev->image.x * pdev->image.y * 3) / 2;
+ /* NOTE(review): likewise unreachable for the same reason, so
+ vbandlength always ends up 0 here */
+ if (pEntry->compressed) {
+ if (pdev->release < 5) { /* 4 fold compression */
+ pdev->vbandlength = 528;
+ pdev->frame_size /= 4;
+ }
+ else {
+ pdev->vbandlength = 704;
+ pdev->frame_size /= 3;
+ }
+ }
+ else
+ pdev->vbandlength = 0;
+ return 0;
+}
+
+
+/* Select a video mode on the Timon (PCA675/680/690) family.
+   Falls back to progressively higher compression ratios when the
+   preferred one has no table entry.  Returns 0, -EINVAL on bad
+   parameters, -ENOENT when no supported mode exists, or a negative
+   USB error. */
+static inline int set_video_mode_Timon(struct pwc_device *pdev, int size, int frames, int compression, int snapshot)
+{
+	unsigned char buf[13];
+	const struct Timon_table_entry *entry = NULL;
+	int ret, fps, c;
+
+	if (size >= PSZ_MAX || frames < 5 || frames > 30 || compression < 0 || compression > 3)
+		return -EINVAL;
+	if (size == PSZ_VGA && frames > 15)
+		return -EINVAL;	/* VGA is limited to 15 fps */
+	fps = (frames / 5) - 1;
+
+	/* Scan from the preferred compression ratio upwards until a
+	   supported entry is found. */
+	for (c = compression; c <= 3; c++) {
+		entry = &Timon_table[size][fps][c];
+		if (entry->alternate != 0)
+			break;
+	}
+	if (entry == NULL || entry->alternate == 0)
+		return -ENOENT; /* Not supported. */
+
+	memcpy(buf, entry->mode, 13);
+	if (snapshot)
+		buf[0] |= 0x80;	/* snapshot flag is the top bit of byte 0 */
+	ret = send_video_command(pdev->udev, pdev->vendpoint, buf, 13);
+	if (ret < 0)
+		return ret;
+
+	/* Compressed modes need the decompressor tables initialized,
+	   unless the raw palette was requested. */
+	if (entry->bandlength > 0 && pdev->vpalette != VIDEO_PALETTE_RAW)
+		pwc_dec23_init(pdev->type, pdev->release, buf, pdev->decompress_data);
+
+	/* Remember the command bytes for later queries. */
+	pdev->cmd_len = 13;
+	memcpy(pdev->cmd_buf, buf, 13);
+
+	/* Store the resulting stream parameters. */
+	pdev->vframes = frames;
+	pdev->vsize = size;
+	pdev->vsnapshot = snapshot;
+	pdev->valternate = entry->alternate;
+	pdev->image = pwc_image_sizes[size];
+	pdev->vbandlength = entry->bandlength;
+	if (entry->bandlength > 0)
+		pdev->frame_size = (entry->bandlength * pdev->image.y) / 4;
+	else
+		pdev->frame_size = (pdev->image.x * pdev->image.y * 12) / 8;
+	return 0;
+}
+
+
+/* Select a video mode on the Kiara (PCA720/730/740/750) family.
+ * @size: PSZ_* index; @frames: 5..30 fps; @compression: preferred ratio
+ * (0..3, falls back to higher ones); @snapshot: snapshot mode.
+ * VGA @ 5 fps + snapshot selects a special raw Bayer mode, which
+ * requires the RAW palette.
+ * Returns 0, -EINVAL on bad parameters, -ENOENT when unsupported.
+ */
+static inline int set_video_mode_Kiara(struct pwc_device *pdev, int size, int frames, int compression, int snapshot)
+{
+	const struct Kiara_table_entry *pChoose = NULL;	/* FIX: was `0`; use NULL for pointers */
+	int fps, ret;
+	unsigned char buf[12];
+	/* static const: this entry never changes, so do not rebuild it on
+	   the stack at every call. */
+	static const struct Kiara_table_entry RawEntry = {6, 773, 1272, {0xAD, 0xF4, 0x10, 0x27, 0xB6, 0x24, 0x96, 0x02, 0x30, 0x05, 0x03, 0x80}};
+
+	if (size >= PSZ_MAX || frames < 5 || frames > 30 || compression < 0 || compression > 3)
+		return -EINVAL;
+	if (size == PSZ_VGA && frames > 15)
+		return -EINVAL;
+	fps = (frames / 5) - 1;
+
+	/* special case: VGA @ 5 fps and snapshot is raw bayer mode */
+	if (size == PSZ_VGA && frames == 5 && snapshot)
+	{
+		/* Only available in case the raw palette is selected or
+		   we have the decompressor available. This mode is
+		   only available in compressed form
+		*/
+		if (pdev->vpalette == VIDEO_PALETTE_RAW)
+		{
+			Info("Choosing VGA/5 BAYER mode (%d).\n", pdev->vpalette);
+			pChoose = &RawEntry;
+		}
+		else
+		{
+			Info("VGA/5 BAYER mode _must_ have a decompressor available, or use RAW palette.\n");
+		}
+	}
+	else
+	{
+		/* Find a supported framerate with progressively higher compression ratios
+		   if the preferred ratio is not available.
+		   Skip this step when using RAW modes.
+		*/
+		while (compression <= 3) {
+			pChoose = &Kiara_table[size][fps][compression];
+			if (pChoose->alternate != 0)
+				break;
+			compression++;
+		}
+	}
+	if (pChoose == NULL || pChoose->alternate == 0)
+		return -ENOENT; /* Not supported. */
+
+	Debug("Using alternate setting %d.\n", pChoose->alternate);
+
+	/* usb_control_msg needs a writable buffer, so copy the mode bytes. */
+	memcpy(buf, pChoose->mode, 12);
+	if (snapshot)
+		buf[0] |= 0x80;
+
+	/* Firmware bug: video endpoint is 5, but commands are sent to endpoint 4 */
+	ret = send_video_command(pdev->udev, 4 /* pdev->vendpoint */, buf, 12);
+	if (ret < 0)
+		return ret;
+
+	if (pChoose->bandlength > 0 && pdev->vpalette != VIDEO_PALETTE_RAW)
+		pwc_dec23_init(pdev->type, pdev->release, buf, pdev->decompress_data);
+
+	/* Remember the command bytes for later queries. */
+	pdev->cmd_len = 12;
+	memcpy(pdev->cmd_buf, buf, 12);
+	/* All set and go */
+	pdev->vframes = frames;
+	pdev->vsize = size;
+	pdev->vsnapshot = snapshot;
+	pdev->valternate = pChoose->alternate;
+	pdev->image = pwc_image_sizes[size];
+	pdev->vbandlength = pChoose->bandlength;
+	if (pdev->vbandlength > 0)
+		pdev->frame_size = (pdev->vbandlength * pdev->image.y) / 4;
+	else
+		pdev->frame_size = (pdev->image.x * pdev->image.y * 12) / 8;
+	return 0;
+}
+
+
+
+/**
+   Set a new video mode, given a viewport and framerate.
+   @pdev: device structure
+   @width: viewport width
+   @height: viewport height
+   @frames: framerate, in fps
+   @compression: preferred compression ratio
+   @snapshot: snapshot mode or streaming
+   Returns 0 on success, -ERANGE when no suitable size exists, or a
+   negative error from the chipset-specific helper.
+ */
+int pwc_set_video_mode(struct pwc_device *pdev, int width, int height, int frames, int compression, int snapshot)
+{
+	int ret, size;
+
+	Trace(TRACE_FLOW, "set_video_mode(%dx%d @ %d, palette %d).\n", width, height, frames, pdev->vpalette);
+	size = pwc_decode_size(pdev, width, height);
+	if (size < 0) {
+		Debug("Could not find suitable size.\n");
+		return -ERANGE;
+	}
+	Debug("decode_size = %d.\n", size);
+
+	/* Dispatch on chipset family: Nala, Timon or Kiara. */
+	ret = -EINVAL;
+	switch(pdev->type) {
+	case 645:
+	case 646:
+		ret = set_video_mode_Nala(pdev, size, frames);
+		break;
+
+	case 675:
+	case 680:
+	case 690:
+		ret = set_video_mode_Timon(pdev, size, frames, compression, snapshot);
+		break;
+
+	case 720:
+	case 730:
+	case 740:
+	case 750:
+		ret = set_video_mode_Kiara(pdev, size, frames, compression, snapshot);
+		break;
+	}
+	if (ret < 0) {
+		if (ret == -ENOENT)
+			Info("Video mode %s@%d fps is only supported with the decompressor module (pwcx).\n", size2name[size], frames);
+		else {
+			Err("Failed to set video mode %s@%d fps; return code = %d\n", size2name[size], frames, ret);
+		}
+		return ret;
+	}
+	pdev->view.x = width;
+	pdev->view.y = height;
+	/* total per-frame size = image data + protocol header/trailer */
+	pdev->frame_total_size = pdev->frame_size + pdev->frame_header_size + pdev->frame_trailer_size;
+	pwc_set_image_buffer_size(pdev);
+	Trace(TRACE_SIZE, "Set viewport to %dx%d, image size is %dx%d.\n", width, height, pwc_image_sizes[size].x, pwc_image_sizes[size].y);
+	return 0;
+}
+
+
+/* Recompute image/view buffer sizes and offsets for the current
+   palette, then blank all allocated image buffers. */
+void pwc_set_image_buffer_size(struct pwc_device *pdev)
+{
+	int i;
+	int factor = 0;		/* quarter-bytes per pixel */
+	int filler = 0;		/* byte used to blank the buffers */
+
+	if (pdev->vpalette == VIDEO_PALETTE_YUV420P) {
+		factor = 6;
+		filler = 128;
+	} else if (pdev->vpalette == VIDEO_PALETTE_RAW) {
+		factor = 6; /* can be uncompressed YUV420P */
+		filler = 0;
+	}
+
+	/* Set sizes in bytes */
+	pdev->image.size = pdev->image.x * pdev->image.y * factor / 4;
+	pdev->view.size = pdev->view.x * pdev->view.y * factor / 4;
+
+	/* Align offset, or you'll get some very weird results in
+	   YUV420 mode... x must be multiple of 4 (to get the Y's in
+	   place), and y even (or you'll mixup U & V). This is less of a
+	   problem for YUV420P.
+	*/
+	pdev->offset.x = ((pdev->view.x - pdev->image.x) / 2) & 0xFFFC;
+	pdev->offset.y = ((pdev->view.y - pdev->image.y) / 2) & 0xFFFE;
+
+	/* Fill buffers with gray or black */
+	for (i = 0; i < MAX_IMAGES; i++)
+		if (pdev->image_ptr[i] != NULL)
+			memset(pdev->image_ptr[i], filler, pdev->view.size);
+}
+
+
+
+/* BRIGHTNESS */
+
+/* Read brightness, scaled up to the 16-bit V4L range.
+   Returns a negative errno on USB failure. */
+int pwc_get_brightness(struct pwc_device *pdev)
+{
+	unsigned char buf;	/* FIX: was `char`; a signed value > 0x7f
+				   would sign-extend in the shift below and
+				   be returned as a bogus negative value */
+	int ret;
+
+	ret = RecvControlMsg(GET_LUM_CTL, BRIGHTNESS_FORMATTER, 1);
+	if (ret < 0)
+		return ret;
+	/* The camera reports 7 significant bits; scale to 0..0xFE00. */
+	return buf << 9;
+}
+
+/* Set brightness from a 16-bit V4L value; camera takes the top 7 bits. */
+int pwc_set_brightness(struct pwc_device *pdev, int value)
+{
+	char buf;
+
+	if (value < 0)
+		value = 0;
+	else if (value > 0xffff)
+		value = 0xffff;
+	buf = (value >> 9) & 0x7f;
+	return SendControlMsg(SET_LUM_CTL, BRIGHTNESS_FORMATTER, 1);
+}
+
+/* CONTRAST */
+
+/* Read contrast, scaled up to the 16-bit V4L range.
+   Returns a negative errno on USB failure. */
+int pwc_get_contrast(struct pwc_device *pdev)
+{
+	unsigned char buf;	/* FIX: was `char`; avoid sign-extension in
+				   the shift below for values > 0x7f */
+	int ret;
+
+	ret = RecvControlMsg(GET_LUM_CTL, CONTRAST_FORMATTER, 1);
+	if (ret < 0)
+		return ret;
+	/* The camera reports 6 significant bits; scale to 0..0xFC00. */
+	return buf << 10;
+}
+
+/* Set contrast from a 16-bit V4L value; camera takes the top 6 bits. */
+int pwc_set_contrast(struct pwc_device *pdev, int value)
+{
+	char buf;
+
+	if (value < 0)
+		value = 0;
+	else if (value > 0xffff)
+		value = 0xffff;
+	buf = (value >> 10) & 0x3f;
+	return SendControlMsg(SET_LUM_CTL, CONTRAST_FORMATTER, 1);
+}
+
+/* GAMMA */
+
+/* Read gamma, scaled up to the 16-bit V4L range.
+   Returns a negative errno on USB failure. */
+int pwc_get_gamma(struct pwc_device *pdev)
+{
+	unsigned char buf;	/* FIX: was `char`; avoid sign-extension in
+				   the shift below for values > 0x7f */
+	int ret;
+
+	ret = RecvControlMsg(GET_LUM_CTL, GAMMA_FORMATTER, 1);
+	if (ret < 0)
+		return ret;
+	/* The camera reports 5 significant bits; scale to 0..0xF800. */
+	return buf << 11;
+}
+
+/* Set gamma from a 16-bit V4L value; camera takes the top 5 bits. */
+int pwc_set_gamma(struct pwc_device *pdev, int value)
+{
+	char buf;
+
+	if (value < 0)
+		value = 0;
+	else if (value > 0xffff)
+		value = 0xffff;
+	buf = (value >> 11) & 0x1f;
+	return SendControlMsg(SET_LUM_CTL, GAMMA_FORMATTER, 1);
+}
+
+
+/* SATURATION */
+
+/* Read colour saturation, mapped to the 16-bit V4L range around a
+   neutral point of 32768.  Not available on the Nala family; returns -1
+   there.  NOTE(review): `buf` is deliberately plain char — the camera
+   reports a signed value of roughly -100..+100; assumes char is signed
+   on this platform (true on x86) — confirm for other architectures. */
+int pwc_get_saturation(struct pwc_device *pdev)
+{
+	char buf;
+	int ret;
+
+	if (pdev->type < 675)
+		return -1;
+	/* Vesta (type < 730) and Kiara use different formatter IDs. */
+	ret = RecvControlMsg(GET_CHROM_CTL, pdev->type < 730 ? SATURATION_MODE_FORMATTER2 : SATURATION_MODE_FORMATTER1, 1);
+	if (ret < 0)
+		return ret;
+	return 32768 + buf * 327;
+}
+
+/* Set colour saturation; 16-bit V4L value mapped to the camera's
+   -100..+100 range.  -EINVAL on the Nala family, which lacks the control. */
+int pwc_set_saturation(struct pwc_device *pdev, int value)
+{
+	char buf;
+
+	if (pdev->type < 675)
+		return -EINVAL;
+	if (value < 0)
+		value = 0;
+	else if (value > 0xffff)
+		value = 0xffff;
+	/* saturation ranges from -100 to +100 */
+	buf = (value - 32768) / 327;
+	return SendControlMsg(SET_CHROM_CTL, pdev->type < 730 ? SATURATION_MODE_FORMATTER2 : SATURATION_MODE_FORMATTER1, 1);
+}
+
+/* AGC */
+
+/* Set automatic (mode != 0) or fixed gain control; for fixed gain,
+   `value` (0..0xffff) selects the 6-bit preset. */
+static inline int pwc_set_agc(struct pwc_device *pdev, int mode, int value)
+{
+	char buf;
+	int ret;
+
+	buf = mode ? 0x0 /* auto */ : 0xff /* fixed */;
+	ret = SendControlMsg(SET_LUM_CTL, AGC_MODE_FORMATTER, 1);
+
+	if (!mode && ret >= 0) {
+		/* Fixed gain: clamp and send the 6-bit preset. */
+		if (value < 0)
+			value = 0;
+		else if (value > 0xffff)
+			value = 0xffff;
+		buf = (value >> 10) & 0x3F;
+		ret = SendControlMsg(SET_LUM_CTL, PRESET_AGC_FORMATTER, 1);
+	}
+	return ret < 0 ? ret : 0;
+}
+
+/* Read the gain: a non-negative *value is a fixed preset (scaled to 16
+   bits); a negative *value signals automatic gain with the momentary
+   level encoded in its magnitude. */
+static inline int pwc_get_agc(struct pwc_device *pdev, int *value)
+{
+	unsigned char buf;
+	int ret;
+
+	ret = RecvControlMsg(GET_LUM_CTL, AGC_MODE_FORMATTER, 1);
+	if (ret < 0)
+		return ret;
+
+	if (buf == 0) {
+		/* auto: read the momentary gain level */
+		ret = RecvControlMsg(GET_STATUS_CTL, READ_AGC_FORMATTER, 1);
+		if (ret < 0)
+			return ret;
+		/* Gah... this value ranges from 0x00 ... 0x9F */
+		if (buf > 0x9F)
+			buf = 0x9F;
+		*value = -(48 + buf * 409);
+	} else {
+		/* fixed: read the 6-bit preset and scale up */
+		ret = RecvControlMsg(GET_LUM_CTL, PRESET_AGC_FORMATTER, 1);
+		if (ret < 0)
+			return ret;
+		if (buf > 0x3F)
+			buf = 0x3F;
+		*value = (buf << 10);
+	}
+
+	return 0;
+}
+
+/* Set shutter speed: mode != 0 selects automatic exposure; otherwise
+   `value` (0..0xffff) selects a fixed speed.  The preset encoding
+   differs per chipset (see the switch).  Returns the USB result code. */
+static inline int pwc_set_shutter_speed(struct pwc_device *pdev, int mode, int value)
+{
+	char buf[2];
+	int speed, ret;
+
+
+	if (mode)
+		buf[0] = 0x0;	/* auto */
+	else
+		buf[0] = 0xff;	/* fixed */
+
+	ret = SendControlMsg(SET_LUM_CTL, SHUTTER_MODE_FORMATTER, 1);
+
+	if (!mode && ret >= 0) {
+		if (value < 0)
+			value = 0;
+		if (value > 0xffff)
+			value = 0xffff;
+		switch(pdev->type) {
+		case 675:
+		case 680:
+		case 690:
+			/* speed ranges from 0x0 to 0x290 (656) */
+			speed = (value / 100);
+			buf[1] = speed >> 8;
+			buf[0] = speed & 0xff;
+			break;
+		case 720:
+		case 730:
+		case 740:
+		case 750:
+			/* speed seems to range from 0x0 to 0xff */
+			buf[1] = 0;
+			buf[0] = value >> 8;
+			break;
+		}
+		/* NOTE(review): for types not covered by the switch (e.g.
+		   Nala) buf[0] still holds the mode byte and buf[1] is
+		   uninitialized — presumably those models never reach this
+		   path; verify before relying on it. */
+		ret = SendControlMsg(SET_LUM_CTL, PRESET_SHUTTER_FORMATTER, 2);
+	}
+	return ret;
+}
+
+
+/* POWER */
+
+/* Toggle the camera's power-save mode (power != 0 means fully active).
+   Silently succeeds on models without the feature. */
+int pwc_camera_power(struct pwc_device *pdev, int power)
+{
+	char buf;
+
+	if (pdev->type < 675 || (pdev->type < 730 && pdev->release < 6))
+		return 0; /* Not supported by Nala or Timon < release 6 */
+
+	buf = power ? 0x00 /* active */ : 0xFF /* power save */;
+	return SendControlMsg(SET_STATUS_CTL, SET_POWER_SAVE_MODE_FORMATTER, 1);
+}
+
+
+
+/* private calls */
+
+/* Reload the user settings stored in the camera. */
+static inline int pwc_restore_user(struct pwc_device *pdev)
+{
+	char buf; /* dummy: the macro needs a `buf` in scope, length is 0 */
+	return SendControlMsg(SET_STATUS_CTL, RESTORE_USER_DEFAULTS_FORMATTER, 0);
+}
+
+/* Store the current settings as user defaults in the camera. */
+static inline int pwc_save_user(struct pwc_device *pdev)
+{
+	char buf; /* dummy: the macro needs a `buf` in scope, length is 0 */
+	return SendControlMsg(SET_STATUS_CTL, SAVE_USER_DEFAULTS_FORMATTER, 0);
+}
+
+/* Restore the camera's factory default settings. */
+static inline int pwc_restore_factory(struct pwc_device *pdev)
+{
+	char buf; /* dummy: the macro needs a `buf` in scope, length is 0 */
+	return SendControlMsg(SET_STATUS_CTL, RESTORE_FACTORY_DEFAULTS_FORMATTER, 0);
+}
+
+ /* ************************************************* */
+ /* Patch by Alvarado (not in the original version) */
+
+ /*
+ * the camera recognizes modes from 0 to 4:
+ *
+  * 00: indoor (incandescent lighting)
+ * 01: outdoor (sunlight)
+ * 02: fluorescent lighting
+ * 03: manual
+ * 04: auto
+ */
+/* Set the white-balance mode (0..4, see the table above); out-of-range
+   requests are clamped. */
+static inline int pwc_set_awb(struct pwc_device *pdev, int mode)
+{
+	char buf;
+	int ret;
+
+	if (mode < 0)
+		mode = 0;
+	else if (mode > 4)
+		mode = 4;
+
+	buf = mode & 0x07; /* just the lowest three bits */
+	ret = SendControlMsg(SET_CHROM_CTL, WB_MODE_FORMATTER, 1);
+	return ret < 0 ? ret : 0;
+}
+
+/* Read the current white-balance mode byte, or a negative errno. */
+static inline int pwc_get_awb(struct pwc_device *pdev)
+{
+	unsigned char buf;
+	int ret;
+
+	ret = RecvControlMsg(GET_CHROM_CTL, WB_MODE_FORMATTER, 1);
+	return ret < 0 ? ret : buf;
+}
+
+/* Set the manual red gain; 16-bit input, only the MSB is used. */
+static inline int pwc_set_red_gain(struct pwc_device *pdev, int value)
+{
+	unsigned char buf;
+
+	if (value < 0)
+		value = 0;
+	else if (value > 0xffff)
+		value = 0xffff;
+	buf = value >> 8;	/* only the msb is considered */
+	return SendControlMsg(SET_CHROM_CTL, PRESET_MANUAL_RED_GAIN_FORMATTER, 1);
+}
+
+/* Read the manual red gain preset, scaled back to 16 bits. */
+static inline int pwc_get_red_gain(struct pwc_device *pdev, int *value)
+{
+	int ret;
+	unsigned char buf;
+
+	ret = RecvControlMsg(GET_CHROM_CTL, PRESET_MANUAL_RED_GAIN_FORMATTER, 1);
+	if (ret < 0)
+		return ret;
+	*value = buf << 8;
+	return 0;
+}
+
+
+/* Set the manual blue gain; 16-bit input, only the MSB is used. */
+static inline int pwc_set_blue_gain(struct pwc_device *pdev, int value)
+{
+	unsigned char buf;
+
+	if (value < 0)
+		value = 0;
+	else if (value > 0xffff)
+		value = 0xffff;
+	buf = value >> 8;	/* only the msb is considered */
+	return SendControlMsg(SET_CHROM_CTL, PRESET_MANUAL_BLUE_GAIN_FORMATTER, 1);
+}
+
+/* Read the manual blue gain preset, scaled back to 16 bits. */
+static inline int pwc_get_blue_gain(struct pwc_device *pdev, int *value)
+{
+	int ret;
+	unsigned char buf;
+
+	ret = RecvControlMsg(GET_CHROM_CTL, PRESET_MANUAL_BLUE_GAIN_FORMATTER, 1);
+	if (ret < 0)
+		return ret;
+	*value = buf << 8;
+	return 0;
+}
+
+
+/* The following two functions are different, since they only read the
+ internal red/blue gains, which may be different from the manual
+ gains set or read above.
+ */
+/* Read the camera's internal (momentary) red gain, which can differ
+   from the manual preset above; scaled to 16 bits. */
+static inline int pwc_read_red_gain(struct pwc_device *pdev, int *value)
+{
+	int ret;
+	unsigned char buf;
+
+	ret = RecvControlMsg(GET_STATUS_CTL, READ_RED_GAIN_FORMATTER, 1);
+	if (ret < 0)
+		return ret;
+	*value = buf << 8;
+	return 0;
+}
+
+/* Read the camera's internal (momentary) blue gain, which can differ
+   from the manual preset above; scaled to 16 bits. */
+static inline int pwc_read_blue_gain(struct pwc_device *pdev, int *value)
+{
+	int ret;
+	unsigned char buf;
+
+	ret = RecvControlMsg(GET_STATUS_CTL, READ_BLUE_GAIN_FORMATTER, 1);
+	if (ret < 0)
+		return ret;
+	*value = buf << 8;
+	return 0;
+}
+
+
+/* Set the auto-white-balance control speed from a 16-bit value. */
+static inline int pwc_set_wb_speed(struct pwc_device *pdev, int speed)
+{
+	unsigned char buf;
+
+	buf = speed / 0x7f0;	/* useful range is 0x01..0x20 */
+	return SendControlMsg(SET_CHROM_CTL, AWB_CONTROL_SPEED_FORMATTER, 1);
+}
+
+/* Read the auto-white-balance control speed, scaled back to 16 bits. */
+static inline int pwc_get_wb_speed(struct pwc_device *pdev, int *value)
+{
+	int ret;
+	unsigned char buf;
+
+	ret = RecvControlMsg(GET_CHROM_CTL, AWB_CONTROL_SPEED_FORMATTER, 1);
+	if (ret < 0)
+		return ret;
+	*value = buf * 0x7f0;
+	return 0;
+}
+
+
+/* Set the auto-white-balance control delay from a 16-bit value. */
+static inline int pwc_set_wb_delay(struct pwc_device *pdev, int delay)
+{
+	unsigned char buf;
+
+	buf = (delay >> 10);	/* useful range is 0x01..0x3F */
+	return SendControlMsg(SET_CHROM_CTL, AWB_CONTROL_DELAY_FORMATTER, 1);
+}
+
+/* Read the auto-white-balance control delay, scaled back to 16 bits. */
+static inline int pwc_get_wb_delay(struct pwc_device *pdev, int *value)
+{
+	int ret;
+	unsigned char buf;
+
+	ret = RecvControlMsg(GET_CHROM_CTL, AWB_CONTROL_DELAY_FORMATTER, 1);
+	if (ret < 0)
+		return ret;
+	*value = buf << 10;
+	return 0;
+}
+
+
+/* Program the LED blink pattern; on/off times are in milliseconds and
+   converted to the camera's 100 ms units (one byte each).  No-op on
+   models without a controllable LED (type < 730). */
+int pwc_set_leds(struct pwc_device *pdev, int on_value, int off_value)
+{
+	unsigned char buf[2];
+
+	if (pdev->type < 730)
+		return 0;
+
+	on_value /= 100;
+	off_value /= 100;
+	if (on_value < 0)
+		on_value = 0;
+	else if (on_value > 0xff)
+		on_value = 0xff;
+	if (off_value < 0)
+		off_value = 0;
+	else if (off_value > 0xff)
+		off_value = 0xff;
+
+	buf[0] = on_value;
+	buf[1] = off_value;
+	return SendControlMsg(SET_STATUS_CTL, LED_FORMATTER, 2);
+}
+
+/* Read the LED blink pattern in milliseconds; reports -1/-1 on models
+   without a controllable LED. */
+int pwc_get_leds(struct pwc_device *pdev, int *on_value, int *off_value)
+{
+	unsigned char buf[2];
+	int ret;
+
+	if (pdev->type < 730) {
+		*on_value = -1;
+		*off_value = -1;
+		return 0;
+	}
+
+	ret = RecvControlMsg(GET_STATUS_CTL, LED_FORMATTER, 2);
+	if (ret < 0)
+		return ret;
+	*on_value = buf[0] * 100;	/* back to milliseconds */
+	*off_value = buf[1] * 100;
+	return 0;
+}
+
+/* Set image sharpness: a negative `contour` enables automatic contour;
+   otherwise the 16-bit value selects a fixed 6-bit preset. */
+static inline int pwc_set_contour(struct pwc_device *pdev, int contour)
+{
+	unsigned char buf;
+	int ret;
+
+	buf = (contour < 0) ? 0xff /* auto on */ : 0x0 /* auto off */;
+	ret = SendControlMsg(SET_LUM_CTL, AUTO_CONTOUR_FORMATTER, 1);
+	if (ret < 0)
+		return ret;
+	if (contour < 0)
+		return 0;	/* auto mode: no preset to send */
+
+	if (contour > 0xffff)
+		contour = 0xffff;
+	buf = (contour >> 10); /* contour preset is [0..3f] */
+	ret = SendControlMsg(SET_LUM_CTL, PRESET_CONTOUR_FORMATTER, 1);
+	return ret < 0 ? ret : 0;
+}
+
+/* Read the contour setting: -1 when automatic contour is active,
+   otherwise the fixed preset scaled up to 16 bits. */
+static inline int pwc_get_contour(struct pwc_device *pdev, int *contour)
+{
+	unsigned char buf;
+	int ret;
+
+	ret = RecvControlMsg(GET_LUM_CTL, AUTO_CONTOUR_FORMATTER, 1);
+	if (ret < 0)
+		return ret;
+
+	if (buf != 0) {
+		*contour = -1;	/* automatic contour is on */
+		return 0;
+	}
+	/* auto mode off, query current preset value */
+	ret = RecvControlMsg(GET_LUM_CTL, PRESET_CONTOUR_FORMATTER, 1);
+	if (ret < 0)
+		return ret;
+	*contour = buf << 10;
+	return 0;
+}
+
+
+/* Enable (non-zero) or disable backlight compensation. */
+static inline int pwc_set_backlight(struct pwc_device *pdev, int backlight)
+{
+	unsigned char buf;
+
+	buf = backlight ? 0xff : 0x0;
+	return SendControlMsg(SET_LUM_CTL, BACK_LIGHT_COMPENSATION_FORMATTER, 1);
+}
+
+/* Read the backlight compensation state (raw byte from the camera). */
+static inline int pwc_get_backlight(struct pwc_device *pdev, int *backlight)
+{
+	unsigned char buf;
+	int ret;
+
+	ret = RecvControlMsg(GET_LUM_CTL, BACK_LIGHT_COMPENSATION_FORMATTER, 1);
+	if (ret < 0)
+		return ret;
+	*backlight = buf;
+	return 0;
+}
+
+
+/* Enable (non-zero) or disable flickerless mode. */
+static inline int pwc_set_flicker(struct pwc_device *pdev, int flicker)
+{
+	unsigned char buf;
+
+	buf = flicker ? 0xff : 0x0;
+	return SendControlMsg(SET_LUM_CTL, FLICKERLESS_MODE_FORMATTER, 1);
+}
+
+/* Read the flickerless-mode state (raw byte from the camera). */
+static inline int pwc_get_flicker(struct pwc_device *pdev, int *flicker)
+{
+	unsigned char buf;
+	int ret;
+
+	ret = RecvControlMsg(GET_LUM_CTL, FLICKERLESS_MODE_FORMATTER, 1);
+	if (ret < 0)
+		return ret;
+	*flicker = buf;
+	return 0;
+}
+
+
+/* Set the dynamic noise reduction level (clamped to 0..3). */
+static inline int pwc_set_dynamic_noise(struct pwc_device *pdev, int noise)
+{
+	unsigned char buf;
+
+	if (noise < 0)
+		noise = 0;
+	else if (noise > 3)
+		noise = 3;
+	buf = noise;
+	return SendControlMsg(SET_LUM_CTL, DYNAMIC_NOISE_CONTROL_FORMATTER, 1);
+}
+
+/* Read the dynamic noise reduction level (raw byte from the camera). */
+static inline int pwc_get_dynamic_noise(struct pwc_device *pdev, int *noise)
+{
+	unsigned char buf;
+	int ret;
+
+	ret = RecvControlMsg(GET_LUM_CTL, DYNAMIC_NOISE_CONTROL_FORMATTER, 1);
+	if (ret < 0)
+		return ret;
+	*noise = buf;
+	return 0;
+}
+
+/* Re-home the pan/tilt motors; only the lower two bits of `flags`
+   are currently used. */
+int pwc_mpt_reset(struct pwc_device *pdev, int flags)
+{
+	unsigned char buf;
+
+	buf = flags & 0x03;
+	return SendControlMsg(SET_MPT_CTL, PT_RESET_CONTROL_FORMATTER, 1);
+}
+
+/* Send a relative pan/tilt movement.  Angles are in degrees * 100. */
+static inline int pwc_mpt_set_angle(struct pwc_device *pdev, int pan, int tilt)
+{
+	unsigned char buf[4];
+
+	/* Set new relative angle; angles are expressed in degrees * 100,
+	   but the cam has 0.5 degree resolution, hence divide by 200.  Also
+	   the angle must be multiplied by 64 before it's sent to the cam
+	   (presumably a fixed-point protocol format — unconfirmed).
+	   Net effect: scale by 64/100. */
+	pan = 64 * pan / 100;
+	tilt = -64 * tilt / 100; /* positive tilt is down, which is not what the user would expect */
+	buf[0] = pan & 0xFF;		/* pan, low byte first */
+	buf[1] = (pan >> 8) & 0xFF;
+	buf[2] = tilt & 0xFF;		/* tilt, low byte first */
+	buf[3] = (tilt >> 8) & 0xFF;
+	return SendControlMsg(SET_MPT_CTL, PT_RELATIVE_CONTROL_FORMATTER, 4);
+}
+
+/* Read the pan/tilt motor status: a 3-bit status code plus two 16-bit
+   big-endian timing values. */
+static inline int pwc_mpt_get_status(struct pwc_device *pdev, struct pwc_mpt_status *status)
+{
+	unsigned char buf[5];
+	int ret;
+
+	ret = RecvControlMsg(GET_MPT_CTL, PT_STATUS_FORMATTER, 5);
+	if (ret < 0)
+		return ret;
+	status->status = buf[0] & 0x7;	/* 3 bits are used for reporting */
+	status->time_pan = (buf[1] << 8) | buf[2];
+	status->time_tilt = (buf[3] << 8) | buf[4];
+	return 0;
+}
+
+
+/* Query the CMOS sensor type.  Nala IDs are flagged with bit 8 set;
+   the Vesta series (675..72x) lacks this call and yields -1. */
+int pwc_get_cmos_sensor(struct pwc_device *pdev, int *sensor)
+{
+	unsigned char buf;
+	int ret, request;
+
+	if (pdev->type < 675)
+		request = SENSOR_TYPE_FORMATTER1;
+	else if (pdev->type < 730)
+		return -1; /* The Vesta series doesn't have this call */
+	else
+		request = SENSOR_TYPE_FORMATTER2;
+
+	ret = RecvControlMsg(GET_STATUS_CTL, request, 1);
+	if (ret < 0)
+		return ret;
+	*sensor = (pdev->type < 675) ? (buf | 0x100) : buf;
+	return 0;
+}
+
+
+ /* End of Add-Ons */
+ /* ************************************************* */
+
+/* Linux 2.5.something and 2.6 pass direct pointers to arguments of
+ ioctl() calls. With 2.4, you have to do tedious copy_from_user()
+ and copy_to_user() calls. With these macros we circumvent this,
+ and let me maintain only one source file. The functionality is
+ exactly the same otherwise.
+ */
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
+
+/* 2.6: arg is already a direct kernel pointer, so ARG_IN/ARG_OUT are
+   no-ops and ARGR/ARGA just dereference/forward it. */
+/* define local variable for arg */
+#define ARG_DEF(ARG_type, ARG_name)\
+	ARG_type *ARG_name = arg;
+/* copy arg to local variable */
+#define ARG_IN(ARG_name) /* nothing */
+/* argument itself (referenced) */
+#define ARGR(ARG_name) (*ARG_name)
+/* argument address */
+#define ARGA(ARG_name) ARG_name
+/* copy local variable to arg */
+#define ARG_OUT(ARG_name) /* nothing */
+
+#else
+
+/* 2.4: arg is a userspace pointer; keep a local copy and move it with
+   copy_from_user/copy_to_user.  NOTE: these macros expect `arg` and
+   `ret` in scope and use `break`, so they only work inside a switch. */
+#define ARG_DEF(ARG_type, ARG_name)\
+	ARG_type ARG_name;
+#define ARG_IN(ARG_name)\
+	if (copy_from_user(&ARG_name, arg, sizeof(ARG_name))) {\
+		ret = -EFAULT;\
+		break;\
+	}
+#define ARGR(ARG_name) ARG_name
+#define ARGA(ARG_name) &ARG_name
+#define ARG_OUT(ARG_name)\
+	if (copy_to_user(arg, &ARG_name, sizeof(ARG_name))) {\
+		ret = -EFAULT;\
+		break;\
+	}
+
+#endif
+
+/* Dispatcher for the driver-private VIDIOCPWC* ioctls.
+ * @pdev: device structure
+ * @cmd:  ioctl command code
+ * @arg:  argument (direct kernel pointer on 2.6; user pointer on 2.4,
+ *        handled via the ARG_* macros above)
+ * Returns 0 on success, a negative errno on failure, or -ENOIOCTLCMD
+ * for commands not handled here.
+ */
+int pwc_ioctl(struct pwc_device *pdev, unsigned int cmd, void *arg)
+{
+	int ret = 0;
+
+	switch(cmd) {
+	case VIDIOCPWCRUSER:
+	{
+		if (pwc_restore_user(pdev))
+			ret = -EINVAL;
+		break;
+	}
+
+	case VIDIOCPWCSUSER:
+	{
+		if (pwc_save_user(pdev))
+			ret = -EINVAL;
+		break;
+	}
+
+	case VIDIOCPWCFACTORY:
+	{
+		if (pwc_restore_factory(pdev))
+			ret = -EINVAL;
+		break;
+	}
+
+	case VIDIOCPWCSCQUAL:
+	{
+		ARG_DEF(int, qual)
+
+		ARG_IN(qual)
+		if (ARGR(qual) < 0 || ARGR(qual) > 3)
+			ret = -EINVAL;
+		else
+			ret = pwc_try_video_mode(pdev, pdev->view.x, pdev->view.y, pdev->vframes, ARGR(qual), pdev->vsnapshot);
+		if (ret >= 0)
+			pdev->vcompression = ARGR(qual);
+		break;
+	}
+
+	case VIDIOCPWCGCQUAL:
+	{
+		ARG_DEF(int, qual)
+
+		ARGR(qual) = pdev->vcompression;
+		ARG_OUT(qual)
+		break;
+	}
+
+	case VIDIOCPWCPROBE:
+	{
+		ARG_DEF(struct pwc_probe, probe)
+
+		strcpy(ARGR(probe).name, pdev->vdev->name);
+		ARGR(probe).type = pdev->type;
+		ARG_OUT(probe)
+		break;
+	}
+
+	case VIDIOCPWCGSERIAL:
+	{
+		ARG_DEF(struct pwc_serial, serial)
+
+		strcpy(ARGR(serial).serial, pdev->serial);
+		ARG_OUT(serial)
+		break;
+	}
+
+	case VIDIOCPWCSAGC:
+	{
+		ARG_DEF(int, agc)
+
+		ARG_IN(agc)
+		/* negative value selects automatic gain */
+		if (pwc_set_agc(pdev, ARGR(agc) < 0 ? 1 : 0, ARGR(agc)))
+			ret = -EINVAL;
+		break;
+	}
+
+	case VIDIOCPWCGAGC:
+	{
+		ARG_DEF(int, agc)
+
+		if (pwc_get_agc(pdev, ARGA(agc)))
+			ret = -EINVAL;
+		ARG_OUT(agc)
+		break;
+	}
+
+	case VIDIOCPWCSSHUTTER:
+	{
+		ARG_DEF(int, shutter_speed)
+
+		ARG_IN(shutter_speed)
+		/* negative value selects automatic exposure */
+		ret = pwc_set_shutter_speed(pdev, ARGR(shutter_speed) < 0 ? 1 : 0, ARGR(shutter_speed));
+		break;
+	}
+
+	case VIDIOCPWCSAWB:
+	{
+		ARG_DEF(struct pwc_whitebalance, wb)
+
+		ARG_IN(wb)
+		ret = pwc_set_awb(pdev, ARGR(wb).mode);
+		if (ret >= 0 && ARGR(wb).mode == PWC_WB_MANUAL) {
+			pwc_set_red_gain(pdev, ARGR(wb).manual_red);
+			pwc_set_blue_gain(pdev, ARGR(wb).manual_blue);
+		}
+		break;
+	}
+
+	case VIDIOCPWCGAWB:
+	{
+		ARG_DEF(struct pwc_whitebalance, wb)
+
+		memset(ARGA(wb), 0, sizeof(struct pwc_whitebalance));
+		ARGR(wb).mode = pwc_get_awb(pdev);
+		if (ARGR(wb).mode < 0)
+			ret = -EINVAL;
+		else {
+			if (ARGR(wb).mode == PWC_WB_MANUAL) {
+				ret = pwc_get_red_gain(pdev, &ARGR(wb).manual_red);
+				if (ret < 0)
+					break;
+				ret = pwc_get_blue_gain(pdev, &ARGR(wb).manual_blue);
+				if (ret < 0)
+					break;
+			}
+			if (ARGR(wb).mode == PWC_WB_AUTO) {
+				ret = pwc_read_red_gain(pdev, &ARGR(wb).read_red);
+				if (ret < 0)
+					break;
+				ret = pwc_read_blue_gain(pdev, &ARGR(wb).read_blue);
+				if (ret < 0)
+					break;
+			}
+		}
+		ARG_OUT(wb)
+		break;
+	}
+
+	case VIDIOCPWCSAWBSPEED:
+	{
+		ARG_DEF(struct pwc_wb_speed, wbs)
+
+		/* FIX: ARG_IN was missing here; on 2.4 kernels the struct
+		   was read without ever being copied from userspace. */
+		ARG_IN(wbs)
+		if (ARGR(wbs).control_speed > 0) {
+			ret = pwc_set_wb_speed(pdev, ARGR(wbs).control_speed);
+		}
+		if (ARGR(wbs).control_delay > 0) {
+			ret = pwc_set_wb_delay(pdev, ARGR(wbs).control_delay);
+		}
+		break;
+	}
+
+	case VIDIOCPWCGAWBSPEED:
+	{
+		ARG_DEF(struct pwc_wb_speed, wbs)
+
+		ret = pwc_get_wb_speed(pdev, &ARGR(wbs).control_speed);
+		if (ret < 0)
+			break;
+		ret = pwc_get_wb_delay(pdev, &ARGR(wbs).control_delay);
+		if (ret < 0)
+			break;
+		ARG_OUT(wbs)
+		break;
+	}
+
+	case VIDIOCPWCSLED:
+	{
+		ARG_DEF(struct pwc_leds, leds)
+
+		ARG_IN(leds)
+		ret = pwc_set_leds(pdev, ARGR(leds).led_on, ARGR(leds).led_off);
+		break;
+	}
+
+	case VIDIOCPWCGLED:
+	{
+		ARG_DEF(struct pwc_leds, leds)
+
+		ret = pwc_get_leds(pdev, &ARGR(leds).led_on, &ARGR(leds).led_off);
+		ARG_OUT(leds)
+		break;
+	}
+
+	case VIDIOCPWCSCONTOUR:
+	{
+		ARG_DEF(int, contour)
+
+		ARG_IN(contour)
+		ret = pwc_set_contour(pdev, ARGR(contour));
+		break;
+	}
+
+	case VIDIOCPWCGCONTOUR:
+	{
+		ARG_DEF(int, contour)
+
+		ret = pwc_get_contour(pdev, ARGA(contour));
+		ARG_OUT(contour)
+		break;
+	}
+
+	case VIDIOCPWCSBACKLIGHT:
+	{
+		ARG_DEF(int, backlight)
+
+		ARG_IN(backlight)
+		ret = pwc_set_backlight(pdev, ARGR(backlight));
+		break;
+	}
+
+	case VIDIOCPWCGBACKLIGHT:
+	{
+		ARG_DEF(int, backlight)
+
+		ret = pwc_get_backlight(pdev, ARGA(backlight));
+		ARG_OUT(backlight)
+		break;
+	}
+
+	case VIDIOCPWCSFLICKER:
+	{
+		ARG_DEF(int, flicker)
+
+		ARG_IN(flicker)
+		ret = pwc_set_flicker(pdev, ARGR(flicker));
+		break;
+	}
+
+	case VIDIOCPWCGFLICKER:
+	{
+		ARG_DEF(int, flicker)
+
+		ret = pwc_get_flicker(pdev, ARGA(flicker));
+		ARG_OUT(flicker)
+		break;
+	}
+
+	case VIDIOCPWCSDYNNOISE:
+	{
+		ARG_DEF(int, dynnoise)
+
+		ARG_IN(dynnoise)
+		ret = pwc_set_dynamic_noise(pdev, ARGR(dynnoise));
+		break;
+	}
+
+	case VIDIOCPWCGDYNNOISE:
+	{
+		ARG_DEF(int, dynnoise)
+
+		ret = pwc_get_dynamic_noise(pdev, ARGA(dynnoise));
+		ARG_OUT(dynnoise)
+		break;
+	}
+
+	case VIDIOCPWCGREALSIZE:
+	{
+		ARG_DEF(struct pwc_imagesize, size)
+
+		ARGR(size).width = pdev->image.x;
+		ARGR(size).height = pdev->image.y;
+		ARG_OUT(size)
+		break;
+	}
+
+	case VIDIOCPWCMPTRESET:
+	{
+		if (pdev->features & FEATURE_MOTOR_PANTILT)
+		{
+			ARG_DEF(int, flags)
+
+			ARG_IN(flags)
+			ret = pwc_mpt_reset(pdev, ARGR(flags));
+			if (ret >= 0)
+			{
+				/* motors re-homed: reset our bookkeeping */
+				pdev->pan_angle = 0;
+				pdev->tilt_angle = 0;
+			}
+		}
+		else
+		{
+			ret = -ENXIO;
+		}
+		break;
+	}
+
+	case VIDIOCPWCMPTGRANGE:
+	{
+		if (pdev->features & FEATURE_MOTOR_PANTILT)
+		{
+			ARG_DEF(struct pwc_mpt_range, range)
+
+			ARGR(range) = pdev->angle_range;
+			ARG_OUT(range)
+		}
+		else
+		{
+			ret = -ENXIO;
+		}
+		break;
+	}
+
+	case VIDIOCPWCMPTSANGLE:
+	{
+		int new_pan, new_tilt;
+
+		if (pdev->features & FEATURE_MOTOR_PANTILT)
+		{
+			ARG_DEF(struct pwc_mpt_angles, angles)
+
+			ARG_IN(angles)
+			/* The camera can only set relative angles, so
+			   do some calculations when getting an absolute angle.
+			 */
+			if (ARGR(angles).absolute)
+			{
+				new_pan = ARGR(angles).pan;
+				new_tilt = ARGR(angles).tilt;
+			}
+			else
+			{
+				new_pan = pdev->pan_angle + ARGR(angles).pan;
+				new_tilt = pdev->tilt_angle + ARGR(angles).tilt;
+			}
+			/* check absolute ranges */
+			if (new_pan < pdev->angle_range.pan_min ||
+			    new_pan > pdev->angle_range.pan_max ||
+			    new_tilt < pdev->angle_range.tilt_min ||
+			    new_tilt > pdev->angle_range.tilt_max)
+			{
+				ret = -ERANGE;
+			}
+			else
+			{
+				/* go to relative range, check again */
+				new_pan -= pdev->pan_angle;
+				new_tilt -= pdev->tilt_angle;
+				/* angles are specified in degrees * 100, thus the limit = 36000 */
+				if (new_pan < -36000 || new_pan > 36000 || new_tilt < -36000 || new_tilt > 36000)
+					ret = -ERANGE;
+			}
+			if (ret == 0) /* no errors so far */
+			{
+				ret = pwc_mpt_set_angle(pdev, new_pan, new_tilt);
+				if (ret >= 0)
+				{
+					pdev->pan_angle += new_pan;
+					pdev->tilt_angle += new_tilt;
+				}
+				if (ret == -EPIPE) /* stall -> out of range */
+					ret = -ERANGE;
+			}
+		}
+		else
+		{
+			ret = -ENXIO;
+		}
+		break;
+	}
+
+	case VIDIOCPWCMPTGANGLE:
+	{
+		if (pdev->features & FEATURE_MOTOR_PANTILT)
+		{
+			ARG_DEF(struct pwc_mpt_angles, angles)
+
+			/* we only track absolute angles in the driver */
+			ARGR(angles).absolute = 1;
+			ARGR(angles).pan = pdev->pan_angle;
+			ARGR(angles).tilt = pdev->tilt_angle;
+			ARG_OUT(angles)
+		}
+		else
+		{
+			ret = -ENXIO;
+		}
+		break;
+	}
+
+	case VIDIOCPWCMPTSTATUS:
+	{
+		if (pdev->features & FEATURE_MOTOR_PANTILT)
+		{
+			ARG_DEF(struct pwc_mpt_status, status)
+
+			ret = pwc_mpt_get_status(pdev, ARGA(status));
+			ARG_OUT(status)
+		}
+		else
+		{
+			ret = -ENXIO;
+		}
+		break;
+	}
+
+	case VIDIOCPWCGVIDCMD:
+	{
+		ARG_DEF(struct pwc_video_command, cmd);
+
+		ARGR(cmd).type = pdev->type;
+		ARGR(cmd).release = pdev->release;
+		ARGR(cmd).command_len = pdev->cmd_len;
+		memcpy(&ARGR(cmd).command_buf, pdev->cmd_buf, pdev->cmd_len);
+		ARGR(cmd).bandlength = pdev->vbandlength;
+		ARGR(cmd).frame_size = pdev->frame_size;
+		ARG_OUT(cmd)
+		break;
+	}
+	/*
+	case VIDIOCPWCGVIDTABLE:
+	{
+		ARG_DEF(struct pwc_table_init_buffer, table);
+		ARGR(table).len = pdev->cmd_len;
+		memcpy(&ARGR(table).buffer, pdev->decompress_data, pdev->decompressor->table_size);
+		ARG_OUT(table)
+		break;
+	}
+	*/
+
+	default:
+		ret = -ENOIOCTLCMD;
+		break;
+	}
+
+	/* some helpers return positive USB byte counts; callers expect 0 */
+	if (ret > 0)
+		return 0;
+	return ret;
+}
+
+
+
--- /dev/null
+/* Linux driver for Philips webcam
+ USB and Video4Linux interface part.
+ (C) 1999-2004 Nemosoft Unv.
+ (C) 2004 Luc Saillard (luc@saillard.org)
+
+ NOTE: this version of pwc is an unofficial (modified) release of pwc & pcwx
+ driver and thus may have bugs that are not present in the original version.
+ Please send bug reports and support requests to <luc@saillard.org>.
+ The decompression routines have been implemented by reverse-engineering the
+ Nemosoft binary pwcx module. Caveat emptor.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+*/
+
+/*
+ This code forms the interface between the USB layers and the Philips
+   specific stuff. Some advanced stuff of the driver falls under an
+ NDA, signed between me and Philips B.V., Eindhoven, the Netherlands, and
+ is thus not distributed in source form. The binary pwcx.o module
+ contains the code that falls under the NDA.
+
+ In case you're wondering: 'pwc' stands for "Philips WebCam", but
+ I really didn't want to type 'philips_web_cam' every time (I'm lazy as
+   any Linux kernel hacker, but I don't like incomprehensible abbreviations
+ without explanation).
+
+   Oh yes, convention: to distinguish between all the various pointers to
+   device-structures, I use these names for the pointer variables:
+   udev: struct usb_device *
+   vdev: struct video_device *
+   pdev: struct pwc_device *
+*/
+
+/* Contributors:
+ - Alvarado: adding whitebalance code
+ - Alistar Moire: QuickCam 3000 Pro device/product ID
+ - Tony Hoyle: Creative Labs Webcam 5 device/product ID
+ - Mark Burazin: solving hang in VIDIOCSYNC when camera gets unplugged
+ - Jk Fang: Sotec Afina Eye ID
+ - Xavier Roche: QuickCam Pro 4000 ID
+ - Jens Knudsen: QuickCam Zoom ID
+ - J. Debert: QuickCam for Notebooks ID
+*/
+
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/poll.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <asm/byteorder.h>
+#include <asm/io.h>
+
+#include "pwc.h"
+#include "pwc-ioctl.h"
+#include "pwc-kiara.h"
+#include "pwc-timon.h"
+#include "pwc-dec23.h"
+#include "pwc-dec1.h"
+#include "pwc-uncompress.h"
+
+/* Function prototypes and driver templates */
+
+/* hotplug device table support */
+/* USB vendor/product IDs of all cameras this driver claims; exported
+   via MODULE_DEVICE_TABLE below so hotplug tools can autoload the
+   module when a matching camera is plugged in. The empty terminator
+   entry marks the end of the table. */
+static struct usb_device_id pwc_device_table [] = {
+ { USB_DEVICE(0x0471, 0x0302) }, /* Philips models */
+ { USB_DEVICE(0x0471, 0x0303) },
+ { USB_DEVICE(0x0471, 0x0304) },
+ { USB_DEVICE(0x0471, 0x0307) },
+ { USB_DEVICE(0x0471, 0x0308) },
+ { USB_DEVICE(0x0471, 0x030C) },
+ { USB_DEVICE(0x0471, 0x0310) },
+ { USB_DEVICE(0x0471, 0x0311) },
+ { USB_DEVICE(0x0471, 0x0312) },
+ { USB_DEVICE(0x0471, 0x0313) }, /* the 'new' 720K */
+ { USB_DEVICE(0x069A, 0x0001) }, /* Askey */
+ { USB_DEVICE(0x046D, 0x08B0) }, /* Logitech QuickCam Pro 3000 */
+ { USB_DEVICE(0x046D, 0x08B1) }, /* Logitech QuickCam Notebook Pro */
+ { USB_DEVICE(0x046D, 0x08B2) }, /* Logitech QuickCam Pro 4000 */
+ { USB_DEVICE(0x046D, 0x08B3) }, /* Logitech QuickCam Zoom (old model) */
+ { USB_DEVICE(0x046D, 0x08B4) }, /* Logitech QuickCam Zoom (new model) */
+ { USB_DEVICE(0x046D, 0x08B5) }, /* Logitech QuickCam Orbit/Sphere */
+ { USB_DEVICE(0x046D, 0x08B6) }, /* Logitech (reserved) */
+ { USB_DEVICE(0x046D, 0x08B7) }, /* Logitech (reserved) */
+ { USB_DEVICE(0x046D, 0x08B8) }, /* Logitech (reserved) */
+ { USB_DEVICE(0x055D, 0x9000) }, /* Samsung */
+ { USB_DEVICE(0x055D, 0x9001) },
+ { USB_DEVICE(0x041E, 0x400C) }, /* Creative Webcam 5 */
+ { USB_DEVICE(0x041E, 0x4011) }, /* Creative Webcam Pro Ex */
+ { USB_DEVICE(0x04CC, 0x8116) }, /* Afina Eye */
+ { USB_DEVICE(0x06BE, 0x8116) }, /* new Afina Eye */
+ { USB_DEVICE(0x0d81, 0x1910) }, /* Visionite */
+ { USB_DEVICE(0x0d81, 0x1900) },
+ { }
+};
+MODULE_DEVICE_TABLE(usb, pwc_device_table);
+
+static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id *id);
+static void usb_pwc_disconnect(struct usb_interface *intf);
+
+/* USB driver registration structure: ties the device ID table to the
+   probe/disconnect callbacks defined later in this file. */
+static struct usb_driver pwc_driver = {
+ .owner = THIS_MODULE,
+ .name = "Philips webcam", /* name */
+ .id_table = pwc_device_table,
+ .probe = usb_pwc_probe, /* probe() */
+ .disconnect = usb_pwc_disconnect, /* disconnect() */
+};
+
+#define MAX_DEV_HINTS 20 /* number of entries in device_hint[] below */
+#define MAX_ISOC_ERRORS 20 /* contiguous ISOC errors before simulating unplug */
+
+/* Module-wide defaults; the non-static ones are shared with other
+   compilation units of the driver. */
+static int default_size = PSZ_QCIF; /* default image size index */
+static int default_fps = 10; /* default frame rate */
+static int default_fbufs = 3; /* Default number of frame buffers */
+static int default_mbufs = 2; /* Default number of mmap() buffers */
+ int pwc_trace = TRACE_MODULE | TRACE_FLOW | TRACE_PWCX; /* global trace mask */
+static int power_save = 0; /* power camera down when not in use */
+static int led_on = 100, led_off = 0; /* defaults to LED that is on while in use */
+ int pwc_preferred_compression = 2; /* 0..3 = uncompressed..high */
+/* Table mapping a camera (by type/serial number) to a preferred video
+   device node; filled from module parameters at load time. */
+static struct {
+ int type;
+ char serial_number[30];
+ int device_node;
+ struct pwc_device *pdev;
+} device_hint[MAX_DEV_HINTS];
+
+/***/
+
+static int pwc_video_open(struct inode *inode, struct file *file);
+static int pwc_video_close(struct inode *inode, struct file *file);
+static ssize_t pwc_video_read(struct file *file, char *buf,
+ size_t count, loff_t *ppos);
+static unsigned int pwc_video_poll(struct file *file, poll_table *wait);
+static int pwc_video_ioctl(struct inode *inode, struct file *file,
+ unsigned int ioctlnr, unsigned long arg);
+static int pwc_video_mmap(struct file *file, struct vm_area_struct *vma);
+
+/* V4L file operations: entry points invoked through the video device
+   node (open/close/read/poll/mmap/ioctl). */
+static struct file_operations pwc_fops = {
+ .owner = THIS_MODULE,
+ .open = pwc_video_open,
+ .release = pwc_video_close,
+ .read = pwc_video_read,
+ .poll = pwc_video_poll,
+ .mmap = pwc_video_mmap,
+ .ioctl = pwc_video_ioctl,
+ .llseek = no_llseek,
+};
+/* Template video_device, copied per camera at probe time (the name is
+   overwritten with the actual model name; minor -1 means "assign any
+   free minor"). */
+static struct video_device pwc_template = {
+ .owner = THIS_MODULE,
+ .name = "Philips Webcam", /* Filled in later */
+ .type = VID_TYPE_CAPTURE,
+ .hardware = VID_HARDWARE_PWC,
+ .release = video_device_release,
+ .fops = &pwc_fops,
+ .minor = -1,
+};
+
+/***************************************************************************/
+
+/* Okay, this is some magic that I worked out and the reasoning behind it...
+
+ The biggest problem with any USB device is of course: "what to do
+ when the user unplugs the device while it is in use by an application?"
+ We have several options:
+ 1) Curse them with the 7 plagues when they do (requires divine intervention)
+ 2) Tell them not to (won't work: they'll do it anyway)
+ 3) Oops the kernel (this will have a negative effect on a user's uptime)
+ 4) Do something sensible.
+
+ Of course, we go for option 4.
+
+ It happens that this device will be linked to two times, once from
+ usb_device and once from the video_device in their respective 'private'
+ pointers. This is done when the device is probed() and all initialization
+ succeeded. The pwc_device struct links back to both structures.
+
+ When a device is unplugged while in use it will be removed from the
+ list of known USB devices; I also de-register it as a V4L device, but
+ unfortunately I can't free the memory since the struct is still in use
+ by the file descriptor. This free-ing is then deferred until the first
+ opportunity. Crude, but it works.
+
+ A small 'advantage' is that if a user unplugs the cam and plugs it back
+ in, it should get assigned the same video device minor, but unfortunately
+ it's non-trivial to re-link the cam back to the video device... (that
+ would surely be magic! :))
+*/
+
+/***************************************************************************/
+/* Private functions */
+
+/* Translate a kernel virtual address inside a vmalloc()ed area to the
+ * physical address of the backing page, with the in-page offset
+ * restored. Used when initializing the contents of the area.
+ */
+static inline unsigned long kvirt_to_pa(unsigned long adr)
+{
+	unsigned long kva;
+
+	kva = (unsigned long) page_address(vmalloc_to_page((void *)adr));
+	kva |= adr & (PAGE_SIZE-1);	/* restore the offset */
+	return __pa(kva);
+}
+
+/* Allocate a zeroed, page-aligned buffer via vmalloc_32() and mark
+ * every page reserved so the area can later be remapped into user
+ * space with mmap(). Returns NULL on allocation failure. Release with
+ * rvfree().
+ */
+static void * rvmalloc(unsigned long size)
+{
+	void *mem;
+	unsigned long page;
+
+	size = PAGE_ALIGN(size);
+	mem = vmalloc_32(size);
+	if (mem == NULL)
+		return NULL;
+	memset(mem, 0, size); /* Clear the ram out, no junk to the user */
+	for (page = (unsigned long) mem; size > 0; page += PAGE_SIZE, size -= PAGE_SIZE)
+		SetPageReserved(vmalloc_to_page((void *)page));
+	return mem;
+}
+
+/* Counterpart of rvmalloc(): clear the reserved bit on every page,
+ * then vfree() the area. A NULL pointer is a no-op.
+ */
+static void rvfree(void * mem, unsigned long size)
+{
+	unsigned long page;
+
+	if (mem == NULL)
+		return;
+	for (page = (unsigned long) mem; (long) size > 0; page += PAGE_SIZE, size -= PAGE_SIZE)
+		ClearPageReserved(vmalloc_to_page((void *)page));
+	vfree(mem);
+}
+
+
+
+
+/* Allocate all per-device buffers:
+ *  - MAX_ISO_BUFS isochronous transfer buffers of ISO_BUFFER_SIZE each
+ *  - default_fbufs frame buffer descriptors plus their vmalloc'ed data
+ *  - decompressor state, sized by camera family
+ *  - default_mbufs mmap()able image buffers in one rvmalloc'ed area
+ * Already-present buffers (non-NULL pointers) are kept, so the
+ * function may safely be called more than once. Returns 0 on success,
+ * -ENXIO for a bad device pointer, -ENOMEM on allocation failure (or
+ * for an unrecognized camera type, which leaves the decompressor
+ * pointer NULL). On failure the buffers allocated so far are left in
+ * place for the caller to release with pwc_free_buffers().
+ */
+static int pwc_allocate_buffers(struct pwc_device *pdev)
+{
+	int i;
+	void *kbuf;
+
+	Trace(TRACE_MEMORY, ">> pwc_allocate_buffers(pdev = 0x%p)\n", pdev);
+
+	if (pdev == NULL)
+		return -ENXIO;
+
+#ifdef PWC_MAGIC
+	if (pdev->magic != PWC_MAGIC) {
+		Err("allocate_buffers(): magic failed.\n");
+		return -ENXIO;
+	}
+#endif
+	/* Allocate isochronous pipe buffers */
+	for (i = 0; i < MAX_ISO_BUFS; i++) {
+		if (pdev->sbuf[i].data == NULL) {
+			kbuf = kmalloc(ISO_BUFFER_SIZE, GFP_KERNEL);
+			if (kbuf == NULL) {
+				Err("Failed to allocate iso buffer %d.\n", i);
+				return -ENOMEM;
+			}
+			Trace(TRACE_MEMORY, "Allocated iso buffer at %p.\n", kbuf);
+			pdev->sbuf[i].data = kbuf;
+			memset(kbuf, 0, ISO_BUFFER_SIZE);
+		}
+	}
+
+	/* Allocate frame buffer structure */
+	if (pdev->fbuf == NULL) {
+		kbuf = kmalloc(default_fbufs * sizeof(struct pwc_frame_buf), GFP_KERNEL);
+		if (kbuf == NULL) {
+			Err("Failed to allocate frame buffer structure.\n");
+			return -ENOMEM;
+		}
+		Trace(TRACE_MEMORY, "Allocated frame buffer structure at %p.\n", kbuf);
+		pdev->fbuf = kbuf;
+		memset(kbuf, 0, default_fbufs * sizeof(struct pwc_frame_buf));
+	}
+	/* create frame buffers, and make circular ring */
+	for (i = 0; i < default_fbufs; i++) {
+		if (pdev->fbuf[i].data == NULL) {
+			kbuf = vmalloc(PWC_FRAME_SIZE); /* need vmalloc since frame buffer > 128K */
+			if (kbuf == NULL) {
+				Err("Failed to allocate frame buffer %d.\n", i);
+				return -ENOMEM;
+			}
+			Trace(TRACE_MEMORY, "Allocated frame buffer %d at %p.\n", i, kbuf);
+			pdev->fbuf[i].data = kbuf;
+			memset(kbuf, 128, PWC_FRAME_SIZE);
+		}
+	}
+
+	/* Allocate decompressor table space */
+	kbuf = NULL;
+	switch (pdev->type)
+	{
+	case 675:
+	case 680:
+	case 690:
+	case 720:
+	case 730:
+	case 740:
+	case 750:
+		/* sizeof yields a size_t: print it with %zu, not %d (which is
+		   wrong on 64-bit platforms) */
+		Trace(TRACE_MEMORY, "private_data(%zu)\n", sizeof(struct pwc_dec23_private));
+		kbuf = kmalloc(sizeof(struct pwc_dec23_private), GFP_KERNEL); /* Timon & Kiara */
+		break;
+	case 645:
+	case 646:
+		/* TODO & FIXME */
+		kbuf = kmalloc(sizeof(struct pwc_dec23_private), GFP_KERNEL);
+		break;
+	}
+	if (kbuf == NULL) {
+		Err("Failed to allocate decompress table.\n");
+		return -ENOMEM;
+	}
+	pdev->decompress_data = kbuf;
+
+	/* Allocate image buffer; double buffer for mmap() */
+	kbuf = rvmalloc(default_mbufs * pdev->len_per_image);
+	if (kbuf == NULL) {
+		Err("Failed to allocate image buffer(s). needed (%d)\n", default_mbufs * pdev->len_per_image);
+		return -ENOMEM;
+	}
+	Trace(TRACE_MEMORY, "Allocated image buffer at %p.\n", kbuf);
+	pdev->image_data = kbuf;
+	for (i = 0; i < default_mbufs; i++)
+		pdev->image_ptr[i] = kbuf + i * pdev->len_per_image;
+	for (; i < MAX_IMAGES; i++)
+		pdev->image_ptr[i] = NULL;
+
+	kbuf = NULL;
+
+	Trace(TRACE_MEMORY, "<< pwc_allocate_buffers()\n");
+	return 0;
+}
+
+/* Release everything pwc_allocate_buffers() set up: ISO buffers, frame
+ * buffer descriptors and data, decompressor state and the mmap()
+ * image area. NULL pointers are skipped, so this is safe on a
+ * partially-allocated device; freed pointers are reset to NULL so a
+ * later re-allocation starts clean.
+ */
+static void pwc_free_buffers(struct pwc_device *pdev)
+{
+ int i;
+
+ Trace(TRACE_MEMORY, "Entering free_buffers(%p).\n", pdev);
+
+ if (pdev == NULL)
+ return;
+#ifdef PWC_MAGIC
+ if (pdev->magic != PWC_MAGIC) {
+ Err("free_buffers(): magic failed.\n");
+ return;
+ }
+#endif
+
+ /* Release Iso-pipe buffers */
+ for (i = 0; i < MAX_ISO_BUFS; i++)
+ if (pdev->sbuf[i].data != NULL) {
+ Trace(TRACE_MEMORY, "Freeing ISO buffer at %p.\n", pdev->sbuf[i].data);
+ kfree(pdev->sbuf[i].data);
+ pdev->sbuf[i].data = NULL;
+ }
+
+ /* The same for frame buffers (data is vmalloc'ed, descriptors kmalloc'ed) */
+ if (pdev->fbuf != NULL) {
+ for (i = 0; i < default_fbufs; i++) {
+ if (pdev->fbuf[i].data != NULL) {
+ Trace(TRACE_MEMORY, "Freeing frame buffer %d at %p.\n", i, pdev->fbuf[i].data);
+ vfree(pdev->fbuf[i].data);
+ pdev->fbuf[i].data = NULL;
+ }
+ }
+ kfree(pdev->fbuf);
+ pdev->fbuf = NULL;
+ }
+
+ /* Intermediate decompression buffer & tables */
+ if (pdev->decompress_data != NULL) {
+ Trace(TRACE_MEMORY, "Freeing decompression buffer at %p.\n", pdev->decompress_data);
+ kfree(pdev->decompress_data);
+ pdev->decompress_data = NULL;
+ }
+ pdev->decompressor = NULL;
+
+ /* Release image buffers (allocated with rvmalloc()) */
+ if (pdev->image_data != NULL) {
+ Trace(TRACE_MEMORY, "Freeing image buffer at %p.\n", pdev->image_data);
+ rvfree(pdev->image_data, default_mbufs * pdev->len_per_image);
+ }
+ pdev->image_data = NULL;
+
+ Trace(TRACE_MEMORY, "Leaving free_buffers().\n");
+}
+
+/* The frame & image buffer mess.
+
+ Yes, this is a mess. Well, it used to be simple, but alas... In this
+ module, 3 buffers schemes are used to get the data from the USB bus to
+ the user program. The first scheme involves the ISO buffers (called thus
+ since they transport ISO data from the USB controller), and not really
+ interesting. Suffices to say the data from this buffer is quickly
+ gathered in an interrupt handler (pwc_isoc_handler) and placed into the
+ frame buffer.
+
+ The frame buffer is the second scheme, and is the central element here.
+ It collects the data from a single frame from the camera (hence, the
+ name). Frames are delimited by the USB camera with a short USB packet,
+ so that's easy to detect. The frame buffers form a list that is filled
+ by the camera+USB controller and drained by the user process through
+ either read() or mmap().
+
+ The image buffer is the third scheme, in which frames are decompressed
+ and converted into planar format. For mmap() there is more than
+ one image buffer available.
+
+ The frame buffers provide the image buffering. In case the user process
+ is a bit slow, this introduces lag and some undesired side-effects.
+ The problem arises when the frame buffer is full. I used to drop the last
+ frame, which makes the data in the queue stale very quickly. But dropping
+ the frame at the head of the queue proved to be a little bit more difficult.
+ I tried a circular linked scheme, but this introduced more problems than
+ it solved.
+
+ Because filling and draining are completely asynchronous processes, this
+ requires some fiddling with pointers and mutexes.
+
+ Eventually, I came up with a system with 2 lists: an 'empty' frame list
+ and a 'full' frame list:
+ * Initially, all frame buffers but one are on the 'empty' list; the one
+ remaining buffer is our initial fill frame.
+ * If a frame is needed for filling, we try to take it from the 'empty'
+ list, unless that list is empty, in which case we take the buffer at
+ the head of the 'full' list.
+ * When our fill buffer has been filled, it is appended to the 'full'
+ list.
+ * If a frame is needed by read() or mmap(), it is taken from the head of
+ the 'full' list, handled, and then appended to the 'empty' list. If no
+ buffer is present on the 'full' list, we wait.
+ The advantage is that the buffer that is currently being decompressed/
+ converted, is on neither list, and thus not in our way (any other scheme
+ I tried had the problem of old data lingering in the queue).
+
+ Whatever strategy you choose, it always remains a tradeoff: with more
+ frame buffers the chances of a missed frame are reduced. On the other
+ hand, on slower machines it introduces lag because the queue will
+ always be full.
+ */
+
+/**
+ \brief Find next frame buffer to fill. Take from empty or full list, whichever comes first.
+
+ The current fill_frame (if any) is first appended to the tail of the
+ 'full' list. A fresh buffer is then taken from the 'empty' list, or,
+ if that is exhausted, from the head of the 'full' list -- which
+ effectively drops the oldest undelivered frame.
+
+ Returns 0 normally, 1 if a frame had to be stolen from the 'full'
+ list (i.e. a frame was dropped), or -EINVAL in the debug-only case
+ that neither list holds a buffer. All list manipulation is done
+ under ptrlock with interrupts disabled (called from the ISOC
+ interrupt handler as well as process context).
+ */
+static inline int pwc_next_fill_frame(struct pwc_device *pdev)
+{
+ int ret;
+ unsigned long flags;
+
+ ret = 0;
+ spin_lock_irqsave(&pdev->ptrlock, flags);
+ if (pdev->fill_frame != NULL) {
+ /* append to 'full' list */
+ if (pdev->full_frames == NULL) {
+ pdev->full_frames = pdev->fill_frame;
+ pdev->full_frames_tail = pdev->full_frames;
+ }
+ else {
+ pdev->full_frames_tail->next = pdev->fill_frame;
+ pdev->full_frames_tail = pdev->fill_frame;
+ }
+ }
+ if (pdev->empty_frames != NULL) {
+ /* We have empty frames available. That's easy */
+ pdev->fill_frame = pdev->empty_frames;
+ pdev->empty_frames = pdev->empty_frames->next;
+ }
+ else {
+ /* Hmm. Take it from the full list */
+#if PWC_DEBUG
+ /* sanity check */
+ if (pdev->full_frames == NULL) {
+ Err("Neither empty or full frames available!\n");
+ spin_unlock_irqrestore(&pdev->ptrlock, flags);
+ return -EINVAL;
+ }
+#endif
+ pdev->fill_frame = pdev->full_frames;
+ pdev->full_frames = pdev->full_frames->next;
+ ret = 1;
+ }
+ pdev->fill_frame->next = NULL;
+#if PWC_DEBUG
+ Trace(TRACE_SEQUENCE, "Assigning sequence number %d.\n", pdev->sequence);
+ pdev->fill_frame->sequence = pdev->sequence++;
+#endif
+ spin_unlock_irqrestore(&pdev->ptrlock, flags);
+ return ret;
+}
+
+
+/**
+ \brief Reset all buffers, pointers and lists, except for the image_used[] buffer.
+
+ If the image_used[] buffer is cleared too, mmap()/VIDIOCSYNC will run into trouble.
+
+ The 'empty' list is rebuilt by chaining fbuf[i] back to fbuf[i - 1],
+ i.e. in reverse array order: fbuf[default_fbufs - 1] becomes the head
+ and fbuf[0] (whose next is NULL) the tail. The head is then
+ immediately detached to serve as the initial fill_frame.
+ */
+static void pwc_reset_buffers(struct pwc_device *pdev)
+{
+ int i;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pdev->ptrlock, flags);
+ pdev->full_frames = NULL;
+ pdev->full_frames_tail = NULL;
+ for (i = 0; i < default_fbufs; i++) {
+ pdev->fbuf[i].filled = 0;
+ if (i > 0)
+ pdev->fbuf[i].next = &pdev->fbuf[i - 1];
+ else
+ pdev->fbuf->next = NULL;
+ }
+ pdev->empty_frames = &pdev->fbuf[default_fbufs - 1];
+ pdev->empty_frames_tail = pdev->fbuf;
+ pdev->read_frame = NULL;
+ pdev->fill_frame = pdev->empty_frames;
+ pdev->empty_frames = pdev->empty_frames->next;
+
+ pdev->image_read_pos = 0;
+ pdev->fill_image = 0;
+ spin_unlock_irqrestore(&pdev->ptrlock, flags);
+}
+
+
+/**
+ \brief Do all the handling for getting one frame: get pointer, decompress, advance pointers.
+
+ Takes the head of the 'full' list as read_frame, decompresses it into
+ the image buffer (outside the lock!), then appends the frame to the
+ tail of the 'empty' list. Returns 0 on success or the error code of
+ pwc_decompress(). Called from process context (read()/mmap paths).
+ */
+static int pwc_handle_frame(struct pwc_device *pdev)
+{
+ int ret = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pdev->ptrlock, flags);
+ /* First grab our read_frame; this is removed from all lists, so
+ we can release the lock after this without problems */
+ if (pdev->read_frame != NULL) {
+ /* This can't theoretically happen */
+ Err("Huh? Read frame still in use?\n");
+ }
+ else {
+ if (pdev->full_frames == NULL) {
+ Err("Woops. No frames ready.\n");
+ }
+ else {
+ pdev->read_frame = pdev->full_frames;
+ pdev->full_frames = pdev->full_frames->next;
+ pdev->read_frame->next = NULL;
+ }
+
+ if (pdev->read_frame != NULL) {
+#if PWC_DEBUG
+ Trace(TRACE_SEQUENCE, "Decompressing frame %d\n", pdev->read_frame->sequence);
+#endif
+ /* Decompression is a lengthy process, so it's outside of the lock.
+ This gives the isoc_handler the opportunity to fill more frames
+ in the mean time.
+ */
+ spin_unlock_irqrestore(&pdev->ptrlock, flags);
+ ret = pwc_decompress(pdev);
+ spin_lock_irqsave(&pdev->ptrlock, flags);
+
+ /* We're done with read_buffer, tack it to the end of the empty buffer list */
+ if (pdev->empty_frames == NULL) {
+ pdev->empty_frames = pdev->read_frame;
+ pdev->empty_frames_tail = pdev->empty_frames;
+ }
+ else {
+ pdev->empty_frames_tail->next = pdev->read_frame;
+ pdev->empty_frames_tail = pdev->read_frame;
+ }
+ pdev->read_frame = NULL;
+ }
+ }
+ spin_unlock_irqrestore(&pdev->ptrlock, flags);
+ return ret;
+}
+
+/**
+ \brief Advance pointers of image buffer (after each user request)
+
+ Marks the just-consumed mmap() image buffer as free again and moves
+ the fill pointer on to the next buffer in the ring of default_mbufs
+ image buffers.
+*/
+static inline void pwc_next_image(struct pwc_device *pdev)
+{
+ pdev->image_used[pdev->fill_image] = 0;
+ pdev->fill_image = (pdev->fill_image + 1) % default_mbufs;
+}
+
+
+/* This gets called for the Isochronous pipe (video). This is done in
+ * interrupt time, so it has to be fast, not crash, and not stall. Neat.
+ *
+ * Overview: for each ISO packet in the completed URB, append its data
+ * to the current fill frame. A packet shorter than the previous one
+ * marks end-of-frame (EOF): the frame is then validated, possibly
+ * dropped (CMOS-sensor bogus frames, underflows), or handed over via
+ * pwc_next_fill_frame() and readers are woken. The URB is re-submitted
+ * at the end unless it was unlinked.
+ */
+static void pwc_isoc_handler(struct urb *urb, struct pt_regs *regs)
+{
+ struct pwc_device *pdev;
+ int i, fst, flen;
+ int awake;
+ struct pwc_frame_buf *fbuf;
+ unsigned char *fillptr = 0, *iso_buf = 0;
+
+ awake = 0;
+ pdev = (struct pwc_device *)urb->context;
+ if (pdev == NULL) {
+ Err("isoc_handler() called with NULL device?!\n");
+ return;
+ }
+#ifdef PWC_MAGIC
+ if (pdev->magic != PWC_MAGIC) {
+ Err("isoc_handler() called with bad magic!\n");
+ return;
+ }
+#endif
+ /* URB was unlinked (sync or async): do NOT resubmit */
+ if (urb->status == -ENOENT || urb->status == -ECONNRESET) {
+ Trace(TRACE_OPEN, "pwc_isoc_handler(): URB (%p) unlinked %ssynchronuously.\n", urb, urb->status == -ENOENT ? "" : "a");
+ return;
+ }
+ if (urb->status != -EINPROGRESS && urb->status != 0) {
+ const char *errmsg;
+
+ errmsg = "Unknown";
+ switch(urb->status) {
+ case -ENOSR: errmsg = "Buffer error (overrun)"; break;
+ case -EPIPE: errmsg = "Stalled (device not responding)"; break;
+ case -EOVERFLOW: errmsg = "Babble (bad cable?)"; break;
+ case -EPROTO: errmsg = "Bit-stuff error (bad cable?)"; break;
+ case -EILSEQ: errmsg = "CRC/Timeout (could be anything)"; break;
+ case -ETIMEDOUT: errmsg = "NAK (device does not respond)"; break;
+ }
+ Trace(TRACE_FLOW, "pwc_isoc_handler() called with status %d [%s].\n", urb->status, errmsg);
+ /* Give up after a number of contiguous errors on the USB bus.
+ Apparently something is wrong so we simulate an unplug event.
+ */
+ if (++pdev->visoc_errors > MAX_ISOC_ERRORS)
+ {
+ Info("Too many ISOC errors, bailing out.\n");
+ pdev->error_status = EIO;
+ awake = 1;
+ wake_up_interruptible(&pdev->frameq);
+ }
+ goto handler_end; // ugly, but practical
+ }
+
+ fbuf = pdev->fill_frame;
+ if (fbuf == NULL) {
+ Err("pwc_isoc_handler without valid fill frame.\n");
+ awake = 1;
+ goto handler_end;
+ }
+ else {
+ fillptr = fbuf->data + fbuf->filled;
+ }
+
+ /* Reset ISOC error counter. We did get here, after all. */
+ pdev->visoc_errors = 0;
+
+ /* vsync: 0 = don't copy data
+ 1 = sync-hunt
+ 2 = synched
+ */
+ /* Compact data */
+ for (i = 0; i < urb->number_of_packets; i++) {
+ fst = urb->iso_frame_desc[i].status;
+ flen = urb->iso_frame_desc[i].actual_length;
+ iso_buf = urb->transfer_buffer + urb->iso_frame_desc[i].offset;
+ if (fst == 0) {
+ if (flen > 0) { /* if valid data... */
+ if (pdev->vsync > 0) { /* ...and we are not sync-hunting... */
+ pdev->vsync = 2;
+
+ /* ...copy data to frame buffer, if possible */
+ if (flen + fbuf->filled > pdev->frame_total_size) {
+ Trace(TRACE_FLOW, "Frame buffer overflow (flen = %d, frame_total_size = %d).\n", flen, pdev->frame_total_size);
+ pdev->vsync = 0; /* Hmm, let's wait for an EOF (end-of-frame) */
+ pdev->vframes_error++;
+ }
+ else {
+ memmove(fillptr, iso_buf, flen);
+ fillptr += flen;
+ }
+ }
+ /* note: 'filled' is advanced even when the data was not
+ copied; it is reset again at the next EOF below */
+ fbuf->filled += flen;
+ } /* ..flen > 0 */
+
+ if (flen < pdev->vlast_packet_size) {
+ /* Shorter packet... We probably have the end of an image-frame;
+ wake up read() process and let select()/poll() do something.
+ Decompression is done in user time over there.
+ */
+ if (pdev->vsync == 2) {
+ /* The ToUCam Fun CMOS sensor causes the firmware to send 2 or 3 bogus
+ frames on the USB wire after an exposure change. This condition is
+ however detected in the cam and a bit is set in the header.
+ */
+ if (pdev->type == 730) {
+ unsigned char *ptr = (unsigned char *)fbuf->data;
+
+ if (ptr[1] == 1 && ptr[0] & 0x10) {
+#if PWC_DEBUG
+ Debug("Hyundai CMOS sensor bug. Dropping frame %d.\n", fbuf->sequence);
+#endif
+ pdev->drop_frames += 2;
+ pdev->vframes_error++;
+ }
+ /* header bit 0: snapshot button, bit 1: mirror status;
+ report only the edges (changes since last frame) */
+ if ((ptr[0] ^ pdev->vmirror) & 0x01) {
+ if (ptr[0] & 0x01)
+ Info("Snapshot button pressed.\n");
+ else
+ Info("Snapshot button released.\n");
+ }
+ if ((ptr[0] ^ pdev->vmirror) & 0x02) {
+ if (ptr[0] & 0x02)
+ Info("Image is mirrored.\n");
+ else
+ Info("Image is normal.\n");
+ }
+ pdev->vmirror = ptr[0] & 0x03;
+ /* Sometimes the trailer of the 730 is still sent as a 4 byte packet
+ after a short frame; this condition is filtered out specifically. A 4 byte
+ frame doesn't make sense anyway.
+ So we get either this sequence:
+ drop_bit set -> 4 byte frame -> short frame -> good frame
+ Or this one:
+ drop_bit set -> short frame -> good frame
+ So we drop either 3 or 2 frames in all!
+ */
+ if (fbuf->filled == 4)
+ pdev->drop_frames++;
+ }
+
+ /* In case we were instructed to drop the frame, do so silently.
+ The buffer pointers are not updated either (but the counters are reset below).
+ */
+ if (pdev->drop_frames > 0)
+ pdev->drop_frames--;
+ else {
+ /* Check for underflow first */
+ if (fbuf->filled < pdev->frame_total_size) {
+ Trace(TRACE_FLOW, "Frame buffer underflow (%d bytes); discarded.\n", fbuf->filled);
+ pdev->vframes_error++;
+ }
+ else {
+ /* Send only once per EOF */
+ awake = 1; /* delay wake_ups */
+
+ /* Find our next frame to fill. This will always succeed, since we
+ * nick a frame from either empty or full list, but if we had to
+ * take it from the full list, it means a frame got dropped.
+ */
+ if (pwc_next_fill_frame(pdev)) {
+ pdev->vframes_dumped++;
+ if ((pdev->vframe_count > FRAME_LOWMARK) && (pwc_trace & TRACE_FLOW)) {
+ if (pdev->vframes_dumped < 20)
+ Trace(TRACE_FLOW, "Dumping frame %d.\n", pdev->vframe_count);
+ if (pdev->vframes_dumped == 20)
+ Trace(TRACE_FLOW, "Dumping frame %d (last message).\n", pdev->vframe_count);
+ }
+ }
+ fbuf = pdev->fill_frame;
+ }
+ } /* !drop_frames */
+ pdev->vframe_count++;
+ }
+ fbuf->filled = 0;
+ fillptr = fbuf->data;
+ pdev->vsync = 1;
+ } /* .. flen < last_packet_size */
+ pdev->vlast_packet_size = flen;
+ } /* ..status == 0 */
+#if PWC_DEBUG
+ /* This is normally not interesting to the user, unless you are really debugging something */
+ else {
+ static int iso_error = 0;
+ iso_error++;
+ if (iso_error < 20)
+ Trace(TRACE_FLOW, "Iso frame %d of USB has error %d\n", i, fst);
+ }
+#endif
+ }
+
+handler_end:
+ if (awake)
+ wake_up_interruptible(&pdev->frameq);
+
+ /* re-queue the URB for the next chunk of ISO data */
+ urb->dev = pdev->udev;
+ i = usb_submit_urb(urb, GFP_ATOMIC);
+ if (i != 0)
+ Err("Error (%d) re-submitting urb in pwc_isoc_handler.\n", i);
+}
+
+
+/* Set up and start the isochronous video stream: look up the video
+ * endpoint in the requested alternate setting, program that alternate
+ * interface, allocate MAX_ISO_BUFS URBs of ISO_FRAMES_PER_DESC packets
+ * each and submit them all. Returns 0 on success or a negative errno
+ * (-EFAULT for bad device/descriptor state, -ENFILE when the endpoint
+ * or its packet size cannot be determined, -ENOMEM on URB allocation
+ * failure). Idempotent: returns 0 immediately if already initialized.
+ */
+static int pwc_isoc_init(struct pwc_device *pdev)
+{
+	struct usb_device *udev;
+	struct urb *urb;
+	int i, j, ret;
+
+	struct usb_interface *intf;
+	struct usb_host_interface *idesc = NULL;
+
+	if (pdev == NULL)
+		return -EFAULT;
+	if (pdev->iso_init)
+		return 0;
+	pdev->vsync = 0;
+	udev = pdev->udev;
+
+	/* Get the current alternate interface, adjust packet size */
+	if (!udev->actconfig)
+		return -EFAULT;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,5)
+	idesc = &udev->actconfig->interface[0]->altsetting[pdev->valternate];
+#else
+	intf = usb_ifnum_to_if(udev, 0);
+	if (intf)
+		idesc = usb_altnum_to_altsetting(intf, pdev->valternate);
+#endif
+
+	if (!idesc)
+		return -EFAULT;
+
+	/* Search video endpoint */
+	pdev->vmax_packet_size = -1;
+	for (i = 0; i < idesc->desc.bNumEndpoints; i++)
+		if ((idesc->endpoint[i].desc.bEndpointAddress & 0xF) == pdev->vendpoint) {
+			/* USB descriptor fields are little-endian on the wire;
+			   convert to CPU byte order before use (otherwise the
+			   size is wrong on big-endian machines). */
+			pdev->vmax_packet_size = le16_to_cpu(idesc->endpoint[i].desc.wMaxPacketSize);
+			break;
+		}
+
+	if (pdev->vmax_packet_size < 0 || pdev->vmax_packet_size > ISO_MAX_FRAME_SIZE) {
+		Err("Failed to find packet size for video endpoint in current alternate setting.\n");
+		return -ENFILE; /* Odd error, that should be noticable */
+	}
+
+	/* Set alternate interface */
+	ret = 0;
+	Trace(TRACE_OPEN, "Setting alternate interface %d\n", pdev->valternate);
+	ret = usb_set_interface(pdev->udev, 0, pdev->valternate);
+	if (ret < 0)
+		return ret;
+
+	/* Allocate all URBs first, so a failure can unwind cleanly */
+	for (i = 0; i < MAX_ISO_BUFS; i++) {
+		urb = usb_alloc_urb(ISO_FRAMES_PER_DESC, GFP_KERNEL);
+		if (urb == NULL) {
+			Err("Failed to allocate urb %d\n", i);
+			ret = -ENOMEM;
+			break;
+		}
+		pdev->sbuf[i].urb = urb;
+		Trace(TRACE_MEMORY, "Allocated URB at 0x%p\n", urb);
+	}
+	if (ret) {
+		/* De-allocate in reverse order */
+		while (i >= 0) {
+			if (pdev->sbuf[i].urb != NULL)
+				usb_free_urb(pdev->sbuf[i].urb);
+			pdev->sbuf[i].urb = NULL;
+			i--;
+		}
+		return ret;
+	}
+
+	/* init URB structure */
+	for (i = 0; i < MAX_ISO_BUFS; i++) {
+		urb = pdev->sbuf[i].urb;
+
+		urb->interval = 1; // devik
+		urb->dev = udev;
+		urb->pipe = usb_rcvisocpipe(udev, pdev->vendpoint);
+		urb->transfer_flags = URB_ISO_ASAP;
+		urb->transfer_buffer = pdev->sbuf[i].data;
+		urb->transfer_buffer_length = ISO_BUFFER_SIZE;
+		urb->complete = pwc_isoc_handler;
+		urb->context = pdev;
+		urb->start_frame = 0;
+		urb->number_of_packets = ISO_FRAMES_PER_DESC;
+		for (j = 0; j < ISO_FRAMES_PER_DESC; j++) {
+			urb->iso_frame_desc[j].offset = j * ISO_MAX_FRAME_SIZE;
+			urb->iso_frame_desc[j].length = pdev->vmax_packet_size;
+		}
+	}
+
+	/* link */
+	for (i = 0; i < MAX_ISO_BUFS; i++) {
+		ret = usb_submit_urb(pdev->sbuf[i].urb, GFP_KERNEL);
+		if (ret)
+			Err("isoc_init() submit_urb %d failed with error %d\n", i, ret);
+		else
+			Trace(TRACE_MEMORY, "URB 0x%p submitted.\n", pdev->sbuf[i].urb);
+	}
+
+	/* All is done... */
+	pdev->iso_init = 1;
+	Trace(TRACE_OPEN, "<< pwc_isoc_init()\n");
+	return 0;
+}
+
+/* Stop the isochronous video stream: unlink and free all URBs, and --
+ * if the camera is believed to still be present -- switch back to
+ * alternate setting 0 to stop the data flow. An unplugged camera is
+ * signalled by error_status == EPIPE, in which case the device is not
+ * touched again.
+ */
+static void pwc_isoc_cleanup(struct pwc_device *pdev)
+{
+	int i;
+
+	Trace(TRACE_OPEN, ">> pwc_isoc_cleanup()\n");
+	if (pdev == NULL)
+		return;
+
+	/* Unlink and release the ISOC buffers one by one */
+	for (i = 0; i < MAX_ISO_BUFS; i++) {
+		struct urb *urb = pdev->sbuf[i].urb;
+
+		if (urb == NULL)
+			continue;
+		if (pdev->iso_init) {
+			Trace(TRACE_MEMORY, "Unlinking URB %p\n", urb);
+			usb_unlink_urb(urb);
+		}
+		Trace(TRACE_MEMORY, "Freeing URB\n");
+		usb_free_urb(urb);
+		pdev->sbuf[i].urb = NULL;
+	}
+
+	/* Stop camera, but only if we are sure the camera is still there (unplug
+	   is signalled by EPIPE)
+	 */
+	if (pdev->error_status && pdev->error_status != EPIPE) {
+		Trace(TRACE_OPEN, "Setting alternate interface 0.\n");
+		usb_set_interface(pdev->udev, 0, 0);
+	}
+
+	pdev->iso_init = 0;
+	Trace(TRACE_OPEN, "<< pwc_isoc_cleanup()\n");
+}
+
+/* Switch the camera to a new video mode (size/fps/compression/snapshot).
+ * The ISOC stream is stopped and the buffers are reset before trying
+ * the new mode; on failure the previous (known-good) mode is restored.
+ * The stream is restarted afterwards. Returns the error code of the
+ * first attempt (0 on success), or -EAGAIN if the ISOC restart failed.
+ */
+int pwc_try_video_mode(struct pwc_device *pdev, int width, int height, int new_fps, int new_compression, int new_snapshot)
+{
+	int ret, start;
+
+	pwc_isoc_cleanup(pdev);		/* stop isoc stuff */
+	pwc_reset_buffers(pdev);	/* reset parameters */
+
+	/* Try to set video mode... */
+	ret = pwc_set_video_mode(pdev, width, height, new_fps, new_compression, new_snapshot);
+	start = ret;
+	if (ret) {
+		Trace(TRACE_FLOW, "pwc_set_video_mode attempt 1 failed.\n");
+		/* That failed... restore old mode (we know that worked) */
+		start = pwc_set_video_mode(pdev, pdev->view.x, pdev->view.y, pdev->vframes, pdev->vcompression, pdev->vsnapshot);
+		if (start)
+			Trace(TRACE_FLOW, "pwc_set_video_mode attempt 2 failed.\n");
+	}
+	if (start == 0 && pwc_isoc_init(pdev) < 0) {
+		Info("Failed to restart ISOC transfers in pwc_try_video_mode.\n");
+		ret = -EAGAIN; /* let's try again, who knows if it works a second time */
+	}
+	pdev->drop_frames++; /* try to avoid garbage during switch */
+	return ret; /* Return original error code */
+}
+
+
+/***************************************************************************/
+/* Video4Linux functions */
+
+/* V4L open() handler. Performs first-time hardware initialization
+ * (sensor query), powers up the camera and LEDs, allocates all
+ * buffers, resets counters, sets the video mode (with QCIF/QSIF
+ * fallback) and starts the ISOC stream. Serialized with modlock.
+ * Returns 0 on success or a negative errno; -EBUSY if already open.
+ */
+static int pwc_video_open(struct inode *inode, struct file *file)
+{
+ int i;
+ struct video_device *vdev = video_devdata(file);
+ struct pwc_device *pdev;
+
+ Trace(TRACE_OPEN, ">> video_open called(vdev = 0x%p).\n", vdev);
+
+ pdev = (struct pwc_device *)vdev->priv;
+ if (pdev == NULL)
+ BUG();
+ if (pdev->vopen)
+ return -EBUSY;
+
+ down(&pdev->modlock);
+ if (!pdev->usb_init) {
+ Trace(TRACE_OPEN, "Doing first time initialization.\n");
+ pdev->usb_init = 1;
+
+ if (pwc_trace & TRACE_OPEN)
+ {
+ /* Query sensor type */
+ const char *sensor_type = NULL;
+ int ret;
+
+ ret = pwc_get_cmos_sensor(pdev, &i);
+ if (ret >= 0)
+ {
+ switch(i) {
+ case 0x00: sensor_type = "Hyundai CMOS sensor"; break;
+ case 0x20: sensor_type = "Sony CCD sensor + TDA8787"; break;
+ case 0x2E: sensor_type = "Sony CCD sensor + Exas 98L59"; break;
+ case 0x2F: sensor_type = "Sony CCD sensor + ADI 9804"; break;
+ case 0x30: sensor_type = "Sharp CCD sensor + TDA8787"; break;
+ case 0x3E: sensor_type = "Sharp CCD sensor + Exas 98L59"; break;
+ case 0x3F: sensor_type = "Sharp CCD sensor + ADI 9804"; break;
+ case 0x40: sensor_type = "UPA 1021 sensor"; break;
+ case 0x100: sensor_type = "VGA sensor"; break;
+ case 0x101: sensor_type = "PAL MR sensor"; break;
+ default: sensor_type = "unknown type of sensor"; break;
+ }
+ }
+ if (sensor_type != NULL)
+ Info("This %s camera is equipped with a %s (%d).\n", pdev->vdev->name, sensor_type, i);
+ }
+ }
+
+ /* Turn on camera */
+ if (power_save) {
+ i = pwc_camera_power(pdev, 1);
+ if (i < 0)
+ Info("Failed to restore power to the camera! (%d)\n", i);
+ }
+ /* Set LED on/off time */
+ if (pwc_set_leds(pdev, led_on, led_off) < 0)
+ Info("Failed to set LED on/off time.\n");
+
+ /* NOTE(review): pwc_construct() is called again further down, after
+ the buffer reset. The double call looks redundant but harmless if
+ pwc_construct() is idempotent -- verify against pwc.h/pwc-ctrl. */
+ pwc_construct(pdev); /* set min/max sizes correct */
+
+ /* So far, so good. Allocate memory. */
+ i = pwc_allocate_buffers(pdev);
+ if (i < 0) {
+ Trace(TRACE_OPEN, "Failed to allocate buffer memory.\n");
+ up(&pdev->modlock);
+ return i;
+ }
+
+ /* Reset buffers & parameters */
+ pwc_reset_buffers(pdev);
+ for (i = 0; i < default_mbufs; i++)
+ pdev->image_used[i] = 0;
+ pdev->vframe_count = 0;
+ pdev->vframes_dumped = 0;
+ pdev->vframes_error = 0;
+ pdev->visoc_errors = 0;
+ pdev->error_status = 0;
+#if PWC_DEBUG
+ pdev->sequence = 0;
+#endif
+ pwc_construct(pdev); /* set min/max sizes correct */
+
+ /* Set some defaults */
+ pdev->vsnapshot = 0;
+
+ /* Start iso pipe for video; first try the last used video size
+ (or the default one); if that fails try QCIF/10 or QSIF/10;
+ it that fails too, give up.
+ */
+ i = pwc_set_video_mode(pdev, pwc_image_sizes[pdev->vsize].x, pwc_image_sizes[pdev->vsize].y, pdev->vframes, pdev->vcompression, 0);
+ if (i) {
+ Trace(TRACE_OPEN, "First attempt at set_video_mode failed.\n");
+ if (pdev->type == 730 || pdev->type == 740 || pdev->type == 750)
+ i = pwc_set_video_mode(pdev, pwc_image_sizes[PSZ_QSIF].x, pwc_image_sizes[PSZ_QSIF].y, 10, pdev->vcompression, 0);
+ else
+ i = pwc_set_video_mode(pdev, pwc_image_sizes[PSZ_QCIF].x, pwc_image_sizes[PSZ_QCIF].y, 10, pdev->vcompression, 0);
+ }
+ if (i) {
+ Trace(TRACE_OPEN, "Second attempt at set_video_mode failed.\n");
+ up(&pdev->modlock);
+ return i;
+ }
+
+ i = pwc_isoc_init(pdev);
+ if (i) {
+ Trace(TRACE_OPEN, "Failed to init ISOC stuff = %d.\n", i);
+ up(&pdev->modlock);
+ return i;
+ }
+
+ pdev->vopen++;
+ file->private_data = vdev;
+ up(&pdev->modlock);
+ Trace(TRACE_OPEN, "<< video_open() returns 0.\n");
+ return 0;
+}
+
+/* Note that all cleanup is done in the reverse order as in _open */
+/* Release everything acquired by pwc_video_open(), in reverse order:
+   per-model decompressor state, the ISO pipe, the buffers, and finally
+   the LEDs/camera power.  The power-down step is skipped when the camera
+   was unplugged (signalled by error_status == EPIPE).  Always returns 0. */
+static int pwc_video_close(struct inode *inode, struct file *file)
+{
+ struct video_device *vdev = file->private_data;
+ struct pwc_device *pdev;
+ int i;
+
+ Trace(TRACE_OPEN, ">> video_close called(vdev = 0x%p).\n", vdev);
+
+ pdev = (struct pwc_device *)vdev->priv;
+ if (pdev->vopen == 0)
+ Info("video_close() called on closed device?\n");
+
+ /* Dump statistics, but only if a reasonable amount of frames were
+ processed (to prevent endless log-entries in case of snap-shot
+ programs)
+ */
+ if (pdev->vframe_count > 20)
+ Info("Closing video device: %d frames received, dumped %d frames, %d frames with errors.\n", pdev->vframe_count, pdev->vframes_dumped, pdev->vframes_error);
+
+ /* Tear down the decompressor matching this camera family */
+ switch (pdev->type)
+ {
+ case 675:
+ case 680:
+ case 690:
+ case 720:
+ case 730:
+ case 740:
+ case 750:
+ pwc_dec23_exit(); /* Timon & Kiara */
+ break;
+ case 645:
+ case 646:
+ pwc_dec1_exit();
+ break;
+ }
+
+ pwc_isoc_cleanup(pdev);
+ pwc_free_buffers(pdev);
+
+ /* Turn off LEDS and power down camera, but only when not unplugged */
+ if (pdev->error_status != EPIPE) {
+ /* Turn LEDs off */
+ if (pwc_set_leds(pdev, 0, 0) < 0)
+ Info("Failed to set LED on/off time.\n");
+ if (power_save) {
+ i = pwc_camera_power(pdev, 0);
+ if (i < 0)
+ Err("Failed to power down camera (%d)\n", i);
+ }
+ }
+ pdev->vopen = 0;
+ Trace(TRACE_OPEN, "<< video_close()\n");
+ return 0;
+}
+
+/*
+ * FIXME: what about two parallel reads ????
+ * ANSWER: Not supported. You can't open the device more than once,
+ despite what the V4L1 interface says. First, I don't see
+ the need, second there's no mechanism of alerting the
+ 2nd/3rd/... process of events like changing image size.
+ And I don't see the point of blocking that for the
+ 2nd/3rd/... process.
+ In multi-threaded environments reading parallel from any
+ device is tricky anyhow.
+ */
+
+/* read() handler.  Unless a partial read is in progress
+   (pdev->image_read_pos != 0), wait interruptibly for a full frame, then
+   decompress it and copy up to 'count' bytes of the current image to user
+   space.  Partial reads are supported; once the whole image has been
+   consumed the driver advances to the next one.  Returns the number of
+   bytes copied or a negative errno. */
+static ssize_t pwc_video_read(struct file *file, char *buf,
+ size_t count, loff_t *ppos)
+{
+ struct video_device *vdev = file->private_data;
+ struct pwc_device *pdev;
+ int noblock = file->f_flags & O_NONBLOCK;
+ DECLARE_WAITQUEUE(wait, current);
+ int bytes_to_read;
+
+ Trace(TRACE_READ, "video_read(0x%p, %p, %d) called.\n", vdev, buf, count);
+ if (vdev == NULL)
+ return -EFAULT;
+ pdev = vdev->priv;
+ if (pdev == NULL)
+ return -EFAULT;
+ if (pdev->error_status)
+ return -pdev->error_status; /* Something happened, report what. */
+
+ /* In case we're doing partial reads, we don't have to wait for a frame */
+ if (pdev->image_read_pos == 0) {
+ /* Do wait queueing according to the (doc)book */
+ /* NOTE(review): TASK_INTERRUPTIBLE is set only after schedule(),
+    not before re-testing the condition as in the canonical
+    wait-loop pattern -- confirm there is no lost-wakeup window
+    against the wake_up path. */
+ add_wait_queue(&pdev->frameq, &wait);
+ while (pdev->full_frames == NULL) {
+ /* Check for unplugged/etc. here */
+ if (pdev->error_status) {
+ remove_wait_queue(&pdev->frameq, &wait);
+ set_current_state(TASK_RUNNING);
+ return -pdev->error_status ;
+ }
+ if (noblock) {
+ remove_wait_queue(&pdev->frameq, &wait);
+ set_current_state(TASK_RUNNING);
+ return -EWOULDBLOCK;
+ }
+ if (signal_pending(current)) {
+ remove_wait_queue(&pdev->frameq, &wait);
+ set_current_state(TASK_RUNNING);
+ return -ERESTARTSYS;
+ }
+ schedule();
+ set_current_state(TASK_INTERRUPTIBLE);
+ }
+ remove_wait_queue(&pdev->frameq, &wait);
+ set_current_state(TASK_RUNNING);
+
+ /* Decompress and release frame */
+ if (pwc_handle_frame(pdev))
+ return -EFAULT;
+ }
+
+ Trace(TRACE_READ, "Copying data to user space.\n");
+ /* RAW palette exposes the undecoded frame; otherwise the viewport image */
+ if (pdev->vpalette == VIDEO_PALETTE_RAW)
+ bytes_to_read = pdev->frame_size;
+ else
+ bytes_to_read = pdev->view.size;
+
+ /* copy bytes to user space; we allow for partial reads */
+ if (count + pdev->image_read_pos > bytes_to_read)
+ count = bytes_to_read - pdev->image_read_pos;
+ if (copy_to_user(buf, pdev->image_ptr[pdev->fill_image] + pdev->image_read_pos, count))
+ return -EFAULT;
+ pdev->image_read_pos += count;
+ if (pdev->image_read_pos >= bytes_to_read) { /* All data has been read */
+ pdev->image_read_pos = 0;
+ pwc_next_image(pdev);
+ }
+ return count;
+}
+
+/* poll() handler.  Registers the caller on the frame wait-queue and
+   reports POLLIN|POLLRDNORM when a full frame is waiting.
+   A poll handler must return a poll event mask, never a -errno value:
+   the old code returned -EFAULT, which -- cast to the unsigned int
+   return type -- became a nonsense mask.  Error conditions are now
+   reported as POLLERR instead. */
+static unsigned int pwc_video_poll(struct file *file, poll_table *wait)
+{
+ struct video_device *vdev = file->private_data;
+ struct pwc_device *pdev;
+
+ if (vdev == NULL)
+ return POLLERR;
+ pdev = vdev->priv;
+ if (pdev == NULL)
+ return POLLERR;
+
+ poll_wait(file, &pdev->frameq, wait);
+ if (pdev->error_status)
+ return POLLERR;
+ if (pdev->full_frames != NULL) /* we have frames waiting */
+ return (POLLIN | POLLRDNORM);
+
+ return 0;
+}
+
+/* Main V4L1 ioctl dispatcher (argument already copied to kernel space by
+   video_usercopy()).  Handles capability/channel/picture/window queries,
+   the mmap-based capture protocol (GMBUF/MCAPTURE/SYNC) and dummy audio
+   calls; anything unknown is forwarded to pwc_ioctl() for the private
+   PWC commands.  Returns 0 or a negative errno.
+
+   Fix: the VIDIOCSWIN "nothing changed" short-circuit tested the truth
+   value of pdev->view.y instead of comparing it against vw->height, so a
+   request that only changed the height was silently ignored. */
+static int pwc_video_do_ioctl(struct inode *inode, struct file *file,
+ unsigned int cmd, void *arg)
+{
+ struct video_device *vdev = file->private_data;
+ struct pwc_device *pdev;
+ DECLARE_WAITQUEUE(wait, current);
+
+ if (vdev == NULL)
+ return -EFAULT;
+ pdev = vdev->priv;
+ if (pdev == NULL)
+ return -EFAULT;
+
+ switch (cmd) {
+ /* Query capabilities */
+ case VIDIOCGCAP:
+ {
+ struct video_capability *caps = arg;
+
+ strcpy(caps->name, vdev->name);
+ caps->type = VID_TYPE_CAPTURE;
+ caps->channels = 1;
+ caps->audios = 1;
+ caps->minwidth = pdev->view_min.x;
+ caps->minheight = pdev->view_min.y;
+ caps->maxwidth = pdev->view_max.x;
+ caps->maxheight = pdev->view_max.y;
+ break;
+ }
+
+ /* Channel functions (simulate 1 channel) */
+ case VIDIOCGCHAN:
+ {
+ struct video_channel *v = arg;
+
+ if (v->channel != 0)
+ return -EINVAL;
+ v->flags = 0;
+ v->tuners = 0;
+ v->type = VIDEO_TYPE_CAMERA;
+ strcpy(v->name, "Webcam");
+ return 0;
+ }
+
+ case VIDIOCSCHAN:
+ {
+ /* The spec says the argument is an integer, but
+ the bttv driver uses a video_channel arg, which
+ makes sense because it also has the norm flag.
+ */
+ struct video_channel *v = arg;
+ if (v->channel != 0)
+ return -EINVAL;
+ return 0;
+ }
+
+
+ /* Picture functions; contrast etc. */
+ case VIDIOCGPICT:
+ {
+ struct video_picture *p = arg;
+ int val;
+
+ val = pwc_get_brightness(pdev);
+ if (val >= 0)
+ p->brightness = val;
+ else
+ p->brightness = 0xffff;
+ val = pwc_get_contrast(pdev);
+ if (val >= 0)
+ p->contrast = val;
+ else
+ p->contrast = 0xffff;
+ /* Gamma, Whiteness, what's the difference? :) */
+ val = pwc_get_gamma(pdev);
+ if (val >= 0)
+ p->whiteness = val;
+ else
+ p->whiteness = 0xffff;
+ val = pwc_get_saturation(pdev);
+ if (val >= 0)
+ p->colour = val;
+ else
+ p->colour = 0xffff;
+ p->depth = 24;
+ p->palette = pdev->vpalette;
+ p->hue = 0xFFFF; /* N/A */
+ break;
+ }
+
+ case VIDIOCSPICT:
+ {
+ struct video_picture *p = arg;
+ /*
+ * FIXME: Suppose we are mid read
+ ANSWER: No problem: the firmware of the camera
+ can handle brightness/contrast/etc
+ changes at _any_ time, and the palette
+ is used exactly once in the uncompress
+ routine.
+ */
+ pwc_set_brightness(pdev, p->brightness);
+ pwc_set_contrast(pdev, p->contrast);
+ pwc_set_gamma(pdev, p->whiteness);
+ pwc_set_saturation(pdev, p->colour);
+ if (p->palette && p->palette != pdev->vpalette) {
+ switch (p->palette) {
+ case VIDEO_PALETTE_YUV420P:
+ case VIDEO_PALETTE_RAW:
+ pdev->vpalette = p->palette;
+ return pwc_try_video_mode(pdev, pdev->image.x, pdev->image.y, pdev->vframes, pdev->vcompression, pdev->vsnapshot);
+ break;
+ default:
+ return -EINVAL;
+ break;
+ }
+ }
+ break;
+ }
+
+ /* Window/size parameters */
+ case VIDIOCGWIN:
+ {
+ struct video_window *vw = arg;
+
+ vw->x = 0;
+ vw->y = 0;
+ vw->width = pdev->view.x;
+ vw->height = pdev->view.y;
+ vw->chromakey = 0;
+ vw->flags = (pdev->vframes << PWC_FPS_SHIFT) |
+ (pdev->vsnapshot ? PWC_FPS_SNAPSHOT : 0);
+ break;
+ }
+
+ case VIDIOCSWIN:
+ {
+ struct video_window *vw = arg;
+ int fps, snapshot, ret;
+
+ fps = (vw->flags & PWC_FPS_FRMASK) >> PWC_FPS_SHIFT;
+ snapshot = vw->flags & PWC_FPS_SNAPSHOT;
+ if (fps == 0)
+ fps = pdev->vframes;
+ /* Nothing changed?  Then there is nothing to do.  (The height
+    comparison used to read 'pdev->view.y' -- a mere truth test --
+    so height-only changes were ignored.) */
+ if (pdev->view.x == vw->width && pdev->view.y == vw->height && fps == pdev->vframes && snapshot == pdev->vsnapshot)
+ return 0;
+ ret = pwc_try_video_mode(pdev, vw->width, vw->height, fps, pdev->vcompression, snapshot);
+ if (ret)
+ return ret;
+ break;
+ }
+
+ /* We don't have overlay support (yet) */
+ case VIDIOCGFBUF:
+ {
+ struct video_buffer *vb = arg;
+
+ memset(vb,0,sizeof(*vb));
+ break;
+ }
+
+ /* mmap() functions */
+ case VIDIOCGMBUF:
+ {
+ /* Tell the user program how much memory is needed for a mmap() */
+ struct video_mbuf *vm = arg;
+ int i;
+
+ memset(vm, 0, sizeof(*vm));
+ vm->size = default_mbufs * pdev->len_per_image;
+ vm->frames = default_mbufs; /* double buffering should be enough for most applications */
+ for (i = 0; i < default_mbufs; i++)
+ vm->offsets[i] = i * pdev->len_per_image;
+ break;
+ }
+
+ case VIDIOCMCAPTURE:
+ {
+ /* Start capture into a given image buffer (called 'frame' in video_mmap structure) */
+ struct video_mmap *vm = arg;
+
+ Trace(TRACE_READ, "VIDIOCMCAPTURE: %dx%d, frame %d, format %d\n", vm->width, vm->height, vm->frame, vm->format);
+ if (vm->frame < 0 || vm->frame >= default_mbufs)
+ return -EINVAL;
+
+ /* xawtv is nasty. It probes the available palettes
+ by setting a very small image size and trying
+ various palettes... The driver doesn't support
+ such small images, so I'm working around it.
+ */
+ if (vm->format)
+ {
+ switch (vm->format)
+ {
+ case VIDEO_PALETTE_YUV420P:
+ case VIDEO_PALETTE_RAW:
+ break;
+ default:
+ return -EINVAL;
+ break;
+ }
+ }
+
+ if ((vm->width != pdev->view.x || vm->height != pdev->view.y) &&
+ (vm->width >= pdev->view_min.x && vm->height >= pdev->view_min.y)) {
+ int ret;
+
+ Trace(TRACE_OPEN, "VIDIOCMCAPTURE: changing size to please xawtv :-(.\n");
+ ret = pwc_try_video_mode(pdev, vm->width, vm->height, pdev->vframes, pdev->vcompression, pdev->vsnapshot);
+ if (ret)
+ return ret;
+ } /* ... size mismatch */
+
+ /* FIXME: should we lock here? */
+ if (pdev->image_used[vm->frame])
+ return -EBUSY; /* buffer wasn't available. Bummer */
+ pdev->image_used[vm->frame] = 1;
+
+ /* Okay, we're done here. In the SYNC call we wait until a
+ frame comes available, then expand image into the given
+ buffer.
+ In contrast to the CPiA cam the Philips cams deliver a
+ constant stream, almost like a grabber card. Also,
+ we have separate buffers for the rawdata and the image,
+ meaning we can nearly always expand into the requested buffer.
+ */
+ Trace(TRACE_READ, "VIDIOCMCAPTURE done.\n");
+ break;
+ }
+
+ case VIDIOCSYNC:
+ {
+ /* The doc says: "Whenever a buffer is used it should
+ call VIDIOCSYNC to free this frame up and continue."
+
+ The only odd thing about this whole procedure is
+ that MCAPTURE flags the buffer as "in use", and
+ SYNC immediately unmarks it, while it isn't
+ after SYNC that you know that the buffer actually
+ got filled! So you better not start a CAPTURE in
+ the same frame immediately (use double buffering).
+ This is not a problem for this cam, since it has
+ extra intermediate buffers, but a hardware
+ grabber card will then overwrite the buffer
+ you're working on.
+ */
+ int *mbuf = arg;
+ int ret;
+
+ Trace(TRACE_READ, "VIDIOCSYNC called (%d).\n", *mbuf);
+
+ /* bounds check */
+ if (*mbuf < 0 || *mbuf >= default_mbufs)
+ return -EINVAL;
+ /* check if this buffer was requested anyway */
+ if (pdev->image_used[*mbuf] == 0)
+ return -EINVAL;
+
+ /* Add ourselves to the frame wait-queue.
+
+ FIXME: needs auditing for safety.
+ QUESTION: In what respect? I think that using the
+ frameq is safe now.
+ */
+ add_wait_queue(&pdev->frameq, &wait);
+ while (pdev->full_frames == NULL) {
+ if (pdev->error_status) {
+ remove_wait_queue(&pdev->frameq, &wait);
+ set_current_state(TASK_RUNNING);
+ return -pdev->error_status;
+ }
+
+ if (signal_pending(current)) {
+ remove_wait_queue(&pdev->frameq, &wait);
+ set_current_state(TASK_RUNNING);
+ return -ERESTARTSYS;
+ }
+ schedule();
+ set_current_state(TASK_INTERRUPTIBLE);
+ }
+ remove_wait_queue(&pdev->frameq, &wait);
+ set_current_state(TASK_RUNNING);
+
+ /* The frame is ready. Expand in the image buffer
+ requested by the user. I don't care if you
+ mmap() 5 buffers and request data in this order:
+ buffer 4 2 3 0 1 2 3 0 4 3 1 . . .
+ Grabber hardware may not be so forgiving.
+ */
+ Trace(TRACE_READ, "VIDIOCSYNC: frame ready.\n");
+ pdev->fill_image = *mbuf; /* tell in which buffer we want the image to be expanded */
+ /* Decompress, etc */
+ ret = pwc_handle_frame(pdev);
+ pdev->image_used[*mbuf] = 0;
+ if (ret)
+ return -EFAULT;
+ break;
+ }
+
+ case VIDIOCGAUDIO:
+ {
+ struct video_audio *v = arg;
+
+ strcpy(v->name, "Microphone");
+ v->audio = -1; /* unknown audio minor */
+ v->flags = 0;
+ v->mode = VIDEO_SOUND_MONO;
+ v->volume = 0;
+ v->bass = 0;
+ v->treble = 0;
+ v->balance = 0x8000;
+ v->step = 1;
+ break;
+ }
+
+ case VIDIOCSAUDIO:
+ {
+ /* Dummy: nothing can be set */
+ break;
+ }
+
+ case VIDIOCGUNIT:
+ {
+ struct video_unit *vu = arg;
+
+ vu->video = pdev->vdev->minor & 0x3F;
+ vu->audio = -1; /* not known yet */
+ vu->vbi = -1;
+ vu->radio = -1;
+ vu->teletext = -1;
+ break;
+ }
+ default:
+ return pwc_ioctl(pdev, cmd, arg);
+ } /* ..switch */
+ return 0;
+}
+
+/* ioctl entry point: video_usercopy() shuttles the argument between user
+   and kernel space and then invokes pwc_video_do_ioctl() with a kernel
+   copy of it. */
+static int pwc_video_ioctl(struct inode *inode, struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ int ret;
+
+ ret = video_usercopy(inode, file, cmd, arg, pwc_video_do_ioctl);
+ return ret;
+}
+
+
+/* mmap() handler: map the driver's contiguous image buffer area
+   (pdev->image_data) page by page into the caller's VMA.
+   NOTE(review): unlike the other file operations, vdev/pdev are not
+   NULL-checked here -- presumably safe via the fops path, but confirm.
+   Uses the legacy kvirt_to_pa()/remap_page_range() interface of this
+   kernel generation. */
+static int pwc_video_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct video_device *vdev = file->private_data;
+ struct pwc_device *pdev;
+ unsigned long start = vma->vm_start;
+ unsigned long size = vma->vm_end-vma->vm_start;
+ unsigned long page, pos;
+
+ Trace(TRACE_MEMORY, "mmap(0x%p, 0x%lx, %lu) called.\n", vdev, start, size);
+ pdev = vdev->priv;
+
+ vma->vm_flags |= VM_IO;
+
+ pos = (unsigned long)pdev->image_data;
+ while (size > 0) {
+ /* Translate the kernel-virtual buffer page to a physical address
+    and map it read/write-shared into user space */
+ page = kvirt_to_pa(pos);
+ if (remap_page_range(vma, start, page, PAGE_SIZE, PAGE_SHARED))
+ return -EAGAIN;
+
+ start += PAGE_SIZE;
+ pos += PAGE_SIZE;
+ if (size > PAGE_SIZE)
+ size -= PAGE_SIZE;
+ else
+ size = 0;
+ }
+
+ return 0;
+}
+
+/***************************************************************************/
+/* USB functions */
+
+/* This function gets called when a new device is plugged in or the usb core
+ * is loaded.
+ */
+
+/* probe() -- called by the USB core for each interface of a matching
+   device.  Identifies the camera from the vendor/product id table below,
+   allocates and initialises the pwc_device state, registers the V4L
+   device node (honouring any device_hint[] match for the node number)
+   and attaches the state to the interface.  Returns 0 on success or a
+   negative errno. */
+static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id *id)
+{
+ struct usb_device *udev = interface_to_usbdev(intf);
+ struct pwc_device *pdev = NULL;
+ int vendor_id, product_id, type_id;
+ int i, hint;
+ int features = 0;
+ int video_nr = -1; /* default: use next available device */
+ char serial_number[30], *name;
+
+ /* Check if we can handle this device */
+ Trace(TRACE_PROBE, "probe() called [%04X %04X], if %d\n",
+ udev->descriptor.idVendor, udev->descriptor.idProduct,
+ intf->altsetting->desc.bInterfaceNumber);
+
+ /* the interfaces are probed one by one. We are only interested in the
+ video interface (0) now.
+ Interface 1 is the Audio Control, and interface 2 Audio itself.
+ */
+ if (intf->altsetting->desc.bInterfaceNumber > 0)
+ return -ENODEV;
+
+ vendor_id = udev->descriptor.idVendor;
+ product_id = udev->descriptor.idProduct;
+
+ /* Per-vendor product tables: each match sets a human-readable name
+    and a type_id that selects the camera family elsewhere in the
+    driver (decompressor choice, size tables, ...). */
+ if (vendor_id == 0x0471) {
+ switch (product_id) {
+ case 0x0302:
+ Info("Philips PCA645VC USB webcam detected.\n");
+ name = "Philips 645 webcam";
+ type_id = 645;
+ break;
+ case 0x0303:
+ Info("Philips PCA646VC USB webcam detected.\n");
+ name = "Philips 646 webcam";
+ type_id = 646;
+ break;
+ case 0x0304:
+ Info("Askey VC010 type 2 USB webcam detected.\n");
+ name = "Askey VC010 webcam";
+ type_id = 646;
+ break;
+ case 0x0307:
+ Info("Philips PCVC675K (Vesta) USB webcam detected.\n");
+ name = "Philips 675 webcam";
+ type_id = 675;
+ break;
+ case 0x0308:
+ Info("Philips PCVC680K (Vesta Pro) USB webcam detected.\n");
+ name = "Philips 680 webcam";
+ type_id = 680;
+ break;
+ case 0x030C:
+ Info("Philips PCVC690K (Vesta Pro Scan) USB webcam detected.\n");
+ name = "Philips 690 webcam";
+ type_id = 690;
+ break;
+ case 0x0310:
+ Info("Philips PCVC730K (ToUCam Fun)/PCVC830 (ToUCam II) USB webcam detected.\n");
+ name = "Philips 730 webcam";
+ type_id = 730;
+ break;
+ case 0x0311:
+ Info("Philips PCVC740K (ToUCam Pro)/PCVC840 (ToUCam II) USB webcam detected.\n");
+ name = "Philips 740 webcam";
+ type_id = 740;
+ break;
+ case 0x0312:
+ Info("Philips PCVC750K (ToUCam Pro Scan) USB webcam detected.\n");
+ name = "Philips 750 webcam";
+ type_id = 750;
+ break;
+ case 0x0313:
+ Info("Philips PCVC720K/40 (ToUCam XS) USB webcam detected.\n");
+ name = "Philips 720K/40 webcam";
+ type_id = 720;
+ break;
+ default:
+ return -ENODEV;
+ break;
+ }
+ }
+ else if (vendor_id == 0x069A) {
+ switch(product_id) {
+ case 0x0001:
+ Info("Askey VC010 type 1 USB webcam detected.\n");
+ name = "Askey VC010 webcam";
+ type_id = 645;
+ break;
+ default:
+ return -ENODEV;
+ break;
+ }
+ }
+ else if (vendor_id == 0x046d) {
+ switch(product_id) {
+ case 0x08b0:
+ Info("Logitech QuickCam Pro 3000 USB webcam detected.\n");
+ name = "Logitech QuickCam Pro 3000";
+ type_id = 740; /* CCD sensor */
+ break;
+ case 0x08b1:
+ Info("Logitech QuickCam Notebook Pro USB webcam detected.\n");
+ name = "Logitech QuickCam Notebook Pro";
+ type_id = 740; /* CCD sensor */
+ break;
+ case 0x08b2:
+ Info("Logitech QuickCam 4000 Pro USB webcam detected.\n");
+ name = "Logitech QuickCam Pro 4000";
+ type_id = 740; /* CCD sensor */
+ break;
+ case 0x08b3:
+ Info("Logitech QuickCam Zoom USB webcam detected.\n");
+ name = "Logitech QuickCam Zoom";
+ type_id = 740; /* CCD sensor */
+ break;
+ case 0x08B4:
+ Info("Logitech QuickCam Zoom (new model) USB webcam detected.\n");
+ name = "Logitech QuickCam Zoom";
+ type_id = 740; /* CCD sensor */
+ break;
+ case 0x08b5:
+ Info("Logitech QuickCam Orbit/Sphere USB webcam detected.\n");
+ name = "Logitech QuickCam Orbit";
+ type_id = 740; /* CCD sensor */
+ features |= FEATURE_MOTOR_PANTILT;
+ break;
+ case 0x08b6:
+ case 0x08b7:
+ case 0x08b8:
+ Info("Logitech QuickCam detected (reserved ID).\n");
+ name = "Logitech QuickCam (res.)";
+ type_id = 730; /* Assuming CMOS */
+ break;
+ default:
+ return -ENODEV;
+ break;
+ }
+ }
+ else if (vendor_id == 0x055d) {
+ /* I don't know the difference between the C10 and the C30;
+ I suppose the difference is the sensor, but both cameras
+ work equally well with a type_id of 675
+ */
+ switch(product_id) {
+ case 0x9000:
+ Info("Samsung MPC-C10 USB webcam detected.\n");
+ name = "Samsung MPC-C10";
+ type_id = 675;
+ break;
+ case 0x9001:
+ Info("Samsung MPC-C30 USB webcam detected.\n");
+ name = "Samsung MPC-C30";
+ type_id = 675;
+ break;
+ default:
+ return -ENODEV;
+ break;
+ }
+ }
+ else if (vendor_id == 0x041e) {
+ switch(product_id) {
+ case 0x400c:
+ Info("Creative Labs Webcam 5 detected.\n");
+ name = "Creative Labs Webcam 5";
+ type_id = 730;
+ break;
+ case 0x4011:
+ Info("Creative Labs Webcam Pro Ex detected.\n");
+ name = "Creative Labs Webcam Pro Ex";
+ type_id = 740;
+ break;
+ default:
+ return -ENODEV;
+ break;
+ }
+ }
+ else if (vendor_id == 0x04cc) {
+ switch(product_id) {
+ case 0x8116:
+ Info("Sotec Afina Eye USB webcam detected.\n");
+ name = "Sotec Afina Eye";
+ type_id = 730;
+ break;
+ default:
+ return -ENODEV;
+ break;
+ }
+ }
+ else if (vendor_id == 0x06be) {
+ switch(product_id) {
+ case 0x8116:
+ /* This is essentially the same cam as the Sotec Afina Eye */
+ Info("AME Co. Afina Eye USB webcam detected.\n");
+ name = "AME Co. Afina Eye";
+ type_id = 750;
+ break;
+ default:
+ return -ENODEV;
+ break;
+ }
+
+ }
+ else if (vendor_id == 0x0d81) {
+ switch(product_id) {
+ case 0x1900:
+ Info("Visionite VCS-UC300 USB webcam detected.\n");
+ name = "Visionite VCS-UC300";
+ type_id = 740; /* CCD sensor */
+ break;
+ case 0x1910:
+ Info("Visionite VCS-UM100 USB webcam detected.\n");
+ name = "Visionite VCS-UM100";
+ type_id = 730; /* CMOS sensor */
+ break;
+ default:
+ return -ENODEV;
+ break;
+ }
+ }
+ else
+ return -ENODEV; /* Not any of the know types; but the list keeps growing. */
+
+ /* serial_number is zeroed first and at most 29 bytes are written,
+    so it is always NUL-terminated */
+ memset(serial_number, 0, 30);
+ usb_string(udev, udev->descriptor.iSerialNumber, serial_number, 29);
+ Trace(TRACE_PROBE, "Device serial number is %s\n", serial_number);
+
+ if (udev->descriptor.bNumConfigurations > 1)
+ Info("Warning: more than 1 configuration available.\n");
+
+ /* Allocate structure, initialize pointers, mutexes, etc. and link it to the usb_device */
+ pdev = kmalloc(sizeof(struct pwc_device), GFP_KERNEL);
+ if (pdev == NULL) {
+ Err("Oops, could not allocate memory for pwc_device.\n");
+ return -ENOMEM;
+ }
+ memset(pdev, 0, sizeof(struct pwc_device));
+ pdev->type = type_id;
+ pdev->vsize = default_size;
+ pdev->vframes = default_fps;
+ strcpy(pdev->serial, serial_number);
+ pdev->features = features;
+ if (vendor_id == 0x046D && product_id == 0x08B5)
+ {
+ /* Logitech QuickCam Orbit
+ The ranges have been determined experimentally; they may differ from cam to cam.
+ Also, the exact ranges left-right and up-down are different for my cam
+ */
+ pdev->angle_range.pan_min = -7000;
+ pdev->angle_range.pan_max = 7000;
+ pdev->angle_range.tilt_min = -3000;
+ pdev->angle_range.tilt_max = 2500;
+ }
+
+ init_MUTEX(&pdev->modlock);
+ pdev->ptrlock = SPIN_LOCK_UNLOCKED;
+
+ pdev->udev = udev;
+ init_waitqueue_head(&pdev->frameq);
+ pdev->vcompression = pwc_preferred_compression;
+
+ /* Allocate video_device structure */
+ pdev->vdev = video_device_alloc();
+ if (pdev->vdev == 0)
+ {
+ Err("Err, cannot allocate video_device struture. Failing probe.");
+ kfree(pdev);
+ return -ENOMEM;
+ }
+ memcpy(pdev->vdev, &pwc_template, sizeof(pwc_template));
+ strcpy(pdev->vdev->name, name);
+ pdev->vdev->owner = THIS_MODULE;
+ video_set_drvdata(pdev->vdev, pdev);
+
+ pdev->release = udev->descriptor.bcdDevice;
+ Trace(TRACE_PROBE, "Release: %04x\n", pdev->release);
+
+ /* Now search device_hint[] table for a match, so we can hint a node number. */
+ for (hint = 0; hint < MAX_DEV_HINTS; hint++) {
+ if (((device_hint[hint].type == -1) || (device_hint[hint].type == pdev->type)) &&
+ (device_hint[hint].pdev == NULL)) {
+ /* so far, so good... try serial number */
+ if ((device_hint[hint].serial_number[0] == '*') || !strcmp(device_hint[hint].serial_number, serial_number)) {
+ /* match! */
+ video_nr = device_hint[hint].device_node;
+ Trace(TRACE_PROBE, "Found hint, will try to register as /dev/video%d\n", video_nr);
+ break;
+ }
+ }
+ }
+
+ pdev->vdev->release = video_device_release;
+ i = video_register_device(pdev->vdev, VFL_TYPE_GRABBER, video_nr);
+ if (i < 0) {
+ Err("Failed to register as video device (%d).\n", i);
+ video_device_release(pdev->vdev); /* Drip... drip... drip... */
+ kfree(pdev); /* Oops, no memory leaks please */
+ return -EIO;
+ }
+ else {
+ Info("Registered as /dev/video%d.\n", pdev->vdev->minor & 0x3F);
+ }
+
+ /* occupy slot; hint == MAX_DEV_HINTS when the loop above found no match */
+ if (hint < MAX_DEV_HINTS)
+ device_hint[hint].pdev = pdev;
+
+ Trace(TRACE_PROBE, "probe() function returning struct at 0x%p.\n", pdev);
+ usb_set_intfdata (intf, pdev);
+ return 0;
+}
+
+/* The user janked out the cable... */
+/* USB disconnect callback.  Validates the interface's private data,
+   flags an EPIPE error so readers bail out, waits until the device node
+   is closed, then unregisters and frees everything and clears any
+   device_hint[] slot this camera occupied. */
+static void usb_pwc_disconnect(struct usb_interface *intf)
+{
+ struct pwc_device *pdev;
+ int hint;
+
+ lock_kernel();
+ pdev = usb_get_intfdata (intf);
+ usb_set_intfdata (intf, NULL);
+ if (pdev == NULL) {
+ Err("pwc_disconnect() Called without private pointer.\n");
+ goto disconnect_out;
+ }
+ if (pdev->udev == NULL) {
+ Err("pwc_disconnect() already called for %p\n", pdev);
+ goto disconnect_out;
+ }
+ if (pdev->udev != interface_to_usbdev(intf)) {
+ Err("pwc_disconnect() Woops: pointer mismatch udev/pdev.\n");
+ goto disconnect_out;
+ }
+#ifdef PWC_MAGIC
+ if (pdev->magic != PWC_MAGIC) {
+ Err("pwc_disconnect() Magic number failed. Consult your scrolls and try again.\n");
+ goto disconnect_out;
+ }
+#endif
+
+ /* We got unplugged; this is signalled by an EPIPE error code */
+ if (pdev->vopen) {
+ Info("Disconnected while webcam is in use!\n");
+ pdev->error_status = EPIPE;
+ }
+
+ /* Alert waiting processes */
+ wake_up_interruptible(&pdev->frameq);
+ /* Wait until device is closed */
+ /* NOTE(review): busy-waits with schedule() while holding the BKL
+    (lock_kernel above) -- confirm close() can make progress here. */
+ while (pdev->vopen)
+ schedule();
+ /* Device is now closed, so we can safely unregister it */
+ Trace(TRACE_PROBE, "Unregistering video device in disconnect().\n");
+ video_unregister_device(pdev->vdev);
+
+ /* Free memory (don't set pdev to 0 just yet) */
+ kfree(pdev);
+
+disconnect_out:
+ /* search device_hint[] table if we occupy a slot, by any chance */
+ /* pdev may already be freed here; it is only compared by value,
+    never dereferenced */
+ for (hint = 0; hint < MAX_DEV_HINTS; hint++)
+ if (device_hint[hint].pdev == pdev)
+ device_hint[hint].pdev = NULL;
+
+ unlock_kernel();
+}
+
+
+/* *grunt* We have to do atoi ourselves :-(
+   Minimal decimal parser: accumulates the leading run of digits of 's'
+   and stops at the first non-digit (a digit test already excludes the
+   NUL terminator, so no separate '\0' check is needed).  Returns 0 when
+   's' does not start with a digit.  No sign or overflow handling --
+   inputs are short module-parameter strings. */
+static int pwc_atoi(const char *s)
+{
+ int k = 0;
+
+ while (*s >= '0' && *s <= '9') {
+ k = 10 * k + (*s - '0');
+ s++;
+ }
+ return k;
+}
+
+
+/*
+ * Initialization code & module stuff
+ */
+
+/* Module parameters.  NULL / 0 / -1 mean "not given on the command line";
+   the compiled-in defaults then stay in effect (values are validated and
+   applied in usb_pwc_init()). */
+static char *size = NULL;
+static int fps = 0;
+static int fbufs = 0;
+static int mbufs = 0;
+static int trace = -1;
+static int compression = -1;
+static int leds[2] = { -1, -1 };
+static char *dev_hint[MAX_DEV_HINTS] = { };
+
+MODULE_PARM(size, "s");
+MODULE_PARM_DESC(size, "Initial image size. One of sqcif, qsif, qcif, sif, cif, vga");
+MODULE_PARM(fps, "i");
+MODULE_PARM_DESC(fps, "Initial frames per second. Varies with model, useful range 5-30");
+MODULE_PARM(fbufs, "i");
+MODULE_PARM_DESC(fbufs, "Number of internal frame buffers to reserve");
+MODULE_PARM(mbufs, "i");
+MODULE_PARM_DESC(mbufs, "Number of external (mmap()ed) image buffers");
+MODULE_PARM(trace, "i");
+MODULE_PARM_DESC(trace, "For debugging purposes");
+MODULE_PARM(power_save, "i");
+MODULE_PARM_DESC(power_save, "Turn power save feature in camera on or off");
+MODULE_PARM(compression, "i");
+MODULE_PARM_DESC(compression, "Preferred compression quality. Range 0 (uncompressed) to 3 (high compression)");
+MODULE_PARM(leds, "2i");
+MODULE_PARM_DESC(leds, "LED on,off time in milliseconds");
+MODULE_PARM(dev_hint, "0-20s");
+MODULE_PARM_DESC(dev_hint, "Device node hints");
+
+MODULE_DESCRIPTION("Philips & OEM USB webcam driver");
+MODULE_AUTHOR("Luc Saillard <luc@saillard.org>");
+MODULE_LICENSE("GPL");
+
+/* Module init: validate the module parameters, apply them to the global
+   defaults, parse the dev_hint strings into device_hint[], then register
+   the USB driver.  Returns 0 or -EINVAL on a bad parameter. */
+static int __init usb_pwc_init(void)
+{
+ int i, sz;
+ /* NOTE(review): could be 'static const char * const' -- runs once at
+    init, so this is cosmetic */
+ char *sizenames[PSZ_MAX] = { "sqcif", "qsif", "qcif", "sif", "cif", "vga" };
+
+ Info("Philips webcam module version " PWC_VERSION " loaded.\n");
+ Info("Supports Philips PCA645/646, PCVC675/680/690, PCVC720[40]/730/740/750 & PCVC830/840.\n");
+ Info("Also supports the Askey VC010, various Logitech Quickcams, Samsung MPC-C10 and MPC-C30,\n");
+ Info("the Creative WebCam 5 & Pro Ex, SOTEC Afina Eye and Visionite VCS-UC300 and VCS-UM100.\n");
+
+ if (fps) {
+ if (fps < 4 || fps > 30) {
+ Err("Framerate out of bounds (4-30).\n");
+ return -EINVAL;
+ }
+ default_fps = fps;
+ Info("Default framerate set to %d.\n", default_fps);
+ }
+
+ if (size) {
+ /* string; try matching with array */
+ for (sz = 0; sz < PSZ_MAX; sz++) {
+ if (!strcmp(sizenames[sz], size)) { /* Found! */
+ default_size = sz;
+ break;
+ }
+ }
+ if (sz == PSZ_MAX) {
+ Err("Size not recognized; try size=[sqcif | qsif | qcif | sif | cif | vga].\n");
+ return -EINVAL;
+ }
+ Info("Default image size set to %s [%dx%d].\n", sizenames[default_size], pwc_image_sizes[default_size].x, pwc_image_sizes[default_size].y);
+ }
+ if (mbufs) {
+ if (mbufs < 1 || mbufs > MAX_IMAGES) {
+ Err("Illegal number of mmap() buffers; use a number between 1 and %d.\n", MAX_IMAGES);
+ return -EINVAL;
+ }
+ default_mbufs = mbufs;
+ Info("Number of image buffers set to %d.\n", default_mbufs);
+ }
+ if (fbufs) {
+ if (fbufs < 2 || fbufs > MAX_FRAMES) {
+ Err("Illegal number of frame buffers; use a number between 2 and %d.\n", MAX_FRAMES);
+ return -EINVAL;
+ }
+ default_fbufs = fbufs;
+ Info("Number of frame buffers set to %d.\n", default_fbufs);
+ }
+ if (trace >= 0) {
+ Info("Trace options: 0x%04x\n", trace);
+ pwc_trace = trace;
+ }
+ if (compression >= 0) {
+ if (compression > 3) {
+ Err("Invalid compression setting; use a number between 0 (uncompressed) and 3 (high).\n");
+ return -EINVAL;
+ }
+ pwc_preferred_compression = compression;
+ Info("Preferred compression set to %d.\n", pwc_preferred_compression);
+ }
+ if (power_save)
+ Info("Enabling power save on open/close.\n");
+ if (leds[0] >= 0)
+ led_on = leds[0];
+ if (leds[1] >= 0)
+ led_off = leds[1];
+
+ /* Big device node whoopla. Basically, it allows you to assign a
+ device node (/dev/videoX) to a camera, based on its type
+ & serial number. The format is [type[.serialnumber]:]node.
+
+ Any camera that isn't matched by these rules gets the next
+ available free device node.
+ */
+ for (i = 0; i < MAX_DEV_HINTS; i++) {
+ char *s, *colon, *dot;
+
+ /* This loop also initializes the array */
+ device_hint[i].pdev = NULL;
+ s = dev_hint[i];
+ if (s != NULL && *s != '\0') {
+ device_hint[i].type = -1; /* wildcard */
+ strcpy(device_hint[i].serial_number, "*");
+
+ /* parse string: chop at ':' & '/' */
+ colon = dot = s;
+ while (*colon != '\0' && *colon != ':')
+ colon++;
+ while (*dot != '\0' && *dot != '.')
+ dot++;
+ /* Few sanity checks */
+ if (*dot != '\0' && dot > colon) {
+ Err("Malformed camera hint: the colon must be after the dot.\n");
+ return -EINVAL;
+ }
+
+ if (*colon == '\0') {
+ /* No colon */
+ if (*dot != '\0') {
+ Err("Malformed camera hint: no colon + device node given.\n");
+ return -EINVAL;
+ }
+ else {
+ /* No type or serial number specified, just a number. */
+ device_hint[i].device_node = pwc_atoi(s);
+ }
+ }
+ else {
+ /* There's a colon, so we have at least a type and a device node */
+ device_hint[i].type = pwc_atoi(s);
+ device_hint[i].device_node = pwc_atoi(colon + 1);
+ if (*dot != '\0') {
+ /* There's a serial number as well; copy at most 29
+    chars (serial_number[] terminator guaranteed below) */
+ int k;
+
+ dot++;
+ k = 0;
+ while (*dot != ':' && k < 29) {
+ device_hint[i].serial_number[k++] = *dot;
+ dot++;
+ }
+ device_hint[i].serial_number[k] = '\0';
+ }
+ }
+#if PWC_DEBUG
+ Debug("device_hint[%d]:\n", i);
+ Debug(" type : %d\n", device_hint[i].type);
+ Debug(" serial# : %s\n", device_hint[i].serial_number);
+ Debug(" node : %d\n", device_hint[i].device_node);
+#endif
+ }
+ else
+ device_hint[i].type = 0; /* not filled */
+ } /* ..for MAX_DEV_HINTS */
+
+ Trace(TRACE_PROBE, "Registering driver at address 0x%p.\n", &pwc_driver);
+ return usb_register(&pwc_driver);
+}
+
+/* Module exit: deregister the USB driver; the USB core then calls
+   usb_pwc_disconnect() for any still-bound interfaces. */
+static void __exit usb_pwc_exit(void)
+{
+ Trace(TRACE_MODULE, "Deregistering driver.\n");
+ usb_deregister(&pwc_driver);
+ Info("Philips webcam module removed.\n");
+}
+
+/* Kernel module entry/exit points */
+module_init(usb_pwc_init);
+module_exit(usb_pwc_exit);
+
--- /dev/null
+#ifndef PWC_IOCTL_H
+#define PWC_IOCTL_H
+
+/* (C) 2001-2004 Nemosoft Unv.
+ (C) 2004 Luc Saillard (luc@saillard.org)
+
+ NOTE: this version of pwc is an unofficial (modified) release of pwc & pwcx
+ driver and thus may have bugs that are not present in the original version.
+ Please send bug reports and support requests to <luc@saillard.org>.
+ The decompression routines have been implemented by reverse-engineering the
+ Nemosoft binary pwcx module. Caveat emptor.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*/
+
+/* This is pwc-ioctl.h belonging to PWC 8.12.1
+ It contains structures and defines to communicate from user space
+ directly to the driver.
+ */
+
+/*
+ Changes
+ 2001/08/03 Alvarado Added ioctl constants to access methods for
+ changing white balance and red/blue gains
+ 2002/12/15 G. H. Fernandez-Toribio VIDIOCGREALSIZE
+ 2003/12/13 Nemosoft Unv. Some modifications to make interfacing to
+ PWCX easier
+ */
+
+/* These are private ioctl() commands, specific for the Philips webcams.
+ They contain functions not found in other webcams, and settings not
+ specified in the Video4Linux API.
+
+ The #define names are built up like follows:
+ VIDIOC VIDeo IOCtl prefix
+ PWC Philips WebCam
+ G optional: Get
+ S optional: Set
+ ... the function
+ */
+
+
+ /* Enumeration of image sizes; used (among others) as the first index
+    into the per-chipset mode tables (e.g. Kiara_table[PSZ_MAX][6][4]). */
+#define PSZ_SQCIF 0x00
+#define PSZ_QSIF 0x01
+#define PSZ_QCIF 0x02
+#define PSZ_SIF 0x03
+#define PSZ_CIF 0x04
+#define PSZ_VGA 0x05
+#define PSZ_MAX 6 /* number of sizes; not itself a valid size index */
+
+
+/* The frame rate is encoded in the video_window.flags parameter using
+ the upper 16 bits, since some flags are defined nowadays. The following
+ defines provide a mask and shift to filter out this value.
+
+ In 'Snapshot' mode the camera freezes its automatic exposure and colour
+ balance controls.
+ */
+#define PWC_FPS_SHIFT 16 /* shift to move the fps value into/out of flags */
+#define PWC_FPS_MASK 0x00FF0000 /* all flag bits reserved for frame rate */
+#define PWC_FPS_FRMASK 0x003F0000 /* bits actually carrying the fps value */
+#define PWC_FPS_SNAPSHOT 0x00400000 /* snapshot-mode flag */
+
+
+/* Structure for transferring x & y coordinates */
+struct pwc_coord
+{
+ int x, y; /* guess what */
+ int size; /* size, or offset */
+};
+
+
+/* Used with VIDIOCPWCPROBE; filled in by the driver with the original
+   device name and the corresponding Philips camera type. */
+struct pwc_probe
+{
+ char name[32];
+ int type;
+};
+
+/* Used with VIDIOCPWCGSERIAL to read the camera's serial number. */
+struct pwc_serial
+{
+ char serial[30]; /* String with serial number. Contains terminating 0 */
+};
+
+/* pwc_whitebalance.mode values */
+#define PWC_WB_INDOOR 0
+#define PWC_WB_OUTDOOR 1
+#define PWC_WB_FL 2 /* presumably fluorescent lighting — TODO confirm */
+#define PWC_WB_MANUAL 3
+#define PWC_WB_AUTO 4
+
+/* Used with VIDIOCPWC[SG]AWB (Auto White Balance).
+ Set mode to one of the PWC_WB_* values above.
+ *red and *blue are the respective gains of these colour components inside
+ the camera; range 0..65535
+ When 'mode' == PWC_WB_MANUAL, 'manual_red' and 'manual_blue' are set or read;
+ otherwise undefined.
+ 'read_red' and 'read_blue' are read-only.
+*/
+struct pwc_whitebalance
+{
+ int mode;
+ int manual_red, manual_blue; /* R/W */
+ int read_red, read_blue; /* R/O */
+};
+
+/*
+ 'control_speed' and 'control_delay' are used in automatic whitebalance mode,
+ and tell the camera how fast it should react to changes in lighting, and
+ with how much delay. Valid values are 0..65535.
+ Used with VIDIOCPWC[SG]AWBSPEED.
+*/
+struct pwc_wb_speed
+{
+ int control_speed;
+ int control_delay;
+
+};
+
+/* Used with VIDIOCPWC[SG]LED; on/off times control LED blinking.
+   (Time unit is not specified in this header — presumably milliseconds;
+   confirm against the driver implementation.) */
+struct pwc_leds
+{
+ int led_on; /* Led on-time; range = 0..25000 */
+ int led_off; /* Led off-time; range = 0..25000 */
+};
+
+/* Image size (used with VIDIOCPWCGREALSIZE): the real size as used by the
+   camera, which tells you whether there is a gray border around the image. */
+struct pwc_imagesize
+{
+ int width;
+ int height;
+};
+
+/* Defines and structures for Motorized Pan & Tilt */
+#define PWC_MPT_PAN 0x01
+#define PWC_MPT_TILT 0x02
+#define PWC_MPT_TIMEOUT 0x04 /* for status */
+
+/* Set angles; when absolute != 0, the angle is absolute and the
+ driver calculates the relative offset for you. This can only
+ be used with VIDIOCPWCSANGLE; VIDIOCPWCGANGLE always returns
+ absolute angles.
+ */
+struct pwc_mpt_angles
+{
+ int absolute; /* write-only */
+ int pan; /* degrees * 100 */
+ int tilt; /* degrees * 100 */
+};
+
+/* Range of angles of the camera, both horizontally and vertically.
+ Used with VIDIOCPWCMPTGRANGE.
+ */
+struct pwc_mpt_range
+{
+ int pan_min, pan_max; /* degrees * 100 */
+ int tilt_min, tilt_max;
+};
+
+/* Used with VIDIOCPWCMPTSTATUS; 'status' carries PWC_MPT_* bits
+   (see PWC_MPT_TIMEOUT above). */
+struct pwc_mpt_status
+{
+ int status;
+ int time_pan;
+ int time_tilt;
+};
+
+
+/* This is used for out-of-kernel decompression. With it, you can get
+ all the necessary information to initialize and use the decompressor
+ routines in standalone applications. Used with VIDIOCPWCGVIDCMD.
+ */
+struct pwc_video_command
+{
+ int type; /* camera type (645, 675, 730, etc.) */
+ int release; /* release number */
+
+ int size; /* one of PSZ_* */
+ int alternate; /* USB alternate interface number for this mode */
+ int command_len; /* length of USB video command */
+ unsigned char command_buf[13]; /* Actual USB video command */
+ int bandlength; /* >0 = compressed */
+ int frame_size; /* Size of one (un)compressed frame */
+};
+
+/* Flags for PWCX subroutines (bitmask values). Not all modules honour
+   all flags. */
+#define PWCX_FLAG_PLANAR 0x0001
+#define PWCX_FLAG_BAYER 0x0008
+
+
+/* IOCTL definitions */
+
+ /* Restore user settings */
+#define VIDIOCPWCRUSER _IO('v', 192)
+ /* Save user settings */
+#define VIDIOCPWCSUSER _IO('v', 193)
+ /* Restore factory settings */
+#define VIDIOCPWCFACTORY _IO('v', 194)
+
+ /* You can manipulate the compression factor. A compression preference of 0
+ means use uncompressed modes when available; 1 is low compression, 2 is
+ medium and 3 is high compression preferred. Of course, the higher the
+ compression, the lower the bandwidth used but more chance of artefacts
+ in the image. The driver automatically chooses a higher compression when
+ the preferred mode is not available.
+ */
+ /* Set preferred compression quality (0 = uncompressed, 3 = highest compression) */
+#define VIDIOCPWCSCQUAL _IOW('v', 195, int)
+ /* Get preferred compression quality. Set/Get pairs share one ioctl number
+ and are distinguished by the _IOW/_IOR direction encoding. */
+#define VIDIOCPWCGCQUAL _IOR('v', 195, int)
+
+
+/* Retrieve serial number of camera */
+#define VIDIOCPWCGSERIAL _IOR('v', 198, struct pwc_serial)
+
+ /* This is a probe function; since so many devices are supported, it
+ becomes difficult to include all the names in programs that want to
+ check for the enhanced Philips stuff. So instead, try this PROBE;
+ it returns a structure with the original name, and the corresponding
+ Philips type.
+ To use, fill the structure with zeroes, call PROBE and if that succeeds,
+ compare the name with that returned from VIDIOCGCAP; they should be the
+ same. If so, you can be assured it is a Philips (OEM) cam and the type
+ is valid.
+ */
+#define VIDIOCPWCPROBE _IOR('v', 199, struct pwc_probe)
+
+ /* Set AGC (Automatic Gain Control); int < 0 = auto, 0..65535 = fixed */
+#define VIDIOCPWCSAGC _IOW('v', 200, int)
+ /* Get AGC; int < 0 = auto; >= 0 = fixed, range 0..65535 */
+#define VIDIOCPWCGAGC _IOR('v', 200, int)
+ /* Set shutter speed; int < 0 = auto; >= 0 = fixed, range 0..65535
+ (note: this header defines no matching "get" ioctl for the shutter) */
+#define VIDIOCPWCSSHUTTER _IOW('v', 201, int)
+
+ /* Color compensation (Auto White Balance) */
+#define VIDIOCPWCSAWB _IOW('v', 202, struct pwc_whitebalance)
+#define VIDIOCPWCGAWB _IOR('v', 202, struct pwc_whitebalance)
+
+ /* Auto WB speed */
+#define VIDIOCPWCSAWBSPEED _IOW('v', 203, struct pwc_wb_speed)
+#define VIDIOCPWCGAWBSPEED _IOR('v', 203, struct pwc_wb_speed)
+
+ /* LEDs on/off/blink; int range 0..65535 */
+#define VIDIOCPWCSLED _IOW('v', 205, struct pwc_leds)
+#define VIDIOCPWCGLED _IOR('v', 205, struct pwc_leds)
+
+ /* Contour (sharpness); int < 0 = auto, 0..65536 = fixed */
+#define VIDIOCPWCSCONTOUR _IOW('v', 206, int)
+#define VIDIOCPWCGCONTOUR _IOR('v', 206, int)
+
+ /* Backlight compensation; 0 = off, otherwise on */
+#define VIDIOCPWCSBACKLIGHT _IOW('v', 207, int)
+#define VIDIOCPWCGBACKLIGHT _IOR('v', 207, int)
+
+ /* Flickerless mode; = 0 off, otherwise on */
+#define VIDIOCPWCSFLICKER _IOW('v', 208, int)
+#define VIDIOCPWCGFLICKER _IOR('v', 208, int)
+
+ /* Dynamic noise reduction; 0 off, 3 = high noise reduction */
+#define VIDIOCPWCSDYNNOISE _IOW('v', 209, int)
+#define VIDIOCPWCGDYNNOISE _IOR('v', 209, int)
+
+ /* Real image size as used by the camera; tells you whether or not there's a gray border around the image */
+#define VIDIOCPWCGREALSIZE _IOR('v', 210, struct pwc_imagesize)
+
+ /* Motorized pan & tilt functions. RESET's int argument is presumably a
+ bitmask of PWC_MPT_PAN / PWC_MPT_TILT — TODO confirm against driver. */
+#define VIDIOCPWCMPTRESET _IOW('v', 211, int)
+#define VIDIOCPWCMPTGRANGE _IOR('v', 211, struct pwc_mpt_range)
+#define VIDIOCPWCMPTSANGLE _IOW('v', 212, struct pwc_mpt_angles)
+#define VIDIOCPWCMPTGANGLE _IOR('v', 212, struct pwc_mpt_angles)
+#define VIDIOCPWCMPTSTATUS _IOR('v', 213, struct pwc_mpt_status)
+
+ /* Get the USB set-video command; needed for initializing libpwcx */
+#define VIDIOCPWCGVIDCMD _IOR('v', 215, struct pwc_video_command)
+/* Used with VIDIOCPWCGVIDTABLE. NOTE(review): 'buffer' appears to point at
+   user memory of 'len' bytes that the driver fills with decompressor
+   initialization data — confirm against the pwc-uncompress implementation. */
+struct pwc_table_init_buffer {
+ int len;
+ char *buffer;
+
+};
+#define VIDIOCPWCGVIDTABLE _IOR('v', 216, struct pwc_table_init_buffer)
+
+#endif
--- /dev/null
+/* Linux driver for Philips webcam
+ (C) 2004 Luc Saillard (luc@saillard.org)
+
+ NOTE: this version of pwc is an unofficial (modified) release of pwc & pcwx
+ driver and thus may have bugs that are not present in the original version.
+ Please send bug reports and support requests to <luc@saillard.org>.
+ The decompression routines have been implemented by reverse-engineering the
+ Nemosoft binary pwcx module. Caveat emptor.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*/
+
+
+/* This table contains entries for the 730/740/750 (Kiara) camera, with
+ 4 different qualities (no compression, low, medium, high).
+ It lists the bandwidth requirements for said mode by its alternate interface
+ number. An alternate of 0 means that the mode is unavailable.
+
+ There are 6 * 6 * 4 entries:
+ 6 different resolutions subqcif, qsif, qcif, sif, cif, vga
+ 6 framerates: 5, 10, 15, 20, 25, 30
+ 4 compression modes: none, low, medium, high
+
+ When an uncompressed mode is not available, the next available compressed mode
+ will be chosen (unless the decompressor is absent). Sometimes there are only
+ 1 or 2 compressed modes available; in that case entries are duplicated.
+*/
+
+
+#include "pwc-kiara.h"
+#include "pwc-uncompress.h"
+
+/* Per-mode table for the Kiara (730/740/750) chipset.
+ Indexed as Kiara_table[size][fps][compression]:
+ size : PSZ_* image-size index (0..PSZ_MAX-1)
+ fps : 0..5 for 5, 10, 15, 20, 25, 30 frames/sec
+ compression : 0..3 for none, low, medium, high
+ An entry whose first field (the USB alternate interface) is 0 marks an
+ unavailable mode; the trailing 12-byte array is the raw USB video command
+ for the mode. Exact field layout is struct Kiara_table_entry from
+ pwc-kiara.h (not visible here — confirm field meanings against it). */
+const struct Kiara_table_entry Kiara_table[PSZ_MAX][6][4] =
+{
+ /* SQCIF */
+ {
+ /* 5 fps */
+ {
+ {0, },
+ {0, },
+ {0, },
+ {0, },
+ },
+ /* 10 fps */
+ {
+ {0, },
+ {0, },
+ {0, },
+ {0, },
+ },
+ /* 15 fps */
+ {
+ {0, },
+ {0, },
+ {0, },
+ {0, },
+ },
+ /* 20 fps */
+ {
+ {0, },
+ {0, },
+ {0, },
+ {0, },
+ },
+ /* 25 fps */
+ {
+ {0, },
+ {0, },
+ {0, },
+ {0, },
+ },
+ /* 30 fps */
+ {
+ {0, },
+ {0, },
+ {0, },
+ {0, },
+ },
+ },
+ /* QSIF */
+ {
+ /* 5 fps */
+ {
+ {1, 146, 0, {0x1D, 0xF4, 0x30, 0x00, 0x00, 0x00, 0x00, 0x18, 0x00, 0x92, 0x00, 0x80}},
+ {1, 146, 0, {0x1D, 0xF4, 0x30, 0x00, 0x00, 0x00, 0x00, 0x18, 0x00, 0x92, 0x00, 0x80}},
+ {1, 146, 0, {0x1D, 0xF4, 0x30, 0x00, 0x00, 0x00, 0x00, 0x18, 0x00, 0x92, 0x00, 0x80}},
+ {1, 146, 0, {0x1D, 0xF4, 0x30, 0x00, 0x00, 0x00, 0x00, 0x18, 0x00, 0x92, 0x00, 0x80}},
+ },
+ /* 10 fps */
+ {
+ {2, 291, 0, {0x1C, 0xF4, 0x30, 0x00, 0x00, 0x00, 0x00, 0x18, 0x00, 0x23, 0x01, 0x80}},
+ {1, 192, 630, {0x14, 0xF4, 0x30, 0x13, 0xA9, 0x12, 0xE1, 0x17, 0x08, 0xC0, 0x00, 0x80}},
+ {1, 192, 630, {0x14, 0xF4, 0x30, 0x13, 0xA9, 0x12, 0xE1, 0x17, 0x08, 0xC0, 0x00, 0x80}},
+ {1, 192, 630, {0x14, 0xF4, 0x30, 0x13, 0xA9, 0x12, 0xE1, 0x17, 0x08, 0xC0, 0x00, 0x80}},
+ },
+ /* 15 fps */
+ {
+ {3, 437, 0, {0x1B, 0xF4, 0x30, 0x00, 0x00, 0x00, 0x00, 0x18, 0x00, 0xB5, 0x01, 0x80}},
+ {2, 292, 640, {0x13, 0xF4, 0x30, 0x13, 0xF7, 0x13, 0x2F, 0x13, 0x20, 0x24, 0x01, 0x80}},
+ {2, 292, 640, {0x13, 0xF4, 0x30, 0x13, 0xF7, 0x13, 0x2F, 0x13, 0x20, 0x24, 0x01, 0x80}},
+ {1, 192, 420, {0x13, 0xF4, 0x30, 0x0D, 0x1B, 0x0C, 0x53, 0x1E, 0x18, 0xC0, 0x00, 0x80}},
+ },
+ /* 20 fps */
+ {
+ {4, 589, 0, {0x1A, 0xF4, 0x30, 0x00, 0x00, 0x00, 0x00, 0x18, 0x00, 0x4D, 0x02, 0x80}},
+ {3, 448, 730, {0x12, 0xF4, 0x30, 0x16, 0xC9, 0x16, 0x01, 0x0E, 0x18, 0xC0, 0x01, 0x80}},
+ {2, 292, 476, {0x12, 0xF4, 0x30, 0x0E, 0xD8, 0x0E, 0x10, 0x19, 0x18, 0x24, 0x01, 0x80}},
+ {1, 192, 312, {0x12, 0xF4, 0x50, 0x09, 0xB3, 0x08, 0xEB, 0x1E, 0x18, 0xC0, 0x00, 0x80}},
+ },
+ /* 25 fps */
+ {
+ {5, 703, 0, {0x19, 0xF4, 0x30, 0x00, 0x00, 0x00, 0x00, 0x18, 0x00, 0xBF, 0x02, 0x80}},
+ {3, 447, 610, {0x11, 0xF4, 0x30, 0x13, 0x0B, 0x12, 0x43, 0x14, 0x28, 0xBF, 0x01, 0x80}},
+ {2, 292, 398, {0x11, 0xF4, 0x50, 0x0C, 0x6C, 0x0B, 0xA4, 0x1E, 0x28, 0x24, 0x01, 0x80}},
+ {1, 193, 262, {0x11, 0xF4, 0x50, 0x08, 0x23, 0x07, 0x5B, 0x1E, 0x28, 0xC1, 0x00, 0x80}},
+ },
+ /* 30 fps */
+ {
+ {8, 874, 0, {0x18, 0xF4, 0x30, 0x00, 0x00, 0x00, 0x00, 0x18, 0x00, 0x6A, 0x03, 0x80}},
+ {5, 704, 730, {0x10, 0xF4, 0x30, 0x16, 0xC9, 0x16, 0x01, 0x0E, 0x28, 0xC0, 0x02, 0x80}},
+ {3, 448, 492, {0x10, 0xF4, 0x30, 0x0F, 0x5D, 0x0E, 0x95, 0x15, 0x28, 0xC0, 0x01, 0x80}},
+ {2, 292, 320, {0x10, 0xF4, 0x50, 0x09, 0xFB, 0x09, 0x33, 0x1E, 0x28, 0x24, 0x01, 0x80}},
+ },
+ },
+ /* QCIF */
+ {
+ /* 5 fps */
+ {
+ {0, },
+ {0, },
+ {0, },
+ {0, },
+ },
+ /* 10 fps */
+ {
+ {0, },
+ {0, },
+ {0, },
+ {0, },
+ },
+ /* 15 fps */
+ {
+ {0, },
+ {0, },
+ {0, },
+ {0, },
+ },
+ /* 20 fps */
+ {
+ {0, },
+ {0, },
+ {0, },
+ {0, },
+ },
+ /* 25 fps */
+ {
+ {0, },
+ {0, },
+ {0, },
+ {0, },
+ },
+ /* 30 fps */
+ {
+ {0, },
+ {0, },
+ {0, },
+ {0, },
+ },
+ },
+ /* SIF */
+ {
+ /* 5 fps */
+ {
+ {4, 582, 0, {0x0D, 0xF4, 0x30, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x46, 0x02, 0x80}},
+ {3, 387, 1276, {0x05, 0xF4, 0x30, 0x27, 0xD8, 0x26, 0x48, 0x03, 0x10, 0x83, 0x01, 0x80}},
+ {2, 291, 960, {0x05, 0xF4, 0x30, 0x1D, 0xF2, 0x1C, 0x62, 0x04, 0x10, 0x23, 0x01, 0x80}},
+ {1, 191, 630, {0x05, 0xF4, 0x50, 0x13, 0xA9, 0x12, 0x19, 0x05, 0x18, 0xBF, 0x00, 0x80}},
+ },
+ /* 10 fps */
+ {
+ {0, },
+ {6, 775, 1278, {0x04, 0xF4, 0x30, 0x27, 0xE8, 0x26, 0x58, 0x05, 0x30, 0x07, 0x03, 0x80}},
+ {3, 447, 736, {0x04, 0xF4, 0x30, 0x16, 0xFB, 0x15, 0x6B, 0x05, 0x28, 0xBF, 0x01, 0x80}},
+ {2, 292, 480, {0x04, 0xF4, 0x70, 0x0E, 0xF9, 0x0D, 0x69, 0x09, 0x28, 0x24, 0x01, 0x80}},
+ },
+ /* 15 fps */
+ {
+ {0, },
+ {9, 955, 1050, {0x03, 0xF4, 0x30, 0x20, 0xCF, 0x1F, 0x3F, 0x06, 0x48, 0xBB, 0x03, 0x80}},
+ {4, 592, 650, {0x03, 0xF4, 0x30, 0x14, 0x44, 0x12, 0xB4, 0x08, 0x30, 0x50, 0x02, 0x80}},
+ {3, 448, 492, {0x03, 0xF4, 0x50, 0x0F, 0x52, 0x0D, 0xC2, 0x09, 0x38, 0xC0, 0x01, 0x80}},
+ },
+ /* 20 fps */
+ {
+ {0, },
+ {9, 958, 782, {0x02, 0xF4, 0x30, 0x18, 0x6A, 0x16, 0xDA, 0x0B, 0x58, 0xBE, 0x03, 0x80}},
+ {5, 703, 574, {0x02, 0xF4, 0x50, 0x11, 0xE7, 0x10, 0x57, 0x0B, 0x40, 0xBF, 0x02, 0x80}},
+ {3, 446, 364, {0x02, 0xF4, 0x90, 0x0B, 0x5C, 0x09, 0xCC, 0x0E, 0x38, 0xBE, 0x01, 0x80}},
+ },
+ /* 25 fps */
+ {
+ {0, },
+ {9, 958, 654, {0x01, 0xF4, 0x30, 0x14, 0x66, 0x12, 0xD6, 0x0B, 0x50, 0xBE, 0x03, 0x80}},
+ {6, 776, 530, {0x01, 0xF4, 0x50, 0x10, 0x8C, 0x0E, 0xFC, 0x0C, 0x48, 0x08, 0x03, 0x80}},
+ {4, 592, 404, {0x01, 0xF4, 0x70, 0x0C, 0x96, 0x0B, 0x06, 0x0B, 0x48, 0x50, 0x02, 0x80}},
+ },
+ /* 30 fps */
+ {
+ {0, },
+ {9, 957, 526, {0x00, 0xF4, 0x50, 0x10, 0x68, 0x0E, 0xD8, 0x0D, 0x58, 0xBD, 0x03, 0x80}},
+ {6, 775, 426, {0x00, 0xF4, 0x70, 0x0D, 0x48, 0x0B, 0xB8, 0x0F, 0x50, 0x07, 0x03, 0x80}},
+ {4, 590, 324, {0x00, 0x7A, 0x88, 0x0A, 0x1C, 0x08, 0xB4, 0x0E, 0x50, 0x4E, 0x02, 0x80}},
+ },
+ },
+ /* CIF */
+ {
+ /* 5 fps */
+ {
+ {0, },
+ {0, },
+ {0, },
+ {0, },
+ },
+ /* 10 fps */
+ {
+ {0, },
+ {0, },
+ {0, },
+ {0, },
+ },
+ /* 15 fps */
+ {
+ {0, },
+ {0, },
+ {0, },
+ {0, },
+ },
+ /* 20 fps */
+ {
+ {0, },
+ {0, },
+ {0, },
+ {0, },
+ },
+ /* 25 fps */
+ {
+ {0, },
+ {0, },
+ {0, },
+ {0, },
+ },
+ /* 30 fps */
+ {
+ {0, },
+ {0, },
+ {0, },
+ {0, },
+ },
+ },
+ /* VGA */
+ {
+ /* 5 fps */
+ {
+ {0, },
+ {6, 773, 1272, {0x25, 0xF4, 0x30, 0x27, 0xB6, 0x24, 0x96, 0x02, 0x30, 0x05, 0x03, 0x80}},
+ {4, 592, 976, {0x25, 0xF4, 0x50, 0x1E, 0x78, 0x1B, 0x58, 0x03, 0x30, 0x50, 0x02, 0x80}},
+ {3, 448, 738, {0x25, 0xF4, 0x90, 0x17, 0x0C, 0x13, 0xEC, 0x04, 0x30, 0xC0, 0x01, 0x80}},
+ },
+ /* 10 fps */
+ {
+ {0, },
+ {9, 956, 788, {0x24, 0xF4, 0x70, 0x18, 0x9C, 0x15, 0x7C, 0x03, 0x48, 0xBC, 0x03, 0x80}},
+ {6, 776, 640, {0x24, 0xF4, 0xB0, 0x13, 0xFC, 0x11, 0x2C, 0x04, 0x48, 0x08, 0x03, 0x80}},
+ {4, 592, 488, {0x24, 0x7A, 0xE8, 0x0F, 0x3C, 0x0C, 0x6C, 0x06, 0x48, 0x50, 0x02, 0x80}},
+ },
+ /* 15 fps */
+ {
+ {0, },
+ {9, 957, 526, {0x23, 0x7A, 0xE8, 0x10, 0x68, 0x0D, 0x98, 0x06, 0x58, 0xBD, 0x03, 0x80}},
+ {9, 957, 526, {0x23, 0x7A, 0xE8, 0x10, 0x68, 0x0D, 0x98, 0x06, 0x58, 0xBD, 0x03, 0x80}},
+ {8, 895, 492, {0x23, 0x7A, 0xE8, 0x0F, 0x5D, 0x0C, 0x8D, 0x06, 0x58, 0x7F, 0x03, 0x80}},
+ },
+ /* 20 fps */
+ {
+ {0, },
+ {0, },
+ {0, },
+ {0, },
+ },
+ /* 25 fps */
+ {
+ {0, },
+ {0, },
+ {0, },
+ {0, },
+ },
+ /* 30 fps */
+ {
+ {0, },
+ {0, },
+ {0, },
+ {0, },
+ },
+ },
+};
+
+
+/*
+ * Rom table for kiara chips
+ *
+ * 8 rom tables (one for each version?)
+ * 2 tables per rom (one for each pass) (Y, and U&V)
+ * 128 word entries per pass
+ */
+
+const unsigned int KiaraRomTable [8][2][16][8] =
+{
+ { /* version 0 */
+ { /* version 0, passes 0 */
+ {0x00000000,0x00000000,0x00000000,0x00000000,
+ 0x00000000,0x00000000,0x00000001,0x00000001},
+ {0x00000000,0x00000000,0x00000009,0x00000009,
+ 0x00000009,0x00000009,0x00000009,0x00000009},
+ {0x00000000,0x00000000,0x00000009,0x00000049,
+ 0x00000049,0x00000049,0x00000049,0x00000049},
+ {0x00000000,0x00000000,0x00000049,0x00000049,
+ 0x00000049,0x00000249,0x0000024a,0x00000049},
+ {0x00000000,0x00000000,0x00000049,0x00000049,
+ 0x00000249,0x00000249,0x0000024a,0x0000024a},
+ {0x00000000,0x00000000,0x00000049,0x00000249,
+ 0x00000249,0x0000124a,0x0000024a,0x0000024a},
+ {0x00000000,0x00000000,0x00000049,0x00000249,
+ 0x0000124a,0x00009252,0x00001252,0x00001252},
+ {0x00000000,0x00000000,0x00000249,0x00000249,
+ 0x00009252,0x00009292,0x00009292,0x00009292},
+ {0x00000000,0x00000000,0x00000249,0x00001249,
+ 0x00009292,0x00009292,0x00009493,0x000124db},
+ {0x00000000,0x00000000,0x00000249,0x0000924a,
+ 0x00009492,0x0000a49b,0x0000a49b,0x000124db},
+ {0x00000000,0x00000000,0x00001249,0x00009252,
+ 0x0000a493,0x000124db,0x000124db,0x000126dc},
+ {0x00000000,0x00000000,0x00001249,0x00009493,
+ 0x000124db,0x000126dc,0x000136e4,0x000126dc},
+ {0x00000000,0x00000000,0x00009292,0x0000a49b,
+ 0x000124db,0x000136e4,0x000136e4,0x000136e4},
+ {0x00000000,0x00000000,0x00009292,0x0000a49b,
+ 0x000126dc,0x0001b724,0x0001b92d,0x0001b925},
+ {0x00000000,0x00000000,0x00009492,0x000124db,
+ 0x000136e4,0x0001b925,0x0001c96e,0x0001c92d},
+ {0x00000000,0x00000000,0x00000000,0x00000000,
+ 0x00000000,0x00000000,0x00000000,0x00000000}
+ },
+ { /* version 0, passes 1 */
+ {0x00000000,0x00000000,0x00000000,0x00000000,
+ 0x00000000,0x00000000,0x00000000,0x00000000},
+ {0x00000000,0x00000000,0x00000000,0x00000000,
+ 0x00000000,0x00000000,0x00000000,0x00000000},
+ {0x00000000,0x00000000,0x00000001,0x00000009,
+ 0x00000009,0x00000009,0x00000009,0x00000001},
+ {0x00000000,0x00000000,0x00000009,0x00000009,
+ 0x00000049,0x00000049,0x00000049,0x00000049},
+ {0x00000000,0x00000000,0x00000049,0x00000049,
+ 0x00000049,0x00000049,0x0000024a,0x0000024a},
+ {0x00000000,0x00000000,0x00000049,0x00000049,
+ 0x00000249,0x00000249,0x0000024a,0x0000024a},
+ {0x00000000,0x00000000,0x00000049,0x00000249,
+ 0x00000249,0x00000249,0x0000024a,0x00001252},
+ {0x00000000,0x00000000,0x00000049,0x00001249,
+ 0x0000124a,0x0000124a,0x00001252,0x00009292},
+ {0x00000000,0x00000000,0x00000249,0x00001249,
+ 0x00009252,0x00009252,0x00009292,0x00009493},
+ {0x00000000,0x00000000,0x00000249,0x0000924a,
+ 0x00009292,0x00009292,0x00009292,0x00009493},
+ {0x00000000,0x00000000,0x00000249,0x00009292,
+ 0x00009492,0x00009493,0x0000a49b,0x00009493},
+ {0x00000000,0x00000000,0x00001249,0x00009292,
+ 0x0000a493,0x000124db,0x000126dc,0x000126dc},
+ {0x00000000,0x00000000,0x0000924a,0x00009493,
+ 0x0000a493,0x000126dc,0x000136e4,0x000136e4},
+ {0x00000000,0x00000000,0x00009252,0x00009493,
+ 0x000126dc,0x000126dc,0x000136e4,0x000136e4},
+ {0x00000000,0x00000000,0x00009292,0x0000a49b,
+ 0x000136e4,0x000136e4,0x0001b725,0x0001b724},
+ {0x00000000,0x00000000,0x00000000,0x00000000,
+ 0x00000000,0x00000000,0x00000000,0x00000000}
+ }
+ },
+ { /* version 1 */
+ { /* version 1, passes 0 */
+ {0x00000000,0x00000000,0x00000000,0x00000000,
+ 0x00000000,0x00000000,0x00000000,0x00000001},
+ {0x00000000,0x00000000,0x00000009,0x00000009,
+ 0x00000009,0x00000009,0x00000009,0x00000009},
+ {0x00000000,0x00000000,0x00000049,0x00000049,
+ 0x00000049,0x00000049,0x00000049,0x00000049},
+ {0x00000000,0x00000000,0x00000049,0x00000049,
+ 0x00000049,0x00000249,0x0000024a,0x0000024a},
+ {0x00000000,0x00000000,0x00000049,0x00000249,
+ 0x00000249,0x00000249,0x0000024a,0x00001252},
+ {0x00000000,0x00000000,0x00000249,0x00000249,
+ 0x00000249,0x0000124a,0x00001252,0x00001252},
+ {0x00000000,0x00000000,0x00000249,0x00000249,
+ 0x0000124a,0x0000124a,0x00009292,0x00009292},
+ {0x00000000,0x00000000,0x00000249,0x00001249,
+ 0x0000124a,0x00009252,0x00009292,0x00009292},
+ {0x00000000,0x00000000,0x00000249,0x00001249,
+ 0x00009252,0x00009292,0x00009292,0x00009292},
+ {0x00000000,0x00000000,0x00000249,0x00001249,
+ 0x00009252,0x00009292,0x00009493,0x00009493},
+ {0x00000000,0x00000000,0x00000249,0x0000924a,
+ 0x00009252,0x00009493,0x00009493,0x00009493},
+ {0x00000000,0x00000000,0x00000249,0x0000924a,
+ 0x00009292,0x00009493,0x00009493,0x00009493},
+ {0x00000000,0x00000000,0x00000249,0x00009252,
+ 0x00009492,0x00009493,0x0000a49b,0x0000a49b},
+ {0x00000000,0x00000000,0x00001249,0x00009292,
+ 0x00009492,0x000124db,0x000124db,0x000124db},
+ {0x00000000,0x00000000,0x0000924a,0x00009493,
+ 0x0000a493,0x000126dc,0x000126dc,0x000126dc},
+ {0x00000000,0x00000000,0x00000000,0x00000000,
+ 0x00000000,0x00000000,0x00000000,0x00000000}
+ },
+ { /* version 1, passes 1 */
+ {0x00000000,0x00000000,0x00000000,0x00000000,
+ 0x00000000,0x00000000,0x00000000,0x00000000},
+ {0x00000000,0x00000000,0x00000049,0x00000009,
+ 0x00000049,0x00000009,0x00000001,0x00000000},
+ {0x00000000,0x00000000,0x00000049,0x00000049,
+ 0x00000049,0x00000049,0x00000049,0x00000000},
+ {0x00000000,0x00000000,0x00000249,0x00000049,
+ 0x00000249,0x00000049,0x0000024a,0x00000001},
+ {0x00000000,0x00000000,0x00000249,0x00000249,
+ 0x00000249,0x00000249,0x0000024a,0x00000001},
+ {0x00000000,0x00000000,0x00000249,0x00000249,
+ 0x00000249,0x00000249,0x0000024a,0x00000001},
+ {0x00000000,0x00000000,0x00000249,0x00000249,
+ 0x00000249,0x00000249,0x0000024a,0x00000009},
+ {0x00000000,0x00000000,0x00000249,0x00000249,
+ 0x0000124a,0x0000124a,0x0000024a,0x00000009},
+ {0x00000000,0x00000000,0x00000249,0x00000249,
+ 0x0000124a,0x0000124a,0x0000024a,0x00000009},
+ {0x00000000,0x00000000,0x00001249,0x00001249,
+ 0x0000124a,0x00009252,0x00001252,0x00000049},
+ {0x00000000,0x00000000,0x00001249,0x00001249,
+ 0x0000124a,0x00009292,0x00001252,0x00000049},
+ {0x00000000,0x00000000,0x00001249,0x00001249,
+ 0x0000124a,0x00009292,0x00001252,0x00000049},
+ {0x00000000,0x00000000,0x00001249,0x00001249,
+ 0x00009252,0x00009292,0x00001252,0x0000024a},
+ {0x00000000,0x00000000,0x00001249,0x00001249,
+ 0x00009292,0x00009292,0x00001252,0x0000024a},
+ {0x00000000,0x00000000,0x0000924a,0x0000924a,
+ 0x00009492,0x00009493,0x00009292,0x00001252},
+ {0x00000000,0x00000000,0x00000000,0x00000000,
+ 0x00000000,0x00000000,0x00000000,0x00000000}
+ }
+ },
+ { /* version 2 */
+ { /* version 2, passes 0 */
+ {0x00000000,0x00000000,0x00000049,0x00000049,
+ 0x00000049,0x00000049,0x0000024a,0x0000024a},
+ {0x00000000,0x00000000,0x00000249,0x00000249,
+ 0x00000249,0x0000124a,0x00001252,0x00009292},
+ {0x00000000,0x00000000,0x00000249,0x00000249,
+ 0x0000124a,0x00009252,0x00009292,0x00009292},
+ {0x00000000,0x00000000,0x00000249,0x00001249,
+ 0x0000124a,0x00009292,0x00009493,0x00009493},
+ {0x00000000,0x00000000,0x00000249,0x00001249,
+ 0x00009252,0x00009493,0x00009493,0x0000a49b},
+ {0x00000000,0x00000000,0x00000249,0x0000924a,
+ 0x00009292,0x00009493,0x0000a49b,0x0000a49b},
+ {0x00000000,0x00000000,0x00001249,0x0000924a,
+ 0x00009292,0x00009493,0x0000a49b,0x000124db},
+ {0x00000000,0x00000000,0x00001249,0x00009252,
+ 0x00009492,0x0000a49b,0x0000a49b,0x000124db},
+ {0x00000000,0x00000000,0x00001249,0x00009292,
+ 0x00009492,0x000124db,0x000124db,0x000126dc},
+ {0x00000000,0x00000000,0x00001249,0x00009292,
+ 0x0000a493,0x000124db,0x000126dc,0x000126dc},
+ {0x00000000,0x00000000,0x00001249,0x00009493,
+ 0x0000a493,0x000124db,0x000126dc,0x000136e4},
+ {0x00000000,0x00000000,0x00001249,0x00009493,
+ 0x0000a493,0x000126dc,0x000136e4,0x000136e4},
+ {0x00000000,0x00000000,0x0000924a,0x00009493,
+ 0x0001249b,0x000126dc,0x000136e4,0x000136e4},
+ {0x00000000,0x00000000,0x0000924a,0x0000a49b,
+ 0x000124db,0x000136e4,0x000136e4,0x0001b724},
+ {0x00000000,0x00000000,0x00009252,0x000124db,
+ 0x000126dc,0x0001b724,0x0001b725,0x0001b925},
+ {0x00000000,0x00000000,0x00000000,0x00000000,
+ 0x00000000,0x00000000,0x00000000,0x00000000}
+ },
+ { /* version 2, passes 1 */
+ {0x00000000,0x00000000,0x00000049,0x00000049,
+ 0x00000049,0x00000049,0x00000049,0x00000049},
+ {0x00000000,0x00000000,0x00000249,0x00000249,
+ 0x00000249,0x00000249,0x0000024a,0x00000049},
+ {0x00000000,0x00000000,0x00001249,0x00000249,
+ 0x0000124a,0x0000124a,0x00001252,0x00000049},
+ {0x00000000,0x00000000,0x00001249,0x00001249,
+ 0x0000124a,0x0000124a,0x00009292,0x0000024a},
+ {0x00000000,0x00000000,0x00001249,0x00001249,
+ 0x00009252,0x00009292,0x00009292,0x0000024a},
+ {0x00000000,0x00000000,0x00001249,0x00001249,
+ 0x00009252,0x00009292,0x0000a49b,0x0000024a},
+ {0x00000000,0x00000000,0x00001249,0x00001249,
+ 0x00009292,0x00009493,0x0000a49b,0x00001252},
+ {0x00000000,0x00000000,0x00001249,0x00001249,
+ 0x00009292,0x00009493,0x0000a49b,0x00001252},
+ {0x00000000,0x00000000,0x00001249,0x0000924a,
+ 0x00009492,0x0000a49b,0x0000a49b,0x00001252},
+ {0x00000000,0x00000000,0x00001249,0x00009252,
+ 0x00009492,0x0000a49b,0x0000a49b,0x00009292},
+ {0x00000000,0x00000000,0x00001249,0x00009292,
+ 0x00009492,0x0000a49b,0x0000a49b,0x00009292},
+ {0x00000000,0x00000000,0x00001249,0x00009493,
+ 0x0000a493,0x0000a49b,0x0000a49b,0x00009292},
+ {0x00000000,0x00000000,0x00001249,0x00009493,
+ 0x0000a493,0x0000a49b,0x0000a49b,0x00009493},
+ {0x00000000,0x00000000,0x0000924a,0x00009493,
+ 0x0000a493,0x000124db,0x0000a49b,0x00009493},
+ {0x00000000,0x00000000,0x00009252,0x0000a49b,
+ 0x0001249b,0x000126dc,0x000124db,0x0000a49b},
+ {0x00000000,0x00000000,0x00000000,0x00000000,
+ 0x00000000,0x00000000,0x00000000,0x00000000}
+ }
+ },
+ { /* version 3 */
+ { /* version 3, passes 0 */
+ {0x00000000,0x00000000,0x00000249,0x00000249,
+ 0x0000124a,0x0000124a,0x00009292,0x00009292},
+ {0x00000000,0x00000000,0x00001249,0x00001249,
+ 0x00009292,0x00009493,0x0000a49b,0x0000a49b},
+ {0x00000000,0x00000000,0x00001249,0x0000924a,
+ 0x00009492,0x0000a49b,0x0000a49b,0x000124db},
+ {0x00000000,0x00000000,0x00001249,0x00009292,
+ 0x00009492,0x000124db,0x000126dc,0x000126dc},
+ {0x00000000,0x00000000,0x00001249,0x00009493,
+ 0x0000a493,0x000124db,0x000126dc,0x000126dc},
+ {0x00000000,0x00000000,0x00001249,0x00009493,
+ 0x0000a493,0x000126dc,0x000136e4,0x000136e4},
+ {0x00000000,0x00000000,0x00001249,0x00009493,
+ 0x0000a493,0x000126dc,0x000136e4,0x0001b724},
+ {0x00000000,0x00000000,0x00001249,0x00009493,
+ 0x0001249b,0x000126dc,0x000136e4,0x0001b724},
+ {0x00000000,0x00000000,0x0000924a,0x0000a49b,
+ 0x0001249b,0x000126dc,0x000136e4,0x0001b724},
+ {0x00000000,0x00000000,0x0000924a,0x0000a49b,
+ 0x0001249b,0x000136e4,0x0001b725,0x0001b724},
+ {0x00000000,0x00000000,0x0000924a,0x0000a49b,
+ 0x000124db,0x000136e4,0x0001b725,0x0001b925},
+ {0x00000000,0x00000000,0x00009292,0x0000a49b,
+ 0x000126dc,0x000136e4,0x0001b92d,0x0001b925},
+ {0x00000000,0x00000000,0x00009292,0x0000a49b,
+ 0x000126dc,0x0001b724,0x0001b92d,0x0001c92d},
+ {0x00000000,0x00000000,0x00009492,0x000124db,
+ 0x000126dc,0x0001b724,0x0001c96e,0x0001c92d},
+ {0x00000000,0x00000000,0x0000a492,0x000126db,
+ 0x000136e4,0x0001b925,0x00025bb6,0x00024b77},
+ {0x00000000,0x00000000,0x00000000,0x00000000,
+ 0x00000000,0x00000000,0x00000000,0x00000000}
+ },
+ { /* version 3, passes 1 */
+ {0x00000000,0x00000000,0x00001249,0x00000249,
+ 0x0000124a,0x0000124a,0x00001252,0x00001252},
+ {0x00000000,0x00000000,0x00001249,0x00001249,
+ 0x00009252,0x00009292,0x00009292,0x00001252},
+ {0x00000000,0x00000000,0x00001249,0x0000924a,
+ 0x00009492,0x00009493,0x0000a49b,0x00001252},
+ {0x00000000,0x00000000,0x00001249,0x00009252,
+ 0x00009492,0x0000a49b,0x0000a49b,0x00009292},
+ {0x00000000,0x00000000,0x00001249,0x00009292,
+ 0x00009492,0x0000a49b,0x0000a49b,0x00009292},
+ {0x00000000,0x00000000,0x00001249,0x00009493,
+ 0x0000a493,0x0000a49b,0x000126dc,0x00009292},
+ {0x00000000,0x00000000,0x0000924a,0x00009493,
+ 0x0000a493,0x0000a49b,0x000126dc,0x00009493},
+ {0x00000000,0x00000000,0x0000924a,0x00009493,
+ 0x0000a493,0x0000a49b,0x000126dc,0x00009493},
+ {0x00000000,0x00000000,0x0000924a,0x00009493,
+ 0x0000a493,0x000124db,0x000126dc,0x00009493},
+ {0x00000000,0x00000000,0x0000924a,0x00009493,
+ 0x0000a493,0x000124db,0x000126dc,0x0000a49b},
+ {0x00000000,0x00000000,0x0000924a,0x0000a49b,
+ 0x0000a493,0x000124db,0x000126dc,0x0000a49b},
+ {0x00000000,0x00000000,0x0000924a,0x0000a49b,
+ 0x0001249b,0x000126dc,0x000126dc,0x0000a49b},
+ {0x00000000,0x00000000,0x0000924a,0x0000a49b,
+ 0x000124db,0x000136e4,0x000126dc,0x000124db},
+ {0x00000000,0x00000000,0x00009492,0x0000a49b,
+ 0x000136e4,0x000136e4,0x000126dc,0x000124db},
+ {0x00000000,0x00000000,0x0000a492,0x000124db,
+ 0x0001b724,0x0001b724,0x000136e4,0x000126dc},
+ {0x00000000,0x00000000,0x00000000,0x00000000,
+ 0x00000000,0x00000000,0x00000000,0x00000000}
+ }
+ },
+ { /* version 4 */
+ { /* version 4, passes 0 */
+ {0x00000000,0x00000000,0x00000049,0x00000049,
+ 0x00000049,0x00000049,0x00000049,0x00000049},
+ {0x00000000,0x00000000,0x00000249,0x00000049,
+ 0x00000249,0x00000249,0x0000024a,0x00000049},
+ {0x00000000,0x00000000,0x00000249,0x00000249,
+ 0x0000124a,0x00009252,0x00001252,0x0000024a},
+ {0x00000000,0x00000000,0x00001249,0x00001249,
+ 0x00009252,0x00009292,0x00009493,0x00001252},
+ {0x00000000,0x00000000,0x00001249,0x0000924a,
+ 0x00009292,0x00009493,0x00009493,0x00001252},
+ {0x00000000,0x00000000,0x00001249,0x00009292,
+ 0x00009492,0x0000a49b,0x0000a49b,0x00009292},
+ {0x00000000,0x00000000,0x00001249,0x00009493,
+ 0x0000a493,0x000124db,0x000124db,0x00009493},
+ {0x00000000,0x00000000,0x0000924a,0x00009493,
+ 0x0000a493,0x000124db,0x000126dc,0x0000a49b},
+ {0x00000000,0x00000000,0x0000924a,0x00009493,
+ 0x0000a493,0x000124db,0x000126dc,0x0000a49b},
+ {0x00000000,0x00000000,0x0000924a,0x00009493,
+ 0x0001249b,0x000126dc,0x000126dc,0x000124db},
+ {0x00000000,0x00000000,0x00009252,0x00009493,
+ 0x000124db,0x000136e4,0x000136e4,0x000126dc},
+ {0x00000000,0x00000000,0x00009252,0x0000a49b,
+ 0x000124db,0x000136e4,0x000136e4,0x000126dc},
+ {0x00000000,0x00000000,0x00009292,0x0000a49b,
+ 0x000126dc,0x000136e4,0x000136e4,0x000136e4},
+ {0x00000000,0x00000000,0x00009492,0x0000a49b,
+ 0x000126dc,0x0001b724,0x0001b725,0x0001b724},
+ {0x00000000,0x00000000,0x0000a492,0x000124db,
+ 0x000136e4,0x0001b925,0x0001b92d,0x0001b925},
+ {0x00000000,0x00000000,0x00000000,0x00000000,
+ 0x00000000,0x00000000,0x00000000,0x00000000}
+ },
+ { /* version 4, passes 1 */
+ {0x00000000,0x00000000,0x00000249,0x00000049,
+ 0x00000009,0x00000009,0x00000009,0x00000009},
+ {0x00000000,0x00000000,0x00000249,0x00000249,
+ 0x00000049,0x00000049,0x00000009,0x00000009},
+ {0x00000000,0x00000000,0x00001249,0x00001249,
+ 0x0000124a,0x00000249,0x00000049,0x00000049},
+ {0x00000000,0x00000000,0x00001249,0x00001249,
+ 0x0000124a,0x0000124a,0x00000049,0x00000049},
+ {0x00000000,0x00000000,0x00001249,0x00001249,
+ 0x00009252,0x0000124a,0x0000024a,0x0000024a},
+ {0x00000000,0x00000000,0x00001249,0x0000924a,
+ 0x00009252,0x0000124a,0x0000024a,0x0000024a},
+ {0x00000000,0x00000000,0x00001249,0x00009292,
+ 0x00009492,0x00009252,0x00001252,0x00001252},
+ {0x00000000,0x00000000,0x00001249,0x00009493,
+ 0x0000a493,0x00009292,0x00009292,0x00001252},
+ {0x00000000,0x00000000,0x0000924a,0x00009493,
+ 0x0000a493,0x00009292,0x00009292,0x00009292},
+ {0x00000000,0x00000000,0x0000924a,0x00009493,
+ 0x0000a493,0x00009493,0x00009493,0x00009292},
+ {0x00000000,0x00000000,0x0000924a,0x0000a49b,
+ 0x0000a493,0x0000a49b,0x00009493,0x00009493},
+ {0x00000000,0x00000000,0x0000924a,0x0000a49b,
+ 0x0000a493,0x0000a49b,0x0000a49b,0x00009493},
+ {0x00000000,0x00000000,0x0000924a,0x0000a49b,
+ 0x0001249b,0x000124db,0x0000a49b,0x0000a49b},
+ {0x00000000,0x00000000,0x0000924a,0x0000a49b,
+ 0x000136e4,0x000126dc,0x000124db,0x0000a49b},
+ {0x00000000,0x00000000,0x00009252,0x000124db,
+ 0x0001b724,0x000136e4,0x000126dc,0x000124db},
+ {0x00000000,0x00000000,0x00000000,0x00000000,
+ 0x00000000,0x00000000,0x00000000,0x00000000}
+ }
+ },
+ { /* version 5 */
+ { /* version 5, passes 0 */
+ {0x00000000,0x00000000,0x00000249,0x00000249,
+ 0x00000249,0x00000249,0x00001252,0x00001252},
+ {0x00000000,0x00000000,0x00001249,0x00001249,
+ 0x00009252,0x00009292,0x00009292,0x00001252},
+ {0x00000000,0x00000000,0x00001249,0x0000924a,
+ 0x00009492,0x0000a49b,0x0000a49b,0x00009292},
+ {0x00000000,0x00000000,0x00001249,0x00009493,
+ 0x0000a493,0x0000a49b,0x000124db,0x00009493},
+ {0x00000000,0x00000000,0x00001249,0x00009493,
+ 0x0000a493,0x000124db,0x000126dc,0x00009493},
+ {0x00000000,0x00000000,0x0000924a,0x00009493,
+ 0x0000a493,0x000126dc,0x000126dc,0x0000a49b},
+ {0x00000000,0x00000000,0x0000924a,0x0000a49b,
+ 0x0001249b,0x000126dc,0x000136e4,0x000124db},
+ {0x00000000,0x00000000,0x0000924a,0x0000a49b,
+ 0x000126dc,0x000136e4,0x000136e4,0x000126dc},
+ {0x00000000,0x00000000,0x00009292,0x0000a49b,
+ 0x000126dc,0x000136e4,0x000136e4,0x000126dc},
+ {0x00000000,0x00000000,0x00009292,0x0000a49b,
+ 0x000126dc,0x0001b724,0x0001b725,0x000136e4},
+ {0x00000000,0x00000000,0x00009292,0x0000a49b,
+ 0x000136e4,0x0001b724,0x0001b92d,0x0001b724},
+ {0x00000000,0x00000000,0x00009492,0x0000a49b,
+ 0x000136e4,0x0001b724,0x0001b92d,0x0001b724},
+ {0x00000000,0x00000000,0x00009492,0x000124db,
+ 0x000136e4,0x0001b925,0x0001c96e,0x0001b925},
+ {0x00000000,0x00000000,0x00009492,0x000124db,
+ 0x0001b724,0x0001b925,0x0001c96e,0x0001c92d},
+ {0x00000000,0x00000000,0x0000a492,0x000126db,
+ 0x0001c924,0x0002496d,0x00025bb6,0x00024b77},
+ {0x00000000,0x00000000,0x00000000,0x00000000,
+ 0x00000000,0x00000000,0x00000000,0x00000000}
+ },
+ { /* version 5, passes 1 */
+ {0x00000000,0x00000000,0x00001249,0x00000249,
+ 0x00000249,0x00000249,0x0000024a,0x0000024a},
+ {0x00000000,0x00000000,0x00001249,0x00001249,
+ 0x0000124a,0x0000124a,0x0000024a,0x0000024a},
+ {0x00000000,0x00000000,0x00001249,0x0000924a,
+ 0x00009252,0x00009252,0x0000024a,0x0000024a},
+ {0x00000000,0x00000000,0x00001249,0x00009292,
+ 0x00009492,0x0000a49b,0x00001252,0x00001252},
+ {0x00000000,0x00000000,0x0000924a,0x00009493,
+ 0x0000a493,0x0000a49b,0x00001252,0x00001252},
+ {0x00000000,0x00000000,0x0000924a,0x00009493,
+ 0x0000a493,0x0000a49b,0x00009292,0x00001252},
+ {0x00000000,0x00000000,0x0000924a,0x0000a49b,
+ 0x0000a493,0x0000a49b,0x00009292,0x00009292},
+ {0x00000000,0x00000000,0x0000924a,0x0000a49b,
+ 0x0000a493,0x0000a49b,0x00009493,0x00009292},
+ {0x00000000,0x00000000,0x0000924a,0x0000a49b,
+ 0x0001249b,0x000124db,0x00009493,0x00009292},
+ {0x00000000,0x00000000,0x0000924a,0x0000a49b,
+ 0x0001249b,0x000124db,0x00009493,0x00009493},
+ {0x00000000,0x00000000,0x0000924a,0x0000a49b,
+ 0x000124db,0x000124db,0x0000a49b,0x00009493},
+ {0x00000000,0x00000000,0x0000924a,0x000124db,
+ 0x000126dc,0x000126dc,0x0000a49b,0x00009493},
+ {0x00000000,0x00000000,0x0000924a,0x000124db,
+ 0x000136e4,0x000126dc,0x000124db,0x0000a49b},
+ {0x00000000,0x00000000,0x00009292,0x000124db,
+ 0x000136e4,0x000126dc,0x000124db,0x0000a49b},
+ {0x00000000,0x00000000,0x00009492,0x000126db,
+ 0x0001b724,0x000136e4,0x000126dc,0x000124db},
+ {0x00000000,0x00000000,0x00000000,0x00000000,
+ 0x00000000,0x00000000,0x00000000,0x00000000}
+ }
+ },
+ { /* version 6 */
+ { /* version 6, passes 0 */
+ {0x00000000,0x00000000,0x00001249,0x00001249,
+ 0x00009252,0x00009292,0x00009493,0x00009493},
+ {0x00000000,0x00000000,0x00001249,0x00009292,
+ 0x0000a493,0x0000a49b,0x0000a49b,0x00009493},
+ {0x00000000,0x00000000,0x00001249,0x00009493,
+ 0x0000a493,0x000124db,0x000124db,0x0000a49b},
+ {0x00000000,0x00000000,0x0000924a,0x00009493,
+ 0x0000a493,0x000126dc,0x000126dc,0x0000a49b},
+ {0x00000000,0x00000000,0x0000924a,0x0000a49b,
+ 0x0001249b,0x000126dc,0x000136e4,0x000124db},
+ {0x00000000,0x00000000,0x0000924a,0x0000a49b,
+ 0x000126dc,0x000136e4,0x000136e4,0x000126dc},
+ {0x00000000,0x00000000,0x00009292,0x0000a49b,
+ 0x000126dc,0x0001b724,0x0001b725,0x000126dc},
+ {0x00000000,0x00000000,0x00009292,0x0000a49b,
+ 0x000136e4,0x0001b724,0x0001b92d,0x000136e4},
+ {0x00000000,0x00000000,0x00009492,0x0000a49b,
+ 0x000136e4,0x0001b724,0x0001b92d,0x0001b724},
+ {0x00000000,0x00000000,0x00009492,0x000124db,
+ 0x000136e4,0x0001b724,0x0001b92d,0x0001b724},
+ {0x00000000,0x00000000,0x00009492,0x000124db,
+ 0x000136e4,0x0001b925,0x0001b92d,0x0001b925},
+ {0x00000000,0x00000000,0x00009492,0x000124db,
+ 0x0001b724,0x0001b925,0x0001c96e,0x0001c92d},
+ {0x00000000,0x00000000,0x0000a492,0x000124db,
+ 0x0001b724,0x0001c92d,0x0001c96e,0x0001c92d},
+ {0x00000000,0x00000000,0x0000a492,0x000124db,
+ 0x0001b724,0x0001c92d,0x00024b76,0x0002496e},
+ {0x00000000,0x00000000,0x00012492,0x000126db,
+ 0x0001c924,0x00024b6d,0x0002ddb6,0x00025bbf},
+ {0x00000000,0x00000000,0x00000000,0x00000000,
+ 0x00000000,0x00000000,0x00000000,0x00000000}
+ },
+ { /* version 6, passes 1 */
+ {0x00000000,0x00000000,0x00001249,0x00001249,
+ 0x0000124a,0x0000124a,0x00001252,0x00001252},
+ {0x00000000,0x00000000,0x00001249,0x00009292,
+ 0x00009492,0x00009252,0x00001252,0x00001252},
+ {0x00000000,0x00000000,0x0000924a,0x00009493,
+ 0x0000a493,0x00009292,0x00001252,0x00001252},
+ {0x00000000,0x00000000,0x0000924a,0x0000a49b,
+ 0x0000a493,0x0000a49b,0x00009292,0x00009292},
+ {0x00000000,0x00000000,0x0000924a,0x0000a49b,
+ 0x0000a493,0x0000a49b,0x00009292,0x00009292},
+ {0x00000000,0x00000000,0x0000924a,0x0000a49b,
+ 0x0001249b,0x0000a49b,0x00009493,0x00009292},
+ {0x00000000,0x00000000,0x0000924a,0x0000a49b,
+ 0x000124db,0x000124db,0x00009493,0x00009493},
+ {0x00000000,0x00000000,0x0000924a,0x0000a49b,
+ 0x000124db,0x000124db,0x0000a49b,0x00009493},
+ {0x00000000,0x00000000,0x0000924a,0x000124db,
+ 0x000126dc,0x000124db,0x0000a49b,0x00009493},
+ {0x00000000,0x00000000,0x0000924a,0x000124db,
+ 0x000126dc,0x000126dc,0x0000a49b,0x0000a49b},
+ {0x00000000,0x00000000,0x0000924a,0x000124db,
+ 0x000136e4,0x000126dc,0x000124db,0x0000a49b},
+ {0x00000000,0x00000000,0x00009492,0x000126db,
+ 0x000136e4,0x000126dc,0x000124db,0x0000a49b},
+ {0x00000000,0x00000000,0x00009492,0x000126db,
+ 0x0001b724,0x000136e4,0x000126dc,0x000124db},
+ {0x00000000,0x00000000,0x00009492,0x000126db,
+ 0x0001b724,0x000136e4,0x000126dc,0x000124db},
+ {0x00000000,0x00000000,0x0000a492,0x000136db,
+ 0x0001c924,0x0001b724,0x000136e4,0x000126dc},
+ {0x00000000,0x00000000,0x00000000,0x00000000,
+ 0x00000000,0x00000000,0x00000000,0x00000000}
+ }
+ },
+ { /* version 7 */
+ { /* version 7, passes 0 */
+ {0x00000000,0x00000000,0x00001249,0x00001249,
+ 0x00009252,0x00009292,0x00009493,0x00009493},
+ {0x00000000,0x00000000,0x00001249,0x00009493,
+ 0x0000a493,0x000124db,0x000126dc,0x00009493},
+ {0x00000000,0x00000000,0x00001249,0x0000a49b,
+ 0x0001249b,0x000126dc,0x000126dc,0x0000a49b},
+ {0x00000000,0x00000000,0x0000924a,0x0000a49b,
+ 0x0001249b,0x000126dc,0x000136e4,0x0000a49b},
+ {0x00000000,0x00000000,0x0000924a,0x0000a49b,
+ 0x000126dc,0x000136e4,0x0001b725,0x000124db},
+ {0x00000000,0x00000000,0x00009292,0x0000a49b,
+ 0x000136e4,0x0001b724,0x0001b725,0x000126dc},
+ {0x00000000,0x00000000,0x00009292,0x000124db,
+ 0x000136e4,0x0001b724,0x0001b725,0x000126dc},
+ {0x00000000,0x00000000,0x00009492,0x000124db,
+ 0x000136e4,0x0001b724,0x0001c96e,0x000136e4},
+ {0x00000000,0x00000000,0x00009492,0x000124db,
+ 0x000136e4,0x0001c92d,0x0001c96e,0x0001b724},
+ {0x00000000,0x00000000,0x0000a492,0x000124db,
+ 0x000136e4,0x0001c92d,0x0001c96e,0x0001b724},
+ {0x00000000,0x00000000,0x0000a492,0x000124db,
+ 0x0001b724,0x0001c92d,0x0001c96e,0x0001b925},
+ {0x00000000,0x00000000,0x0000a492,0x000126db,
+ 0x0001b724,0x0001c92d,0x00024b76,0x0001c92d},
+ {0x00000000,0x00000000,0x0000a492,0x000126db,
+ 0x0001b924,0x0001c92d,0x00024b76,0x0001c92d},
+ {0x00000000,0x00000000,0x0000a492,0x000126db,
+ 0x0001b924,0x0001c92d,0x00024b76,0x0002496e},
+ {0x00000000,0x00000000,0x00012492,0x000136db,
+ 0x00024924,0x00024b6d,0x0002ddb6,0x00025bbf},
+ {0x00000000,0x00000000,0x00000000,0x00000000,
+ 0x00000000,0x00000000,0x00000000,0x00000000}
+ },
+ { /* version 7, passes 1 */
+ {0x00000000,0x00000000,0x00001249,0x00001249,
+ 0x0000124a,0x0000124a,0x00001252,0x00001252},
+ {0x00000000,0x00000000,0x0000924a,0x00009493,
+ 0x00009492,0x00009292,0x00001252,0x00001252},
+ {0x00000000,0x00000000,0x0000924a,0x0000a49b,
+ 0x0000a493,0x0000a49b,0x00001252,0x00001252},
+ {0x00000000,0x00000000,0x0000924a,0x0000a49b,
+ 0x0000a493,0x0000a49b,0x00009292,0x00009292},
+ {0x00000000,0x00000000,0x0000924a,0x0000a49b,
+ 0x0000a493,0x0000a49b,0x00009292,0x00009292},
+ {0x00000000,0x00000000,0x0000924a,0x0000a49b,
+ 0x000126dc,0x0000a49b,0x00009493,0x00009292},
+ {0x00000000,0x00000000,0x0000924a,0x000124db,
+ 0x000126dc,0x000124db,0x00009493,0x00009493},
+ {0x00000000,0x00000000,0x0000924a,0x000124db,
+ 0x000136e4,0x000124db,0x0000a49b,0x00009493},
+ {0x00000000,0x00000000,0x0000924a,0x000136db,
+ 0x0001b724,0x000124db,0x0000a49b,0x00009493},
+ {0x00000000,0x00000000,0x0000924a,0x000136db,
+ 0x0001b724,0x000126dc,0x0000a49b,0x0000a49b},
+ {0x00000000,0x00000000,0x00009292,0x000136db,
+ 0x0001b724,0x000126dc,0x000124db,0x0000a49b},
+ {0x00000000,0x00000000,0x00009492,0x000136db,
+ 0x0001b724,0x000126dc,0x000124db,0x0000a49b},
+ {0x00000000,0x00000000,0x0000a492,0x000136db,
+ 0x0001b724,0x000136e4,0x000126dc,0x000124db},
+ {0x00000000,0x00000000,0x0000a492,0x000136db,
+ 0x0001b724,0x000136e4,0x000126dc,0x000124db},
+ {0x00000000,0x00000000,0x00012492,0x0001b6db,
+ 0x0001c924,0x0001b724,0x000136e4,0x000126dc},
+ {0x00000000,0x00000000,0x00000000,0x00000000,
+ 0x00000000,0x00000000,0x00000000,0x00000000}
+ }
+ }
+};
+
--- /dev/null
+/* Linux driver for Philips webcam
+ (C) 2004 Luc Saillard (luc@saillard.org)
+
+ NOTE: this version of pwc is an unofficial (modified) release of pwc & pcwx
+ driver and thus may have bugs that are not present in the original version.
+ Please send bug reports and support requests to <luc@saillard.org>.
+ The decompression routines have been implemented by reverse-engineering the
+ Nemosoft binary pwcx module. Caveat emptor.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*/
+
+
+/* This table contains entries for the 675/680/690 (Timon) camera, with
+ 4 different qualities (no compression, low, medium, high).
+ It lists the bandwidth requirements for said mode by its alternate interface
+ number. An alternate of 0 means that the mode is unavailable.
+
+ There are 6 * 6 * 4 entries:
+ 6 different resolutions subqcif, qsif, qcif, sif, cif, vga
+ 6 framerates: 5, 10, 15, 20, 25, 30
+ 4 compression modes: none, low, medium, high
+
+ When an uncompressed mode is not available, the next available compressed mode
+ will be chosen (unless the decompressor is absent). Sometimes there are only
+ 1 or 2 compressed modes available; in that case entries are duplicated.
+*/
+
+#include "pwc-timon.h"
+
+/*
+ * Per-mode USB configuration table for the Timon (675/680/690) cameras,
+ * indexed as [resolution][framerate][compression quality].
+ * The first field of each entry is the USB alternate interface number; an
+ * alternate of 0 ({0, } below) means that mode combination is unavailable.
+ * NOTE(review): the remaining field names are declared by
+ * struct Timon_table_entry in pwc-timon.h (presumably a packet size, a
+ * band length, and a 13-byte camera command block) — verify against the
+ * header before relying on them.  The byte arrays are opaque values sent
+ * to the camera firmware; do not edit them by hand.
+ */
+const struct Timon_table_entry Timon_table[PSZ_MAX][6][4] =
+{
+ /* SQCIF */
+ {
+ /* 5 fps */
+ {
+ {1, 140, 0, {0x05, 0xF4, 0x04, 0x00, 0x00, 0x00, 0x00, 0x13, 0x00, 0x8C, 0xFC, 0x80, 0x02}},
+ {1, 140, 0, {0x05, 0xF4, 0x04, 0x00, 0x00, 0x00, 0x00, 0x13, 0x00, 0x8C, 0xFC, 0x80, 0x02}},
+ {1, 140, 0, {0x05, 0xF4, 0x04, 0x00, 0x00, 0x00, 0x00, 0x13, 0x00, 0x8C, 0xFC, 0x80, 0x02}},
+ {1, 140, 0, {0x05, 0xF4, 0x04, 0x00, 0x00, 0x00, 0x00, 0x13, 0x00, 0x8C, 0xFC, 0x80, 0x02}},
+ },
+ /* 10 fps */
+ {
+ {2, 280, 0, {0x04, 0xF4, 0x04, 0x00, 0x00, 0x00, 0x00, 0x13, 0x00, 0x18, 0xA9, 0x80, 0x02}},
+ {2, 280, 0, {0x04, 0xF4, 0x04, 0x00, 0x00, 0x00, 0x00, 0x13, 0x00, 0x18, 0xA9, 0x80, 0x02}},
+ {2, 280, 0, {0x04, 0xF4, 0x04, 0x00, 0x00, 0x00, 0x00, 0x13, 0x00, 0x18, 0xA9, 0x80, 0x02}},
+ {2, 280, 0, {0x04, 0xF4, 0x04, 0x00, 0x00, 0x00, 0x00, 0x13, 0x00, 0x18, 0xA9, 0x80, 0x02}},
+ },
+ /* 15 fps */
+ {
+ {3, 410, 0, {0x03, 0xF4, 0x04, 0x00, 0x00, 0x00, 0x00, 0x13, 0x00, 0x9A, 0x71, 0x80, 0x02}},
+ {3, 410, 0, {0x03, 0xF4, 0x04, 0x00, 0x00, 0x00, 0x00, 0x13, 0x00, 0x9A, 0x71, 0x80, 0x02}},
+ {3, 410, 0, {0x03, 0xF4, 0x04, 0x00, 0x00, 0x00, 0x00, 0x13, 0x00, 0x9A, 0x71, 0x80, 0x02}},
+ {3, 410, 0, {0x03, 0xF4, 0x04, 0x00, 0x00, 0x00, 0x00, 0x13, 0x00, 0x9A, 0x71, 0x80, 0x02}},
+ },
+ /* 20 fps */
+ {
+ {4, 559, 0, {0x02, 0xF4, 0x04, 0x00, 0x00, 0x00, 0x00, 0x13, 0x00, 0x2F, 0x56, 0x80, 0x02}},
+ {4, 559, 0, {0x02, 0xF4, 0x04, 0x00, 0x00, 0x00, 0x00, 0x13, 0x00, 0x2F, 0x56, 0x80, 0x02}},
+ {4, 559, 0, {0x02, 0xF4, 0x04, 0x00, 0x00, 0x00, 0x00, 0x13, 0x00, 0x2F, 0x56, 0x80, 0x02}},
+ {4, 559, 0, {0x02, 0xF4, 0x04, 0x00, 0x00, 0x00, 0x00, 0x13, 0x00, 0x2F, 0x56, 0x80, 0x02}},
+ },
+ /* 25 fps */
+ {
+ {5, 659, 0, {0x01, 0xF4, 0x04, 0x00, 0x00, 0x00, 0x00, 0x13, 0x00, 0x93, 0x46, 0x80, 0x02}},
+ {5, 659, 0, {0x01, 0xF4, 0x04, 0x00, 0x00, 0x00, 0x00, 0x13, 0x00, 0x93, 0x46, 0x80, 0x02}},
+ {5, 659, 0, {0x01, 0xF4, 0x04, 0x00, 0x00, 0x00, 0x00, 0x13, 0x00, 0x93, 0x46, 0x80, 0x02}},
+ {5, 659, 0, {0x01, 0xF4, 0x04, 0x00, 0x00, 0x00, 0x00, 0x13, 0x00, 0x93, 0x46, 0x80, 0x02}},
+ },
+ /* 30 fps */
+ {
+ {7, 838, 0, {0x00, 0xF4, 0x04, 0x00, 0x00, 0x00, 0x00, 0x13, 0x00, 0x46, 0x3B, 0x80, 0x02}},
+ {7, 838, 0, {0x00, 0xF4, 0x04, 0x00, 0x00, 0x00, 0x00, 0x13, 0x00, 0x46, 0x3B, 0x80, 0x02}},
+ {7, 838, 0, {0x00, 0xF4, 0x04, 0x00, 0x00, 0x00, 0x00, 0x13, 0x00, 0x46, 0x3B, 0x80, 0x02}},
+ {7, 838, 0, {0x00, 0xF4, 0x04, 0x00, 0x00, 0x00, 0x00, 0x13, 0x00, 0x46, 0x3B, 0x80, 0x02}},
+ },
+ },
+ /* QSIF */
+ {
+ /* 5 fps */
+ {
+ {1, 146, 0, {0x2D, 0xF4, 0x04, 0x00, 0x00, 0x00, 0x00, 0x18, 0x00, 0x92, 0xFC, 0xC0, 0x02}},
+ {1, 146, 0, {0x2D, 0xF4, 0x04, 0x00, 0x00, 0x00, 0x00, 0x18, 0x00, 0x92, 0xFC, 0xC0, 0x02}},
+ {1, 146, 0, {0x2D, 0xF4, 0x04, 0x00, 0x00, 0x00, 0x00, 0x18, 0x00, 0x92, 0xFC, 0xC0, 0x02}},
+ {1, 146, 0, {0x2D, 0xF4, 0x04, 0x00, 0x00, 0x00, 0x00, 0x18, 0x00, 0x92, 0xFC, 0xC0, 0x02}},
+ },
+ /* 10 fps */
+ {
+ {2, 291, 0, {0x2C, 0xF4, 0x04, 0x00, 0x00, 0x00, 0x00, 0x18, 0x00, 0x23, 0xA1, 0xC0, 0x02}},
+ {1, 191, 630, {0x2C, 0xF4, 0x05, 0x13, 0xA9, 0x12, 0xE1, 0x17, 0x08, 0xBF, 0xF4, 0xC0, 0x02}},
+ {1, 191, 630, {0x2C, 0xF4, 0x05, 0x13, 0xA9, 0x12, 0xE1, 0x17, 0x08, 0xBF, 0xF4, 0xC0, 0x02}},
+ {1, 191, 630, {0x2C, 0xF4, 0x05, 0x13, 0xA9, 0x12, 0xE1, 0x17, 0x08, 0xBF, 0xF4, 0xC0, 0x02}},
+ },
+ /* 15 fps */
+ {
+ {3, 437, 0, {0x2B, 0xF4, 0x04, 0x00, 0x00, 0x00, 0x00, 0x18, 0x00, 0xB5, 0x6D, 0xC0, 0x02}},
+ {2, 291, 640, {0x2B, 0xF4, 0x05, 0x13, 0xF7, 0x13, 0x2F, 0x13, 0x08, 0x23, 0xA1, 0xC0, 0x02}},
+ {2, 291, 640, {0x2B, 0xF4, 0x05, 0x13, 0xF7, 0x13, 0x2F, 0x13, 0x08, 0x23, 0xA1, 0xC0, 0x02}},
+ {1, 191, 420, {0x2B, 0xF4, 0x0D, 0x0D, 0x1B, 0x0C, 0x53, 0x1E, 0x08, 0xBF, 0xF4, 0xC0, 0x02}},
+ },
+ /* 20 fps */
+ {
+ {4, 588, 0, {0x2A, 0xF4, 0x04, 0x00, 0x00, 0x00, 0x00, 0x18, 0x00, 0x4C, 0x52, 0xC0, 0x02}},
+ {3, 447, 730, {0x2A, 0xF4, 0x05, 0x16, 0xC9, 0x16, 0x01, 0x0E, 0x18, 0xBF, 0x69, 0xC0, 0x02}},
+ {2, 292, 476, {0x2A, 0xF4, 0x0D, 0x0E, 0xD8, 0x0E, 0x10, 0x19, 0x18, 0x24, 0xA1, 0xC0, 0x02}},
+ {1, 192, 312, {0x2A, 0xF4, 0x1D, 0x09, 0xB3, 0x08, 0xEB, 0x1E, 0x18, 0xC0, 0xF4, 0xC0, 0x02}},
+ },
+ /* 25 fps */
+ {
+ {5, 703, 0, {0x29, 0xF4, 0x04, 0x00, 0x00, 0x00, 0x00, 0x18, 0x00, 0xBF, 0x42, 0xC0, 0x02}},
+ {3, 447, 610, {0x29, 0xF4, 0x05, 0x13, 0x0B, 0x12, 0x43, 0x14, 0x18, 0xBF, 0x69, 0xC0, 0x02}},
+ {2, 292, 398, {0x29, 0xF4, 0x0D, 0x0C, 0x6C, 0x0B, 0xA4, 0x1E, 0x18, 0x24, 0xA1, 0xC0, 0x02}},
+ {1, 192, 262, {0x29, 0xF4, 0x25, 0x08, 0x23, 0x07, 0x5B, 0x1E, 0x18, 0xC0, 0xF4, 0xC0, 0x02}},
+ },
+ /* 30 fps */
+ {
+ {8, 873, 0, {0x28, 0xF4, 0x04, 0x00, 0x00, 0x00, 0x00, 0x18, 0x00, 0x69, 0x37, 0xC0, 0x02}},
+ {5, 704, 774, {0x28, 0xF4, 0x05, 0x18, 0x21, 0x17, 0x59, 0x0F, 0x18, 0xC0, 0x42, 0xC0, 0x02}},
+ {3, 448, 492, {0x28, 0xF4, 0x05, 0x0F, 0x5D, 0x0E, 0x95, 0x15, 0x18, 0xC0, 0x69, 0xC0, 0x02}},
+ {2, 291, 320, {0x28, 0xF4, 0x1D, 0x09, 0xFB, 0x09, 0x33, 0x1E, 0x18, 0x23, 0xA1, 0xC0, 0x02}},
+ },
+ },
+ /* QCIF */
+ {
+ /* 5 fps */
+ {
+ {1, 193, 0, {0x0D, 0xF4, 0x04, 0x00, 0x00, 0x00, 0x00, 0x12, 0x00, 0xC1, 0xF4, 0xC0, 0x02}},
+ {1, 193, 0, {0x0D, 0xF4, 0x04, 0x00, 0x00, 0x00, 0x00, 0x12, 0x00, 0xC1, 0xF4, 0xC0, 0x02}},
+ {1, 193, 0, {0x0D, 0xF4, 0x04, 0x00, 0x00, 0x00, 0x00, 0x12, 0x00, 0xC1, 0xF4, 0xC0, 0x02}},
+ {1, 193, 0, {0x0D, 0xF4, 0x04, 0x00, 0x00, 0x00, 0x00, 0x12, 0x00, 0xC1, 0xF4, 0xC0, 0x02}},
+ },
+ /* 10 fps */
+ {
+ {3, 385, 0, {0x0C, 0xF4, 0x04, 0x00, 0x00, 0x00, 0x00, 0x12, 0x00, 0x81, 0x79, 0xC0, 0x02}},
+ {2, 291, 800, {0x0C, 0xF4, 0x05, 0x18, 0xF4, 0x18, 0x18, 0x11, 0x08, 0x23, 0xA1, 0xC0, 0x02}},
+ {2, 291, 800, {0x0C, 0xF4, 0x05, 0x18, 0xF4, 0x18, 0x18, 0x11, 0x08, 0x23, 0xA1, 0xC0, 0x02}},
+ {1, 194, 532, {0x0C, 0xF4, 0x05, 0x10, 0x9A, 0x0F, 0xBE, 0x1B, 0x08, 0xC2, 0xF0, 0xC0, 0x02}},
+ },
+ /* 15 fps */
+ {
+ {4, 577, 0, {0x0B, 0xF4, 0x04, 0x00, 0x00, 0x00, 0x00, 0x12, 0x00, 0x41, 0x52, 0xC0, 0x02}},
+ {3, 447, 818, {0x0B, 0xF4, 0x05, 0x19, 0x89, 0x18, 0xAD, 0x0F, 0x10, 0xBF, 0x69, 0xC0, 0x02}},
+ {2, 292, 534, {0x0B, 0xF4, 0x05, 0x10, 0xA3, 0x0F, 0xC7, 0x19, 0x10, 0x24, 0xA1, 0xC0, 0x02}},
+ {1, 195, 356, {0x0B, 0xF4, 0x15, 0x0B, 0x11, 0x0A, 0x35, 0x1E, 0x10, 0xC3, 0xF0, 0xC0, 0x02}},
+ },
+ /* 20 fps */
+ {
+ {6, 776, 0, {0x0A, 0xF4, 0x04, 0x00, 0x00, 0x00, 0x00, 0x12, 0x00, 0x08, 0x3F, 0xC0, 0x02}},
+ {4, 591, 804, {0x0A, 0xF4, 0x05, 0x19, 0x1E, 0x18, 0x42, 0x0F, 0x18, 0x4F, 0x4E, 0xC0, 0x02}},
+ {3, 447, 608, {0x0A, 0xF4, 0x05, 0x12, 0xFD, 0x12, 0x21, 0x15, 0x18, 0xBF, 0x69, 0xC0, 0x02}},
+ {2, 291, 396, {0x0A, 0xF4, 0x15, 0x0C, 0x5E, 0x0B, 0x82, 0x1E, 0x18, 0x23, 0xA1, 0xC0, 0x02}},
+ },
+ /* 25 fps */
+ {
+ {9, 928, 0, {0x09, 0xF4, 0x04, 0x00, 0x00, 0x00, 0x00, 0x12, 0x00, 0xA0, 0x33, 0xC0, 0x02}},
+ {5, 703, 800, {0x09, 0xF4, 0x05, 0x18, 0xF4, 0x18, 0x18, 0x10, 0x18, 0xBF, 0x42, 0xC0, 0x02}},
+ {3, 447, 508, {0x09, 0xF4, 0x0D, 0x0F, 0xD2, 0x0E, 0xF6, 0x1B, 0x18, 0xBF, 0x69, 0xC0, 0x02}},
+ {2, 292, 332, {0x09, 0xF4, 0x1D, 0x0A, 0x5A, 0x09, 0x7E, 0x1E, 0x18, 0x24, 0xA1, 0xC0, 0x02}},
+ },
+ /* 30 fps */
+ {
+ {0, },
+ {9, 956, 876, {0x08, 0xF4, 0x05, 0x1B, 0x58, 0x1A, 0x7C, 0x0E, 0x20, 0xBC, 0x33, 0x10, 0x02}},
+ {4, 592, 542, {0x08, 0xF4, 0x05, 0x10, 0xE4, 0x10, 0x08, 0x17, 0x20, 0x50, 0x4E, 0x10, 0x02}},
+ {2, 291, 266, {0x08, 0xF4, 0x25, 0x08, 0x48, 0x07, 0x6C, 0x1E, 0x20, 0x23, 0xA1, 0x10, 0x02}},
+ },
+ },
+ /* SIF */
+ {
+ /* 5 fps */
+ {
+ {4, 582, 0, {0x35, 0xF4, 0x04, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x46, 0x52, 0x60, 0x02}},
+ {3, 387, 1276, {0x35, 0xF4, 0x05, 0x27, 0xD8, 0x26, 0x48, 0x03, 0x10, 0x83, 0x79, 0x60, 0x02}},
+ {2, 291, 960, {0x35, 0xF4, 0x0D, 0x1D, 0xF2, 0x1C, 0x62, 0x04, 0x10, 0x23, 0xA1, 0x60, 0x02}},
+ {1, 191, 630, {0x35, 0xF4, 0x1D, 0x13, 0xA9, 0x12, 0x19, 0x05, 0x08, 0xBF, 0xF4, 0x60, 0x02}},
+ },
+ /* 10 fps */
+ {
+ {0, },
+ {6, 775, 1278, {0x34, 0xF4, 0x05, 0x27, 0xE8, 0x26, 0x58, 0x05, 0x30, 0x07, 0x3F, 0x10, 0x02}},
+ {3, 447, 736, {0x34, 0xF4, 0x15, 0x16, 0xFB, 0x15, 0x6B, 0x05, 0x18, 0xBF, 0x69, 0x10, 0x02}},
+ {2, 291, 480, {0x34, 0xF4, 0x2D, 0x0E, 0xF9, 0x0D, 0x69, 0x09, 0x18, 0x23, 0xA1, 0x10, 0x02}},
+ },
+ /* 15 fps */
+ {
+ {0, },
+ {9, 955, 1050, {0x33, 0xF4, 0x05, 0x20, 0xCF, 0x1F, 0x3F, 0x06, 0x48, 0xBB, 0x33, 0x10, 0x02}},
+ {4, 591, 650, {0x33, 0xF4, 0x15, 0x14, 0x44, 0x12, 0xB4, 0x08, 0x30, 0x4F, 0x4E, 0x10, 0x02}},
+ {3, 448, 492, {0x33, 0xF4, 0x25, 0x0F, 0x52, 0x0D, 0xC2, 0x09, 0x28, 0xC0, 0x69, 0x10, 0x02}},
+ },
+ /* 20 fps */
+ {
+ {0, },
+ {9, 958, 782, {0x32, 0xF4, 0x0D, 0x18, 0x6A, 0x16, 0xDA, 0x0B, 0x58, 0xBE, 0x33, 0xD0, 0x02}},
+ {5, 703, 574, {0x32, 0xF4, 0x1D, 0x11, 0xE7, 0x10, 0x57, 0x0B, 0x40, 0xBF, 0x42, 0xD0, 0x02}},
+ {3, 446, 364, {0x32, 0xF4, 0x3D, 0x0B, 0x5C, 0x09, 0xCC, 0x0E, 0x30, 0xBE, 0x69, 0xD0, 0x02}},
+ },
+ /* 25 fps */
+ {
+ {0, },
+ {9, 958, 654, {0x31, 0xF4, 0x15, 0x14, 0x66, 0x12, 0xD6, 0x0B, 0x50, 0xBE, 0x33, 0x90, 0x02}},
+ {6, 776, 530, {0x31, 0xF4, 0x25, 0x10, 0x8C, 0x0E, 0xFC, 0x0C, 0x48, 0x08, 0x3F, 0x90, 0x02}},
+ {4, 592, 404, {0x31, 0xF4, 0x35, 0x0C, 0x96, 0x0B, 0x06, 0x0B, 0x38, 0x50, 0x4E, 0x90, 0x02}},
+ },
+ /* 30 fps */
+ {
+ {0, },
+ {9, 957, 526, {0x30, 0xF4, 0x25, 0x10, 0x68, 0x0E, 0xD8, 0x0D, 0x58, 0xBD, 0x33, 0x60, 0x02}},
+ {6, 775, 426, {0x30, 0xF4, 0x35, 0x0D, 0x48, 0x0B, 0xB8, 0x0F, 0x50, 0x07, 0x3F, 0x60, 0x02}},
+ {4, 590, 324, {0x30, 0x7A, 0x4B, 0x0A, 0x1C, 0x08, 0xB4, 0x0E, 0x40, 0x4E, 0x52, 0x60, 0x02}},
+ },
+ },
+ /* CIF */
+ {
+ /* 5 fps */
+ {
+ {6, 771, 0, {0x15, 0xF4, 0x04, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x03, 0x3F, 0x80, 0x02}},
+ {4, 465, 1278, {0x15, 0xF4, 0x05, 0x27, 0xEE, 0x26, 0x36, 0x03, 0x18, 0xD1, 0x65, 0x80, 0x02}},
+ {2, 291, 800, {0x15, 0xF4, 0x15, 0x18, 0xF4, 0x17, 0x3C, 0x05, 0x18, 0x23, 0xA1, 0x80, 0x02}},
+ {1, 193, 528, {0x15, 0xF4, 0x2D, 0x10, 0x7E, 0x0E, 0xC6, 0x0A, 0x18, 0xC1, 0xF4, 0x80, 0x02}},
+ },
+ /* 10 fps */
+ {
+ {0, },
+ {9, 932, 1278, {0x14, 0xF4, 0x05, 0x27, 0xEE, 0x26, 0x36, 0x04, 0x30, 0xA4, 0x33, 0x10, 0x02}},
+ {4, 591, 812, {0x14, 0xF4, 0x15, 0x19, 0x56, 0x17, 0x9E, 0x06, 0x28, 0x4F, 0x4E, 0x10, 0x02}},
+ {2, 291, 400, {0x14, 0xF4, 0x3D, 0x0C, 0x7A, 0x0A, 0xC2, 0x0E, 0x28, 0x23, 0xA1, 0x10, 0x02}},
+ },
+ /* 15 fps */
+ {
+ {0, },
+ {9, 956, 876, {0x13, 0xF4, 0x0D, 0x1B, 0x58, 0x19, 0xA0, 0x05, 0x38, 0xBC, 0x33, 0x60, 0x02}},
+ {5, 703, 644, {0x13, 0xF4, 0x1D, 0x14, 0x1C, 0x12, 0x64, 0x08, 0x38, 0xBF, 0x42, 0x60, 0x02}},
+ {3, 448, 410, {0x13, 0xF4, 0x3D, 0x0C, 0xC4, 0x0B, 0x0C, 0x0E, 0x38, 0xC0, 0x69, 0x60, 0x02}},
+ },
+ /* 20 fps */
+ {
+ {0, },
+ {9, 956, 650, {0x12, 0xF4, 0x1D, 0x14, 0x4A, 0x12, 0x92, 0x09, 0x48, 0xBC, 0x33, 0x10, 0x03}},
+ {6, 776, 528, {0x12, 0xF4, 0x2D, 0x10, 0x7E, 0x0E, 0xC6, 0x0A, 0x40, 0x08, 0x3F, 0x10, 0x03}},
+ {4, 591, 402, {0x12, 0xF4, 0x3D, 0x0C, 0x8F, 0x0A, 0xD7, 0x0E, 0x40, 0x4F, 0x4E, 0x10, 0x03}},
+ },
+ /* 25 fps */
+ {
+ {0, },
+ {9, 956, 544, {0x11, 0xF4, 0x25, 0x10, 0xF4, 0x0F, 0x3C, 0x0A, 0x48, 0xBC, 0x33, 0xC0, 0x02}},
+ {7, 840, 478, {0x11, 0xF4, 0x2D, 0x0E, 0xEB, 0x0D, 0x33, 0x0B, 0x48, 0x48, 0x3B, 0xC0, 0x02}},
+ {5, 703, 400, {0x11, 0xF4, 0x3D, 0x0C, 0x7A, 0x0A, 0xC2, 0x0E, 0x48, 0xBF, 0x42, 0xC0, 0x02}},
+ },
+ /* 30 fps */
+ {
+ {0, },
+ {9, 956, 438, {0x10, 0xF4, 0x35, 0x0D, 0xAC, 0x0B, 0xF4, 0x0D, 0x50, 0xBC, 0x33, 0x10, 0x02}},
+ {7, 838, 384, {0x10, 0xF4, 0x45, 0x0B, 0xFD, 0x0A, 0x45, 0x0F, 0x50, 0x46, 0x3B, 0x10, 0x02}},
+ {6, 773, 354, {0x10, 0x7A, 0x4B, 0x0B, 0x0C, 0x09, 0x80, 0x10, 0x50, 0x05, 0x3F, 0x10, 0x02}},
+ },
+ },
+ /* VGA */
+ {
+ /* 5 fps */
+ {
+ {0, },
+ {6, 773, 1272, {0x1D, 0xF4, 0x15, 0x27, 0xB6, 0x24, 0x96, 0x02, 0x30, 0x05, 0x3F, 0x10, 0x02}},
+ {4, 592, 976, {0x1D, 0xF4, 0x25, 0x1E, 0x78, 0x1B, 0x58, 0x03, 0x30, 0x50, 0x4E, 0x10, 0x02}},
+ {3, 448, 738, {0x1D, 0xF4, 0x3D, 0x17, 0x0C, 0x13, 0xEC, 0x04, 0x30, 0xC0, 0x69, 0x10, 0x02}},
+ },
+ /* 10 fps */
+ {
+ {0, },
+ {9, 956, 788, {0x1C, 0xF4, 0x35, 0x18, 0x9C, 0x15, 0x7C, 0x03, 0x48, 0xBC, 0x33, 0x10, 0x02}},
+ {6, 776, 640, {0x1C, 0x7A, 0x53, 0x13, 0xFC, 0x11, 0x2C, 0x04, 0x48, 0x08, 0x3F, 0x10, 0x02}},
+ {4, 592, 488, {0x1C, 0x7A, 0x6B, 0x0F, 0x3C, 0x0C, 0x6C, 0x06, 0x48, 0x50, 0x4E, 0x10, 0x02}},
+ },
+ /* 15 fps */
+ {
+ {0, },
+ {9, 957, 526, {0x1B, 0x7A, 0x63, 0x10, 0x68, 0x0D, 0x98, 0x06, 0x58, 0xBD, 0x33, 0x80, 0x02}},
+ {9, 957, 526, {0x1B, 0x7A, 0x63, 0x10, 0x68, 0x0D, 0x98, 0x06, 0x58, 0xBD, 0x33, 0x80, 0x02}},
+ {8, 895, 492, {0x1B, 0x7A, 0x6B, 0x0F, 0x5D, 0x0C, 0x8D, 0x06, 0x58, 0x7F, 0x37, 0x80, 0x02}},
+ },
+ /* 20 fps */
+ {
+ {0, },
+ {0, },
+ {0, },
+ {0, },
+ },
+ /* 25 fps */
+ {
+ {0, },
+ {0, },
+ {0, },
+ {0, },
+ },
+ /* 30 fps */
+ {
+ {0, },
+ {0, },
+ {0, },
+ {0, },
+ },
+ },
+};
+
+/*
+ * 16 versions:
+ * 2 tables (one for Y, and one for U&V)
+ * 16 levels of detail per table
+ * 8 blocks
+ */
+
+const unsigned int TimonRomTable [16][2][16][8] =
+{
+ { /* version 0 */
+ { /* version 0, passes 0 */
+ {0x00000000,0x00000000,0x00000000,0x00000000,
+ 0x00000000,0x00000000,0x00000000,0x00000001},
+ {0x00000000,0x00000000,0x00000001,0x00000001,
+ 0x00000001,0x00000001,0x00000001,0x00000001},
+ {0x00000000,0x00000000,0x00000001,0x00000001,
+ 0x00000001,0x00000009,0x00000009,0x00000009},
+ {0x00000000,0x00000000,0x00000009,0x00000001,
+ 0x00000009,0x00000009,0x00000009,0x00000009},
+ {0x00000000,0x00000000,0x00000009,0x00000009,
+ 0x00000009,0x00000009,0x00000049,0x00000009},
+ {0x00000000,0x00000000,0x00000009,0x00000009,
+ 0x00000009,0x00000049,0x00000049,0x00000049},
+ {0x00000000,0x00000000,0x00000009,0x00000009,
+ 0x00000049,0x00000049,0x00000049,0x00000049},
+ {0x00000000,0x00000000,0x00000009,0x00000049,
+ 0x00000049,0x00000049,0x00000049,0x00000049},
+ {0x00000000,0x00000000,0x00000049,0x00000049,
+ 0x00000049,0x00000049,0x0000024a,0x0000024a},
+ {0x00000000,0x00000000,0x00000049,0x00000049,
+ 0x00000049,0x00000249,0x0000024a,0x0000024a},
+ {0x00000000,0x00000000,0x00000049,0x00000049,
+ 0x00000249,0x00000249,0x0000024a,0x0000024a},
+ {0x00000000,0x00000000,0x00000049,0x00000049,
+ 0x00000249,0x00000249,0x00001252,0x0000024a},
+ {0x00000000,0x00000000,0x00000049,0x00000049,
+ 0x00000249,0x0000124a,0x00001252,0x0000024a},
+ {0x00000000,0x00000000,0x00000049,0x00000249,
+ 0x00000249,0x0000124a,0x00001252,0x0000024a},
+ {0x00000000,0x00000000,0x00000249,0x00001249,
+ 0x0000124a,0x00009252,0x00009292,0x00001252},
+ {0x00000000,0x00000000,0x00000000,0x00000000,
+ 0x00000000,0x00000000,0x00000000,0x00000000}
+ },
+ { /* version 0, passes 1 */
+ {0x00000000,0x00000000,0x00000000,0x00000000,
+ 0x00000000,0x00000000,0x00000000,0x00000000},
+ {0x00000000,0x00000000,0x00000001,0x00000001,
+ 0x00000001,0x00000001,0x00000000,0x00000000},
+ {0x00000000,0x00000000,0x00000009,0x00000001,
+ 0x00000001,0x00000009,0x00000000,0x00000000},
+ {0x00000000,0x00000000,0x00000009,0x00000009,
+ 0x00000009,0x00000009,0x00000000,0x00000000},
+ {0x00000000,0x00000000,0x00000009,0x00000009,
+ 0x00000009,0x00000009,0x00000001,0x00000000},
+ {0x00000000,0x00000000,0x00000049,0x00000009,
+ 0x00000009,0x00000049,0x00000001,0x00000001},
+ {0x00000000,0x00000000,0x00000049,0x00000009,
+ 0x00000009,0x00000049,0x00000001,0x00000001},
+ {0x00000000,0x00000000,0x00000049,0x00000049,
+ 0x00000049,0x00000049,0x00000009,0x00000001},
+ {0x00000000,0x00000000,0x00000049,0x00000049,
+ 0x00000049,0x00000049,0x00000009,0x00000001},
+ {0x00000000,0x00000000,0x00000049,0x00000049,
+ 0x00000049,0x00000049,0x00000009,0x00000001},
+ {0x00000000,0x00000000,0x00000049,0x00000049,
+ 0x00000049,0x00000049,0x00000009,0x00000009},
+ {0x00000000,0x00000000,0x00000049,0x00000049,
+ 0x00000049,0x00000249,0x00000049,0x00000009},
+ {0x00000000,0x00000000,0x00000049,0x00000049,
+ 0x00000049,0x00000249,0x00000049,0x00000009},
+ {0x00000000,0x00000000,0x00000249,0x00000049,
+ 0x00000249,0x00000249,0x00000049,0x00000009},
+ {0x00000000,0x00000000,0x00001249,0x00000249,
+ 0x0000124a,0x0000124a,0x0000024a,0x00000049},
+ {0x00000000,0x00000000,0x00000000,0x00000000,
+ 0x00000000,0x00000000,0x00000000,0x00000000}
+ }
+ },
+ { /* version 1 */
+ { /* version 1, passes 0 */
+ {0x00000000,0x00000000,0x00000000,0x00000000,
+ 0x00000000,0x00000000,0x00000000,0x00000001},
+ {0x00000000,0x00000000,0x00000001,0x00000001,
+ 0x00000001,0x00000009,0x00000009,0x00000009},
+ {0x00000000,0x00000000,0x00000009,0x00000009,
+ 0x00000009,0x00000009,0x00000009,0x00000009},
+ {0x00000000,0x00000000,0x00000009,0x00000009,
+ 0x00000009,0x00000049,0x00000049,0x00000049},
+ {0x00000000,0x00000000,0x00000009,0x00000049,
+ 0x00000049,0x00000049,0x00000049,0x00000049},
+ {0x00000000,0x00000000,0x00000049,0x00000049,
+ 0x00000049,0x00000249,0x0000024a,0x0000024a},
+ {0x00000000,0x00000000,0x00000049,0x00000049,
+ 0x00000249,0x00000249,0x0000024a,0x0000024a},
+ {0x00000000,0x00000000,0x00000049,0x00000249,
+ 0x00000249,0x00000249,0x0000024a,0x00001252},
+ {0x00000000,0x00000000,0x00000049,0x00000249,
+ 0x00000249,0x0000124a,0x00001252,0x00001252},
+ {0x00000000,0x00000000,0x00000049,0x00000249,
+ 0x0000124a,0x0000124a,0x00001252,0x00001252},
+ {0x00000000,0x00000000,0x00000249,0x00000249,
+ 0x0000124a,0x0000124a,0x00009292,0x00009292},
+ {0x00000000,0x00000000,0x00000249,0x00001249,
+ 0x0000124a,0x00009252,0x00009292,0x00009292},
+ {0x00000000,0x00000000,0x00000249,0x00001249,
+ 0x00009252,0x00009252,0x00009292,0x00009292},
+ {0x00000000,0x00000000,0x00000249,0x0000924a,
+ 0x00009292,0x00009493,0x00009493,0x00009493},
+ {0x00000000,0x00000000,0x00001249,0x00009252,
+ 0x00009492,0x0000a49b,0x0000a49b,0x0000a49b},
+ {0x00000000,0x00000000,0x00000000,0x00000000,
+ 0x00000000,0x00000000,0x00000000,0x00000000}
+ },
+ { /* version 1, passes 1 */
+ {0x00000000,0x00000000,0x00000000,0x00000000,
+ 0x00000000,0x00000000,0x00000000,0x00000000},
+ {0x00000000,0x00000000,0x00000009,0x00000009,
+ 0x00000009,0x00000001,0x00000001,0x00000000},
+ {0x00000000,0x00000000,0x00000009,0x00000009,
+ 0x00000009,0x00000009,0x00000001,0x00000000},
+ {0x00000000,0x00000000,0x00000049,0x00000049,
+ 0x00000049,0x00000009,0x00000001,0x00000000},
+ {0x00000000,0x00000000,0x00000049,0x00000049,
+ 0x00000049,0x00000049,0x00000001,0x00000001},
+ {0x00000000,0x00000000,0x00000049,0x00000049,
+ 0x00000049,0x00000049,0x00000009,0x00000001},
+ {0x00000000,0x00000000,0x00000249,0x00000049,
+ 0x00000049,0x00000249,0x00000009,0x00000001},
+ {0x00000000,0x00000000,0x00000249,0x00000049,
+ 0x00000249,0x00000249,0x00000009,0x00000009},
+ {0x00000000,0x00000000,0x00000249,0x00000249,
+ 0x00000249,0x00000249,0x00000049,0x00000009},
+ {0x00000000,0x00000000,0x00000249,0x00000249,
+ 0x00000249,0x0000124a,0x00000049,0x00000009},
+ {0x00000000,0x00000000,0x00000249,0x00000249,
+ 0x00000249,0x0000124a,0x00000049,0x00000009},
+ {0x00000000,0x00000000,0x00000249,0x00000249,
+ 0x00000249,0x0000124a,0x0000024a,0x00000049},
+ {0x00000000,0x00000000,0x00000249,0x00000249,
+ 0x0000124a,0x0000124a,0x0000024a,0x00000049},
+ {0x00000000,0x00000000,0x00000249,0x00000249,
+ 0x0000124a,0x0000124a,0x0000024a,0x00000049},
+ {0x00000000,0x00000000,0x00001249,0x00001249,
+ 0x00009252,0x00009252,0x00001252,0x0000024a},
+ {0x00000000,0x00000000,0x00000000,0x00000000,
+ 0x00000000,0x00000000,0x00000000,0x00000000}
+ }
+ },
+ { /* version 2 */
+ { /* version 2, passes 0 */
+ {0x00000000,0x00000000,0x00000000,0x00000000,
+ 0x00000000,0x00000000,0x00000000,0x00000001},
+ {0x00000000,0x00000000,0x00000009,0x00000009,
+ 0x00000009,0x00000009,0x00000009,0x00000009},
+ {0x00000000,0x00000000,0x00000049,0x00000049,
+ 0x00000049,0x00000049,0x00000049,0x00000049},
+ {0x00000000,0x00000000,0x00000049,0x00000049,
+ 0x00000049,0x00000249,0x0000024a,0x0000024a},
+ {0x00000000,0x00000000,0x00000049,0x00000249,
+ 0x00000249,0x00000249,0x0000024a,0x00001252},
+ {0x00000000,0x00000000,0x00000249,0x00000249,
+ 0x00000249,0x0000124a,0x00001252,0x00001252},
+ {0x00000000,0x00000000,0x00000249,0x00000249,
+ 0x0000124a,0x0000124a,0x00009292,0x00009292},
+ {0x00000000,0x00000000,0x00000249,0x00001249,
+ 0x0000124a,0x00009252,0x00009292,0x00009292},
+ {0x00000000,0x00000000,0x00000249,0x00001249,
+ 0x00009252,0x00009292,0x00009292,0x00009292},
+ {0x00000000,0x00000000,0x00000249,0x00001249,
+ 0x00009252,0x00009292,0x00009493,0x00009493},
+ {0x00000000,0x00000000,0x00000249,0x0000924a,
+ 0x00009252,0x00009493,0x00009493,0x00009493},
+ {0x00000000,0x00000000,0x00000249,0x0000924a,
+ 0x00009292,0x00009493,0x00009493,0x00009493},
+ {0x00000000,0x00000000,0x00000249,0x00009252,
+ 0x00009492,0x00009493,0x0000a49b,0x0000a49b},
+ {0x00000000,0x00000000,0x00001249,0x00009292,
+ 0x00009492,0x000124db,0x000124db,0x000124db},
+ {0x00000000,0x00000000,0x0000924a,0x00009493,
+ 0x0000a493,0x000126dc,0x000126dc,0x000126dc},
+ {0x00000000,0x00000000,0x00000000,0x00000000,
+ 0x00000000,0x00000000,0x00000000,0x00000000}
+ },
+ { /* version 2, passes 1 */
+ {0x00000000,0x00000000,0x00000000,0x00000000,
+ 0x00000000,0x00000000,0x00000000,0x00000000},
+ {0x00000000,0x00000000,0x00000049,0x00000009,
+ 0x00000049,0x00000009,0x00000001,0x00000000},
+ {0x00000000,0x00000000,0x00000049,0x00000049,
+ 0x00000049,0x00000049,0x00000049,0x00000000},
+ {0x00000000,0x00000000,0x00000249,0x00000049,
+ 0x00000249,0x00000049,0x0000024a,0x00000001},
+ {0x00000000,0x00000000,0x00000249,0x00000249,
+ 0x00000249,0x00000249,0x0000024a,0x00000001},
+ {0x00000000,0x00000000,0x00000249,0x00000249,
+ 0x00000249,0x00000249,0x0000024a,0x00000001},
+ {0x00000000,0x00000000,0x00000249,0x00000249,
+ 0x00000249,0x00000249,0x0000024a,0x00000009},
+ {0x00000000,0x00000000,0x00000249,0x00000249,
+ 0x0000124a,0x0000124a,0x0000024a,0x00000009},
+ {0x00000000,0x00000000,0x00000249,0x00000249,
+ 0x0000124a,0x0000124a,0x0000024a,0x00000009},
+ {0x00000000,0x00000000,0x00001249,0x00001249,
+ 0x0000124a,0x00009252,0x00001252,0x00000049},
+ {0x00000000,0x00000000,0x00001249,0x00001249,
+ 0x0000124a,0x00009292,0x00001252,0x00000049},
+ {0x00000000,0x00000000,0x00001249,0x00001249,
+ 0x0000124a,0x00009292,0x00001252,0x00000049},
+ {0x00000000,0x00000000,0x00001249,0x00001249,
+ 0x00009252,0x00009292,0x00001252,0x0000024a},
+ {0x00000000,0x00000000,0x00001249,0x00001249,
+ 0x00009292,0x00009292,0x00001252,0x0000024a},
+ {0x00000000,0x00000000,0x0000924a,0x0000924a,
+ 0x00009492,0x00009493,0x00009292,0x00001252},
+ {0x00000000,0x00000000,0x00000000,0x00000000,
+ 0x00000000,0x00000000,0x00000000,0x00000000}
+ }
+ },
+ { /* version 3 */
+ { /* version 3, passes 0 */
+ {0x00000000,0x00000000,0x00000000,0x00000000,
+ 0x00000000,0x00000000,0x00000000,0x00000001},
+ {0x00000000,0x00000000,0x00000049,0x00000049,
+ 0x00000049,0x00000049,0x00000049,0x00000049},
+ {0x00000000,0x00000000,0x00000049,0x00000249,
+ 0x00000249,0x00000249,0x00001252,0x0000024a},
+ {0x00000000,0x00000000,0x00000249,0x00000249,
+ 0x00000249,0x0000124a,0x00001252,0x00001252},
+ {0x00000000,0x00000000,0x00000249,0x00000249,
+ 0x0000124a,0x00009252,0x00009292,0x00009292},
+ {0x00000000,0x00000000,0x00000249,0x00001249,
+ 0x0000124a,0x00009292,0x00009292,0x00009493},
+ {0x00000000,0x00000000,0x00000249,0x00001249,
+ 0x00009252,0x00009292,0x00009493,0x00009493},
+ {0x00000000,0x00000000,0x00000249,0x00001249,
+ 0x00009292,0x00009493,0x00009493,0x00009493},
+ {0x00000000,0x00000000,0x00000249,0x00009252,
+ 0x00009292,0x00009493,0x0000a49b,0x0000a49b},
+ {0x00000000,0x00000000,0x00001249,0x00009252,
+ 0x00009292,0x0000a49b,0x0000a49b,0x0000a49b},
+ {0x00000000,0x00000000,0x00001249,0x00009252,
+ 0x00009492,0x0000a49b,0x0000a49b,0x0000a49b},
+ {0x00000000,0x00000000,0x00001249,0x00009292,
+ 0x00009492,0x0000a49b,0x000124db,0x000124db},
+ {0x00000000,0x00000000,0x00001249,0x00009292,
+ 0x0000a493,0x0000a49b,0x000124db,0x000124db},
+ {0x00000000,0x00000000,0x00001249,0x00009493,
+ 0x0001249b,0x000126dc,0x000136e4,0x000126dc},
+ {0x00000000,0x00000000,0x0000924a,0x0000a49b,
+ 0x000124db,0x000136e4,0x0001b725,0x000136e4},
+ {0x00000000,0x00000000,0x00000000,0x00000000,
+ 0x00000000,0x00000000,0x00000000,0x00000000}
+ },
+ { /* version 3, passes 1 */
+ {0x00000000,0x00000000,0x00000000,0x00000000,
+ 0x00000000,0x00000000,0x00000000,0x00000000},
+ {0x00000000,0x00000000,0x00000049,0x00000049,
+ 0x00000049,0x00000049,0x00000001,0x00000000},
+ {0x00000000,0x00000000,0x00000249,0x00000249,
+ 0x00000249,0x00000249,0x00000049,0x00000001},
+ {0x00000000,0x00000000,0x00000249,0x00000249,
+ 0x00000249,0x0000124a,0x00001252,0x00000001},
+ {0x00000000,0x00000000,0x00000249,0x00000249,
+ 0x0000124a,0x0000124a,0x00001252,0x00000009},
+ {0x00000000,0x00000000,0x00000249,0x00001249,
+ 0x0000124a,0x00009252,0x00009292,0x00000009},
+ {0x00000000,0x00000000,0x00001249,0x00001249,
+ 0x0000124a,0x00009252,0x00009292,0x00000049},
+ {0x00000000,0x00000000,0x00001249,0x00001249,
+ 0x00009252,0x00009252,0x00009292,0x00000049},
+ {0x00000000,0x00000000,0x00001249,0x00001249,
+ 0x00009252,0x00009493,0x00009292,0x0000024a},
+ {0x00000000,0x00000000,0x00001249,0x00001249,
+ 0x00009252,0x00009493,0x00009292,0x0000024a},
+ {0x00000000,0x00000000,0x00001249,0x00001249,
+ 0x00009252,0x00009493,0x00009493,0x00001252},
+ {0x00000000,0x00000000,0x00001249,0x0000924a,
+ 0x00009292,0x00009493,0x00009493,0x00001252},
+ {0x00000000,0x00000000,0x00001249,0x0000924a,
+ 0x00009492,0x00009493,0x00009493,0x00009292},
+ {0x00000000,0x00000000,0x00001249,0x00009252,
+ 0x00009492,0x0000a49b,0x00009493,0x00009292},
+ {0x00000000,0x00000000,0x0000924a,0x00009292,
+ 0x0000a493,0x000124db,0x0000a49b,0x00009493},
+ {0x00000000,0x00000000,0x00000000,0x00000000,
+ 0x00000000,0x00000000,0x00000000,0x00000000}
+ }
+ },
+ { /* version 4 */
+ { /* version 4, passes 0 */
+ {0x00000000,0x00000000,0x00000049,0x00000049,
+ 0x00000049,0x00000049,0x0000024a,0x0000024a},
+ {0x00000000,0x00000000,0x00000249,0x00000249,
+ 0x00000249,0x0000124a,0x00001252,0x00009292},
+ {0x00000000,0x00000000,0x00000249,0x00000249,
+ 0x0000124a,0x00009252,0x00009292,0x00009292},
+ {0x00000000,0x00000000,0x00000249,0x00001249,
+ 0x0000124a,0x00009292,0x00009493,0x00009493},
+ {0x00000000,0x00000000,0x00000249,0x00001249,
+ 0x00009252,0x00009493,0x00009493,0x0000a49b},
+ {0x00000000,0x00000000,0x00000249,0x0000924a,
+ 0x00009292,0x00009493,0x0000a49b,0x0000a49b},
+ {0x00000000,0x00000000,0x00001249,0x0000924a,
+ 0x00009292,0x00009493,0x0000a49b,0x000124db},
+ {0x00000000,0x00000000,0x00001249,0x00009252,
+ 0x00009492,0x0000a49b,0x0000a49b,0x000124db},
+ {0x00000000,0x00000000,0x00001249,0x00009292,
+ 0x00009492,0x000124db,0x000124db,0x000126dc},
+ {0x00000000,0x00000000,0x00001249,0x00009292,
+ 0x0000a493,0x000124db,0x000126dc,0x000126dc},
+ {0x00000000,0x00000000,0x00001249,0x00009493,
+ 0x0000a493,0x000124db,0x000126dc,0x000136e4},
+ {0x00000000,0x00000000,0x00001249,0x00009493,
+ 0x0000a493,0x000126dc,0x000136e4,0x000136e4},
+ {0x00000000,0x00000000,0x0000924a,0x00009493,
+ 0x0001249b,0x000126dc,0x000136e4,0x000136e4},
+ {0x00000000,0x00000000,0x0000924a,0x0000a49b,
+ 0x000124db,0x000136e4,0x000136e4,0x0001b724},
+ {0x00000000,0x00000000,0x00009252,0x000124db,
+ 0x000126dc,0x0001b724,0x0001b725,0x0001b925},
+ {0x00000000,0x00000000,0x00000000,0x00000000,
+ 0x00000000,0x00000000,0x00000000,0x00000000}
+ },
+ { /* version 4, passes 1 */
+ {0x00000000,0x00000000,0x00000049,0x00000049,
+ 0x00000049,0x00000049,0x00000049,0x00000049},
+ {0x00000000,0x00000000,0x00000249,0x00000249,
+ 0x00000249,0x00000249,0x0000024a,0x00000049},
+ {0x00000000,0x00000000,0x00001249,0x00000249,
+ 0x0000124a,0x0000124a,0x00001252,0x00000049},
+ {0x00000000,0x00000000,0x00001249,0x00001249,
+ 0x0000124a,0x0000124a,0x00009292,0x0000024a},
+ {0x00000000,0x00000000,0x00001249,0x00001249,
+ 0x00009252,0x00009292,0x00009292,0x0000024a},
+ {0x00000000,0x00000000,0x00001249,0x00001249,
+ 0x00009252,0x00009292,0x0000a49b,0x0000024a},
+ {0x00000000,0x00000000,0x00001249,0x00001249,
+ 0x00009292,0x00009493,0x0000a49b,0x00001252},
+ {0x00000000,0x00000000,0x00001249,0x00001249,
+ 0x00009292,0x00009493,0x0000a49b,0x00001252},
+ {0x00000000,0x00000000,0x00001249,0x0000924a,
+ 0x00009492,0x0000a49b,0x0000a49b,0x00001252},
+ {0x00000000,0x00000000,0x00001249,0x00009252,
+ 0x00009492,0x0000a49b,0x0000a49b,0x00009292},
+ {0x00000000,0x00000000,0x00001249,0x00009292,
+ 0x00009492,0x0000a49b,0x0000a49b,0x00009292},
+ {0x00000000,0x00000000,0x00001249,0x00009493,
+ 0x0000a493,0x0000a49b,0x0000a49b,0x00009292},
+ {0x00000000,0x00000000,0x00001249,0x00009493,
+ 0x0000a493,0x0000a49b,0x0000a49b,0x00009493},
+ {0x00000000,0x00000000,0x0000924a,0x00009493,
+ 0x0000a493,0x000124db,0x0000a49b,0x00009493},
+ {0x00000000,0x00000000,0x00009252,0x0000a49b,
+ 0x0001249b,0x000126dc,0x000124db,0x0000a49b},
+ {0x00000000,0x00000000,0x00000000,0x00000000,
+ 0x00000000,0x00000000,0x00000000,0x00000000}
+ }
+ },
+ { /* version 5 */
+ { /* version 5, passes 0 */
+ {0x00000000,0x00000000,0x00000249,0x00000249,
+ 0x00000249,0x0000124a,0x00001252,0x00009292},
+ {0x00000000,0x00000000,0x00000249,0x00001249,
+ 0x0000124a,0x00009292,0x00009292,0x00009493},
+ {0x00000000,0x00000000,0x00000249,0x0000924a,
+ 0x00009292,0x00009493,0x0000a49b,0x0000a49b},
+ {0x00000000,0x00000000,0x00001249,0x0000924a,
+ 0x00009292,0x00009493,0x0000a49b,0x0000a49b},
+ {0x00000000,0x00000000,0x00001249,0x0000924a,
+ 0x00009492,0x0000a49b,0x0000a49b,0x000124db},
+ {0x00000000,0x00000000,0x00001249,0x00009292,
+ 0x00009492,0x0000a49b,0x000124db,0x000124db},
+ {0x00000000,0x00000000,0x00001249,0x00009292,
+ 0x0000a493,0x000124db,0x000124db,0x000126dc},
+ {0x00000000,0x00000000,0x00001249,0x00009493,
+ 0x0000a493,0x000124db,0x000126dc,0x000126dc},
+ {0x00000000,0x00000000,0x00001249,0x00009493,
+ 0x0000a493,0x000126dc,0x000136e4,0x000136e4},
+ {0x00000000,0x00000000,0x00001249,0x00009493,
+ 0x0001249b,0x000126dc,0x000136e4,0x000136e4},
+ {0x00000000,0x00000000,0x00001249,0x00009493,
+ 0x0001249b,0x000126dc,0x000136e4,0x000136e4},
+ {0x00000000,0x00000000,0x0000924a,0x00009493,
+ 0x0001249b,0x000126dc,0x0001b725,0x0001b724},
+ {0x00000000,0x00000000,0x0000924a,0x0000a49b,
+ 0x000124db,0x000126dc,0x0001b725,0x0001b724},
+ {0x00000000,0x00000000,0x00009292,0x0000a49b,
+ 0x000126dc,0x000136e4,0x0001b92d,0x0001b925},
+ {0x00000000,0x00000000,0x00009492,0x000124db,
+ 0x000136e4,0x0001b724,0x0001c96e,0x0001c92d},
+ {0x00000000,0x00000000,0x00000000,0x00000000,
+ 0x00000000,0x00000000,0x00000000,0x00000000}
+ },
+ { /* version 5, passes 1 */
+ {0x00000000,0x00000000,0x00000249,0x00000249,
+ 0x0000124a,0x00000249,0x0000024a,0x0000024a},
+ {0x00000000,0x00000000,0x00001249,0x00001249,
+ 0x0000124a,0x0000124a,0x00001252,0x0000024a},
+ {0x00000000,0x00000000,0x00001249,0x00001249,
+ 0x00009292,0x00009493,0x00009493,0x0000024a},
+ {0x00000000,0x00000000,0x00001249,0x00001249,
+ 0x00009292,0x00009493,0x00009493,0x00001252},
+ {0x00000000,0x00000000,0x00001249,0x00001249,
+ 0x00009292,0x00009493,0x0000a49b,0x00001252},
+ {0x00000000,0x00000000,0x00001249,0x0000924a,
+ 0x00009492,0x00009493,0x000124db,0x00001252},
+ {0x00000000,0x00000000,0x00001249,0x00009292,
+ 0x00009492,0x00009493,0x000124db,0x00009292},
+ {0x00000000,0x00000000,0x00001249,0x00009292,
+ 0x00009492,0x0000a49b,0x000124db,0x00009292},
+ {0x00000000,0x00000000,0x00001249,0x00009493,
+ 0x0000a493,0x0000a49b,0x000124db,0x00009292},
+ {0x00000000,0x00000000,0x00001249,0x00009493,
+ 0x0000a493,0x000124db,0x000124db,0x00009493},
+ {0x00000000,0x00000000,0x0000924a,0x00009493,
+ 0x0000a493,0x000124db,0x000124db,0x00009493},
+ {0x00000000,0x00000000,0x0000924a,0x00009493,
+ 0x0000a493,0x000124db,0x000124db,0x00009493},
+ {0x00000000,0x00000000,0x0000924a,0x00009493,
+ 0x0000a493,0x000124db,0x000124db,0x0000a49b},
+ {0x00000000,0x00000000,0x0000924a,0x0000a49b,
+ 0x000124db,0x000126dc,0x000124db,0x0000a49b},
+ {0x00000000,0x00000000,0x00009252,0x000124db,
+ 0x000126dc,0x000136e4,0x000126dc,0x000124db},
+ {0x00000000,0x00000000,0x00000000,0x00000000,
+ 0x00000000,0x00000000,0x00000000,0x00000000}
+ }
+ },
+ { /* version 6 */
+ { /* version 6, passes 0 */
+ {0x00000000,0x00000000,0x00000249,0x00000249,
+ 0x0000124a,0x0000124a,0x00009292,0x00009292},
+ {0x00000000,0x00000000,0x00001249,0x00001249,
+ 0x00009292,0x00009493,0x0000a49b,0x0000a49b},
+ {0x00000000,0x00000000,0x00001249,0x0000924a,
+ 0x00009492,0x0000a49b,0x0000a49b,0x000124db},
+ {0x00000000,0x00000000,0x00001249,0x00009292,
+ 0x00009492,0x000124db,0x000126dc,0x000126dc},
+ {0x00000000,0x00000000,0x00001249,0x00009493,
+ 0x0000a493,0x000124db,0x000126dc,0x000126dc},
+ {0x00000000,0x00000000,0x00001249,0x00009493,
+ 0x0000a493,0x000126dc,0x000136e4,0x000136e4},
+ {0x00000000,0x00000000,0x00001249,0x00009493,
+ 0x0000a493,0x000126dc,0x000136e4,0x0001b724},
+ {0x00000000,0x00000000,0x00001249,0x00009493,
+ 0x0001249b,0x000126dc,0x000136e4,0x0001b724},
+ {0x00000000,0x00000000,0x0000924a,0x0000a49b,
+ 0x0001249b,0x000126dc,0x000136e4,0x0001b724},
+ {0x00000000,0x00000000,0x0000924a,0x0000a49b,
+ 0x0001249b,0x000136e4,0x0001b725,0x0001b724},
+ {0x00000000,0x00000000,0x0000924a,0x0000a49b,
+ 0x000124db,0x000136e4,0x0001b725,0x0001b925},
+ {0x00000000,0x00000000,0x00009292,0x0000a49b,
+ 0x000126dc,0x000136e4,0x0001b92d,0x0001b925},
+ {0x00000000,0x00000000,0x00009292,0x0000a49b,
+ 0x000126dc,0x0001b724,0x0001b92d,0x0001c92d},
+ {0x00000000,0x00000000,0x00009492,0x000124db,
+ 0x000126dc,0x0001b724,0x0001c96e,0x0001c92d},
+ {0x00000000,0x00000000,0x0000a492,0x000126db,
+ 0x000136e4,0x0001b925,0x00025bb6,0x00024b77},
+ {0x00000000,0x00000000,0x00000000,0x00000000,
+ 0x00000000,0x00000000,0x00000000,0x00000000}
+ },
+ { /* version 6, passes 1 */
+ {0x00000000,0x00000000,0x00001249,0x00000249,
+ 0x0000124a,0x0000124a,0x00001252,0x00001252},
+ {0x00000000,0x00000000,0x00001249,0x00001249,
+ 0x00009252,0x00009292,0x00009292,0x00001252},
+ {0x00000000,0x00000000,0x00001249,0x0000924a,
+ 0x00009492,0x00009493,0x0000a49b,0x00001252},
+ {0x00000000,0x00000000,0x00001249,0x00009252,
+ 0x00009492,0x0000a49b,0x0000a49b,0x00009292},
+ {0x00000000,0x00000000,0x00001249,0x00009292,
+ 0x00009492,0x0000a49b,0x0000a49b,0x00009292},
+ {0x00000000,0x00000000,0x00001249,0x00009493,
+ 0x0000a493,0x0000a49b,0x000126dc,0x00009292},
+ {0x00000000,0x00000000,0x0000924a,0x00009493,
+ 0x0000a493,0x0000a49b,0x000126dc,0x00009493},
+ {0x00000000,0x00000000,0x0000924a,0x00009493,
+ 0x0000a493,0x0000a49b,0x000126dc,0x00009493},
+ {0x00000000,0x00000000,0x0000924a,0x00009493,
+ 0x0000a493,0x000124db,0x000126dc,0x00009493},
+ {0x00000000,0x00000000,0x0000924a,0x00009493,
+ 0x0000a493,0x000124db,0x000126dc,0x0000a49b},
+ {0x00000000,0x00000000,0x0000924a,0x0000a49b,
+ 0x0000a493,0x000124db,0x000126dc,0x0000a49b},
+ {0x00000000,0x00000000,0x0000924a,0x0000a49b,
+ 0x0001249b,0x000126dc,0x000126dc,0x0000a49b},
+ {0x00000000,0x00000000,0x0000924a,0x0000a49b,
+ 0x000124db,0x000136e4,0x000126dc,0x000124db},
+ {0x00000000,0x00000000,0x00009492,0x0000a49b,
+ 0x000136e4,0x000136e4,0x000126dc,0x000124db},
+ {0x00000000,0x00000000,0x0000a492,0x000124db,
+ 0x0001b724,0x0001b724,0x000136e4,0x000126dc},
+ {0x00000000,0x00000000,0x00000000,0x00000000,
+ 0x00000000,0x00000000,0x00000000,0x00000000}
+ }
+ },
+ { /* version 7 */
+ { /* version 7, passes 0 */
+ {0x00000000,0x00000000,0x00001249,0x00001249,
+ 0x00009292,0x00009493,0x0000a49b,0x000124db},
+ {0x00000000,0x00000000,0x00001249,0x00009292,
+ 0x0000a493,0x0000a49b,0x000124db,0x000126dc},
+ {0x00000000,0x00000000,0x00001249,0x00009493,
+ 0x0000a493,0x000124db,0x000126dc,0x000136e4},
+ {0x00000000,0x00000000,0x00001249,0x00009493,
+ 0x0000a493,0x000124db,0x000136e4,0x000136e4},
+ {0x00000000,0x00000000,0x00001249,0x00009493,
+ 0x0001249b,0x000126dc,0x000136e4,0x000136e4},
+ {0x00000000,0x00000000,0x00001249,0x0000a49b,
+ 0x0001249b,0x000126dc,0x000136e4,0x0001b724},
+ {0x00000000,0x00000000,0x0000924a,0x0000a49b,
+ 0x0001249b,0x000126dc,0x000136e4,0x0001b724},
+ {0x00000000,0x00000000,0x0000924a,0x0000a49b,
+ 0x000124db,0x000136e4,0x0001b725,0x0001b724},
+ {0x00000000,0x00000000,0x0000924a,0x0000a49b,
+ 0x000126dc,0x000136e4,0x0001b725,0x0001b925},
+ {0x00000000,0x00000000,0x0000924a,0x0000a49b,
+ 0x000126dc,0x0001b724,0x0001b92d,0x0001b925},
+ {0x00000000,0x00000000,0x00009292,0x0000a49b,
+ 0x000126dc,0x0001b724,0x0001c96e,0x0001c92d},
+ {0x00000000,0x00000000,0x00009292,0x000124db,
+ 0x000126dc,0x0001b724,0x0001c96e,0x0001c92d},
+ {0x00000000,0x00000000,0x00009492,0x000124db,
+ 0x000136e4,0x0001b724,0x0001c96e,0x0002496e},
+ {0x00000000,0x00000000,0x00009492,0x000126db,
+ 0x000136e4,0x0001b925,0x0001c96e,0x0002496e},
+ {0x00000000,0x00000000,0x0000a492,0x000136db,
+ 0x0001b724,0x0002496d,0x00025bb6,0x00025bbf},
+ {0x00000000,0x00000000,0x00000000,0x00000000,
+ 0x00000000,0x00000000,0x00000000,0x00000000}
+ },
+ { /* version 7, passes 1 */
+ {0x00000000,0x00000000,0x00001249,0x00001249,
+ 0x00009252,0x00009292,0x00009292,0x00009292},
+ {0x00000000,0x00000000,0x00001249,0x0000924a,
+ 0x00009492,0x00009493,0x00009493,0x00009292},
+ {0x00000000,0x00000000,0x00001249,0x00009493,
+ 0x0000a493,0x0000a49b,0x0000a49b,0x00009292},
+ {0x00000000,0x00000000,0x0000924a,0x00009493,
+ 0x0000a493,0x0000a49b,0x000124db,0x00009493},
+ {0x00000000,0x00000000,0x0000924a,0x00009493,
+ 0x0000a493,0x000124db,0x000124db,0x00009493},
+ {0x00000000,0x00000000,0x0000924a,0x0000a49b,
+ 0x0000a493,0x000124db,0x000136e4,0x00009493},
+ {0x00000000,0x00000000,0x0000924a,0x0000a49b,
+ 0x0000a493,0x000124db,0x000136e4,0x0000a49b},
+ {0x00000000,0x00000000,0x0000924a,0x0000a49b,
+ 0x0001249b,0x000124db,0x000136e4,0x0000a49b},
+ {0x00000000,0x00000000,0x0000924a,0x0000a49b,
+ 0x0001249b,0x000126dc,0x000136e4,0x0000a49b},
+ {0x00000000,0x00000000,0x0000924a,0x0000a49b,
+ 0x0001249b,0x000126dc,0x000136e4,0x000124db},
+ {0x00000000,0x00000000,0x0000924a,0x0000a49b,
+ 0x000126dc,0x000136e4,0x000136e4,0x000124db},
+ {0x00000000,0x00000000,0x0000924a,0x0000a49b,
+ 0x000126dc,0x000136e4,0x000136e4,0x000124db},
+ {0x00000000,0x00000000,0x0000924a,0x000124db,
+ 0x000136e4,0x000136e4,0x000136e4,0x000126dc},
+ {0x00000000,0x00000000,0x0000a492,0x000124db,
+ 0x000136e4,0x0001b724,0x000136e4,0x000126dc},
+ {0x00000000,0x00000000,0x00012492,0x000126db,
+ 0x0001b724,0x0001b925,0x0001b725,0x000136e4},
+ {0x00000000,0x00000000,0x00000000,0x00000000,
+ 0x00000000,0x00000000,0x00000000,0x00000000}
+ }
+ },
+ { /* version 8 */
+ { /* version 8, passes 0 */
+ {0x00000000,0x00000000,0x00001249,0x00001249,
+ 0x00009292,0x00009493,0x0000a49b,0x000124db},
+ {0x00000000,0x00000000,0x00001249,0x00009292,
+ 0x0000a493,0x000124db,0x000126dc,0x000126dc},
+ {0x00000000,0x00000000,0x00001249,0x00009493,
+ 0x0000a493,0x000124db,0x000126dc,0x000136e4},
+ {0x00000000,0x00000000,0x00001249,0x0000a49b,
+ 0x0001249b,0x000126dc,0x000136e4,0x0001b724},
+ {0x00000000,0x00000000,0x0000924a,0x0000a49b,
+ 0x0001249b,0x000126dc,0x000136e4,0x0001b724},
+ {0x00000000,0x00000000,0x0000924a,0x0000a49b,
+ 0x000124db,0x000136e4,0x0001b725,0x0001b724},
+ {0x00000000,0x00000000,0x0000924a,0x0000a49b,
+ 0x000126dc,0x000136e4,0x0001b725,0x0001b925},
+ {0x00000000,0x00000000,0x0000924a,0x0000a49b,
+ 0x000126dc,0x0001b724,0x0001b92d,0x0001c92d},
+ {0x00000000,0x00000000,0x00009252,0x000124db,
+ 0x000126dc,0x0001b724,0x0001b92d,0x0001c92d},
+ {0x00000000,0x00000000,0x00009292,0x000124db,
+ 0x000126dc,0x0001b925,0x0001c96e,0x0001c92d},
+ {0x00000000,0x00000000,0x00009492,0x000124db,
+ 0x000136e4,0x0001b925,0x0001c96e,0x0001c92d},
+ {0x00000000,0x00000000,0x00009492,0x000124db,
+ 0x000136e4,0x0001b925,0x00024b76,0x00024b77},
+ {0x00000000,0x00000000,0x00009492,0x000126db,
+ 0x000136e4,0x0001b925,0x00024b76,0x00025bbf},
+ {0x00000000,0x00000000,0x0000a492,0x000126db,
+ 0x000136e4,0x0001c92d,0x00024b76,0x00025bbf},
+ {0x00000000,0x00000000,0x00012492,0x000136db,
+ 0x0001b724,0x00024b6d,0x0002ddb6,0x0002efff},
+ {0x00000000,0x00000000,0x00000000,0x00000000,
+ 0x00000000,0x00000000,0x00000000,0x00000000}
+ },
+ { /* version 8, passes 1 */
+ {0x00000000,0x00000000,0x00001249,0x00001249,
+ 0x00009252,0x00009493,0x00009493,0x00009493},
+ {0x00000000,0x00000000,0x00001249,0x00009292,
+ 0x0000a493,0x0000a49b,0x0000a49b,0x00009493},
+ {0x00000000,0x00000000,0x0000924a,0x00009493,
+ 0x0000a493,0x0000a49b,0x000124db,0x00009493},
+ {0x00000000,0x00000000,0x0000924a,0x00009493,
+ 0x0000a493,0x000124db,0x000126dc,0x0000a49b},
+ {0x00000000,0x00000000,0x0000924a,0x0000a49b,
+ 0x0000a493,0x000124db,0x000126dc,0x0000a49b},
+ {0x00000000,0x00000000,0x0000924a,0x0000a49b,
+ 0x0000a493,0x000124db,0x000136e4,0x000124db},
+ {0x00000000,0x00000000,0x0000924a,0x0000a49b,
+ 0x0001249b,0x000126dc,0x000136e4,0x000124db},
+ {0x00000000,0x00000000,0x0000924a,0x0000a49b,
+ 0x000126dc,0x000126dc,0x000136e4,0x000126dc},
+ {0x00000000,0x00000000,0x0000924a,0x0000a49b,
+ 0x000126dc,0x000136e4,0x000136e4,0x000126dc},
+ {0x00000000,0x00000000,0x0000924a,0x000124db,
+ 0x000126dc,0x000136e4,0x000136e4,0x000126dc},
+ {0x00000000,0x00000000,0x0000924a,0x000124db,
+ 0x000126dc,0x000136e4,0x000136e4,0x000136e4},
+ {0x00000000,0x00000000,0x00009292,0x000124db,
+ 0x000136e4,0x0001b724,0x0001b725,0x000136e4},
+ {0x00000000,0x00000000,0x00009492,0x000126db,
+ 0x000136e4,0x0001b925,0x0001b725,0x0001b724},
+ {0x00000000,0x00000000,0x00009492,0x000126db,
+ 0x000136e4,0x0001b925,0x0001b725,0x0001b724},
+ {0x00000000,0x00000000,0x0000a492,0x000136db,
+ 0x0001b724,0x0002496d,0x0001b92d,0x0001b925},
+ {0x00000000,0x00000000,0x00000000,0x00000000,
+ 0x00000000,0x00000000,0x00000000,0x00000000}
+ }
+ },
+ { /* version 9 */
+ { /* version 9, passes 0 */
+ {0x00000000,0x00000000,0x00000049,0x00000049,
+ 0x00000049,0x00000049,0x00000049,0x00000049},
+ {0x00000000,0x00000000,0x00000249,0x00000049,
+ 0x00000249,0x00000249,0x0000024a,0x00000049},
+ {0x00000000,0x00000000,0x00000249,0x00000249,
+ 0x0000124a,0x00009252,0x00001252,0x0000024a},
+ {0x00000000,0x00000000,0x00001249,0x00001249,
+ 0x00009252,0x00009292,0x00009493,0x00001252},
+ {0x00000000,0x00000000,0x00001249,0x0000924a,
+ 0x00009292,0x00009493,0x00009493,0x00001252},
+ {0x00000000,0x00000000,0x00001249,0x00009292,
+ 0x00009492,0x0000a49b,0x0000a49b,0x00009292},
+ {0x00000000,0x00000000,0x00001249,0x00009493,
+ 0x0000a493,0x000124db,0x000124db,0x00009493},
+ {0x00000000,0x00000000,0x0000924a,0x00009493,
+ 0x0000a493,0x000124db,0x000126dc,0x0000a49b},
+ {0x00000000,0x00000000,0x0000924a,0x00009493,
+ 0x0000a493,0x000124db,0x000126dc,0x0000a49b},
+ {0x00000000,0x00000000,0x0000924a,0x00009493,
+ 0x0001249b,0x000126dc,0x000126dc,0x000124db},
+ {0x00000000,0x00000000,0x00009252,0x00009493,
+ 0x000124db,0x000136e4,0x000136e4,0x000126dc},
+ {0x00000000,0x00000000,0x00009252,0x0000a49b,
+ 0x000124db,0x000136e4,0x000136e4,0x000126dc},
+ {0x00000000,0x00000000,0x00009292,0x0000a49b,
+ 0x000126dc,0x000136e4,0x000136e4,0x000136e4},
+ {0x00000000,0x00000000,0x00009492,0x0000a49b,
+ 0x000126dc,0x0001b724,0x0001b725,0x0001b724},
+ {0x00000000,0x00000000,0x0000a492,0x000124db,
+ 0x000136e4,0x0001b925,0x0001b92d,0x0001b925},
+ {0x00000000,0x00000000,0x00000000,0x00000000,
+ 0x00000000,0x00000000,0x00000000,0x00000000}
+ },
+ { /* version 9, passes 1 */
+ {0x00000000,0x00000000,0x00000249,0x00000049,
+ 0x00000009,0x00000009,0x00000009,0x00000009},
+ {0x00000000,0x00000000,0x00000249,0x00000249,
+ 0x00000049,0x00000049,0x00000009,0x00000009},
+ {0x00000000,0x00000000,0x00001249,0x00001249,
+ 0x0000124a,0x00000249,0x00000049,0x00000049},
+ {0x00000000,0x00000000,0x00001249,0x00001249,
+ 0x0000124a,0x0000124a,0x00000049,0x00000049},
+ {0x00000000,0x00000000,0x00001249,0x00001249,
+ 0x00009252,0x0000124a,0x0000024a,0x0000024a},
+ {0x00000000,0x00000000,0x00001249,0x0000924a,
+ 0x00009252,0x0000124a,0x0000024a,0x0000024a},
+ {0x00000000,0x00000000,0x00001249,0x00009292,
+ 0x00009492,0x00009252,0x00001252,0x00001252},
+ {0x00000000,0x00000000,0x00001249,0x00009493,
+ 0x0000a493,0x00009292,0x00009292,0x00001252},
+ {0x00000000,0x00000000,0x0000924a,0x00009493,
+ 0x0000a493,0x00009292,0x00009292,0x00009292},
+ {0x00000000,0x00000000,0x0000924a,0x00009493,
+ 0x0000a493,0x00009493,0x00009493,0x00009292},
+ {0x00000000,0x00000000,0x0000924a,0x0000a49b,
+ 0x0000a493,0x0000a49b,0x00009493,0x00009493},
+ {0x00000000,0x00000000,0x0000924a,0x0000a49b,
+ 0x0000a493,0x0000a49b,0x0000a49b,0x00009493},
+ {0x00000000,0x00000000,0x0000924a,0x0000a49b,
+ 0x0001249b,0x000124db,0x0000a49b,0x0000a49b},
+ {0x00000000,0x00000000,0x0000924a,0x0000a49b,
+ 0x000136e4,0x000126dc,0x000124db,0x0000a49b},
+ {0x00000000,0x00000000,0x00009252,0x000124db,
+ 0x0001b724,0x000136e4,0x000126dc,0x000124db},
+ {0x00000000,0x00000000,0x00000000,0x00000000,
+ 0x00000000,0x00000000,0x00000000,0x00000000}
+ }
+ },
+ { /* version 10 */
+ { /* version 10, passes 0 */
+ {0x00000000,0x00000000,0x00000249,0x00000249,
+ 0x00000249,0x00000249,0x0000024a,0x0000024a},
+ {0x00000000,0x00000000,0x00000249,0x00001249,
+ 0x00009252,0x00009292,0x00009292,0x0000024a},
+ {0x00000000,0x00000000,0x00001249,0x00001249,
+ 0x00009252,0x00009292,0x00009292,0x00001252},
+ {0x00000000,0x00000000,0x00001249,0x0000924a,
+ 0x00009492,0x00009493,0x0000a49b,0x00009292},
+ {0x00000000,0x00000000,0x00001249,0x00009292,
+ 0x00009492,0x000124db,0x000124db,0x00009292},
+ {0x00000000,0x00000000,0x00001249,0x00009493,
+ 0x0000a493,0x000124db,0x000124db,0x00009493},
+ {0x00000000,0x00000000,0x00001249,0x00009493,
+ 0x0000a493,0x000124db,0x000126dc,0x0000a49b},
+ {0x00000000,0x00000000,0x0000924a,0x00009493,
+ 0x0000a493,0x000124db,0x000126dc,0x000124db},
+ {0x00000000,0x00000000,0x0000924a,0x00009493,
+ 0x0001249b,0x000126dc,0x000126dc,0x000124db},
+ {0x00000000,0x00000000,0x0000924a,0x0000a49b,
+ 0x000124db,0x000126dc,0x000136e4,0x000126dc},
+ {0x00000000,0x00000000,0x00009252,0x0000a49b,
+ 0x000124db,0x000136e4,0x000136e4,0x000136e4},
+ {0x00000000,0x00000000,0x00009292,0x0000a49b,
+ 0x000126dc,0x000136e4,0x000136e4,0x000136e4},
+ {0x00000000,0x00000000,0x00009492,0x0000a49b,
+ 0x000126dc,0x0001b724,0x0001b92d,0x0001b724},
+ {0x00000000,0x00000000,0x00009492,0x000124db,
+ 0x000126dc,0x0001b925,0x0001b92d,0x0001b925},
+ {0x00000000,0x00000000,0x0000a492,0x000126db,
+ 0x000136e4,0x0002496d,0x0001c96e,0x0001c92d},
+ {0x00000000,0x00000000,0x00000000,0x00000000,
+ 0x00000000,0x00000000,0x00000000,0x00000000}
+ },
+ { /* version 10, passes 1 */
+ {0x00000000,0x00000000,0x00000249,0x00000249,
+ 0x00000049,0x00000049,0x00000049,0x00000049},
+ {0x00000000,0x00000000,0x00001249,0x00001249,
+ 0x0000124a,0x00000249,0x00000049,0x00000049},
+ {0x00000000,0x00000000,0x00001249,0x00001249,
+ 0x0000124a,0x00009252,0x0000024a,0x00000049},
+ {0x00000000,0x00000000,0x00001249,0x00001249,
+ 0x00009252,0x00009493,0x0000024a,0x0000024a},
+ {0x00000000,0x00000000,0x00001249,0x00009252,
+ 0x00009492,0x00009493,0x00001252,0x0000024a},
+ {0x00000000,0x00000000,0x00001249,0x00009292,
+ 0x00009492,0x00009493,0x00001252,0x00001252},
+ {0x00000000,0x00000000,0x0000924a,0x00009493,
+ 0x00009492,0x00009493,0x00009292,0x00001252},
+ {0x00000000,0x00000000,0x0000924a,0x00009493,
+ 0x0000a493,0x00009493,0x00009292,0x00009292},
+ {0x00000000,0x00000000,0x0000924a,0x00009493,
+ 0x0000a493,0x0000a49b,0x00009493,0x00009292},
+ {0x00000000,0x00000000,0x0000924a,0x00009493,
+ 0x0000a493,0x0000a49b,0x00009493,0x00009292},
+ {0x00000000,0x00000000,0x0000924a,0x0000a49b,
+ 0x0000a493,0x000124db,0x0000a49b,0x00009493},
+ {0x00000000,0x00000000,0x0000924a,0x0000a49b,
+ 0x0000a493,0x000124db,0x0000a49b,0x00009493},
+ {0x00000000,0x00000000,0x0000924a,0x000124db,
+ 0x000136e4,0x000126dc,0x000124db,0x0000a49b},
+ {0x00000000,0x00000000,0x0000924a,0x000124db,
+ 0x000136e4,0x000126dc,0x000124db,0x0000a49b},
+ {0x00000000,0x00000000,0x00009252,0x000126db,
+ 0x0001b724,0x000136e4,0x000126dc,0x000124db},
+ {0x00000000,0x00000000,0x00000000,0x00000000,
+ 0x00000000,0x00000000,0x00000000,0x00000000}
+ }
+ },
+ { /* version 11 */
+ { /* version 11, passes 0 */
+ {0x00000000,0x00000000,0x00000249,0x00000249,
+ 0x00000249,0x00000249,0x00001252,0x00001252},
+ {0x00000000,0x00000000,0x00001249,0x00001249,
+ 0x00009252,0x00009292,0x00009292,0x00001252},
+ {0x00000000,0x00000000,0x00001249,0x0000924a,
+ 0x00009492,0x0000a49b,0x0000a49b,0x00009292},
+ {0x00000000,0x00000000,0x00001249,0x00009493,
+ 0x0000a493,0x0000a49b,0x000124db,0x00009493},
+ {0x00000000,0x00000000,0x00001249,0x00009493,
+ 0x0000a493,0x000124db,0x000126dc,0x00009493},
+ {0x00000000,0x00000000,0x0000924a,0x00009493,
+ 0x0000a493,0x000126dc,0x000126dc,0x0000a49b},
+ {0x00000000,0x00000000,0x0000924a,0x0000a49b,
+ 0x0001249b,0x000126dc,0x000136e4,0x000124db},
+ {0x00000000,0x00000000,0x0000924a,0x0000a49b,
+ 0x000126dc,0x000136e4,0x000136e4,0x000126dc},
+ {0x00000000,0x00000000,0x00009292,0x0000a49b,
+ 0x000126dc,0x000136e4,0x000136e4,0x000126dc},
+ {0x00000000,0x00000000,0x00009292,0x0000a49b,
+ 0x000126dc,0x0001b724,0x0001b725,0x000136e4},
+ {0x00000000,0x00000000,0x00009292,0x0000a49b,
+ 0x000136e4,0x0001b724,0x0001b92d,0x0001b724},
+ {0x00000000,0x00000000,0x00009492,0x0000a49b,
+ 0x000136e4,0x0001b724,0x0001b92d,0x0001b724},
+ {0x00000000,0x00000000,0x00009492,0x000124db,
+ 0x000136e4,0x0001b925,0x0001c96e,0x0001b925},
+ {0x00000000,0x00000000,0x00009492,0x000124db,
+ 0x0001b724,0x0001b925,0x0001c96e,0x0001c92d},
+ {0x00000000,0x00000000,0x0000a492,0x000126db,
+ 0x0001c924,0x0002496d,0x00025bb6,0x00024b77},
+ {0x00000000,0x00000000,0x00000000,0x00000000,
+ 0x00000000,0x00000000,0x00000000,0x00000000}
+ },
+ { /* version 11, passes 1 */
+ {0x00000000,0x00000000,0x00001249,0x00000249,
+ 0x00000249,0x00000249,0x0000024a,0x0000024a},
+ {0x00000000,0x00000000,0x00001249,0x00001249,
+ 0x0000124a,0x0000124a,0x0000024a,0x0000024a},
+ {0x00000000,0x00000000,0x00001249,0x0000924a,
+ 0x00009252,0x00009252,0x0000024a,0x0000024a},
+ {0x00000000,0x00000000,0x00001249,0x00009292,
+ 0x00009492,0x0000a49b,0x00001252,0x00001252},
+ {0x00000000,0x00000000,0x0000924a,0x00009493,
+ 0x0000a493,0x0000a49b,0x00001252,0x00001252},
+ {0x00000000,0x00000000,0x0000924a,0x00009493,
+ 0x0000a493,0x0000a49b,0x00009292,0x00001252},
+ {0x00000000,0x00000000,0x0000924a,0x0000a49b,
+ 0x0000a493,0x0000a49b,0x00009292,0x00009292},
+ {0x00000000,0x00000000,0x0000924a,0x0000a49b,
+ 0x0000a493,0x0000a49b,0x00009493,0x00009292},
+ {0x00000000,0x00000000,0x0000924a,0x0000a49b,
+ 0x0001249b,0x000124db,0x00009493,0x00009292},
+ {0x00000000,0x00000000,0x0000924a,0x0000a49b,
+ 0x0001249b,0x000124db,0x00009493,0x00009493},
+ {0x00000000,0x00000000,0x0000924a,0x0000a49b,
+ 0x000124db,0x000124db,0x0000a49b,0x00009493},
+ {0x00000000,0x00000000,0x0000924a,0x000124db,
+ 0x000126dc,0x000126dc,0x0000a49b,0x00009493},
+ {0x00000000,0x00000000,0x0000924a,0x000124db,
+ 0x000136e4,0x000126dc,0x000124db,0x0000a49b},
+ {0x00000000,0x00000000,0x00009292,0x000124db,
+ 0x000136e4,0x000126dc,0x000124db,0x0000a49b},
+ {0x00000000,0x00000000,0x00009492,0x000126db,
+ 0x0001b724,0x000136e4,0x000126dc,0x000124db},
+ {0x00000000,0x00000000,0x00000000,0x00000000,
+ 0x00000000,0x00000000,0x00000000,0x00000000}
+ }
+ },
+ { /* version 12 */
+ { /* version 12, passes 0 */
+ {0x00000000,0x00000000,0x00001249,0x00001249,
+ 0x00009252,0x00009292,0x00009493,0x00009493},
+ {0x00000000,0x00000000,0x00001249,0x00009292,
+ 0x0000a493,0x0000a49b,0x0000a49b,0x00009493},
+ {0x00000000,0x00000000,0x00001249,0x00009493,
+ 0x0000a493,0x000124db,0x000124db,0x0000a49b},
+ {0x00000000,0x00000000,0x0000924a,0x00009493,
+ 0x0000a493,0x000126dc,0x000126dc,0x0000a49b},
+ {0x00000000,0x00000000,0x0000924a,0x0000a49b,
+ 0x0001249b,0x000126dc,0x000136e4,0x000124db},
+ {0x00000000,0x00000000,0x0000924a,0x0000a49b,
+ 0x000126dc,0x000136e4,0x000136e4,0x000126dc},
+ {0x00000000,0x00000000,0x00009292,0x0000a49b,
+ 0x000126dc,0x0001b724,0x0001b725,0x000126dc},
+ {0x00000000,0x00000000,0x00009292,0x0000a49b,
+ 0x000136e4,0x0001b724,0x0001b92d,0x000136e4},
+ {0x00000000,0x00000000,0x00009492,0x0000a49b,
+ 0x000136e4,0x0001b724,0x0001b92d,0x0001b724},
+ {0x00000000,0x00000000,0x00009492,0x000124db,
+ 0x000136e4,0x0001b724,0x0001b92d,0x0001b724},
+ {0x00000000,0x00000000,0x00009492,0x000124db,
+ 0x000136e4,0x0001b925,0x0001b92d,0x0001b925},
+ {0x00000000,0x00000000,0x00009492,0x000124db,
+ 0x0001b724,0x0001b925,0x0001c96e,0x0001c92d},
+ {0x00000000,0x00000000,0x0000a492,0x000124db,
+ 0x0001b724,0x0001c92d,0x0001c96e,0x0001c92d},
+ {0x00000000,0x00000000,0x0000a492,0x000124db,
+ 0x0001b724,0x0001c92d,0x00024b76,0x0002496e},
+ {0x00000000,0x00000000,0x00012492,0x000126db,
+ 0x0001c924,0x00024b6d,0x0002ddb6,0x00025bbf},
+ {0x00000000,0x00000000,0x00000000,0x00000000,
+ 0x00000000,0x00000000,0x00000000,0x00000000}
+ },
+ { /* version 12, passes 1 */
+ {0x00000000,0x00000000,0x00001249,0x00001249,
+ 0x0000124a,0x0000124a,0x00001252,0x00001252},
+ {0x00000000,0x00000000,0x00001249,0x00009292,
+ 0x00009492,0x00009252,0x00001252,0x00001252},
+ {0x00000000,0x00000000,0x0000924a,0x00009493,
+ 0x0000a493,0x00009292,0x00001252,0x00001252},
+ {0x00000000,0x00000000,0x0000924a,0x0000a49b,
+ 0x0000a493,0x0000a49b,0x00009292,0x00009292},
+ {0x00000000,0x00000000,0x0000924a,0x0000a49b,
+ 0x0000a493,0x0000a49b,0x00009292,0x00009292},
+ {0x00000000,0x00000000,0x0000924a,0x0000a49b,
+ 0x0001249b,0x0000a49b,0x00009493,0x00009292},
+ {0x00000000,0x00000000,0x0000924a,0x0000a49b,
+ 0x000124db,0x000124db,0x00009493,0x00009493},
+ {0x00000000,0x00000000,0x0000924a,0x0000a49b,
+ 0x000124db,0x000124db,0x0000a49b,0x00009493},
+ {0x00000000,0x00000000,0x0000924a,0x000124db,
+ 0x000126dc,0x000124db,0x0000a49b,0x00009493},
+ {0x00000000,0x00000000,0x0000924a,0x000124db,
+ 0x000126dc,0x000126dc,0x0000a49b,0x0000a49b},
+ {0x00000000,0x00000000,0x0000924a,0x000124db,
+ 0x000136e4,0x000126dc,0x000124db,0x0000a49b},
+ {0x00000000,0x00000000,0x00009492,0x000126db,
+ 0x000136e4,0x000126dc,0x000124db,0x0000a49b},
+ {0x00000000,0x00000000,0x00009492,0x000126db,
+ 0x0001b724,0x000136e4,0x000126dc,0x000124db},
+ {0x00000000,0x00000000,0x00009492,0x000126db,
+ 0x0001b724,0x000136e4,0x000126dc,0x000124db},
+ {0x00000000,0x00000000,0x0000a492,0x000136db,
+ 0x0001c924,0x0001b724,0x000136e4,0x000126dc},
+ {0x00000000,0x00000000,0x00000000,0x00000000,
+ 0x00000000,0x00000000,0x00000000,0x00000000}
+ }
+ },
+ { /* version 13 */
+ { /* version 13, passes 0 */
+ {0x00000000,0x00000000,0x00001249,0x00001249,
+ 0x00009252,0x00009292,0x00009493,0x00009493},
+ {0x00000000,0x00000000,0x00001249,0x00009493,
+ 0x0000a493,0x000124db,0x000126dc,0x00009493},
+ {0x00000000,0x00000000,0x00001249,0x0000a49b,
+ 0x0001249b,0x000126dc,0x000126dc,0x0000a49b},
+ {0x00000000,0x00000000,0x0000924a,0x0000a49b,
+ 0x0001249b,0x000126dc,0x000136e4,0x0000a49b},
+ {0x00000000,0x00000000,0x0000924a,0x0000a49b,
+ 0x000126dc,0x000136e4,0x0001b725,0x000124db},
+ {0x00000000,0x00000000,0x00009292,0x0000a49b,
+ 0x000136e4,0x0001b724,0x0001b725,0x000126dc},
+ {0x00000000,0x00000000,0x00009292,0x000124db,
+ 0x000136e4,0x0001b724,0x0001b725,0x000126dc},
+ {0x00000000,0x00000000,0x00009492,0x000124db,
+ 0x000136e4,0x0001b724,0x0001c96e,0x000136e4},
+ {0x00000000,0x00000000,0x00009492,0x000124db,
+ 0x000136e4,0x0001c92d,0x0001c96e,0x0001b724},
+ {0x00000000,0x00000000,0x0000a492,0x000124db,
+ 0x000136e4,0x0001c92d,0x0001c96e,0x0001b724},
+ {0x00000000,0x00000000,0x0000a492,0x000124db,
+ 0x0001b724,0x0001c92d,0x0001c96e,0x0001b925},
+ {0x00000000,0x00000000,0x0000a492,0x000126db,
+ 0x0001b724,0x0001c92d,0x00024b76,0x0001c92d},
+ {0x00000000,0x00000000,0x0000a492,0x000126db,
+ 0x0001b924,0x0001c92d,0x00024b76,0x0001c92d},
+ {0x00000000,0x00000000,0x0000a492,0x000126db,
+ 0x0001b924,0x0001c92d,0x00024b76,0x0002496e},
+ {0x00000000,0x00000000,0x00012492,0x000136db,
+ 0x00024924,0x00024b6d,0x0002ddb6,0x00025bbf},
+ {0x00000000,0x00000000,0x00000000,0x00000000,
+ 0x00000000,0x00000000,0x00000000,0x00000000}
+ },
+ { /* version 13, passes 1 */
+ {0x00000000,0x00000000,0x00001249,0x00001249,
+ 0x0000124a,0x0000124a,0x00001252,0x00001252},
+ {0x00000000,0x00000000,0x0000924a,0x00009493,
+ 0x00009492,0x00009292,0x00001252,0x00001252},
+ {0x00000000,0x00000000,0x0000924a,0x0000a49b,
+ 0x0000a493,0x0000a49b,0x00001252,0x00001252},
+ {0x00000000,0x00000000,0x0000924a,0x0000a49b,
+ 0x0000a493,0x0000a49b,0x00009292,0x00009292},
+ {0x00000000,0x00000000,0x0000924a,0x0000a49b,
+ 0x0000a493,0x0000a49b,0x00009292,0x00009292},
+ {0x00000000,0x00000000,0x0000924a,0x0000a49b,
+ 0x000126dc,0x0000a49b,0x00009493,0x00009292},
+ {0x00000000,0x00000000,0x0000924a,0x000124db,
+ 0x000126dc,0x000124db,0x00009493,0x00009493},
+ {0x00000000,0x00000000,0x0000924a,0x000124db,
+ 0x000136e4,0x000124db,0x0000a49b,0x00009493},
+ {0x00000000,0x00000000,0x0000924a,0x000136db,
+ 0x0001b724,0x000124db,0x0000a49b,0x00009493},
+ {0x00000000,0x00000000,0x0000924a,0x000136db,
+ 0x0001b724,0x000126dc,0x0000a49b,0x0000a49b},
+ {0x00000000,0x00000000,0x00009292,0x000136db,
+ 0x0001b724,0x000126dc,0x000124db,0x0000a49b},
+ {0x00000000,0x00000000,0x00009492,0x000136db,
+ 0x0001b724,0x000126dc,0x000124db,0x0000a49b},
+ {0x00000000,0x00000000,0x0000a492,0x000136db,
+ 0x0001b724,0x000136e4,0x000126dc,0x000124db},
+ {0x00000000,0x00000000,0x0000a492,0x000136db,
+ 0x0001b724,0x000136e4,0x000126dc,0x000124db},
+ {0x00000000,0x00000000,0x00012492,0x0001b6db,
+ 0x0001c924,0x0001b724,0x000136e4,0x000126dc},
+ {0x00000000,0x00000000,0x00000000,0x00000000,
+ 0x00000000,0x00000000,0x00000000,0x00000000}
+ }
+ },
+ { /* version 14 */
+ { /* version 14, passes 0 */
+ {0x00000000,0x00000000,0x00001249,0x0000924a,
+ 0x00009292,0x00009493,0x00009493,0x00009493},
+ {0x00000000,0x00000000,0x00001249,0x0000a49b,
+ 0x0000a493,0x000124db,0x000126dc,0x00009493},
+ {0x00000000,0x00000000,0x0000924a,0x0000a49b,
+ 0x0001249b,0x000126dc,0x000136e4,0x0000a49b},
+ {0x00000000,0x00000000,0x0000924a,0x000124db,
+ 0x000126dc,0x000136e4,0x0001b725,0x000124db},
+ {0x00000000,0x00000000,0x00009292,0x000124db,
+ 0x000126dc,0x0001b724,0x0001b92d,0x000126dc},
+ {0x00000000,0x00000000,0x00009492,0x000124db,
+ 0x000136e4,0x0001b724,0x0001b92d,0x000126dc},
+ {0x00000000,0x00000000,0x00009492,0x000124db,
+ 0x000136e4,0x0001c92d,0x0001c96e,0x000136e4},
+ {0x00000000,0x00000000,0x00009492,0x000124db,
+ 0x0001b724,0x0001c92d,0x0001c96e,0x0001b724},
+ {0x00000000,0x00000000,0x0000a492,0x000124db,
+ 0x0001b724,0x0001c92d,0x00024b76,0x0001b925},
+ {0x00000000,0x00000000,0x0000a492,0x000126db,
+ 0x0001b724,0x0001c92d,0x00024b76,0x0001c92d},
+ {0x00000000,0x00000000,0x0000a492,0x000126db,
+ 0x0001b724,0x0001c92d,0x00024b76,0x0001c92d},
+ {0x00000000,0x00000000,0x0000a492,0x000136db,
+ 0x0001b724,0x0001c92d,0x00024b76,0x0002496e},
+ {0x00000000,0x00000000,0x0000a492,0x000136db,
+ 0x0001b924,0x0002496d,0x00024b76,0x00024b77},
+ {0x00000000,0x00000000,0x0000a492,0x000136db,
+ 0x0001b924,0x00024b6d,0x0002ddb6,0x00025bbf},
+ {0x00000000,0x00000000,0x00012492,0x0001b6db,
+ 0x00024924,0x0002db6d,0x00036db6,0x0002efff},
+ {0x00000000,0x00000000,0x00000000,0x00000000,
+ 0x00000000,0x00000000,0x00000000,0x00000000}
+ },
+ { /* version 14, passes 1 */
+ {0x00000000,0x00000000,0x00001249,0x00001249,
+ 0x0000124a,0x0000124a,0x00001252,0x00001252},
+ {0x00000000,0x00000000,0x0000924a,0x00009493,
+ 0x0000a493,0x00009292,0x00001252,0x00001252},
+ {0x00000000,0x00000000,0x0000924a,0x0000a49b,
+ 0x0000a493,0x0000a49b,0x00001252,0x00001252},
+ {0x00000000,0x00000000,0x0000924a,0x0000a49b,
+ 0x0001249b,0x000136e4,0x00009292,0x00009292},
+ {0x00000000,0x00000000,0x0000924a,0x0000a49b,
+ 0x0001249b,0x000136e4,0x00009292,0x00009292},
+ {0x00000000,0x00000000,0x0000924a,0x000124db,
+ 0x000136e4,0x000136e4,0x00009493,0x00009292},
+ {0x00000000,0x00000000,0x00009492,0x000136db,
+ 0x0001b724,0x000136e4,0x00009493,0x00009493},
+ {0x00000000,0x00000000,0x00009492,0x000136db,
+ 0x0001b724,0x000136e4,0x0000a49b,0x00009493},
+ {0x00000000,0x00000000,0x00009492,0x000136db,
+ 0x0001b724,0x000136e4,0x0000a49b,0x00009493},
+ {0x00000000,0x00000000,0x00009492,0x000136db,
+ 0x0001b724,0x000136e4,0x0000a49b,0x0000a49b},
+ {0x00000000,0x00000000,0x0000a492,0x000136db,
+ 0x0001b724,0x000136e4,0x000124db,0x0000a49b},
+ {0x00000000,0x00000000,0x0000a492,0x000136db,
+ 0x0001b724,0x000136e4,0x000124db,0x0000a49b},
+ {0x00000000,0x00000000,0x0000a492,0x000136db,
+ 0x0001b724,0x000136e4,0x000126dc,0x000124db},
+ {0x00000000,0x00000000,0x0000a492,0x000136db,
+ 0x0001b724,0x000136e4,0x000126dc,0x000124db},
+ {0x00000000,0x00000000,0x00012492,0x0001b6db,
+ 0x0001c924,0x0001b724,0x000136e4,0x000126dc},
+ {0x00000000,0x00000000,0x00000000,0x00000000,
+ 0x00000000,0x00000000,0x00000000,0x00000000}
+ }
+ },
+ { /* version 15 */
+ { /* version 15, passes 0 */
+ {0x00000000,0x00000000,0x00001249,0x00009493,
+ 0x0000a493,0x0000a49b,0x000124db,0x000124db},
+ {0x00000000,0x00000000,0x0000924a,0x0000a49b,
+ 0x0001249b,0x000126dc,0x000136e4,0x000124db},
+ {0x00000000,0x00000000,0x0000924a,0x0000a49b,
+ 0x000126dc,0x0001b724,0x0001b725,0x000126dc},
+ {0x00000000,0x00000000,0x0000924a,0x000124db,
+ 0x000136e4,0x0001b724,0x0001b92d,0x000126dc},
+ {0x00000000,0x00000000,0x00009492,0x000124db,
+ 0x000136e4,0x0001b925,0x0001c96e,0x000136e4},
+ {0x00000000,0x00000000,0x00009492,0x000124db,
+ 0x0001b724,0x0001c92d,0x0001c96e,0x0001b724},
+ {0x00000000,0x00000000,0x0000a492,0x000124db,
+ 0x0001b724,0x0001c92d,0x0001c96e,0x0001b724},
+ {0x00000000,0x00000000,0x0000a492,0x000126db,
+ 0x0001b724,0x0001c92d,0x0001c96e,0x0001b925},
+ {0x00000000,0x00000000,0x0000a492,0x000126db,
+ 0x0001b924,0x0001c92d,0x00024b76,0x0001c92d},
+ {0x00000000,0x00000000,0x0000a492,0x000136db,
+ 0x0001b924,0x0001c92d,0x00024b76,0x0001c92d},
+ {0x00000000,0x00000000,0x0000a492,0x000136db,
+ 0x0001b924,0x0002496d,0x00024b76,0x0002496e},
+ {0x00000000,0x00000000,0x0000a492,0x000136db,
+ 0x0001c924,0x0002496d,0x00025bb6,0x00024b77},
+ {0x00000000,0x00000000,0x0000a492,0x000136db,
+ 0x0001c924,0x00024b6d,0x00025bb6,0x00024b77},
+ {0x00000000,0x00000000,0x00012492,0x000136db,
+ 0x0001c924,0x00024b6d,0x0002ddb6,0x00025bbf},
+ {0x00000000,0x00000000,0x00012492,0x0001b6db,
+ 0x00024924,0x0002db6d,0x00036db6,0x0002efff},
+ {0x00000000,0x00000000,0x00000000,0x00000000,
+ 0x00000000,0x00000000,0x00000000,0x00000000}
+ },
+ { /* version 15, passes 1 */
+ {0x00000000,0x00000000,0x0000924a,0x0000924a,
+ 0x00009292,0x00009292,0x00009292,0x00009292},
+ {0x00000000,0x00000000,0x0000924a,0x0000a49b,
+ 0x0000a493,0x000124db,0x00009292,0x00009292},
+ {0x00000000,0x00000000,0x0000924a,0x000124db,
+ 0x000124db,0x0001b724,0x00009493,0x00009493},
+ {0x00000000,0x00000000,0x0000924a,0x000124db,
+ 0x000126dc,0x0001b724,0x00009493,0x00009493},
+ {0x00000000,0x00000000,0x0000924a,0x000124db,
+ 0x000136e4,0x0001b724,0x0000a49b,0x0000a49b},
+ {0x00000000,0x00000000,0x00009292,0x000136db,
+ 0x0001b724,0x0001b724,0x0000a49b,0x0000a49b},
+ {0x00000000,0x00000000,0x00009492,0x000136db,
+ 0x0001c924,0x0001b724,0x000124db,0x000124db},
+ {0x00000000,0x00000000,0x00009492,0x000136db,
+ 0x0001c924,0x0001b724,0x000124db,0x000124db},
+ {0x00000000,0x00000000,0x0000a492,0x000136db,
+ 0x0001c924,0x0001b724,0x000126dc,0x000126dc},
+ {0x00000000,0x00000000,0x0000a492,0x000136db,
+ 0x0001c924,0x0001b925,0x000126dc,0x000126dc},
+ {0x00000000,0x00000000,0x0000a492,0x000136db,
+ 0x0001c924,0x0001b925,0x000136e4,0x000136e4},
+ {0x00000000,0x00000000,0x0000a492,0x000136db,
+ 0x0001c924,0x0001b925,0x000136e4,0x000136e4},
+ {0x00000000,0x00000000,0x0000a492,0x000136db,
+ 0x0001c924,0x0001b925,0x0001b725,0x0001b724},
+ {0x00000000,0x00000000,0x00012492,0x000136db,
+ 0x0001c924,0x0001b925,0x0001b725,0x0001b724},
+ {0x00000000,0x00000000,0x00012492,0x0001b6db,
+ 0x00024924,0x0002496d,0x0001b92d,0x0001b925},
+ {0x00000000,0x00000000,0x00000000,0x00000000,
+ 0x00000000,0x00000000,0x00000000,0x00000000}
+ }
+ }
+};
--- /dev/null
+/* Linux driver for Philips webcam
+ Decompression frontend.
+ (C) 1999-2003 Nemosoft Unv.
+ (C) 2004 Luc Saillard (luc@saillard.org)
+
+ NOTE: this version of pwc is an unofficial (modified) release of pwc & pcwx
+ driver and thus may have bugs that are not present in the original version.
+ Please send bug reports and support requests to <luc@saillard.org>.
+ The decompression routines have been implemented by reverse-engineering the
+ Nemosoft binary pwcx module. Caveat emptor.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*/
+
+#include <asm/current.h>
+#include <asm/types.h>
+
+#include "pwc.h"
+#include "pwc-uncompress.h"
+#include "pwc-dec1.h"
+#include "pwc-dec23.h"
+
+/* Expand the frame currently held in pdev->read_frame into the image
+ * buffer selected by pdev->fill_image.
+ *
+ * Three paths, selected by device state:
+ *  - raw palette: straight memcpy of the frame payload;
+ *  - vbandlength == 0 (uncompressed): byte-shuffle the camera's packed
+ *    native format into YUV420P, centered in the viewport;
+ *  - otherwise (compressed): hand off to the pwcx decompressor routines.
+ *
+ * Returns 0 on success, -EFAULT when a required pointer (device, frame,
+ * image buffer) is missing, -ENXIO when no decompressor exists for the
+ * requested mode or camera type.
+ */
+int pwc_decompress(struct pwc_device *pdev)
+{
+ struct pwc_frame_buf *fbuf;
+ int n, line, col, stride;
+ void *yuv, *image;
+ u16 *src;
+ u16 *dsty, *dstu, *dstv;
+
+ if (pdev == NULL)
+ return -EFAULT;
+#if defined(__KERNEL__) && defined(PWC_MAGIC)
+ /* Optional pointer-corruption check; compiled in only when PWC_MAGIC
+    is defined (see pwc.h, where it is currently #undef'd). */
+ if (pdev->magic != PWC_MAGIC) {
+ Err("pwc_decompress(): magic failed.\n");
+ return -EFAULT;
+ }
+#endif
+
+ fbuf = pdev->read_frame;
+ if (fbuf == NULL)
+ return -EFAULT;
+ image = pdev->image_ptr[pdev->fill_image];
+ if (!image)
+ return -EFAULT;
+
+ yuv = fbuf->data + pdev->frame_header_size; /* Skip header */
+
+ /* Raw format; that's easy... */
+ if (pdev->vpalette == VIDEO_PALETTE_RAW)
+ {
+ memcpy(image, yuv, pdev->frame_size);
+ return 0;
+ }
+
+ if (pdev->vbandlength == 0) {
+ /* Uncompressed mode. We copy the data into the output buffer,
+ using the viewport size (which may be larger than the image
+ size). Unfortunately we have to do a bit of byte stuffing
+ to get the desired output format/size.
+ */
+ /*
+ * We do some byte shuffling here to go from the
+ * native format to YUV420P.
+ */
+ src = (u16 *)yuv;
+ n = pdev->view.x * pdev->view.y;
+
+ /* offset in Y plane */
+ stride = pdev->view.x * pdev->offset.y + pdev->offset.x;
+ dsty = (u16 *)(image + stride);
+
+ /* offsets in U/V planes */
+ stride = pdev->view.x * pdev->offset.y / 4 + pdev->offset.x / 2;
+ dstu = (u16 *)(image + n + stride);
+ dstv = (u16 *)(image + n + n / 4 + stride);
+
+ /* increment after each line */
+ stride = (pdev->view.x - pdev->image.x) / 2; /* u16 is 2 bytes */
+
+ /* Per 4 source pixels: two u16 of Y, then one u16 of chroma
+    (V on odd lines, U on even lines). NOTE(review): presumably
+    this matches the camera's native 4:2:0-style packing —
+    confirm against the pwcx format documentation. */
+ for (line = 0; line < pdev->image.y; line++) {
+ for (col = 0; col < pdev->image.x; col += 4) {
+ *dsty++ = *src++;
+ *dsty++ = *src++;
+ if (line & 1)
+ *dstv++ = *src++;
+ else
+ *dstu++ = *src++;
+ }
+ dsty += stride;
+ /* Chroma planes are half-width, so advance by half the pad. */
+ if (line & 1)
+ dstv += (stride >> 1);
+ else
+ dstu += (stride >> 1);
+ }
+ }
+ else {
+ /* Compressed; the decompressor routines will write the data
+ in planar format immediately.
+ */
+ int flags;
+
+ flags = PWCX_FLAG_PLANAR;
+ if (pdev->vsize == PSZ_VGA && pdev->vframes == 5 && pdev->vsnapshot)
+ {
+ printk(KERN_ERR "pwc: Mode Bayer is not supported for now\n");
+ /* NOTE(review): this flag assignment is dead code — the function
+    returns unconditionally on the next line. Kept for when Bayer
+    support is implemented. */
+ flags |= PWCX_FLAG_BAYER;
+ return -ENXIO; /* No such device or address: missing decompressor */
+ }
+
+ switch (pdev->type)
+ {
+ case 675:
+ case 680:
+ case 690:
+ case 720:
+ case 730:
+ case 740:
+ case 750:
+ pwc_dec23_decompress(&pdev->image, &pdev->view, &pdev->offset,
+ yuv, image,
+ flags,
+ pdev->decompress_data, pdev->vbandlength);
+ break;
+ case 645:
+ case 646:
+ /* TODO & FIXME */
+ return -ENXIO; /* No such device or address: missing decompressor */
+ break;
+ }
+ }
+ return 0;
+}
+
+
--- /dev/null
+/* (C) 1999-2003 Nemosoft Unv.
+ (C) 2004 Luc Saillard (luc@saillard.org)
+
+ NOTE: this version of pwc is an unofficial (modified) release of pwc & pcwx
+ driver and thus may have bugs that are not present in the original version.
+ Please send bug reports and support requests to <luc@saillard.org>.
+ The decompression routines have been implemented by reverse-engineering the
+ Nemosoft binary pwcx module. Caveat emptor.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*/
+
+#ifndef PWC_H
+#define PWC_H
+
+#include <linux/version.h>
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/usb.h>
+#include <linux/spinlock.h>
+#include <linux/videodev.h>
+#include <linux/wait.h>
+#include <linux/smp_lock.h>
+#include <asm/semaphore.h>
+#include <asm/errno.h>
+
+#include "pwc-uncompress.h"
+#include "pwc-ioctl.h"
+
+/* Defines and structures for the Philips webcam */
+/* Used for checking memory corruption/pointer validation */
+#define PWC_MAGIC 0x89DC10ABUL
+#undef PWC_MAGIC
+
+/* Turn some debugging options on/off */
+#define PWC_DEBUG 0
+
+/* Trace certain actions in the driver */
+#define TRACE_MODULE 0x0001
+#define TRACE_PROBE 0x0002
+#define TRACE_OPEN 0x0004
+#define TRACE_READ 0x0008
+#define TRACE_MEMORY 0x0010
+#define TRACE_FLOW 0x0020
+#define TRACE_SIZE 0x0040
+#define TRACE_PWCX 0x0080
+#define TRACE_SEQUENCE 0x1000
+
+#define Trace(R, A...) if (pwc_trace & R) printk(KERN_DEBUG PWC_NAME " " A)
+#define Debug(A...) printk(KERN_DEBUG PWC_NAME " " A)
+#define Info(A...) printk(KERN_INFO PWC_NAME " " A)
+#define Err(A...) printk(KERN_ERR PWC_NAME " " A)
+
+
+/* Defines for ToUCam cameras */
+#define TOUCAM_HEADER_SIZE 8
+#define TOUCAM_TRAILER_SIZE 4
+
+#define FEATURE_MOTOR_PANTILT 0x0001
+
+/* Version block */
+#define PWC_MAJOR 9
+#define PWC_MINOR 0
+#define PWC_VERSION "9.0.2-unofficial"
+#define PWC_NAME "pwc"
+
+/* Turn certain features on/off */
+#define PWC_INT_PIPE 0
+
+/* Ignore errors in the first N frames, to allow for startup delays */
+#define FRAME_LOWMARK 5
+
+/* Size and number of buffers for the ISO pipe. */
+#define MAX_ISO_BUFS 2
+#define ISO_FRAMES_PER_DESC 10
+#define ISO_MAX_FRAME_SIZE 960
+#define ISO_BUFFER_SIZE (ISO_FRAMES_PER_DESC * ISO_MAX_FRAME_SIZE)
+
+/* Frame buffers: contains compressed or uncompressed video data. */
+#define MAX_FRAMES 5
+/* Maximum size after decompression is 640x480 YUV data, 1.5 * 640 * 480 */
+#define PWC_FRAME_SIZE (460800 + TOUCAM_HEADER_SIZE + TOUCAM_TRAILER_SIZE)
+
+/* Absolute maximum number of buffers available for mmap() */
+#define MAX_IMAGES 10
+
+/* The following structures were based on cpia.h. Why reinvent the wheel? :-) */
+/* One isochronous transfer buffer (step 1 of the acquisition pipeline). */
+struct pwc_iso_buf
+{
+ void *data; /* ISO_BUFFER_SIZE bytes of raw USB data */
+ int length; /* bytes valid in data */
+ int read; /* bytes consumed so far */
+ struct urb *urb; /* the USB request block driving this buffer */
+};
+
+/* intermediate buffers with raw data from the USB cam */
+/* intermediate buffers with raw data from the USB cam */
+struct pwc_frame_buf
+{
+ void *data; /* frame payload (compressed or uncompressed) */
+ volatile int filled; /* number of bytes filled */
+ struct pwc_frame_buf *next; /* list */
+#if PWC_DEBUG
+ int sequence; /* Sequence number */
+#endif
+};
+
+/* Per-camera driver state: USB plumbing, video mode, and the buffers of
+ * the 4-step acquisition pipeline described below. */
+struct pwc_device
+{
+ struct video_device *vdev;
+#ifdef PWC_MAGIC
+ int magic; /* set to PWC_MAGIC; used to detect pointer corruption */
+#endif
+ /* Pointer to our usb_device */
+ struct usb_device *udev;
+
+ int type; /* type of cam (645, 646, 675, 680, 690, 720, 730, 740, 750) */
+ int release; /* release number */
+ int features; /* feature bits */
+ char serial[30]; /* serial number (string) */
+ int error_status; /* set when something goes wrong with the cam (unplugged, USB errors) */
+ int usb_init; /* set when the cam has been initialized over USB */
+
+ /*** Video data ***/
+ int vopen; /* flag */
+ int vendpoint; /* video isoc endpoint */
+ int vcinterface; /* video control interface */
+ int valternate; /* alternate interface needed */
+ int vframes, vsize; /* frames-per-second & size (see PSZ_*) */
+ int vpalette; /* palette: 420P, RAW or RGBBAYER */
+ int vframe_count; /* received frames */
+ int vframes_dumped; /* counter for dumped frames */
+ int vframes_error; /* frames received in error */
+ int vmax_packet_size; /* USB maxpacket size */
+ int vlast_packet_size; /* for frame synchronisation */
+ int visoc_errors; /* number of contiguous ISOC errors */
+ int vcompression; /* desired compression factor */
+ int vbandlength; /* compressed band length; 0 is uncompressed */
+ char vsnapshot; /* snapshot mode */
+ char vsync; /* used by isoc handler */
+ char vmirror; /* for ToUCaM series */
+
+ /* Scratch space for a pending camera control command. */
+ int cmd_len;
+ unsigned char cmd_buf[13];
+
+ /* The image acquisition requires 3 to 4 steps:
+ 1. data is gathered in short packets from the USB controller
+ 2. data is synchronized and packed into a frame buffer
+ 3a. in case data is compressed, decompress it directly into image buffer
+ 3b. in case data is uncompressed, copy into image buffer with viewport
+ 4. data is transferred to the user process
+
+ Note that MAX_ISO_BUFS != MAX_FRAMES != MAX_IMAGES....
+ We have in effect a back-to-back-double-buffer system.
+ */
+ /* 1: isoc */
+ struct pwc_iso_buf sbuf[MAX_ISO_BUFS];
+ char iso_init; /* set once the isoc pipe has been set up */
+
+ /* 2: frame */
+ struct pwc_frame_buf *fbuf; /* all frames */
+ struct pwc_frame_buf *empty_frames, *empty_frames_tail; /* all empty frames */
+ struct pwc_frame_buf *full_frames, *full_frames_tail; /* all filled frames */
+ struct pwc_frame_buf *fill_frame; /* frame currently being filled */
+ struct pwc_frame_buf *read_frame; /* frame currently read by user process */
+ int frame_header_size, frame_trailer_size; /* per-frame overhead in bytes */
+ int frame_size;
+ int frame_total_size; /* including header & trailer */
+ int drop_frames; /* number of frames still to be discarded (startup) */
+#if PWC_DEBUG
+ int sequence; /* Debugging aid */
+#endif
+
+ /* 3: decompression */
+ struct pwc_decompressor *decompressor; /* function block with decompression routines */
+ void *decompress_data; /* private data for decompression engine */
+
+ /* 4: image */
+ /* We have an 'image' and a 'view', where 'image' is the fixed-size image
+ as delivered by the camera, and 'view' is the size requested by the
+ program. The camera image is centered in this viewport, laced with
+ a gray or black border. view_min <= image <= view <= view_max;
+ */
+ int image_mask; /* bitmask of supported sizes */
+ struct pwc_coord view_min, view_max; /* minimum and maximum viewable sizes */
+ struct pwc_coord abs_max; /* maximum supported size with compression */
+ struct pwc_coord image, view; /* image and viewport size */
+ struct pwc_coord offset; /* offset within the viewport */
+
+ void *image_data; /* total buffer, which is subdivided into ... */
+ void *image_ptr[MAX_IMAGES]; /* ...several images... */
+ int fill_image; /* ...which are rotated. */
+ int len_per_image; /* length per image */
+ int image_read_pos; /* In case we read data in pieces, keep track of where we are in the image buffer */
+ int image_used[MAX_IMAGES]; /* For MCAPTURE and SYNC */
+
+ struct semaphore modlock; /* to prevent races in video_open(), etc */
+ spinlock_t ptrlock; /* for manipulating the buffer pointers */
+
+ /*** motorized pan/tilt feature */
+ struct pwc_mpt_range angle_range; /* supported pan/tilt range (see pwc-ioctl.h) */
+ int pan_angle; /* in degrees * 100 */
+ int tilt_angle; /* absolute angle; 0,0 is home position */
+
+ /*** Misc. data ***/
+ wait_queue_head_t frameq; /* When waiting for a frame to finish... */
+#if PWC_INT_PIPE
+ void *usb_int_handler; /* for the interrupt endpoint */
+#endif
+};
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Global variables */
+extern int pwc_trace;
+extern int pwc_preferred_compression;
+
+/** functions in pwc-if.c */
+int pwc_try_video_mode(struct pwc_device *pdev, int width, int height, int new_fps, int new_compression, int new_snapshot);
+
+/** Functions in pwc-misc.c */
+/* sizes in pixels */
+extern struct pwc_coord pwc_image_sizes[PSZ_MAX];
+
+int pwc_decode_size(struct pwc_device *pdev, int width, int height);
+void pwc_construct(struct pwc_device *pdev);
+
+/** Functions in pwc-ctrl.c */
+/* Request a certain video mode. Returns < 0 if not possible */
+extern int pwc_set_video_mode(struct pwc_device *pdev, int width, int height, int frames, int compression, int snapshot);
+/* Calculate the number of bytes per image (not frame) */
+extern void pwc_set_image_buffer_size(struct pwc_device *pdev);
+
+/* Various controls; should be obvious. Value 0..65535, or < 0 on error */
+extern int pwc_get_brightness(struct pwc_device *pdev);
+extern int pwc_set_brightness(struct pwc_device *pdev, int value);
+extern int pwc_get_contrast(struct pwc_device *pdev);
+extern int pwc_set_contrast(struct pwc_device *pdev, int value);
+extern int pwc_get_gamma(struct pwc_device *pdev);
+extern int pwc_set_gamma(struct pwc_device *pdev, int value);
+extern int pwc_get_saturation(struct pwc_device *pdev);
+extern int pwc_set_saturation(struct pwc_device *pdev, int value);
+extern int pwc_set_leds(struct pwc_device *pdev, int on_value, int off_value);
+extern int pwc_get_leds(struct pwc_device *pdev, int *on_value, int *off_value);
+extern int pwc_get_cmos_sensor(struct pwc_device *pdev, int *sensor);
+
+/* Power down or up the camera; not supported by all models */
+extern int pwc_camera_power(struct pwc_device *pdev, int power);
+
+/* Private ioctl()s; see pwc-ioctl.h */
+extern int pwc_ioctl(struct pwc_device *pdev, unsigned int cmd, void *arg);
+
+
+/** pwc-uncompress.c */
+/* Expand frame to image, possibly including decompression. Uses read_frame and fill_image */
+extern int pwc_decompress(struct pwc_device *pdev);
+
+#ifdef __cplusplus
+}
+#endif
+
+
+#endif
--- /dev/null
+/*
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 2001-2004 Silicon Graphics, Inc. All rights reserved.
+ */
+
+#ifndef _ASM_IA64_SN_SHUB_MMR_H
+#define _ASM_IA64_SN_SHUB_MMR_H
+
+/* ==================================================================== */
+/* Register "SH_IPI_INT" */
+/* SHub Inter-Processor Interrupt Registers */
+/* ==================================================================== */
+#define SH_IPI_INT 0x0000000110000380UL
+#define SH_IPI_INT_MASK 0x8ff3ffffffefffffUL
+#define SH_IPI_INT_INIT 0x0000000000000000UL
+
+/* SH_IPI_INT_TYPE */
+/* Description: Type of Interrupt: 0=INT, 2=PMI, 4=NMI, 5=INIT */
+#define SH_IPI_INT_TYPE_SHFT 0
+#define SH_IPI_INT_TYPE_MASK 0x0000000000000007UL
+
+/* SH_IPI_INT_AGT */
+/* Description: Agent, must be 0 for SHub */
+#define SH_IPI_INT_AGT_SHFT 3
+#define SH_IPI_INT_AGT_MASK 0x0000000000000008UL
+
+/* SH_IPI_INT_PID */
+/* Description: Processor ID, same setting as on targeted McKinley */
+#define SH_IPI_INT_PID_SHFT 4
+#define SH_IPI_INT_PID_MASK 0x00000000000ffff0UL
+
+/* SH_IPI_INT_BASE */
+/* Description: Optional interrupt vector area, 2MB aligned */
+#define SH_IPI_INT_BASE_SHFT 21
+#define SH_IPI_INT_BASE_MASK 0x0003ffffffe00000UL
+
+/* SH_IPI_INT_IDX */
+/* Description: Targeted McKinley interrupt vector */
+#define SH_IPI_INT_IDX_SHFT 52
+#define SH_IPI_INT_IDX_MASK 0x0ff0000000000000UL
+
+/* SH_IPI_INT_SEND */
+/* Description: Send Interrupt Message to PI; this generates a pulse */
+#define SH_IPI_INT_SEND_SHFT 63
+#define SH_IPI_INT_SEND_MASK 0x8000000000000000UL
+
+/* ==================================================================== */
+/* Register "SH_EVENT_OCCURRED" */
+/* SHub Interrupt Event Occurred */
+/* ==================================================================== */
+#define SH_EVENT_OCCURRED 0x0000000110010000UL
+#define SH_EVENT_OCCURRED_ALIAS 0x0000000110010008UL
+
+/* ==================================================================== */
+/* Register "SH_PI_CAM_CONTROL" */
+/* CRB CAM MMR Access Control */
+/* ==================================================================== */
+#ifndef __ASSEMBLY__
+#define SH_PI_CAM_CONTROL 0x0000000120050300UL
+#else
+#define SH_PI_CAM_CONTROL 0x0000000120050300
+#endif
+
+/* ==================================================================== */
+/* Register "SH_SHUB_ID" */
+/* SHub ID Number */
+/* ==================================================================== */
+#define SH_SHUB_ID 0x0000000110060580UL
+#define SH_SHUB_ID_REVISION_SHFT 28
+#define SH_SHUB_ID_REVISION_MASK 0x00000000f0000000
+
+/* ==================================================================== */
+/* Register "SH_PTC_0" */
+/* Purge Translation Cache Message Configuration Information */
+/* ==================================================================== */
+#define SH_PTC_0 0x00000001101a0000UL
+#define SH_PTC_1 0x00000001101a0080UL
+
+/* ==================================================================== */
+/* Register "SH_RTC" */
+/* Real-time Clock */
+/* ==================================================================== */
+#define SH_RTC 0x00000001101c0000UL
+#define SH_RTC_MASK 0x007fffffffffffffUL
+
+/* ==================================================================== */
+/* Register "SH_MEMORY_WRITE_STATUS_0|1" */
+/* Memory Write Status for CPU 0 & 1 */
+/* ==================================================================== */
+#define SH_MEMORY_WRITE_STATUS_0 0x0000000120070000UL
+#define SH_MEMORY_WRITE_STATUS_1 0x0000000120070080UL
+
+/* ==================================================================== */
+/* Register "SH_PIO_WRITE_STATUS_0|1" */
+/* PIO Write Status for CPU 0 & 1 */
+/* ==================================================================== */
+#ifndef __ASSEMBLY__
+#define SH_PIO_WRITE_STATUS_0 0x0000000120070200UL
+#define SH_PIO_WRITE_STATUS_1 0x0000000120070280UL
+
+/* SH_PIO_WRITE_STATUS_0_WRITE_DEADLOCK */
+/* Description: Deadlock response detected */
+#define SH_PIO_WRITE_STATUS_0_WRITE_DEADLOCK_SHFT 1
+#define SH_PIO_WRITE_STATUS_0_WRITE_DEADLOCK_MASK 0x0000000000000002
+
+/* SH_PIO_WRITE_STATUS_0_PENDING_WRITE_COUNT */
+/* Description: Count of currently pending PIO writes */
+#define SH_PIO_WRITE_STATUS_0_PENDING_WRITE_COUNT_SHFT 56
+#define SH_PIO_WRITE_STATUS_0_PENDING_WRITE_COUNT_MASK 0x3f00000000000000UL
+#else
+#define SH_PIO_WRITE_STATUS_0 0x0000000120070200
+#define SH_PIO_WRITE_STATUS_0_PENDING_WRITE_COUNT_SHFT 56
+#define SH_PIO_WRITE_STATUS_0_WRITE_DEADLOCK_SHFT 1
+#endif
+
+/* ==================================================================== */
+/* Register "SH_PIO_WRITE_STATUS_0_ALIAS" */
+/* ==================================================================== */
+#ifndef __ASSEMBLY__
+#define SH_PIO_WRITE_STATUS_0_ALIAS 0x0000000120070208UL
+#else
+#define SH_PIO_WRITE_STATUS_0_ALIAS 0x0000000120070208
+#endif
+
+/* ==================================================================== */
+/* Register "SH_EVENT_OCCURRED" */
+/* SHub Interrupt Event Occurred */
+/* ==================================================================== */
+/* SH_EVENT_OCCURRED_UART_INT */
+/* Description: Pending Junk Bus UART Interrupt */
+#define SH_EVENT_OCCURRED_UART_INT_SHFT 20
+#define SH_EVENT_OCCURRED_UART_INT_MASK 0x0000000000100000
+
+/* SH_EVENT_OCCURRED_IPI_INT */
+/* Description: Pending IPI Interrupt */
+#define SH_EVENT_OCCURRED_IPI_INT_SHFT 28
+#define SH_EVENT_OCCURRED_IPI_INT_MASK 0x0000000010000000
+
+/* SH_EVENT_OCCURRED_II_INT0 */
+/* Description: Pending II 0 Interrupt */
+#define SH_EVENT_OCCURRED_II_INT0_SHFT 29
+#define SH_EVENT_OCCURRED_II_INT0_MASK 0x0000000020000000
+
+/* SH_EVENT_OCCURRED_II_INT1 */
+/* Description: Pending II 1 Interrupt */
+#define SH_EVENT_OCCURRED_II_INT1_SHFT 30
+#define SH_EVENT_OCCURRED_II_INT1_MASK 0x0000000040000000
+
+/* ==================================================================== */
+/* Register "SH_PTC_0" */
+/* Purge Translation Cache Message Configuration Information */
+/* ==================================================================== */
+#define SH_PTC_0 0x00000001101a0000UL
+#define SH_PTC_0_MASK 0x80000000fffffffd
+#define SH_PTC_0_INIT 0x0000000000000000
+
+/* SH_PTC_0_A */
+/* Description: Type */
+#define SH_PTC_0_A_SHFT 0
+#define SH_PTC_0_A_MASK 0x0000000000000001
+
+/* SH_PTC_0_PS */
+/* Description: Page Size */
+#define SH_PTC_0_PS_SHFT 2
+#define SH_PTC_0_PS_MASK 0x00000000000000fc
+
+/* SH_PTC_0_RID */
+/* Description: Region ID */
+#define SH_PTC_0_RID_SHFT 8
+#define SH_PTC_0_RID_MASK 0x00000000ffffff00
+
+/* SH_PTC_0_START */
+/* Description: Start */
+#define SH_PTC_0_START_SHFT 63
+#define SH_PTC_0_START_MASK 0x8000000000000000
+
+/* ==================================================================== */
+/* Register "SH_PTC_1" */
+/* Purge Translation Cache Message Configuration Information */
+/* ==================================================================== */
+#define SH_PTC_1 0x00000001101a0080UL
+#define SH_PTC_1_MASK 0x9ffffffffffff000
+#define SH_PTC_1_INIT 0x0000000000000000
+
+/* SH_PTC_1_VPN */
+/* Description: Virtual page number */
+#define SH_PTC_1_VPN_SHFT 12
+#define SH_PTC_1_VPN_MASK 0x1ffffffffffff000
+
+/* SH_PTC_1_START */
+/* Description: PTC_1 Start */
+#define SH_PTC_1_START_SHFT 63
+#define SH_PTC_1_START_MASK 0x8000000000000000
+
+#endif /* _ASM_IA64_SN_SHUB_MMR_H */