1 /*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for *
3 * Enterprise Fibre Channel Host Bus Adapters. *
4 * Refer to the README file included with this package for *
5 * driver version and adapter support. *
6 * Copyright (C) 2004 Emulex Corporation. *
9 * This program is free software; you can redistribute it and/or *
10 * modify it under the terms of the GNU General Public License *
11 * as published by the Free Software Foundation; either version 2 *
12 * of the License, or (at your option) any later version. *
14 * This program is distributed in the hope that it will be useful, *
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
17 * GNU General Public License for more details, a copy of which *
18 * can be found in the file COPYING included with this package. *
19 *******************************************************************/
/*
* $Id: lpfc_fcp.c 1.431 2004/11/23 13:18:57EST sf_support Exp $
*/
25 #include <linux/version.h>
26 #include <linux/config.h>
27 #include <linux/init.h>
28 #include <linux/blkdev.h>
29 #include <linux/ctype.h>
30 #include <linux/kernel.h>
31 #include <linux/module.h>
32 #include <linux/moduleparam.h>
33 #include <linux/pci.h>
34 #include <linux/smp_lock.h>
35 #include <linux/spinlock.h>
36 #include <linux/timer.h>
37 #include <linux/utsname.h>
39 #include <asm/byteorder.h>
41 #include <scsi/scsi_device.h>
42 #include <scsi/scsi_host.h>
43 #include <scsi/scsi_cmnd.h>
44 #include <scsi/scsi_tcq.h>
45 #include <scsi/scsi_transport_fc.h>
48 #include "lpfc_disc.h"
49 #include "lpfc_scsi.h"
53 #include "lpfc_logmsg.h"
55 #include "lpfc_version.h"
56 #include "lpfc_compat.h"
57 #include "lpfc_crtn.h"
59 static char *lpfc_drvr_name = LPFC_DRIVER_NAME;
61 static struct scsi_transport_template *lpfc_transport_template = NULL;
63 static struct list_head lpfc_hba_list = LIST_HEAD_INIT(lpfc_hba_list);
66 lpfc_info(struct Scsi_Host *host)
68 struct lpfc_hba *phba = (struct lpfc_hba *) host->hostdata[0];
70 static char lpfcinfobuf[128];
72 memset(lpfcinfobuf,0,128);
73 if (phba && phba->pcidev){
74 lpfc_get_hba_model_desc(phba, NULL, lpfcinfobuf);
75 len = strlen(lpfcinfobuf);
76 snprintf(lpfcinfobuf + len,
78 " on PCI bus %02x device %02x irq %d",
79 phba->pcidev->bus->number,
87 lpfc_jedec_to_ascii(int incr, char hdw[])
90 for (i = 0; i < 8; i++) {
93 hdw[7 - i] = 0x30 + j;
95 hdw[7 - i] = 0x61 + j - 10;
103 lpfc_drvr_version_show(struct class_device *cdev, char *buf)
105 return snprintf(buf, PAGE_SIZE, LPFC_MODULE_DESC "\n");
109 management_version_show(struct class_device *cdev, char *buf)
111 return snprintf(buf, PAGE_SIZE, DFC_API_VERSION "\n");
115 lpfc_info_show(struct class_device *cdev, char *buf)
117 struct Scsi_Host *host = class_to_shost(cdev);
118 return snprintf(buf, PAGE_SIZE, "%s\n",lpfc_info(host));
122 lpfc_serialnum_show(struct class_device *cdev, char *buf)
124 struct Scsi_Host *host = class_to_shost(cdev);
125 struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata[0];
126 return snprintf(buf, PAGE_SIZE, "%s\n",phba->SerialNumber);
130 lpfc_fwrev_show(struct class_device *cdev, char *buf)
132 struct Scsi_Host *host = class_to_shost(cdev);
133 struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata[0];
135 lpfc_decode_firmware_rev(phba, fwrev, 1);
136 return snprintf(buf, PAGE_SIZE, "%s\n",fwrev);
140 lpfc_hdw_show(struct class_device *cdev, char *buf)
143 struct Scsi_Host *host = class_to_shost(cdev);
144 struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata[0];
145 lpfc_vpd_t *vp = &phba->vpd;
146 lpfc_jedec_to_ascii(vp->rev.biuRev, hdw);
147 return snprintf(buf, PAGE_SIZE, "%s\n", hdw);
150 lpfc_option_rom_version_show(struct class_device *cdev, char *buf)
152 struct Scsi_Host *host = class_to_shost(cdev);
153 struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata[0];
154 return snprintf(buf, PAGE_SIZE, "%s\n", phba->OptionROMVersion);
157 lpfc_state_show(struct class_device *cdev, char *buf)
159 struct Scsi_Host *host = class_to_shost(cdev);
160 struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata[0];
162 switch (phba->hba_state) {
163 case LPFC_INIT_START:
164 case LPFC_INIT_MBX_CMDS:
166 len += snprintf(buf + len, PAGE_SIZE-len, "Link Down\n");
169 case LPFC_LOCAL_CFG_LINK:
170 len += snprintf(buf + len, PAGE_SIZE-len, "Link Up\n");
173 case LPFC_FABRIC_CFG_LINK:
176 case LPFC_BUILD_DISC_LIST:
179 len += snprintf(buf + len, PAGE_SIZE-len,
180 "Link Up - Discovery\n");
183 len += snprintf(buf + len, PAGE_SIZE-len,
184 "Link Up - Ready:\n");
185 if (phba->fc_topology == TOPOLOGY_LOOP) {
186 if (phba->fc_flag & FC_PUBLIC_LOOP)
187 len += snprintf(buf + len, PAGE_SIZE-len,
190 len += snprintf(buf + len, PAGE_SIZE-len,
193 if (phba->fc_flag & FC_FABRIC)
194 len += snprintf(buf + len, PAGE_SIZE-len,
197 len += snprintf(buf + len, PAGE_SIZE-len,
205 lpfc_num_discovered_ports_show(struct class_device *cdev, char *buf)
207 struct Scsi_Host *host = class_to_shost(cdev);
208 struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata[0];
209 return snprintf(buf, PAGE_SIZE, "%d\n", phba->fc_map_cnt +
213 #ifndef FC_TRANS_VER2
/*
* These are replaced by Generic FC transport attributes
*/
218 lpfc_speed_show(struct class_device *cdev, char *buf)
220 struct Scsi_Host *host = class_to_shost(cdev);
221 struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata[0];
223 if (phba->fc_linkspeed == LA_4GHZ_LINK)
224 len += snprintf(buf + len, PAGE_SIZE-len, "4 Gigabit\n");
226 if (phba->fc_linkspeed == LA_2GHZ_LINK)
227 len += snprintf(buf + len, PAGE_SIZE-len, "2 Gigabit\n");
229 len += snprintf(buf + len, PAGE_SIZE-len, "1 Gigabit\n");
234 lpfc_node_name_show(struct class_device *cdev, char *buf)
236 struct Scsi_Host *host = class_to_shost(cdev);
237 struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata[0];
238 uint64_t node_name = 0;
239 memcpy (&node_name, &phba->fc_nodename, sizeof (struct lpfc_name));
240 return snprintf(buf, PAGE_SIZE, "0x%llx\n", be64_to_cpu(node_name));
243 lpfc_port_name_show(struct class_device *cdev, char *buf)
245 struct Scsi_Host *host = class_to_shost(cdev);
246 struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata[0];
247 uint64_t port_name = 0;
248 memcpy (&port_name, &phba->fc_portname, sizeof (struct lpfc_name));
249 return snprintf(buf, PAGE_SIZE, "0x%llx\n", be64_to_cpu(port_name));
252 lpfc_did_show(struct class_device *cdev, char *buf)
254 struct Scsi_Host *host = class_to_shost(cdev);
255 struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata[0];
256 return snprintf(buf, PAGE_SIZE, "0x%x\n", phba->fc_myDID);
260 lpfc_port_type_show(struct class_device *cdev, char *buf)
262 struct Scsi_Host *host = class_to_shost(cdev);
263 struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata[0];
267 if (phba->fc_topology == TOPOLOGY_LOOP) {
268 if (phba->fc_flag & FC_PUBLIC_LOOP)
269 retval = snprintf(buf, PAGE_SIZE, "NL_Port\n");
271 retval = snprintf(buf, PAGE_SIZE, "L_Port\n");
273 if (phba->fc_flag & FC_FABRIC)
274 retval = snprintf(buf, PAGE_SIZE, "N_Port\n");
276 retval = snprintf(buf, PAGE_SIZE,
277 "Point-to-Point N_Port\n");
284 lpfc_fabric_name_show(struct class_device *cdev, char *buf)
286 struct Scsi_Host *host = class_to_shost(cdev);
287 struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata[0];
288 uint64_t node_name = 0;
289 memcpy (&node_name, &phba->fc_nodename, sizeof (struct lpfc_name));
291 if ((phba->fc_flag & FC_FABRIC) ||
292 ((phba->fc_topology == TOPOLOGY_LOOP) &&
293 (phba->fc_flag & FC_PUBLIC_LOOP))) {
295 & phba->fc_fabparam.nodeName,
296 sizeof (struct lpfc_name));
299 return snprintf(buf, PAGE_SIZE, "0x%08llx\n", be64_to_cpu(node_name));
301 #endif /* not FC_TRANS_VER2 */
304 lpfc_events_show(struct class_device *cdev, char *buf)
306 struct Scsi_Host *host = class_to_shost(cdev);
307 struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata[0];
308 int i = 0, len = 0, get = phba->hba_event_put;
309 struct lpfc_hba_event *rec;
311 if (get == phba->hba_event_get)
312 return snprintf(buf, PAGE_SIZE, "None\n");
314 for (i = 0; i < MAX_HBAEVT; i++) {
318 rec = &phba->hbaevt[get];
319 switch (rec->fc_eventcode) {
321 len += snprintf(buf+len, PAGE_SIZE-len,
325 len += snprintf(buf+len, PAGE_SIZE-len,
328 case HBA_EVENT_LINK_UP:
329 len += snprintf(buf+len, PAGE_SIZE-len,
332 case HBA_EVENT_LINK_DOWN:
333 len += snprintf(buf+len, PAGE_SIZE-len,
337 len += snprintf(buf+len, PAGE_SIZE-len,
342 len += snprintf(buf+len, PAGE_SIZE-len, " %d,%d,%d,%d\n",
343 rec->fc_evdata1, rec->fc_evdata2,
344 rec->fc_evdata3, rec->fc_evdata4);
350 lpfc_issue_lip (struct class_device *cdev, const char *buf, size_t count)
352 struct Scsi_Host *host = class_to_shost(cdev);
353 struct lpfc_hba *phba = (struct lpfc_hba *) host->hostdata[0];
355 LPFC_MBOXQ_t *pmboxq;
356 int mbxstatus = MBXERR_ERROR;
358 if ((sscanf(buf, "%d", &val) != 1) ||
362 if ((phba->fc_flag & FC_OFFLINE_MODE) ||
363 (phba->hba_state != LPFC_HBA_READY))
366 pmboxq = mempool_alloc(phba->mbox_mem_pool,GFP_KERNEL);
371 memset((void *)pmboxq, 0, sizeof (LPFC_MBOXQ_t));
372 lpfc_init_link(phba, pmboxq, phba->cfg_topology, phba->cfg_link_speed);
373 mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
375 if (mbxstatus == MBX_TIMEOUT)
376 pmboxq->mbox_cmpl = NULL;
378 mempool_free( pmboxq, phba->mbox_mem_pool);
380 if (mbxstatus == MBXERR_ERROR)
387 lpfc_nport_evt_cnt_show(struct class_device *cdev, char *buf)
389 struct Scsi_Host *host = class_to_shost(cdev);
390 struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata[0];
391 return snprintf(buf, PAGE_SIZE, "%d\n", phba->nport_event_cnt);
395 lpfc_board_online_show(struct class_device *cdev, char *buf)
397 struct Scsi_Host *host = class_to_shost(cdev);
398 struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata[0];
402 if (phba->fc_flag & FC_OFFLINE_MODE)
403 return snprintf(buf, PAGE_SIZE, "0\n");
405 return snprintf(buf, PAGE_SIZE, "1\n");
409 lpfc_board_online_store(struct class_device *cdev, const char *buf,
412 struct Scsi_Host *host = class_to_shost(cdev);
413 struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata[0];
418 if (sscanf(buf, "%d", &val) != 1)
421 if (val && (phba->fc_flag & FC_OFFLINE_MODE)) {
424 else if (!val && !(phba->fc_flag & FC_OFFLINE_MODE)) {
432 lpfc_disc_ndlp_show(struct lpfc_hba * phba, struct lpfc_nodelist *ndlp,
433 char *buf, int offset)
435 int len = 0, pgsz = PAGE_SIZE;
436 uint8_t name[sizeof (struct lpfc_name)];
440 len += snprintf(buf + len, pgsz -len,
441 "DID %06x WWPN ", ndlp->nlp_DID);
443 /* A Fibre Channel node or port name is 8 octets
444 * long and delimited by colons.
446 memcpy (&name[0], &ndlp->nlp_portname,
447 sizeof (struct lpfc_name));
448 len += snprintf(buf + len, pgsz-len,
449 "%02x:%02x:%02x:%02x:%02x:%02x:"
451 name[0], name[1], name[2],
452 name[3], name[4], name[5],
455 len += snprintf(buf + len, pgsz-len,
457 memcpy (&name[0], &ndlp->nlp_nodename,
458 sizeof (struct lpfc_name));
459 len += snprintf(buf + len, pgsz-len,
460 "%02x:%02x:%02x:%02x:%02x:%02x:"
462 name[0], name[1], name[2],
463 name[3], name[4], name[5],
465 len += snprintf(buf + len, pgsz-len,
466 " INFO %02x:%08x:%02x:%02x:%02x:%02x:"
468 ndlp->nlp_state, ndlp->nlp_flag, ndlp->nlp_type,
469 ndlp->nlp_rpi, ndlp->nlp_sid, ndlp->nlp_failMask,
470 ndlp->nlp_retry, ndlp->nlp_disc_refcnt,
475 #define LPFC_MAX_SYS_DISC_ENTRIES 35
478 lpfc_disc_npr_show(struct class_device *cdev, char *buf)
480 struct Scsi_Host *host = class_to_shost(cdev);
481 struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata[0];
482 struct lpfc_nodelist *ndlp, *next_ndlp;
483 struct list_head *listp;
489 spin_lock_irqsave(phba->host->host_lock, iflag);
490 listp = &phba->fc_npr_list;
491 if (list_empty(listp)) {
492 spin_unlock_irqrestore(phba->host->host_lock, iflag);
493 return snprintf(buf, PAGE_SIZE, "NPR list: Empty\n");
496 len += snprintf(buf+len, PAGE_SIZE-len, "NPR list: %d Entries\n",
498 list_for_each_entry_safe(ndlp, next_ndlp, listp, nlp_listp) {
500 if(i > LPFC_MAX_SYS_DISC_ENTRIES) {
501 len += snprintf(buf+len, PAGE_SIZE-len,
502 "Missed %d entries - sysfs %ld limit exceeded\n",
503 (phba->fc_npr_cnt - i + 1), PAGE_SIZE);
506 if(len > (PAGE_SIZE-1)) /* double check */
508 len += lpfc_disc_ndlp_show(phba, ndlp, buf, len);
510 spin_unlock_irqrestore(phba->host->host_lock, iflag);
515 lpfc_disc_map_show(struct class_device *cdev, char *buf)
517 struct Scsi_Host *host = class_to_shost(cdev);
518 struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata[0];
519 struct lpfc_nodelist *ndlp, *next_ndlp;
520 struct list_head *listp;
526 spin_lock_irqsave(phba->host->host_lock, iflag);
527 listp = &phba->fc_nlpmap_list;
528 if (list_empty(listp)) {
529 spin_unlock_irqrestore(phba->host->host_lock, iflag);
530 return snprintf(buf, PAGE_SIZE, "Map list: Empty\n");
533 len += snprintf(buf+len, PAGE_SIZE-len, "Map list: %d Entries\n",
535 list_for_each_entry_safe(ndlp, next_ndlp, listp, nlp_listp) {
537 if(i > LPFC_MAX_SYS_DISC_ENTRIES) {
538 len += snprintf(buf+len, PAGE_SIZE-len,
539 "Missed %d entries - sysfs %ld limit exceeded\n",
540 (phba->fc_map_cnt - i + 1), PAGE_SIZE);
543 if(len > (PAGE_SIZE-1)) /* double check */
545 len += lpfc_disc_ndlp_show(phba, ndlp, buf, len);
547 spin_unlock_irqrestore(phba->host->host_lock, iflag);
552 lpfc_disc_unmap_show(struct class_device *cdev, char *buf)
554 struct Scsi_Host *host = class_to_shost(cdev);
555 struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata[0];
556 struct lpfc_nodelist *ndlp, *next_ndlp;
557 struct list_head *listp;
563 spin_lock_irqsave(phba->host->host_lock, iflag);
564 listp = &phba->fc_nlpunmap_list;
565 if (list_empty(listp)) {
566 spin_unlock_irqrestore(phba->host->host_lock, iflag);
567 return snprintf(buf, PAGE_SIZE, "Unmap list: Empty\n");
570 len += snprintf(buf+len, PAGE_SIZE-len, "Unmap list: %d Entries\n",
572 list_for_each_entry_safe(ndlp, next_ndlp, listp, nlp_listp) {
574 if(i > LPFC_MAX_SYS_DISC_ENTRIES) {
575 len += snprintf(buf+len, PAGE_SIZE-len,
576 "Missed %d entries - sysfs %ld limit exceeded\n",
577 (phba->fc_unmap_cnt - i + 1), PAGE_SIZE);
580 if(len > (PAGE_SIZE-1)) /* double check */
582 len += lpfc_disc_ndlp_show(phba, ndlp, buf, len);
584 spin_unlock_irqrestore(phba->host->host_lock, iflag);
589 lpfc_disc_prli_show(struct class_device *cdev, char *buf)
591 struct Scsi_Host *host = class_to_shost(cdev);
592 struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata[0];
593 struct lpfc_nodelist *ndlp, *next_ndlp;
594 struct list_head *listp;
600 spin_lock_irqsave(phba->host->host_lock, iflag);
601 listp = &phba->fc_prli_list;
602 if (list_empty(listp)) {
603 spin_unlock_irqrestore(phba->host->host_lock, iflag);
604 return snprintf(buf, PAGE_SIZE, "PRLI list: Empty\n");
607 len += snprintf(buf+len, PAGE_SIZE-len, "PRLI list: %d Entries\n",
609 list_for_each_entry_safe(ndlp, next_ndlp, listp, nlp_listp) {
611 if(i > LPFC_MAX_SYS_DISC_ENTRIES) {
612 len += snprintf(buf+len, PAGE_SIZE-len,
613 "Missed %d entries - sysfs %ld limit exceeded\n",
614 (phba->fc_prli_cnt - i + 1), PAGE_SIZE);
617 if(len > (PAGE_SIZE-1)) /* double check */
619 len += lpfc_disc_ndlp_show(phba, ndlp, buf, len);
621 spin_unlock_irqrestore(phba->host->host_lock, iflag);
626 lpfc_disc_reglgn_show(struct class_device *cdev, char *buf)
628 struct Scsi_Host *host = class_to_shost(cdev);
629 struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata[0];
630 struct lpfc_nodelist *ndlp, *next_ndlp;
631 struct list_head *listp;
637 spin_lock_irqsave(phba->host->host_lock, iflag);
638 listp = &phba->fc_reglogin_list;
639 if (list_empty(listp)) {
640 spin_unlock_irqrestore(phba->host->host_lock, iflag);
641 return snprintf(buf, PAGE_SIZE, "RegLgn list: Empty\n");
644 len += snprintf(buf+len, PAGE_SIZE-len, "RegLgn list: %d Entries\n",
645 phba->fc_reglogin_cnt);
646 list_for_each_entry_safe(ndlp, next_ndlp, listp, nlp_listp) {
648 if(i > LPFC_MAX_SYS_DISC_ENTRIES) {
649 len += snprintf(buf+len, PAGE_SIZE-len,
650 "Missed %d entries - sysfs %ld limit exceeded\n",
651 (phba->fc_reglogin_cnt - i + 1), PAGE_SIZE);
654 if(len > (PAGE_SIZE-1)) /* double check */
656 len += lpfc_disc_ndlp_show(phba, ndlp, buf, len);
658 spin_unlock_irqrestore(phba->host->host_lock, iflag);
663 lpfc_disc_adisc_show(struct class_device *cdev, char *buf)
665 struct Scsi_Host *host = class_to_shost(cdev);
666 struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata[0];
667 struct lpfc_nodelist *ndlp, *next_ndlp;
668 struct list_head *listp;
674 spin_lock_irqsave(phba->host->host_lock, iflag);
675 listp = &phba->fc_adisc_list;
676 if (list_empty(listp)) {
677 spin_unlock_irqrestore(phba->host->host_lock, iflag);
678 return snprintf(buf, PAGE_SIZE, "ADISC list: Empty\n");
681 len += snprintf(buf+len, PAGE_SIZE-len, "ADISC list: %d Entries\n",
683 list_for_each_entry_safe(ndlp, next_ndlp, listp, nlp_listp) {
685 if(i > LPFC_MAX_SYS_DISC_ENTRIES) {
686 len += snprintf(buf+len, PAGE_SIZE-len,
687 "Missed %d entries - sysfs %ld limit exceeded\n",
688 (phba->fc_adisc_cnt - i + 1), PAGE_SIZE);
691 if(len > (PAGE_SIZE-1)) /* double check */
693 len += lpfc_disc_ndlp_show(phba, ndlp, buf, len);
695 spin_unlock_irqrestore(phba->host->host_lock, iflag);
700 lpfc_disc_plogi_show(struct class_device *cdev, char *buf)
702 struct Scsi_Host *host = class_to_shost(cdev);
703 struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata[0];
704 struct lpfc_nodelist *ndlp, *next_ndlp;
705 struct list_head *listp;
711 spin_lock_irqsave(phba->host->host_lock, iflag);
712 listp = &phba->fc_plogi_list;
713 if (list_empty(listp)) {
714 spin_unlock_irqrestore(phba->host->host_lock, iflag);
715 return snprintf(buf, PAGE_SIZE, "PLOGI list: Empty\n");
718 len += snprintf(buf+len, PAGE_SIZE-len, "PLOGI list: %d Entries\n",
720 list_for_each_entry_safe(ndlp, next_ndlp, listp, nlp_listp) {
722 if(i > LPFC_MAX_SYS_DISC_ENTRIES) {
723 len += snprintf(buf+len, PAGE_SIZE-len,
724 "Missed %d entries - sysfs %ld limit exceeded\n",
725 (phba->fc_plogi_cnt - i + 1), PAGE_SIZE);
728 if(len > (PAGE_SIZE-1)) /* double check */
730 len += lpfc_disc_ndlp_show(phba, ndlp, buf, len);
732 spin_unlock_irqrestore(phba->host->host_lock, iflag);
737 lpfc_disc_unused_show(struct class_device *cdev, char *buf)
739 struct Scsi_Host *host = class_to_shost(cdev);
740 struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata[0];
741 struct lpfc_nodelist *ndlp, *next_ndlp;
742 struct list_head *listp;
748 spin_lock_irqsave(phba->host->host_lock, iflag);
749 listp = &phba->fc_unused_list;
750 if (list_empty(listp)) {
751 spin_unlock_irqrestore(phba->host->host_lock, iflag);
752 return snprintf(buf, PAGE_SIZE, "Unused list: Empty\n");
755 len += snprintf(buf+len, PAGE_SIZE-len, "Unused list: %d Entries\n",
756 phba->fc_unused_cnt);
757 list_for_each_entry_safe(ndlp, next_ndlp, listp, nlp_listp) {
759 if(i > LPFC_MAX_SYS_DISC_ENTRIES) {
760 len += snprintf(buf+len, PAGE_SIZE-len,
761 "Missed %d entries - sysfs %ld limit exceeded\n",
762 (phba->fc_unused_cnt - i + 1), PAGE_SIZE);
765 if(len > (PAGE_SIZE-1)) /* double check */
767 len += lpfc_disc_ndlp_show(phba, ndlp, buf, len);
769 spin_unlock_irqrestore(phba->host->host_lock, iflag);
773 #define LPFC_MAX_SYS_OUTFCPIO_ENTRIES 50
776 lpfc_outfcpio_show(struct class_device *cdev, char *buf)
778 struct Scsi_Host *host = class_to_shost(cdev);
779 struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata[0];
780 struct lpfc_sli *psli;
781 struct lpfc_sli_ring *pring;
782 struct lpfc_target *targetp;
783 struct lpfc_nodelist *ndlp;
784 struct lpfc_scsi_buf *lpfc_cmd;
785 struct list_head *curr, *next;
786 struct lpfc_iocbq *iocb;
787 struct lpfc_iocbq *next_iocb;
791 int cnt = 0, unused = 0, total = 0;
792 int tx_count, txcmpl_count;
796 pring = &psli->ring[psli->fcp_ring];
799 spin_lock_irqsave(phba->host->host_lock, iflag);
801 for(i=0;i<LPFC_MAX_TARGET;i++) {
802 targetp = phba->device_queue_hash[i];
804 if(cnt >= LPFC_MAX_SYS_OUTFCPIO_ENTRIES) {
809 len += snprintf(buf+len, PAGE_SIZE-len,
810 "ID %03d:qcmd %08x done %08x err %08x "
811 "slv %03x ", targetp->scsi_id, targetp->qcmdcnt,
812 targetp->iodonecnt, targetp->errorcnt,
814 total += (targetp->qcmdcnt - targetp->iodonecnt);
819 /* Count I/Os on txq and txcmplq. */
820 list_for_each_safe(curr, next, &pring->txq) {
821 next_iocb = list_entry(curr, struct lpfc_iocbq,
826 /* Must be a FCP command */
827 if ((cmd->ulpCommand != CMD_FCP_ICMND64_CR) &&
828 (cmd->ulpCommand != CMD_FCP_IWRITE64_CR) &&
829 (cmd->ulpCommand != CMD_FCP_IREAD64_CR)) {
833 /* context1 MUST be a struct lpfc_scsi_buf */
835 (struct lpfc_scsi_buf *) (iocb->context1);
837 || (lpfc_cmd->pCmd->device->id !=
844 /* Next check the txcmplq */
845 list_for_each_safe(curr, next, &pring->txcmplq) {
846 next_iocb = list_entry(curr, struct lpfc_iocbq,
851 /* Must be a FCP command */
852 if ((cmd->ulpCommand != CMD_FCP_ICMND64_CR) &&
853 (cmd->ulpCommand != CMD_FCP_IWRITE64_CR) &&
854 (cmd->ulpCommand != CMD_FCP_IREAD64_CR)) {
858 /* context1 MUST be a struct lpfc_scsi_buf */
860 (struct lpfc_scsi_buf *) (iocb->context1);
862 || (lpfc_cmd->pCmd->device->id !=
869 len += snprintf(buf+len, PAGE_SIZE-len,
871 tx_count, txcmpl_count);
873 ndlp = targetp->pnode;
875 len += snprintf(buf+len, PAGE_SIZE-len,
879 if(ndlp->nlp_state == NLP_STE_MAPPED_NODE) {
880 len += snprintf(buf+len, PAGE_SIZE-len,
884 len += snprintf(buf+len, PAGE_SIZE-len,
890 if(len > (PAGE_SIZE-1)) /* double check */
894 len += snprintf(buf+len, PAGE_SIZE-len,
895 "Missed x%x entries - sysfs %ld limit exceeded\n",
898 len += snprintf(buf+len, PAGE_SIZE-len,
899 "x%x total I/Os outstanding\n", total);
901 spin_unlock_irqrestore(phba->host->host_lock, iflag);
905 #define lpfc_param_show(attr) \
907 lpfc_##attr##_show(struct class_device *cdev, char *buf) \
909 struct Scsi_Host *host = class_to_shost(cdev);\
910 struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata[0];\
913 val = phba->cfg_##attr;\
914 return snprintf(buf, PAGE_SIZE, "%d\n",\
920 #define lpfc_param_store(attr, minval, maxval) \
922 lpfc_##attr##_store(struct class_device *cdev, const char *buf, size_t count) \
924 struct Scsi_Host *host = class_to_shost(cdev);\
925 struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata[0];\
927 if (sscanf(buf, "%d", &val) != 1)\
930 if (val >= minval && val <= maxval) {\
931 phba->cfg_##attr = val;\
938 #define LPFC_ATTR_R_NOINIT(name, desc) \
939 extern int lpfc_##name;\
940 module_param(lpfc_##name, int, 0);\
941 MODULE_PARM_DESC(lpfc_##name, desc);\
942 lpfc_param_show(name)\
943 static CLASS_DEVICE_ATTR(lpfc_##name, S_IRUGO , lpfc_##name##_show, NULL)
945 #define LPFC_ATTR_R(name, defval, minval, maxval, desc) \
946 static int lpfc_##name = defval;\
947 module_param(lpfc_##name, int, 0);\
948 MODULE_PARM_DESC(lpfc_##name, desc);\
949 lpfc_param_show(name)\
950 static CLASS_DEVICE_ATTR(lpfc_##name, S_IRUGO , lpfc_##name##_show, NULL)
952 #define LPFC_ATTR_RW(name, defval, minval, maxval, desc) \
953 static int lpfc_##name = defval;\
954 module_param(lpfc_##name, int, 0);\
955 MODULE_PARM_DESC(lpfc_##name, desc);\
956 lpfc_param_show(name)\
957 lpfc_param_store(name, minval, maxval)\
958 static CLASS_DEVICE_ATTR(lpfc_##name, S_IRUGO | S_IWUSR,\
959 lpfc_##name##_show, lpfc_##name##_store)
961 static CLASS_DEVICE_ATTR(info, S_IRUGO, lpfc_info_show, NULL);
962 static CLASS_DEVICE_ATTR(serialnum, S_IRUGO, lpfc_serialnum_show, NULL);
963 static CLASS_DEVICE_ATTR(fwrev, S_IRUGO, lpfc_fwrev_show, NULL);
964 static CLASS_DEVICE_ATTR(hdw, S_IRUGO, lpfc_hdw_show, NULL);
965 static CLASS_DEVICE_ATTR(state, S_IRUGO, lpfc_state_show, NULL);
966 static CLASS_DEVICE_ATTR(option_rom_version, S_IRUGO,
967 lpfc_option_rom_version_show, NULL);
968 static CLASS_DEVICE_ATTR(num_discovered_ports, S_IRUGO,
969 lpfc_num_discovered_ports_show, NULL);
970 #ifndef FC_TRANS_VER2
971 static CLASS_DEVICE_ATTR(speed, S_IRUGO, lpfc_speed_show, NULL);
972 static CLASS_DEVICE_ATTR(node_name, S_IRUGO, lpfc_node_name_show, NULL);
973 static CLASS_DEVICE_ATTR(port_name, S_IRUGO, lpfc_port_name_show, NULL);
974 static CLASS_DEVICE_ATTR(portfcid, S_IRUGO, lpfc_did_show, NULL);
975 static CLASS_DEVICE_ATTR(port_type, S_IRUGO, lpfc_port_type_show, NULL);
976 static CLASS_DEVICE_ATTR(fabric_name, S_IRUGO, lpfc_fabric_name_show, NULL);
977 #endif /* FC_TRANS_VER2 */
978 static CLASS_DEVICE_ATTR(events, S_IRUGO, lpfc_events_show, NULL);
979 static CLASS_DEVICE_ATTR(nport_evt_cnt, S_IRUGO, lpfc_nport_evt_cnt_show, NULL);
980 static CLASS_DEVICE_ATTR(lpfc_drvr_version, S_IRUGO, lpfc_drvr_version_show,
982 static CLASS_DEVICE_ATTR(management_version, S_IRUGO, management_version_show,
984 static CLASS_DEVICE_ATTR(issue_lip, S_IWUSR, NULL, lpfc_issue_lip);
985 static CLASS_DEVICE_ATTR(board_online, S_IRUGO | S_IWUSR,
986 lpfc_board_online_show, lpfc_board_online_store);
988 static CLASS_DEVICE_ATTR(disc_npr, S_IRUGO, lpfc_disc_npr_show, NULL);
989 static CLASS_DEVICE_ATTR(disc_map, S_IRUGO, lpfc_disc_map_show, NULL);
990 static CLASS_DEVICE_ATTR(disc_unmap, S_IRUGO, lpfc_disc_unmap_show, NULL);
991 static CLASS_DEVICE_ATTR(disc_prli, S_IRUGO, lpfc_disc_prli_show, NULL);
992 static CLASS_DEVICE_ATTR(disc_reglgn, S_IRUGO, lpfc_disc_reglgn_show, NULL);
993 static CLASS_DEVICE_ATTR(disc_adisc, S_IRUGO, lpfc_disc_adisc_show, NULL);
994 static CLASS_DEVICE_ATTR(disc_plogi, S_IRUGO, lpfc_disc_plogi_show, NULL);
995 static CLASS_DEVICE_ATTR(disc_unused, S_IRUGO, lpfc_disc_unused_show, NULL);
996 static CLASS_DEVICE_ATTR(outfcpio, S_IRUGO, lpfc_outfcpio_show, NULL);
/*
# lpfc_log_verbose: Only turn this flag on if you are willing to risk being
# deluged with LOTS of information.
# You can set a bit mask to record specific types of verbose messages:
# LOG_ELS 0x1 ELS events
# LOG_DISCOVERY 0x2 Link discovery events
# LOG_MBOX 0x4 Mailbox events
# LOG_INIT 0x8 Initialization events
# LOG_LINK_EVENT 0x10 Link events
# LOG_IP 0x20 IP traffic history
# LOG_FCP 0x40 FCP traffic history
# LOG_NODE 0x80 Node table events
# LOG_MISC 0x400 Miscellaneous events
# LOG_SLI 0x800 SLI events
# LOG_CHK_COND 0x1000 FCP Check condition flag
# LOG_LIBDFC 0x2000 LIBDFC events
# LOG_ALL_MSG 0xffff LOG all messages
*/
1017 LPFC_ATTR_RW(log_verbose, 0x0, 0x0, 0xffff, "Verbose logging bit-mask");
/*
# lun_queue_depth: This parameter is used to limit the number of outstanding
# commands per FCP LUN. Value range is [1,128]. Default value is 30.
*/
1023 LPFC_ATTR_R(lun_queue_depth, 30, 1, 128,
1024 "Max number of FCP commands we can queue to a specific LUN");
/*
# Some disk devices have a "select ID" or "select Target" capability.
# From a protocol standpoint "select ID" usually means select the
# Fibre channel "ALPA". In the FC-AL Profile there is an "informative
# annex" which contains a table that maps a "select ID" (a number
# between 0 and 7F) to an ALPA. By default, for compatibility with
# older drivers, the lpfc driver scans this table from low ALPA to high
# ALPA.
# Turning on the scan-down variable (on = 1, off = 0) will
# cause the lpfc driver to use an inverted table, effectively
# scanning ALPAs from high to low. Value range is [0,1]. Default value is 1.
# (Note: This "select ID" functionality is a LOOP ONLY characteristic
# and will not work across a fabric. Also this parameter will take
# effect only in the case when ALPA map is not available.)
*/
1043 LPFC_ATTR_RW(scan_down, 1, 0, 1,
1044 "Start scanning for devices from highest ALPA to lowest");
/*
# lpfc_nodev_tmo: If set, it will hold all I/O errors on devices that disappear
# until the timer expires. Value range is [0,255]. Default value is 20.
# NOTE: this MUST be less then the SCSI Layer command timeout - 1.
*/
1051 LPFC_ATTR_RW(nodev_tmo, 30, 0, 255,
1052 "Seconds driver will hold I/O waiting for a device to come back");
/*
# lpfc_topology: link topology for init link
# 0x0 = attempt loop mode then point-to-point
# 0x02 = attempt point-to-point mode only
# 0x04 = attempt loop mode only
# 0x06 = attempt point-to-point mode then loop
# Set point-to-point mode if you want to run as an N_Port.
# Set loop mode if you want to run as an NL_Port. Value range is [0,0x6].
# Default value is 0.
*/
1064 LPFC_ATTR_R(topology, 0, 0, 6, "Select Fibre Channel topology");
/*
# lpfc_link_speed: Link speed selection for initializing the Fibre Channel
# connection.
# 0 = auto select (default)
# Value range is [0,4]. Default value is 0.
*/
1075 LPFC_ATTR_R(link_speed, 0, 0, 4, "Select link speed");
/*
# lpfc_fcp_class: Determines FC class to use for the FCP protocol.
# Value range is [2,3]. Default value is 3.
*/
1081 LPFC_ATTR_R(fcp_class, 3, 2, 3,
1082 "Select Fibre Channel class of service for FCP sequences");
/*
# lpfc_use_adisc: Use ADISC for FCP rediscovery instead of PLOGI. Value range
# is [0,1]. Default value is 0.
*/
1088 LPFC_ATTR_RW(use_adisc, 0, 0, 1,
1089 "Use ADISC on rediscovery to authenticate FCP devices");
/*
# lpfc_ack0: Use ACK0, instead of ACK1 for class 2 acknowledgement. Value
# range is [0,1]. Default value is 0.
*/
1095 LPFC_ATTR_R(ack0, 0, 0, 1, "Enable ACK0 support");
/*
# If automap is set, SCSI IDs for all FCP nodes without
# consistent bindings will be automatically generated.
# If new FCP devices are added to the network when the system is down,
# there is no guarantee that these SCSI IDs will remain the same
# when the system is booted again.
# The bind method of the port is used as the binding method of
# automap devices to preserve SCSI IDs between link down and link up.
# If automap is 0, only devices with consistent bindings will be
# recognized by the system. User can change the automap property
# of port instance X by changing the value of lpfcX_automap parameter.
# Value range is [0,1]. Default value is 1.
*/
1110 LPFC_ATTR_RW(automap, 1, 0, 1,
1111 "Automatically bind FCP devices as they are discovered");
/*
# lpfc_fcp_bind_method: It specifies the method of binding to be used for each
# port. This binding method is used for consistent binding and automaped
# binding. A value of 1 will force WWNN binding, value of 2 will force WWPN
# binding, value of 3 will force DID binding and value of 4 will force the
# driver to derive binding from ALPA. Any consistent binding whose type does
# not match with the bind method of the port will be ignored. Value range
# is [1,4]. Default value is 2.
*/
1122 LPFC_ATTR_RW(fcp_bind_method, 2, 0, 4,
1123 "Select the bind method to be used");
/*
# lpfc_cr_delay & lpfc_cr_count: Default values for I/O colaesing
# cr_delay (msec) or cr_count outstanding commands. cr_delay can take
# value [0,63]. cr_count can take value [0,255]. Default value of cr_delay
# is 0. Default value of cr_count is 0. The cr_count feature is disabled if
# cr_delay is set to 0.
*/
1132 static int lpfc_cr_delay = 0;
1133 module_param(lpfc_cr_delay, int , 0);
1134 MODULE_PARM_DESC(lpfc_cr_delay, "A count of milliseconds after which an"
1135 "interrupt response is generated");
1137 static int lpfc_cr_count = 1;
1138 module_param(lpfc_cr_count, int, 0);
1139 MODULE_PARM_DESC(lpfc_cr_count, "A count of I/O completions after which an"
1140 "interrupt response is generated");
/*
# lpfc_fdmi_on: controls FDMI support.
# 0 = no FDMI support
# 1 = support FDMI without attribute of hostname
# 2 = support FDMI with attribute of hostname
# Value range [0,2]. Default value is 0.
*/
1149 LPFC_ATTR_RW(fdmi_on, 0, 0, 2, "Enable FDMI support");
/*
# Specifies the maximum number of ELS cmds we can have outstanding (for
# discovery). Value range is [1,64]. Default value = 1.
*/
1155 static int lpfc_discovery_threads = 1;
1156 module_param(lpfc_discovery_threads, int, 0);
1157 MODULE_PARM_DESC(lpfc_discovery_threads, "Maximum number of ELS commands"
1158 "during discovery");
/*
 * lpfc_scsi_hotplug: enables lpfc_target_add/lpfc_target_remove support.
 * Default is 1 (enabled) when built with USE_SCAN_TARGET, 0 otherwise.
 * NOTE(review): the #else separating the two definitions is elided from
 * this listing; the original conditional structure must be confirmed
 * against the full source.
 */
1160 #ifdef USE_SCAN_TARGET
1162 # This enables lpfc_target_add and lpfc_target_remove.
1163 # Default value = 1, SCSI hotplug enabled.
1165 static int lpfc_scsi_hotplug = 1;
1168 # This enables lpfc_target_add and lpfc_target_remove.
1169 # Default value = 0, SCSI hotplug disabled.
1171 static int lpfc_scsi_hotplug = 0;
1173 module_param(lpfc_scsi_hotplug, int, 0);
1174 MODULE_PARM_DESC(lpfc_scsi_hotplug, "Enables support of SCSI hotplug");
1177 # lpfc_max_luns: maximum number of LUNs per target driver will support
1178 # Value range is [1,32768]. Default value is 256.
1179 # NOTE: The SCSI layer will scan each target for this many luns
1181 LPFC_ATTR_RW(max_luns, 256, 1, 32768,
1182 "Maximum number of LUNs per target driver will support");
/*
 * dfc_rsp_data_copy - copy response data from a chain of DMA buffers
 * (DMABUFEXT_t list) into a flat output buffer, at most 4K per element.
 * NOTE(review): interior lines (null-check return, loop bookkeeping,
 * final return) are elided from this listing.
 */
1187 dfc_rsp_data_copy(struct lpfc_hba * phba, uint8_t * outdataptr,
1188 DMABUFEXT_t * mlist, uint32_t size)
1190 DMABUFEXT_t *mlast = NULL;
1191 int cnt, offset = 0;
1192 struct list_head head, *curr, *next;
1194 if (!mlist) /* FIX ME - fix the return values */
/* Splice a local list head in so the whole chain can be walked. */
1197 list_add_tail(&head, &mlist->dma.list);
1199 list_for_each_safe(curr, next, &head) {
1200 mlast = list_entry(curr, DMABUFEXT_t , dma.list);
1204 /* We copy chunks of 4K */
1205 cnt = size > 4096 ? 4096: size;
/* Sync the DMA buffer before the CPU reads it out. */
1208 pci_dma_sync_single_for_device(phba->pcidev,
1209 mlast->dma.phys, LPFC_BPL_SIZE, PCI_DMA_TODEVICE);
1211 memcpy(outdataptr + offset,
1212 mlast->dma.virt, cnt);
/*
 * dfc_cmd_data_free - free every DMA buffer in a DMABUFEXT_t chain,
 * releasing both the coherent DMA memory and the list elements.
 * NOTE(review): dma_free_coherent() arguments and the tail of the loop
 * are elided from this listing.
 */
1222 dfc_cmd_data_free(struct lpfc_hba * phba, DMABUFEXT_t * mlist)
1225 struct pci_dev *pcidev = phba->pcidev;
1226 struct list_head head, *curr, *next;
1228 if (!mlist) /* FIX ME - need different return value */
1231 list_add_tail(&head, &mlist->dma.list);
1233 list_for_each_safe(curr, next, &head) {
1234 mlast = list_entry(curr, DMABUFEXT_t , dma.list);
1235 if (mlast->dma.virt) {
1236 dma_free_coherent(&pcidev->dev,
/*
 * dfc_cmd_data_alloc - allocate a chain of DMA buffers (4K chunks) large
 * enough to hold 'size' bytes, build the corresponding ulp_bde64 buffer
 * pointer list, and (when indataptr is non-NULL) copy the command data in.
 * Returns the head of the DMABUFEXT_t chain; on failure the partially
 * built chain is released via dfc_cmd_data_free().
 * NOTE(review): loop structure, error returns and several statements are
 * elided from this listing.
 */
1247 static DMABUFEXT_t *
1248 dfc_cmd_data_alloc(struct lpfc_hba * phba, char *indataptr,
1249 struct ulp_bde64 * bpl, uint32_t size)
1251 DMABUFEXT_t *mlist = NULL, *dmp;
1252 int cnt, offset = 0, i = 0, rc = 0;
1253 struct pci_dev *pcidev = phba->pcidev;
/* Allocate in chunks of at most 4K. */
1256 cnt = size > 4096 ? 4096: size;
1258 dmp = kmalloc(sizeof (DMABUFEXT_t), GFP_KERNEL);
1260 goto dfc_cmd_data_alloc_exit;
1262 INIT_LIST_HEAD(&dmp->dma.list);
1265 list_add_tail(&dmp->dma.list, &mlist->dma.list);
1269 dmp->dma.virt = dma_alloc_coherent(&pcidev->dev,
1274 if (!dmp->dma.virt) /* FIX ME - who free's the list ?*/
1275 goto dfc_cmd_data_alloc_free_dmp;
/* Receive buffer: mark the BDE for unsolicited receive use. */
1280 bpl->tus.f.bdeFlags = BUFF_USE_RCV;
1283 /*rc = copy_from_user(dmp->dma.virt, indataptr + offset,
1285 memcpy(dmp->dma.virt, indataptr+offset, cnt);
1286 if (rc) /* FIX ME - who free's the list ?*/
1287 goto dfc_cmd_data_alloc_free_dmp;
1288 bpl->tus.f.bdeFlags = 0;
1290 pci_dma_sync_single_for_device(phba->pcidev,
1291 dmp->dma.phys, LPFC_BPL_SIZE, PCI_DMA_TODEVICE);
1294 /* build buffer ptr list for IOCB */
1295 bpl->addrLow = le32_to_cpu( putPaddrLow(dmp->dma.phys) );
1296 bpl->addrHigh = le32_to_cpu( putPaddrHigh(dmp->dma.phys) );
1297 bpl->tus.f.bdeSize = (ushort) cnt;
1298 bpl->tus.w = le32_to_cpu(bpl->tus.w);
1308 dfc_cmd_data_alloc_free_dmp:
1310 dfc_cmd_data_alloc_exit:
1311 dfc_cmd_data_free(phba, mlist);
/*
 * sysfs_ctpass_write - sysfs bin-attribute write handler that sends a
 * CT (common transport) passthru request. The user buffer starts with a
 * ctpassthruinput_t header (request size, response size, destination
 * port id) followed by the CT payload. The response DMA chain is queued
 * on phba->ctrspbuflist, keyed by the caller's pid, for a later
 * sysfs_ctpass_read() to collect.
 * NOTE(review): many interior lines (struct fields, error codes, some
 * unlock/free paths) are elided from this listing.
 */
1316 sysfs_ctpass_write(struct kobject *kobj, char *buf, loff_t off, size_t count)
1318 struct Scsi_Host *host = class_to_shost(container_of(kobj,
1319 struct class_device, kobj));
1320 struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata[0];
1321 struct lpfc_nodelist *pndl;
1322 struct ulp_bde64 *bpl;
1323 struct lpfc_iocbq *cmdiocbq = NULL, *rspiocbq = NULL;
1324 DMABUFEXT_t *indmp = NULL, *outdmp = NULL;
1325 IOCB_t *cmd = NULL, *rsp = NULL;
1326 struct lpfc_dmabuf *bmp = NULL;
1327 struct lpfc_sli *psli = &phba->sli;
1328 struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
1330 int reqbfrcnt, snsbfrcnt;
1332 unsigned long iflag;
1333 uint32_t portid; /* Port to send this to. */
1334 typedef struct tagctpassthruinput {
1338 } ctpassthruinput_t;
1340 spin_lock_irqsave(phba->host->host_lock, iflag);
/* Pull request/response sizes and destination DID from the header. */
1342 reqbfrcnt = ((ctpassthruinput_t*)buf)->reqsize;
1343 snsbfrcnt = ((ctpassthruinput_t*)buf)->rspsize;
1344 portid = ((ctpassthruinput_t*)buf)->portid;
1346 if((portid & CT_DID_MASK) != CT_DID_MASK)
1347 goto ctpassthru_exit;
/* Sanity check the caller-supplied sizes against PAGE_SIZE. */
1349 if (!reqbfrcnt || !snsbfrcnt ||
1350 (reqbfrcnt > PAGE_SIZE - sizeof(ctpassthruinput_t)) ||
1351 (snsbfrcnt > PAGE_SIZE)) {
1353 goto ctpassthru_exit;
1356 pndl = lpfc_findnode_did(phba, NLP_SEARCH_MAPPED | NLP_SEARCH_UNMAPPED,
1358 if(!pndl || pndl->nlp_flag & NLP_ELS_SND_MASK) {
1360 goto ctpassthru_exit;
1363 if (!(psli->sliinit.sli_flag & LPFC_SLI2_ACTIVE)) {
1365 goto ctpassthru_exit;
1368 cmdiocbq = mempool_alloc(phba->iocb_mem_pool, GFP_ATOMIC);
1371 goto ctpassthru_exit;
1373 memset(cmdiocbq, 0, sizeof (struct lpfc_iocbq));
1374 cmd = &cmdiocbq->iocb;
1376 rspiocbq = mempool_alloc(phba->iocb_mem_pool, GFP_ATOMIC);
1379 goto ctpassthru_freecmdiocbq;
1381 memset(rspiocbq, 0, sizeof (struct lpfc_iocbq));
1382 rsp = &rspiocbq->iocb;
1384 bmp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_ATOMIC);
1387 goto ctpassthru_freerspiocbq;
1390 bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
1393 goto ctpassthru_freebmp;
1396 INIT_LIST_HEAD(&bmp->list);
1397 bpl = (struct ulp_bde64 *) bmp->virt;
/* Drop the lock around the GFP_KERNEL-capable DMA allocation. */
1398 spin_unlock_irqrestore(phba->host->host_lock, iflag);
1399 indmp = dfc_cmd_data_alloc(phba, buf + sizeof(ctpassthruinput_t), bpl,
1401 spin_lock_irqsave(phba->host->host_lock, iflag);
1404 goto ctpassthru_freembuf;
1407 bpl += indmp->flag; /* flag contains total number of BPLs for xmit */
1409 spin_unlock_irqrestore(phba->host->host_lock, iflag);
1410 outdmp = dfc_cmd_data_alloc(phba, NULL, bpl, snsbfrcnt);
1411 spin_lock_irqsave(phba->host->host_lock, iflag);
1414 goto ctpassthru_free_indmp;
1416 outdmp->data = snsbfrcnt;
/* Tag the response with the calling pid so the reader can claim it. */
1417 outdmp->uniqueid = current_thread_info()->task->pid;
/* Build the GEN_REQUEST64_CR IOCB describing the BPL we just built. */
1419 cmd->un.genreq64.bdl.ulpIoTag32 = 0;
1420 cmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
1421 cmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys);
1422 cmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BDL;
1423 cmd->un.genreq64.bdl.bdeSize =
1424 (outdmp->flag + indmp->flag) * sizeof (struct ulp_bde64);
1425 cmd->ulpCommand = CMD_GEN_REQUEST64_CR;
1426 cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA);
1427 cmd->un.genreq64.w5.hcsw.Dfctl = 0;
1428 cmd->un.genreq64.w5.hcsw.Rctl = FC_UNSOL_CTL;
1429 cmd->un.genreq64.w5.hcsw.Type = FC_COMMON_TRANSPORT_ULP;
1430 cmd->ulpIoTag = lpfc_sli_next_iotag(phba, pring);
1431 cmd->ulpTimeout = 5;
1432 cmd->ulpBdeCount = 1;
1434 cmd->ulpClass = CLASS3;
1435 cmd->ulpContext = pndl->nlp_rpi;
1436 cmd->ulpOwner = OWN_CHIP;
1437 cmdiocbq->context1 = NULL;
1438 cmdiocbq->context2 = NULL;
1439 cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
1441 if (cmd->ulpTimeout < (phba->fc_ratov * 2 + LPFC_DRVR_TIMEOUT))
1442 timeout = phba->fc_ratov * 2 + LPFC_DRVR_TIMEOUT;
1444 timeout = cmd->ulpTimeout;
/* Retry the synchronous issue up to 4 times, unlocked across the wait. */
1446 for (rc = -1, i = 0; i < 4 && rc != IOCB_SUCCESS; i++) {
1447 spin_unlock_irqrestore(phba->host->host_lock, iflag);
1448 rc = lpfc_sli_issue_iocb_wait(phba, pring, cmdiocbq,
1450 spin_lock_irqsave(phba->host->host_lock, iflag);
1453 if (rc != IOCB_SUCCESS) {
1455 goto ctpassthru_free_outdmp;
1458 if (!rsp->ulpStatus) {
1459 outdmp->flag = rsp->un.genreq64.bdl.bdeSize;
1461 if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
1462 switch (rsp->un.ulpWord[4] & 0xff) {
1463 case IOERR_SEQUENCE_TIMEOUT:
1466 case IOERR_INVALID_RPI:
1473 goto ctpassthru_free_outdmp;
1476 if (outdmp->flag > snsbfrcnt) {
1477 rc = -ERANGE; /* C_CT Request error */
1478 lpfc_printf_log(phba,
1481 "%d:1208 C_CT Request error Data: x%x x%x\n",
1483 outdmp->flag, 4096);
1484 goto ctpassthru_free_outdmp;
/* Success: queue the response chain for sysfs_ctpass_read(). */
1486 list_add(&outdmp->list, &phba->ctrspbuflist);
1488 goto ctpassthru_free_indmp;
1490 ctpassthru_free_outdmp:
1491 dfc_cmd_data_free(phba, outdmp);
1492 ctpassthru_free_indmp:
1493 dfc_cmd_data_free(phba, indmp);
1494 ctpassthru_freembuf:
1495 lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
1498 ctpassthru_freerspiocbq:
1499 mempool_free(rspiocbq, phba->iocb_mem_pool);
1500 ctpassthru_freecmdiocbq:
1501 mempool_free(cmdiocbq, phba->iocb_mem_pool);
1503 spin_unlock_irqrestore(phba->host->host_lock, iflag); /* remove */
/*
 * sysfs_ctpass_read - return the CT passthru response queued by the
 * matching sysfs_ctpass_write() call. The entry is located on
 * phba->ctrspbuflist by the caller's pid, copied out, unlinked, and freed.
 * NOTE(review): locking around the list walk is elided from this listing.
 */
1508 sysfs_ctpass_read(struct kobject *kobj, char *buf, loff_t off, size_t count)
1510 int rc = -EIO, uniqueid;
1511 struct Scsi_Host *host = class_to_shost(container_of(kobj,
1512 struct class_device, kobj));
1513 struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata[0];
1514 DMABUFEXT_t *outdmp = NULL, *tmpoutdmp;
1516 uniqueid = current_thread_info()->task->pid;
1518 list_for_each_entry_safe(outdmp, tmpoutdmp, &phba->ctrspbuflist, list){
1519 if (outdmp->uniqueid == uniqueid) {
1520 dfc_rsp_data_copy(phba, (uint8_t*)buf, outdmp,
1524 list_del(&outdmp->list);
1525 dfc_cmd_data_free(phba, outdmp);
/* sysfs binary attribute wiring the CT passthru read/write handlers;
 * user/owner read-write permissions only. */
1533 static struct bin_attribute sysfs_ctpass_attr = {
1536 .mode = S_IRUSR | S_IWUSR,
1537 .owner = THIS_MODULE,
1540 .read = sysfs_ctpass_read,
1541 .write = sysfs_ctpass_write,
/*
 * sysfs_sendrnid_write - sysfs bin-attribute write handler that sends an
 * ELS RNID (topology discovery) to the node named in 'buf' (WWPN or WWNN,
 * selected by the trailing wwntype field). The accepted response payload
 * is queued on phba->rnidrspbuflist keyed by the caller's pid for a later
 * sysfs_sendrnid_read().
 *
 * Fix: rspbuf was allocated with sizeof (rspbuf) -- the size of a
 * *pointer* -- instead of sizeof (*rspbuf), i.e. sizeof(struct rnidrsp).
 * Writing rspbuf->buf/data/uniqueid/list then overruns the undersized
 * allocation. Changed to the sizeof(*ptr) idiom.
 *
 * NOTE(review): several interior lines (error returns, some unlock paths)
 * are elided from this listing; only the allocation size was changed.
 */
1545 sysfs_sendrnid_write(struct kobject *kobj, char *buf, loff_t off, size_t count)
1547 struct Scsi_Host *host = class_to_shost(container_of(kobj,
1548 struct class_device, kobj));
1549 struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata[0];
1550 struct lpfc_sli *psli = &phba->sli;
1551 struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
1552 struct lpfc_name idn;
1553 struct lpfc_iocbq *cmdiocbq = NULL, *rspiocbq = NULL;
1555 uint32_t *pcmd, *psta;
1558 unsigned long iflag;
1559 int rtnbfrsiz, i, rc = 0;
1560 struct lpfc_nodelist *pndl;
1562 struct rnidrsp *rspbuf;
/* sizeof (*rspbuf): allocate the struct, not the pointer. */
1564 rspbuf = kmalloc(sizeof (*rspbuf), GFP_KERNEL);
1568 memcpy(&idn, buf, sizeof(struct lpfc_name));
1569 sscanf(buf + sizeof(struct lpfc_name), "%d", &wwntype);
1571 spin_lock_irqsave(phba->host->host_lock, iflag); /* remove */
/* Locate the node by WWPN first, then by WWNN. */
1574 pndl = lpfc_findnode_wwpn(phba, NLP_SEARCH_MAPPED |
1575 NLP_SEARCH_UNMAPPED, &idn);
1577 pndl = lpfc_findnode_wwnn(phba, NLP_SEARCH_MAPPED |
1578 NLP_SEARCH_UNMAPPED, &idn);
1585 if ((pndl->nlp_flag & NLP_ELS_SND_MASK) == NLP_RNID_SND) {
1590 cmdiocbq = lpfc_prep_els_iocb(phba, 1, 2 * sizeof (uint32_t), 0, pndl,
1597 /*********************************************************************/
1598 /* context2 is used by prep/free to locate cmd and rsp buffers, */
1599 /* but context2 is also used by iocb_wait to hold a rspiocb ptr, so */
1600 /* the rsp iocbq can be returned from the completion routine for */
1601 /* iocb_wait, so, save the prep/free value locally ... it will be */
1602 /* restored after returning from iocb_wait. */
1603 /*********************************************************************/
1604 context2 = cmdiocbq->context2; /* needed to use lpfc_els_free_iocb */
1606 rspiocbq = mempool_alloc(phba->iocb_mem_pool, GFP_ATOMIC);
1609 goto sendrnid_freecmdiocbq;
1611 memset(rspiocbq, 0, sizeof (struct lpfc_iocbq));
1612 rsp = &rspiocbq->iocb;
1614 pcmd = ((struct lpfc_dmabuf *) cmdiocbq->context2)->virt;
1615 *pcmd++ = ELS_CMD_RNID;
1616 memset(pcmd, 0, sizeof (RNID)); /* fill in RNID payload */
1617 ((RNID *)pcmd)->Format = RNID_TOPOLOGY_DISC;
1618 cmdiocbq->context1 = NULL;
1619 cmdiocbq->context2 = NULL;
1620 cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
/* Retry the synchronous ELS issue up to 4 times. */
1622 for (rc = -1, i = 0; i < 4 && rc != IOCB_SUCCESS; i++) {
1623 pndl->nlp_flag |= NLP_RNID_SND;
1624 spin_unlock_irqrestore(phba->host->host_lock, iflag);
1625 rc = lpfc_sli_issue_iocb_wait(phba, pring, cmdiocbq,
1627 (phba->fc_ratov * 2) +
1629 spin_lock_irqsave(phba->host->host_lock, iflag);
1630 pndl->nlp_flag &= ~NLP_RNID_SND;
1631 cmdiocbq->context2 = context2;
1632 if (rc == IOCB_ERROR) {
1634 goto sendrnid_freerspiocbq;
1638 if (rc != IOCB_SUCCESS) {
1640 goto sendrnid_freerspiocbq;
1643 if (rsp->ulpStatus) {
1646 struct lpfc_dmabuf *buf_ptr1, *buf_ptr;
1647 buf_ptr1 = (struct lpfc_dmabuf *)cmdiocbq->context2;
1648 buf_ptr = list_entry(buf_ptr1->list.next, struct lpfc_dmabuf,
1650 psta = (uint32_t*)buf_ptr->virt;
1651 if (*psta++ != ELS_CMD_ACC) {
1653 goto sendrnid_freerspiocbq;
1655 prsp = (RNID*)psta; /* then rnid response data */
1656 rtnbfrsiz = prsp->CommonLen + prsp->SpecificLen;
1657 if (rtnbfrsiz > PAGE_SIZE) {
1659 goto sendrnid_freerspiocbq;
1661 rspbuf->buf = kmalloc(rtnbfrsiz, GFP_ATOMIC);
1664 goto sendrnid_freerspiocbq;
1667 memcpy(rspbuf->buf, prsp, rtnbfrsiz);
1668 rspbuf->data = rtnbfrsiz;
/* Tag with the calling pid so sysfs_sendrnid_read() can claim it. */
1669 rspbuf->uniqueid = current_thread_info()->task->pid;
1670 list_add(&rspbuf->list, &phba->rnidrspbuflist);
1674 sendrnid_freerspiocbq:
1675 mempool_free(rspiocbq, phba->iocb_mem_pool);
1676 sendrnid_freecmdiocbq:
1677 lpfc_els_free_iocb(phba, cmdiocbq);
1679 spin_unlock_irqrestore(phba->host->host_lock, iflag);
/*
 * sysfs_sendrnid_read - return the RNID response queued by the matching
 * sysfs_sendrnid_write() call; the entry is found on phba->rnidrspbuflist
 * by the caller's pid, copied out, and unlinked.
 * NOTE(review): freeing of the entry and locking are elided here.
 */
1684 sysfs_sendrnid_read(struct kobject *kobj, char *buf, loff_t off, size_t count)
1686 int rc = -EIO, uniqueid;
1687 struct Scsi_Host *host = class_to_shost(container_of(kobj,
1688 struct class_device, kobj));
1689 struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata[0];
1690 struct rnidrsp *outdmp = NULL, *tmpoutdmp;
1692 uniqueid = current_thread_info()->task->pid;
1694 list_for_each_entry_safe(outdmp, tmpoutdmp, &phba->rnidrspbuflist,
1696 if (outdmp->uniqueid == uniqueid) {
1697 memcpy(buf, outdmp->buf, outdmp->data);
1700 list_del(&outdmp->list);
/* sysfs binary attribute wiring the RNID send/collect handlers. */
1711 static struct bin_attribute sysfs_sendrnid_attr = {
1715 .owner = THIS_MODULE,
1718 .write = sysfs_sendrnid_write,
1719 .read = sysfs_sendrnid_read,
/*
 * sysfs_slimem_write - write into the HBA's SLIM region (SLI-2 host copy
 * or SLI-1 memory-mapped SLIM). Offsets and counts must be word aligned;
 * writes while online appear restricted to the mailbox area (256..384).
 * NOTE(review): return statements and some branches are elided here.
 */
1723 sysfs_slimem_write(struct kobject *kobj, char *buf, loff_t off, size_t count)
1725 unsigned long iflag;
1726 struct Scsi_Host *host = class_to_shost(container_of(kobj,
1727 struct class_device, kobj));
1728 struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata[0];
/* SLIM size depends on whether SLI-2 mode is active. */
1731 if (phba->sli.sliinit.sli_flag & LPFC_SLI2_ACTIVE)
1732 slim_size = SLI2_SLIM_SIZE;
1734 slim_size = SLI1_SLIM_SIZE;
1736 if ((count + off) > slim_size)
1739 if (count == 0) return 0;
/* Require 32-bit alignment of offset, length and buffer. */
1741 if (off % 4 || count % 4 || (unsigned long)buf % 4)
1744 spin_lock_irqsave(host->host_lock, iflag);
1746 if (!(phba->fc_flag & FC_OFFLINE_MODE)) {
1747 if (off >= 256 && off + count <= (256 + 128)) {
1748 spin_unlock_irqrestore(phba->host->host_lock, iflag);
1753 if (phba->sli.sliinit.sli_flag & LPFC_SLI2_ACTIVE)
1754 lpfc_sli_pcimem_bcopy((uint32_t*)buf,
1755 (uint32_t *)((uint8_t *) phba->slim2p+off), count);
1757 lpfc_memcpy_to_slim((uint8_t *) phba->slim_memmap_p + off,
1758 (void *)buf, count);
1760 spin_unlock_irqrestore(host->host_lock, iflag);
/*
 * sysfs_slimem_read - read from the HBA's SLIM region (SLI-2 host copy
 * or SLI-1 memory-mapped SLIM) into the sysfs buffer; the count is
 * clamped to the end of the region and must be word aligned.
 * NOTE(review): return statements are elided from this listing.
 */
1766 sysfs_slimem_read(struct kobject *kobj, char *buf, loff_t off, size_t count)
1768 unsigned long iflag;
1769 struct Scsi_Host *host = class_to_shost(container_of(kobj,
1770 struct class_device, kobj));
1771 struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata[0];
1774 if (phba->sli.sliinit.sli_flag & LPFC_SLI2_ACTIVE)
1775 slim_size = SLI2_SLIM_SIZE;
1777 slim_size = SLI1_SLIM_SIZE;
1779 if (off > slim_size)
/* Clamp the read to the end of the SLIM region. */
1782 if ((count + off) > slim_size)
1783 count = slim_size - off;
1785 if (count == 0) return 0;
1787 if (off % 4 || count % 4 || (unsigned long)buf % 4)
1790 spin_lock_irqsave(phba->host->host_lock, iflag);
1792 if (phba->sli.sliinit.sli_flag & LPFC_SLI2_ACTIVE)
1793 lpfc_sli_pcimem_bcopy((uint32_t *)((uint8_t *) phba->slim2p
1794 + off), (uint32_t *)buf, count);
1796 lpfc_memcpy_from_slim(buf, (uint8_t *)phba->slim_memmap_p + off,
1799 spin_unlock_irqrestore(phba->host->host_lock, iflag);
/* sysfs binary attribute exposing the SLIM region (debug build only). */
1804 static struct bin_attribute sysfs_slimem_attr = {
1807 .mode = S_IRUSR | S_IWUSR,
1808 .owner = THIS_MODULE,
1810 .size = SLI1_SLIM_SIZE,
1811 .read = sysfs_slimem_read,
1812 .write = sysfs_slimem_write,
1814 #endif /* DFC_DEBUG */
/*
 * sysfs_ctlreg_write - write 32-bit words into the HBA control register
 * area via the memory-mapped region; only permitted when the adapter is
 * offline, and the offset/count/buffer must be word aligned.
 * NOTE(review): return statements are elided from this listing.
 */
1817 sysfs_ctlreg_write(struct kobject *kobj, char *buf, loff_t off, size_t count)
1819 unsigned long iflag;
1821 struct Scsi_Host *host = class_to_shost(container_of(kobj,
1822 struct class_device, kobj));
1823 struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata[0];
1825 if ((off + count) > FF_REG_AREA_SIZE)
1828 if (count == 0) return 0;
1830 if (off % 4 || count % 4 || (unsigned long)buf % 4)
1833 spin_lock_irqsave(phba->host->host_lock, iflag);
/* Register writes are only allowed while the board is offline. */
1835 if (!(phba->fc_flag & FC_OFFLINE_MODE)) {
1836 spin_unlock_irqrestore(phba->host->host_lock, iflag);
1840 for (buf_off = 0; buf_off < count; buf_off += sizeof(uint32_t))
1841 writel(*((uint32_t *)(buf + buf_off)),
1842 (uint8_t *)phba->ctrl_regs_memmap_p + off + buf_off);
1844 spin_unlock_irqrestore(phba->host->host_lock, iflag);
/*
 * sysfs_ctlreg_read - read 32-bit words from the HBA control register
 * area into the sysfs buffer; the count is clamped to the register area
 * and must be word aligned.
 * NOTE(review): return statements are elided from this listing.
 */
1850 sysfs_ctlreg_read(struct kobject *kobj, char *buf, loff_t off, size_t count)
1852 unsigned long iflag;
1855 struct Scsi_Host *host = class_to_shost(container_of(kobj,
1856 struct class_device, kobj));
1857 struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata[0];
1859 if (off > FF_REG_AREA_SIZE)
1862 if ((off + count) > FF_REG_AREA_SIZE)
1863 count = FF_REG_AREA_SIZE - off;
1865 if (count == 0) return 0;
1867 if (off % 4 || count % 4 || (unsigned long)buf % 4)
1870 spin_lock_irqsave(phba->host->host_lock, iflag);
1872 for (buf_off = 0; buf_off < count; buf_off += sizeof(uint32_t)) {
1873 tmp_ptr = (uint32_t *)(buf + buf_off);
1874 *tmp_ptr = readl((uint8_t *)(phba->ctrl_regs_memmap_p
1878 spin_unlock_irqrestore(phba->host->host_lock, iflag);
/* sysfs binary attribute exposing the control register area. */
1883 static struct bin_attribute sysfs_ctlreg_attr = {
1886 .mode = S_IRUSR | S_IWUSR,
1887 .owner = THIS_MODULE,
1890 .read = sysfs_ctlreg_read,
1891 .write = sysfs_ctlreg_write,
/* Size in bytes of a raw mailbox command buffer. */
1895 #define MBOX_BUFF_SIZE (MAILBOX_CMD_WSIZE*sizeof(uint32_t))
/*
 * sysfs_mbox_idle - reset the sysfs mailbox state machine to SMBOX_IDLE,
 * releasing any staged mailbox back to the mailbox mempool.
 * Caller is expected to hold the host lock (every caller in this file
 * invokes it under host_lock).
 */
1898 sysfs_mbox_idle (struct lpfc_hba * phba)
1900 phba->sysfs_mbox.state = SMBOX_IDLE;
1901 phba->sysfs_mbox.offset = 0;
1903 if (phba->sysfs_mbox.mbox) {
1904 mempool_free(phba->sysfs_mbox.mbox,
1905 phba->mbox_mem_pool);
1906 phba->sysfs_mbox.mbox = NULL;
/*
 * sysfs_mbox_write - stage a raw mailbox command, possibly across
 * multiple sequential sysfs writes; each chunk is appended at 'off' into
 * the staged mailbox and the expected next offset is tracked in
 * phba->sysfs_mbox.offset. The command is issued later by the first
 * sysfs_mbox_read().
 * NOTE(review): the off==0 branch structure and returns are elided here.
 */
1911 sysfs_mbox_write(struct kobject *kobj, char *buf, loff_t off, size_t count)
1913 unsigned long iflag;
1914 struct Scsi_Host * host =
1915 class_to_shost(container_of(kobj, struct class_device, kobj));
1916 struct lpfc_hba * phba = (struct lpfc_hba*)host->hostdata[0];
1917 struct lpfcMboxq * mbox = NULL;
1919 if ((count + off) > MBOX_BUFF_SIZE)
1922 if (off % 4 || count % 4 || (unsigned long)buf % 4)
1929 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1935 spin_lock_irqsave(host->host_lock, iflag);
/* A mailbox was already staged: drop the freshly allocated one. */
1938 if (phba->sysfs_mbox.mbox)
1939 mempool_free(mbox, phba->mbox_mem_pool);
1941 phba->sysfs_mbox.mbox = mbox;
1942 phba->sysfs_mbox.state = SMBOX_WRITING;
/* Chunks must arrive in order at the tracked offset. */
1945 if (phba->sysfs_mbox.state != SMBOX_WRITING ||
1946 phba->sysfs_mbox.offset != off ||
1947 phba->sysfs_mbox.mbox == NULL ) {
1948 sysfs_mbox_idle(phba);
1949 spin_unlock_irqrestore(host->host_lock, iflag);
1954 memcpy((uint8_t *) & phba->sysfs_mbox.mbox->mb + off,
1957 phba->sysfs_mbox.offset = off + count;
1959 spin_unlock_irqrestore(host->host_lock, iflag);
/*
 * sysfs_mbox_read - on the first read after a complete sysfs_mbox_write,
 * validate the staged mailbox command against an allow/deny list (some
 * commands are illegal, some only legal offline), issue it (polled when
 * offline or SLI-2 inactive, otherwise waited), then return the completed
 * mailbox back to the reader in chunks; the state machine goes back to
 * idle once the whole MAILBOX_t has been read out.
 * NOTE(review): several case labels, returns and error codes are elided
 * from this listing.
 */
1965 sysfs_mbox_read(struct kobject *kobj, char *buf, loff_t off, size_t count)
1967 unsigned long iflag;
1968 struct Scsi_Host *host =
1969 class_to_shost(container_of(kobj, struct class_device,
1971 struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata[0];
1974 if (off > sizeof(MAILBOX_t))
1977 if ((count + off) > sizeof(MAILBOX_t))
1978 count = sizeof(MAILBOX_t) - off;
1980 if (off % 4 || count % 4 || (unsigned long)buf % 4)
1983 if (off && count == 0)
1986 spin_lock_irqsave(phba->host->host_lock, iflag);
/* First read after a fully staged write: issue the command now. */
1989 phba->sysfs_mbox.state == SMBOX_WRITING &&
1990 phba->sysfs_mbox.offset >= 2 * sizeof(uint32_t)) {
1992 switch (phba->sysfs_mbox.mbox->mb.mbxCommand) {
/* Commands that are always permitted (list partially elided). */
1997 case MBX_CONFIG_LINK:
1998 case MBX_CONFIG_RING:
1999 case MBX_RESET_RING:
2000 case MBX_UNREG_LOGIN:
2002 case MBX_DUMP_CONTEXT:
2005 case MBX_FLASH_WR_ULA:
/* Commands permitted only while the board is offline. */
2009 if (!(phba->fc_flag & FC_OFFLINE_MODE)) {
2010 printk(KERN_WARNING "mbox_read:Command 0x%x "
2011 "is illegal in on-line state\n",
2012 phba->sysfs_mbox.mbox->mb.mbxCommand);
2013 sysfs_mbox_idle(phba);
2014 spin_unlock_irqrestore(phba->host->host_lock,
2020 case MBX_READ_CONFIG:
2021 case MBX_READ_RCONFIG:
2022 case MBX_READ_STATUS:
2025 case MBX_READ_LNK_STAT:
2026 case MBX_DUMP_MEMORY:
2028 case MBX_UPDATE_CFG:
2030 case MBX_LOAD_EXP_ROM:
2032 case MBX_READ_SPARM64:
/* Commands never permitted through this interface. */
2036 case MBX_REG_LOGIN64:
2037 case MBX_CONFIG_PORT:
2038 case MBX_RUN_BIU_DIAG:
2039 printk(KERN_WARNING "mbox_read: Illegal Command 0x%x\n",
2040 phba->sysfs_mbox.mbox->mb.mbxCommand);
2041 sysfs_mbox_idle(phba);
2042 spin_unlock_irqrestore(phba->host->host_lock,
2046 printk(KERN_WARNING "mbox_read: Unknown Command 0x%x\n",
2047 phba->sysfs_mbox.mbox->mb.mbxCommand);
2048 sysfs_mbox_idle(phba);
2049 spin_unlock_irqrestore(phba->host->host_lock,
/* Poll when offline or SLI-2 inactive; otherwise issue and wait. */
2054 if ((phba->fc_flag & FC_OFFLINE_MODE) ||
2055 (!(phba->sli.sliinit.sli_flag & LPFC_SLI2_ACTIVE))){
2057 spin_unlock_irqrestore(phba->host->host_lock, iflag);
2058 rc = lpfc_sli_issue_mbox (phba,
2059 phba->sysfs_mbox.mbox,
2061 spin_lock_irqsave(phba->host->host_lock, iflag);
2064 spin_unlock_irqrestore(phba->host->host_lock, iflag);
2065 rc = lpfc_sli_issue_mbox_wait (phba,
2066 phba->sysfs_mbox.mbox,
2067 phba->fc_ratov * 2);
2068 spin_lock_irqsave(phba->host->host_lock, iflag);
2071 if (rc != MBX_SUCCESS) {
2072 sysfs_mbox_idle(phba);
2073 spin_unlock_irqrestore(host->host_lock, iflag);
2076 phba->sysfs_mbox.state = SMBOX_READING;
2078 else if (phba->sysfs_mbox.offset != off ||
2079 phba->sysfs_mbox.state != SMBOX_READING) {
2080 printk(KERN_WARNING "mbox_read: Bad State\n");
2081 sysfs_mbox_idle(phba);
2082 spin_unlock_irqrestore(host->host_lock, iflag);
2086 memcpy(buf, (uint8_t *) & phba->sysfs_mbox.mbox->mb + off, count);
2088 phba->sysfs_mbox.offset = off + count;
/* Whole mailbox consumed: return the state machine to idle. */
2090 if (phba->sysfs_mbox.offset == sizeof(MAILBOX_t))
2091 sysfs_mbox_idle(phba);
2093 spin_unlock_irqrestore(phba->host->host_lock, iflag);
/* sysfs binary attribute exposing the raw mailbox interface. */
2098 static struct bin_attribute sysfs_mbox_attr = {
2101 .mode = S_IRUSR | S_IWUSR,
2102 .owner = THIS_MODULE,
2104 .size = sizeof(MAILBOX_t),
2105 .read = sysfs_mbox_read,
2106 .write = sysfs_mbox_write,
2110 #ifdef FC_TRANS_VER2	/* fc transport w/ statistics and attrs */
2113  * Dynamic FC Host Attributes Support
/* FC transport callback: report this port's FC_ID (DID). The DID is
 * stored in host order here; be32_to_cpu presents it big-endian as the
 * transport expects. */
2117 lpfc_get_host_port_id(struct Scsi_Host *shost)
2119 struct lpfc_hba *phba = (struct lpfc_hba*)shost->hostdata[0];
2120 fc_host_port_id(shost) = be32_to_cpu(phba->fc_myDID);
/*
 * FC transport callback: classify the port type from topology and flags.
 * Loop + public loop -> NL_Port; loop -> L_Port; fabric -> N_Port;
 * otherwise point-to-point; unknown when the link is not ready.
 */
2124 lpfc_get_host_port_type(struct Scsi_Host *shost)
2126 struct lpfc_hba *phba = (struct lpfc_hba*)shost->hostdata[0];
2127 unsigned long iflag = 0;
2129 spin_lock_irqsave(shost->host_lock, iflag);
2131 if (phba->hba_state == LPFC_HBA_READY) {
2132 if (phba->fc_topology == TOPOLOGY_LOOP) {
2133 if (phba->fc_flag & FC_PUBLIC_LOOP)
2134 fc_host_port_type(shost) = FC_PORTTYPE_NLPORT;
2136 fc_host_port_type(shost) = FC_PORTTYPE_LPORT;
2138 if (phba->fc_flag & FC_FABRIC)
2139 fc_host_port_type(shost) = FC_PORTTYPE_NPORT;
2141 fc_host_port_type(shost) = FC_PORTTYPE_PTP;
2144 fc_host_port_type(shost) = FC_PORTTYPE_UNKNOWN;
2146 spin_unlock_irqrestore(shost->host_lock, iflag);
/*
 * FC transport callback: map the driver's hba_state onto the transport's
 * port-state enum; offline mode overrides everything.
 * NOTE(review): some case bodies/breaks are elided from this listing.
 */
2150 lpfc_get_host_port_state(struct Scsi_Host *shost)
2152 struct lpfc_hba *phba = (struct lpfc_hba*)shost->hostdata[0];
2153 unsigned long iflag = 0;
2155 spin_lock_irqsave(shost->host_lock, iflag);
2157 if (phba->fc_flag & FC_OFFLINE_MODE)
2158 fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
2160 switch (phba->hba_state) {
2161 case LPFC_INIT_START:
2162 case LPFC_INIT_MBX_CMDS:
2163 case LPFC_LINK_DOWN:
2164 fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
2167 case LPFC_LOCAL_CFG_LINK:
2169 case LPFC_FABRIC_CFG_LINK:
2172 case LPFC_BUILD_DISC_LIST:
2173 case LPFC_DISC_AUTH:
2175 case LPFC_HBA_READY:
2176 /* Links up, beyond this port_type reports state */
2177 fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
2179 case LPFC_HBA_ERROR:
2180 fc_host_port_state(shost) = FC_PORTSTATE_ERROR;
2183 fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
2188 spin_unlock_irqrestore(shost->host_lock, iflag);
/*
 * FC transport callback: translate fc_linkspeed into the transport's
 * port-speed enum; unknown when the link is not ready.
 * NOTE(review): the case labels (LA_1GHZ_LINK etc.) are elided here.
 */
2192 lpfc_get_host_speed(struct Scsi_Host *shost)
2194 struct lpfc_hba *phba = (struct lpfc_hba*)shost->hostdata[0];
2195 unsigned long iflag = 0;
2197 spin_lock_irqsave(shost->host_lock, iflag);
2199 if (phba->hba_state == LPFC_HBA_READY) {
2200 switch(phba->fc_linkspeed) {
2202 fc_host_speed(shost) = FC_PORTSPEED_1GBIT;
2205 fc_host_speed(shost) = FC_PORTSPEED_2GBIT;
2208 fc_host_speed(shost) = FC_PORTSPEED_4GBIT;
2211 fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
2216 spin_unlock_irqrestore(shost->host_lock, iflag);
/*
 * FC transport callback: report the fabric's node name when attached to
 * a fabric (or public loop); otherwise report our own node name, since
 * with no F/FL_Port the local port acts as the fabric.
 */
2220 lpfc_get_host_fabric_name (struct Scsi_Host *shost)
2222 struct lpfc_hba *phba = (struct lpfc_hba*)shost->hostdata[0];
2223 unsigned long iflag = 0;
2226 spin_lock_irqsave(shost->host_lock, iflag);
2228 if ((phba->fc_flag & FC_FABRIC) ||
2229 ((phba->fc_topology == TOPOLOGY_LOOP) &&
2230 (phba->fc_flag & FC_PUBLIC_LOOP)))
2231 memcpy(&nodename, &phba->fc_fabparam.nodeName, sizeof(u64));
2233 /* fabric is local port if there is no F/FL_Port */
2234 memcpy(&nodename, &phba->fc_nodename, sizeof(u64));
2236 spin_unlock_irqrestore(shost->host_lock, iflag);
2238 fc_host_fabric_name(shost) = be64_to_cpu(nodename);
/*
 * lpfc_get_stats - FC transport statistics callback. Issues two mailbox
 * commands (MBX_READ_STATUS for frame/word counters, MBX_READ_LNK_STAT
 * for link error counters) and fills the fc_host_statistics structure
 * cached at phba->link_stats. Mailboxes are polled when offline or SLI-2
 * is inactive, otherwise issued-and-waited.
 * NOTE(review): pmb initialization, error returns and the final return
 * statement are elided from this listing.
 */
2242 static struct fc_host_statistics *
2243 lpfc_get_stats(struct Scsi_Host *shost)
2245 struct lpfc_hba *phba = (struct lpfc_hba *)shost->hostdata[0];
2246 struct lpfc_sli *psli = &phba->sli;
2247 struct fc_host_statistics *hs =
2248 (struct fc_host_statistics *)phba->link_stats;
2249 LPFC_MBOXQ_t *pmboxq;
2253 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC);
2256 memset(pmboxq, 0, sizeof (LPFC_MBOXQ_t));
2259 pmb->mbxCommand = MBX_READ_STATUS;
2260 pmb->mbxOwner = OWN_HOST;
2261 pmboxq->context1 = NULL;
2263 if ((phba->fc_flag & FC_OFFLINE_MODE) ||
2264 (!(psli->sliinit.sli_flag & LPFC_SLI2_ACTIVE))){
2265 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
2267 rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
2269 if (rc != MBX_SUCCESS) {
/* On timeout the mailbox is still owned by the hardware; detach the
 * completion so it is not freed twice. */
2271 if (rc == MBX_TIMEOUT)
2272 pmboxq->mbox_cmpl = NULL;
2274 mempool_free( pmboxq, phba->mbox_mem_pool);
/* Firmware reports byte counts in units of 256 bytes; the transport
 * wants words, hence the * 256 scaling below. */
2279 hs->tx_frames = pmb->un.varRdStatus.xmitFrameCnt;
2280 hs->tx_words = (pmb->un.varRdStatus.xmitByteCnt * 256);
2281 hs->rx_frames = pmb->un.varRdStatus.rcvFrameCnt;
2282 hs->rx_words = (pmb->un.varRdStatus.rcvByteCnt * 256);
2284 memset((void *)pmboxq, 0, sizeof (LPFC_MBOXQ_t));
2285 pmb->mbxCommand = MBX_READ_LNK_STAT;
2286 pmb->mbxOwner = OWN_HOST;
2287 pmboxq->context1 = NULL;
2289 if ((phba->fc_flag & FC_OFFLINE_MODE) ||
2290 (!(psli->sliinit.sli_flag & LPFC_SLI2_ACTIVE))) {
2291 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
2293 rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
2295 if (rc != MBX_SUCCESS) {
2297 if (rc == MBX_TIMEOUT)
2298 pmboxq->mbox_cmpl = NULL;
2300 mempool_free( pmboxq, phba->mbox_mem_pool);
2305 hs->link_failure_count = pmb->un.varRdLnk.linkFailureCnt;
2306 hs->loss_of_sync_count = pmb->un.varRdLnk.lossSyncCnt;
2307 hs->loss_of_signal_count = pmb->un.varRdLnk.lossSignalCnt;
2308 hs->prim_seq_protocol_err_count = pmb->un.varRdLnk.primSeqErrCnt;
2309 hs->invalid_tx_word_count = pmb->un.varRdLnk.invalidXmitWord;
2310 hs->invalid_crc_count = pmb->un.varRdLnk.crcCnt;
2311 hs->error_frames = pmb->un.varRdLnk.crcCnt;
/* fc_eventTag increments twice per link event; halve for LIP/NOS count. */
2313 if (phba->fc_topology == TOPOLOGY_LOOP) {
2314 hs->lip_count = (phba->fc_eventTag >> 1);
2318 hs->nos_count = (phba->fc_eventTag >> 1);
2321 hs->dumped_frames = -1;
2324 /*hs->SecondsSinceLastReset = (jiffies - lpfc_loadtime) / HZ;*/
2329 #endif /* FC_TRANS_VER2 */
2331 #ifdef FC_TRANS_VER1
2333  * The LPFC driver treats linkdown handling as target loss events so there
2334  * are no sysfs handlers for link_down_tmo.
/*
 * FC transport callback: look up the target's DID by scanning the mapped
 * node list for a matching SCSI target id.
 */
2337 lpfc_get_starget_port_id(struct scsi_target *starget)
2339 struct lpfc_nodelist *ndlp = NULL;
2340 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
2341 struct lpfc_hba *phba = (struct lpfc_hba *) shost->hostdata[0];
2344 spin_lock_irq(shost->host_lock);
2345 /* Search the mapped list for this target ID */
2346 list_for_each_entry(ndlp, &phba->fc_nlpmap_list, nlp_listp) {
2347 if (starget->id == ndlp->nlp_sid) {
2348 did = ndlp->nlp_DID;
2352 spin_unlock_irq(shost->host_lock);
2354 fc_starget_port_id(starget) = did;
/*
 * FC transport callback: look up the target's WWNN by scanning the
 * mapped node list for a matching SCSI target id.
 */
2358 lpfc_get_starget_node_name(struct scsi_target *starget)
2360 struct lpfc_nodelist *ndlp = NULL;
2361 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
2362 struct lpfc_hba *phba = (struct lpfc_hba *) shost->hostdata[0];
2363 uint64_t node_name = 0;
2365 spin_lock_irq(shost->host_lock);
2366 /* Search the mapped list for this target ID */
2367 list_for_each_entry(ndlp, &phba->fc_nlpmap_list, nlp_listp) {
2368 if (starget->id == ndlp->nlp_sid) {
2369 memcpy(&node_name, &ndlp->nlp_nodename,
2370 sizeof(struct lpfc_name));
2374 spin_unlock_irq(shost->host_lock);
2376 fc_starget_node_name(starget) = be64_to_cpu(node_name);
/*
 * FC transport callback: look up the target's WWPN by scanning the
 * mapped node list for a matching SCSI target id.
 */
2380 lpfc_get_starget_port_name(struct scsi_target *starget)
2382 struct lpfc_nodelist *ndlp = NULL;
2383 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
2384 struct lpfc_hba *phba = (struct lpfc_hba *) shost->hostdata[0];
2385 uint64_t port_name = 0;
2387 spin_lock_irq(shost->host_lock);
2388 /* Search the mapped list for this target ID */
2389 list_for_each_entry(ndlp, &phba->fc_nlpmap_list, nlp_listp) {
2390 if (starget->id == ndlp->nlp_sid) {
2391 memcpy(&port_name, &ndlp->nlp_portname,
2392 sizeof(struct lpfc_name));
2396 spin_unlock_irq(shost->host_lock);
2398 fc_starget_port_name(starget) = be64_to_cpu(port_name);
/* FC transport callback: report the device-loss timeout. */
2402 lpfc_get_starget_loss_tmo(struct scsi_target *starget)
2405  * Return the driver's global value for device loss timeout plus
2406  * five seconds to allow the driver's nodev timer to run.
2408 fc_starget_dev_loss_tmo(starget) = lpfc_nodev_tmo + 5;
/* FC transport callback: set the (global) device-loss timeout. */
2412 lpfc_set_starget_loss_tmo(struct scsi_target *starget, uint32_t timeout)
2415  * The driver doesn't have a per-target timeout setting. Set
2416  * this value globally.
2418 lpfc_nodev_tmo = timeout;
2421 #else /* not defined FC_TRANS_VER1 */
/* Legacy (pre-VER1) transport callback: report the device's DID from the
 * node attached to its lpfc_target. */
2424 lpfc_get_port_id(struct scsi_device *sdev)
2426 struct lpfc_target *target = sdev->hostdata;
2427 if (sdev->host->transportt && target->pnode)
2428 fc_port_id(sdev) = target->pnode->nlp_DID;
/* Legacy transport callback: report the device's WWNN (0 if no node). */
2432 lpfc_get_node_name(struct scsi_device *sdev)
2434 struct lpfc_target *target = sdev->hostdata;
2435 uint64_t node_name = 0;
2436 if (sdev->host->transportt && target->pnode)
2437 memcpy(&node_name, &target->pnode->nlp_nodename,
2438 sizeof(struct lpfc_name));
2439 fc_node_name(sdev) = be64_to_cpu(node_name);
/* Legacy transport callback: report the device's WWPN (0 if no node). */
2443 lpfc_get_port_name(struct scsi_device *sdev)
2445 struct lpfc_target *target = sdev->hostdata;
2446 uint64_t port_name = 0;
2447 if (sdev->host->transportt && target->pnode)
2448 memcpy(&port_name, &target->pnode->nlp_portname,
2449 sizeof(struct lpfc_name));
2450 fc_port_name(sdev) = be64_to_cpu(port_name);
/*
 * fc_function_template wiring the callbacks above into the FC transport
 * class; which entries exist depends on the transport version macros.
 * NOTE(review): some entries in the non-VER1 branch are elided here.
 */
2454 static struct fc_function_template lpfc_transport_functions = {
2455 #ifdef FC_TRANS_VER2 /* fc transport w/ statistics and attrs */
2457 /* fixed attributes the driver supports */
2458 .show_host_node_name = 1,
2459 .show_host_port_name = 1,
2460 .show_host_supported_classes = 1,
2461 .show_host_supported_fc4s = 1,
2462 .show_host_symbolic_name = 1,
2463 .show_host_supported_speeds = 1,
2464 .show_host_maxframe_size = 1,
2466 /* dynamic attributes the driver supports */
2467 .get_host_port_id = lpfc_get_host_port_id,
2468 .show_host_port_id = 1,
2470 .get_host_port_type = lpfc_get_host_port_type,
2471 .show_host_port_type = 1,
2473 .get_host_port_state = lpfc_get_host_port_state,
2474 .show_host_port_state = 1,
2476 /* active_fc4s is shown but doesn't change (thus no get function) */
2477 .show_host_active_fc4s = 1,
2479 .get_host_speed = lpfc_get_host_speed,
2480 .show_host_speed = 1,
2482 .get_host_fabric_name = lpfc_get_host_fabric_name,
2483 .show_host_fabric_name = 1,
2486 * The LPFC driver treats linkdown handling as target loss events
2487 * so there are no sysfs handlers for link_down_tmo.
2490 .get_fc_host_stats = lpfc_get_stats,
2491 /* the LPFC driver doesn't support resetting stats yet */
2493 #endif /* FC_TRANS_VER2 */
2495 /* note: FC_TRANS_VER1 will set if FC_TRANS_VER2 is set */
2496 #ifdef FC_TRANS_VER1
2497 .get_starget_port_id = lpfc_get_starget_port_id,
2498 .show_starget_port_id = 1,
2500 .get_starget_node_name = lpfc_get_starget_node_name,
2501 .show_starget_node_name = 1,
2503 .get_starget_port_name = lpfc_get_starget_port_name,
2504 .show_starget_port_name = 1,
2506 .get_starget_dev_loss_tmo = lpfc_get_starget_loss_tmo,
2507 .set_starget_dev_loss_tmo = lpfc_set_starget_loss_tmo,
2508 .show_starget_dev_loss_tmo = 1,
2511 .get_port_id = lpfc_get_port_id,
2514 .get_node_name = lpfc_get_node_name,
2515 .show_node_name = 1,
2517 .get_port_name = lpfc_get_port_name,
2518 .show_port_name = 1,
/*
 * lpfc_proc_info - /proc/scsi/lpfc/<host> handler (scsi_host_template
 * .proc_info entry point).
 *
 * For reads (rw == 0) it emits one line per mapped node: the driver
 * target alias "lpfc%dt%02x", the FC DID, and the WWPN/WWNN rendered
 * byte by byte.  Writes (rw == 1) are not yet implemented.
 * NOTE(review): several lines are elided in this excerpt (the return
 * type, the 'len' local and its initialization, locking, the loop-break
 * and the final return), so the full control flow cannot be confirmed
 * from here.
 */
2523 lpfc_proc_info(struct Scsi_Host *host,
2524 char *buf, char **start, off_t offset, int count, int rw)
2526 struct lpfc_hba *phba = (struct lpfc_hba *)host->hostdata[0];
2527 struct lpfc_nodelist *ndlp;
2530 /* Sufficient bytes to hold a port or node name. */
2531 uint8_t name[sizeof (struct lpfc_name)];
2533 /* If rw = 0, then read info
2534 * If rw = 1, then write info (NYI)
/* Walk only the mapped list; the state check is a belt-and-braces filter
 * since entries on fc_nlpmap_list should already be MAPPED. */
2539 list_for_each_entry(ndlp, &phba->fc_nlpmap_list, nlp_listp) {
2540 if (ndlp->nlp_state == NLP_STE_MAPPED_NODE){
2541 len += snprintf(buf + len, PAGE_SIZE -len,
2542 "lpfc%dt%02x DID %06x WWPN ",
2544 ndlp->nlp_sid, ndlp->nlp_DID);
/* Copy the big-endian name into a byte array so each octet can be
 * printed as colon-separated hex. */
2546 memcpy (&name[0], &ndlp->nlp_portname,
2547 sizeof (struct lpfc_name));
2548 len += snprintf(buf + len, PAGE_SIZE-len,
2549 "%02x:%02x:%02x:%02x:%02x:%02x:"
2551 name[0], name[1], name[2],
2552 name[3], name[4], name[5],
2554 len += snprintf(buf + len, PAGE_SIZE-len, " WWNN ");
2555 memcpy (&name[0], &ndlp->nlp_nodename,
2556 sizeof (struct lpfc_name));
2557 len += snprintf(buf + len, PAGE_SIZE-len,
2558 "%02x:%02x:%02x:%02x:%02x:%02x:"
2560 name[0], name[1], name[2],
2561 name[3], name[4], name[5],
/* Bail out once less than ~90 bytes remain — one full entry no longer
 * fits in the single-page buffer (break appears on an elided line). */
2564 if (PAGE_SIZE - len < 90)
/* If the iterator did not reach the list head, the loop exited early:
 * mark the output as truncated. */
2567 if (&ndlp->nlp_listp != &phba->fc_nlpmap_list)
2568 len += snprintf(buf+len, PAGE_SIZE-len, "...\n");
/*
 * lpfc_slave_alloc - scsi_host_template .slave_alloc hook.
 *
 * Associates a scsi_device with the driver's lpfc_target (looked up by
 * SCSI target id) via sdev->hostdata.  On kernels newer than 2.6.9,
 * when no target has been discovered yet, a zeroed dummy lpfc_target is
 * allocated so queuecommand fails cleanly and the midlayer then calls
 * lpfc_slave_destroy (which frees the dummy).
 * NOTE(review): the return statements and the kmalloc NULL check appear
 * on lines elided from this excerpt — confirm against the full source.
 */
2574 lpfc_slave_alloc(struct scsi_device *scsi_devs)
2576 struct lpfc_hba *phba;
2577 struct lpfc_target *target;
2580 * Store the lun pointer in the scsi_device hostdata pointer provided
2581 * the driver has already discovered the target id.
2583 phba = (struct lpfc_hba *) scsi_devs->host->hostdata[0];
2584 target = lpfc_find_target(phba, scsi_devs->id, NULL);
2586 scsi_devs->hostdata = target;
2591 #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,9)
2596 * The driver does not have a target id matching that in the scsi
2597 * device. Allocate a dummy target initialized to zero so that
2598 * the driver's queuecommand entry correctly fails the call
2599 * forcing the midlayer to call lpfc_slave_destroy. This code
2600 * will be removed in a subsequent kernel patch.
2603 target = kmalloc(sizeof (struct lpfc_target), GFP_KERNEL);
2607 memset(target, 0, sizeof (struct lpfc_target));
2608 scsi_devs->hostdata = target;
/*
 * lpfc_slave_configure - scsi_host_template .slave_configure hook.
 *
 * Sets the per-LUN queue depth (tagged queuing when the device supports
 * it) and, with FC_TRANS_VER1, seeds the FC transport's per-starget
 * attributes for this device's target.
 */
2615 lpfc_slave_configure(struct scsi_device *sdev)
2617 struct lpfc_hba *phba = (struct lpfc_hba *) sdev->host->hostdata[0];
2619 #if defined(FC_TRANS_VER1)
2620 struct lpfc_target *target = (struct lpfc_target *) sdev->hostdata;
/* Same configured depth either way; only the queuing model differs. */
2623 if (sdev->tagged_supported)
2624 scsi_activate_tcq(sdev, phba->cfg_lun_queue_depth)
2626 scsi_deactivate_tcq(sdev, phba->cfg_lun_queue_depth);
2628 #ifdef FC_TRANS_VER1
2629 if ((target) && (sdev->sdev_target)) {
2631 * Initialize the fc transport attributes for the target
2632 * containing this scsi device. Also note that the driver's
2633 * target pointer is stored in the starget_data for the
2634 * driver's sysfs entry point functions.
2636 target->starget = sdev->sdev_target;
/* +5s pad so the transport's dev-loss fires after the driver's own
 * nodev timeout — presumably to let the driver act first; confirm. */
2637 fc_starget_dev_loss_tmo(target->starget) = lpfc_nodev_tmo + 5;
/*
 * lpfc_slave_destroy - scsi_host_template .slave_destroy hook.
 *
 * Disassociates the scsi_device from its lpfc_target.  If the target is
 * in the HBA's device_queue_hash and has no remaining slaves and no
 * node, its hash slot is cleared; otherwise, a target not found in the
 * hash is the dummy allocated by lpfc_slave_alloc and is freed here
 * (the kfree appears on an elided line).  sdev->hostdata is always
 * NULLed since the scsi_device is going away.
 * NOTE(review): return type, locals ('i'), braces and returns are elided
 * in this excerpt.
 */
2645 lpfc_slave_destroy(struct scsi_device *sdev)
2647 struct lpfc_hba *phba;
2648 struct lpfc_target *target;
2651 phba = (struct lpfc_hba *) sdev->host->hostdata[0];
2652 target = sdev->hostdata;
2656 /* Double check for valid lpfc_target */
2657 for (i = 0; i < MAX_FCP_TARGET; i++) {
2658 if(target == phba->device_queue_hash[i]) {
2659 if ((!target->slavecnt) && !(target->pnode)) {
2661 phba->device_queue_hash[i] = NULL;
2663 sdev->hostdata = NULL;
2667 /* If we get here, this was a dummy lpfc_target allocated
2668 * in lpfc_slave_alloc.
2670 if (!target->slavecnt)
2675 * Set this scsi device's hostdata to NULL since it is going
2676 * away. Also, (future) don't set the starget_dev_loss_tmo
2677 * this value is global to all targets managed by this
2680 sdev->hostdata = NULL;
/*
 * lpfc_host_attrs - sysfs class-device attributes exported per host
 * (wired into driver_template.shost_attrs below).
 *
 * The #ifndef FC_TRANS_VER2 group duplicates information the FC
 * transport class exports itself on newer kernels, so those driver-
 * private attributes are compiled out when the transport provides them.
 * NOTE(review): the terminating NULL entry and closing brace of this
 * array are on lines elided from this excerpt.
 */
2684 static struct class_device_attribute *lpfc_host_attrs[] = {
2685 &class_device_attr_info,
2686 &class_device_attr_serialnum,
2687 &class_device_attr_fwrev,
2688 &class_device_attr_hdw,
2689 &class_device_attr_option_rom_version,
2690 &class_device_attr_state,
2691 &class_device_attr_num_discovered_ports,
2692 #ifndef FC_TRANS_VER2
2693 &class_device_attr_speed,
2694 &class_device_attr_node_name,
2695 &class_device_attr_port_name,
2696 &class_device_attr_portfcid,
2697 &class_device_attr_port_type,
2698 &class_device_attr_fabric_name,
2699 #endif /* FC_TRANS_VER2 */
2700 &class_device_attr_events,
2701 &class_device_attr_lpfc_drvr_version,
2702 &class_device_attr_lpfc_log_verbose,
2703 &class_device_attr_lpfc_lun_queue_depth,
2704 &class_device_attr_lpfc_nodev_tmo,
2705 &class_device_attr_lpfc_automap,
2706 &class_device_attr_lpfc_fcp_class,
2707 &class_device_attr_lpfc_use_adisc,
2708 &class_device_attr_lpfc_ack0,
2709 &class_device_attr_lpfc_topology,
2710 &class_device_attr_lpfc_scan_down,
2711 &class_device_attr_lpfc_link_speed,
2712 &class_device_attr_lpfc_fdmi_on,
2713 &class_device_attr_lpfc_fcp_bind_method,
2714 &class_device_attr_lpfc_max_luns,
2715 &class_device_attr_nport_evt_cnt,
2716 &class_device_attr_management_version,
2717 &class_device_attr_issue_lip,
2718 &class_device_attr_board_online,
2719 &class_device_attr_disc_npr,
2720 &class_device_attr_disc_map,
2721 &class_device_attr_disc_unmap,
2722 &class_device_attr_disc_prli,
2723 &class_device_attr_disc_reglgn,
2724 &class_device_attr_disc_adisc,
2725 &class_device_attr_disc_plogi,
2726 &class_device_attr_disc_unused,
2727 &class_device_attr_outfcpio,
/*
 * driver_template - scsi_host_template registered for every lpfc HBA
 * (passed to scsi_host_alloc in lpfc_pci_probe_one).
 *
 * Wires up queuecommand, the error-handler ladder (abort, device reset,
 * bus reset), the slave_* lifecycle hooks, /proc support, and the sysfs
 * attribute list above.
 * NOTE(review): some initializers (e.g. can_queue/this_id) and the
 * closing brace are on lines elided from this excerpt; can_queue is set
 * at probe time after SLI setup.
 */
2731 static struct scsi_host_template driver_template = {
2732 .module = THIS_MODULE,
2733 .name = LPFC_DRIVER_NAME,
2735 .queuecommand = lpfc_queuecommand,
2736 .eh_abort_handler = lpfc_abort_handler,
2737 .eh_device_reset_handler= lpfc_reset_lun_handler,
2738 .eh_bus_reset_handler = lpfc_reset_bus_handler,
2739 .slave_alloc = lpfc_slave_alloc,
2740 .slave_configure = lpfc_slave_configure,
2741 .slave_destroy = lpfc_slave_destroy,
2742 .proc_info = lpfc_proc_info,
2743 .proc_name = LPFC_DRIVER_NAME,
2745 .sg_tablesize = SG_ALL,
2747 .shost_attrs = lpfc_host_attrs,
2748 .use_clustering = ENABLE_CLUSTERING,
/*
 * lpfc_sli_setup - configure the SLI layer's ring geometry for this HBA.
 *
 * Sizes the command/response IOCB counts for each ring (FCP ring 0 also
 * absorbs the extra ring-1/ring-3 entries), and installs the unsolicited-
 * event handlers for ELS and CT traffic on the ELS ring via four
 * rctl/type masks.  Finally validates that the total IOCB count fits in
 * the SLI2 SLIM area.
 * NOTE(review): this excerpt elides the 'i'/'totiocb' declarations, the
 * "switch (i)" header, the break statements, and the returns.
 */
2752 lpfc_sli_setup(struct lpfc_hba * phba)
2755 struct lpfc_sli *psli = &phba->sli;
2756 LPFC_RING_INIT_t *pring;
2758 psli->sliinit.num_rings = MAX_CONFIGURED_RINGS;
2759 psli->fcp_ring = LPFC_FCP_RING;
2760 psli->next_ring = LPFC_FCP_NEXT_RING;
2761 psli->ip_ring = LPFC_IP_RING;
2763 for (i = 0; i < psli->sliinit.num_rings; i++) {
2764 pring = &psli->sliinit.ringinit[i];
2766 case LPFC_FCP_RING: /* ring 0 - FCP */
2767 /* numCiocb and numRiocb are used in config_port */
2768 pring->numCiocb = SLI2_IOCB_CMD_R0_ENTRIES;
2769 pring->numRiocb = SLI2_IOCB_RSP_R0_ENTRIES;
2770 pring->numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES;
2771 pring->numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
2772 pring->numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
2773 pring->numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES;
2774 pring->iotag_ctr = 0;
/* iotag space scaled from the HBA queue depth (assignment line elided). */
2776 (phba->cfg_hba_queue_depth * 2);
2777 pring->fast_iotag = pring->iotag_max;
2778 pring->num_mask = 0;
2780 case LPFC_IP_RING: /* ring 1 - IP */
2781 /* numCiocb and numRiocb are used in config_port */
2782 pring->numCiocb = SLI2_IOCB_CMD_R1_ENTRIES;
2783 pring->numRiocb = SLI2_IOCB_RSP_R1_ENTRIES;
2784 pring->num_mask = 0;
2786 case LPFC_ELS_RING: /* ring 2 - ELS / CT */
2787 /* numCiocb and numRiocb are used in config_port */
2788 pring->numCiocb = SLI2_IOCB_CMD_R2_ENTRIES;
2789 pring->numRiocb = SLI2_IOCB_RSP_R2_ENTRIES;
2790 pring->fast_iotag = 0;
2791 pring->iotag_ctr = 0;
2792 pring->iotag_max = 4096;
/* Four unsolicited-receive masks: ELS request, ELS response,
 * CT request (NameServer inquiry), CT response. */
2793 pring->num_mask = 4;
2794 pring->prt[0].profile = 0; /* Mask 0 */
2795 pring->prt[0].rctl = FC_ELS_REQ;
2796 pring->prt[0].type = FC_ELS_DATA;
2797 pring->prt[0].lpfc_sli_rcv_unsol_event =
2798 lpfc_els_unsol_event;
2799 pring->prt[1].profile = 0; /* Mask 1 */
2800 pring->prt[1].rctl = FC_ELS_RSP;
2801 pring->prt[1].type = FC_ELS_DATA;
2802 pring->prt[1].lpfc_sli_rcv_unsol_event =
2803 lpfc_els_unsol_event;
2804 pring->prt[2].profile = 0; /* Mask 2 */
2805 /* NameServer Inquiry */
2806 pring->prt[2].rctl = FC_UNSOL_CTL;
2808 pring->prt[2].type = FC_COMMON_TRANSPORT_ULP;
2809 pring->prt[2].lpfc_sli_rcv_unsol_event =
2810 lpfc_ct_unsol_event;
2811 pring->prt[3].profile = 0; /* Mask 3 */
2812 /* NameServer response */
2813 pring->prt[3].rctl = FC_SOL_CTL;
2815 pring->prt[3].type = FC_COMMON_TRANSPORT_ULP;
2816 pring->prt[3].lpfc_sli_rcv_unsol_event =
2817 lpfc_ct_unsol_event;
2820 totiocb += (pring->numCiocb + pring->numRiocb);
/* SLI2 SLIM has a fixed IOCB budget; log if the rings oversubscribe. */
2822 if (totiocb > MAX_SLI2_IOCB) {
2823 /* Too many cmd / rsp ring entries in SLI2 SLIM */
2824 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2825 "%d:0462 Too many cmd / rsp ring entries in "
2826 "SLI2 SLIM Data: x%x x%x\n",
2827 phba->brd_no, totiocb, MAX_SLI2_IOCB);
2830 #ifdef USE_HGP_HOST_SLIM
2831 psli->sliinit.sli_flag = LPFC_HGP_HOSTSLIM;
2833 psli->sliinit.sli_flag = 0;
/*
 * lpfc_set_bind_type - translate the cfg_fcp_bind_method module
 * parameter into the HBA's fcp_mapping seed and a bind-type code.
 *
 * Defaults to WWNN+WWPN binding; DID binding is selected for the DID
 * method and for the fallthrough/default case.
 * NOTE(review): the case labels, breaks and return are on lines elided
 * from this excerpt.
 */
2840 lpfc_set_bind_type(struct lpfc_hba * phba)
2842 int bind_type = phba->cfg_fcp_bind_method;
2843 int ret = LPFC_BIND_WW_NN_PN;
2845 switch (bind_type) {
2847 phba->fcp_mapping = FCP_SEED_WWNN;
2851 phba->fcp_mapping = FCP_SEED_WWPN;
2855 phba->fcp_mapping = FCP_SEED_DID;
2856 ret = LPFC_BIND_DID;
2860 phba->fcp_mapping = FCP_SEED_DID;
2861 ret = LPFC_BIND_DID;
/*
 * lpfc_get_cfgparam - snapshot the module parameters into per-HBA
 * configuration fields, then derive the HBA queue depth from the ASIC.
 *
 * Copying parameters per-HBA lets each board keep the values it was
 * loaded with even if sysfs later changes a global.
 */
2869 lpfc_get_cfgparam(struct lpfc_hba *phba)
2871 phba->cfg_log_verbose = lpfc_log_verbose;
2872 phba->cfg_automap = lpfc_automap;
2873 phba->cfg_fcp_bind_method = lpfc_fcp_bind_method;
2874 phba->cfg_cr_delay = lpfc_cr_delay;
2875 phba->cfg_cr_count = lpfc_cr_count;
2876 phba->cfg_lun_queue_depth = lpfc_lun_queue_depth;
2877 phba->cfg_fcp_class = lpfc_fcp_class;
2878 phba->cfg_use_adisc = lpfc_use_adisc;
2879 phba->cfg_ack0 = lpfc_ack0;
2880 phba->cfg_topology = lpfc_topology;
2881 phba->cfg_scan_down = lpfc_scan_down;
2882 phba->cfg_nodev_tmo = lpfc_nodev_tmo;
2883 phba->cfg_link_speed = lpfc_link_speed;
2884 phba->cfg_fdmi_on = lpfc_fdmi_on;
2885 phba->cfg_discovery_threads = lpfc_discovery_threads;
2886 phba->cfg_max_luns = lpfc_max_luns;
2887 phba->cfg_scsi_hotplug = lpfc_scsi_hotplug;
/* With automap disabled, discovery threads (if enabled at all) are
 * forced to the maximum. */
2889 if (phba->cfg_discovery_threads)
2890 if (phba->cfg_automap == 0)
2891 phba->cfg_discovery_threads = LPFC_MAX_DISC_THREADS;
/* HBA queue depth is tiered by ASIC family: LP101/small-form boards get
 * the smallest, light-class boards a middle tier, everything else the
 * full default. */
2893 switch (phba->pcidev->device) {
2894 case PCI_DEVICE_ID_LP101:
2895 case PCI_DEVICE_ID_BSMB:
2896 case PCI_DEVICE_ID_ZSMB:
2897 phba->cfg_hba_queue_depth = LPFC_LP101_HBA_Q_DEPTH;
2899 case PCI_DEVICE_ID_RFLY:
2900 case PCI_DEVICE_ID_PFLY:
2901 case PCI_DEVICE_ID_BMID:
2902 case PCI_DEVICE_ID_ZMID:
2903 case PCI_DEVICE_ID_TFLY:
2904 phba->cfg_hba_queue_depth = LPFC_LC_HBA_Q_DEPTH;
2907 phba->cfg_hba_queue_depth = LPFC_DFT_HBA_Q_DEPTH;
/*
 * lpfc_consistent_bind_setup - initialize the persistent-binding list
 * and count for a freshly allocated HBA.
 */
2913 lpfc_consistent_bind_setup(struct lpfc_hba * phba)
2915 INIT_LIST_HEAD(&phba->fc_nlpbind_list);
2916 phba->fc_bind_cnt = 0;
/*
 * lpfc_get_brd_no - pick the lowest board number not already used by a
 * registered HBA.
 *
 * Scans lpfc_hba_list for a collision with the candidate 'brd'.
 * NOTE(review): the retry/increment logic and the return are on lines
 * elided from this excerpt; note also that the loop cursor shadows the
 * 'phba' parameter — confirm intent against the full source.
 */
2920 lpfc_get_brd_no(struct lpfc_hba * phba)
2922 uint8_t brd, found = 1;
2928 list_for_each_entry(phba, &lpfc_hba_list, hba_list) {
2929 if (phba->brd_no == brd) {
/*
 * lpfc_pci_probe_one - PCI probe entry point: bring up one lpfc HBA.
 *
 * Sequence: enable/reserve the PCI device, allocate and zero the
 * lpfc_hba (with trailing fc_host_statistics when FC_TRANS_VER2),
 * allocate the Scsi_Host, initialize timers/lists/waitqueues, assign a
 * board number, apply config parameters, map BAR0 (SLIM) and BAR2
 * (control registers), allocate the SLI2 DMA area, start the DPC kernel
 * thread, run SLI HBA setup, register with the SCSI midlayer, publish
 * the fixed FC transport attributes, create the sysfs binary files, and
 * scan.  On failure it unwinds through the out_* labels in reverse
 * order of acquisition.
 * NOTE(review): many lines are elided in this excerpt (NULL checks for
 * kmalloc/scsi_host_alloc/ioremap/dma_alloc_coherent, goto labels,
 * returns), so the error paths cannot be fully confirmed from here.
 */
2940 static int __devinit
2941 lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
2943 struct Scsi_Host *host;
2944 struct lpfc_hba *phba;
2945 struct lpfc_sli *psli;
2946 unsigned long iflag;
2947 unsigned long bar0map_len, bar2map_len;
2948 int error = -ENODEV, retval;
2949 #ifdef FC_TRANS_VER2 /* fc transport w/ statistics and attrs */
2951 #endif /* FC_TRANS_VER2 */
2953 if (pci_enable_device(pdev))
2955 if (pci_request_regions(pdev, LPFC_DRIVER_NAME))
2956 goto out_disable_device;
2959 * Allocate space for adapter info structure
/* VER2 builds co-allocate the fc_host_statistics block right after the
 * lpfc_hba so link_stats can point at &phba[1]. */
2961 #ifdef FC_TRANS_VER2 /* fc transport w/ statistics and attrs */
2962 phba = kmalloc((sizeof(*phba) + sizeof(struct fc_host_statistics)),
2964 #else /* not FC_TRANS_VER2 */
2965 phba = kmalloc(sizeof(*phba), GFP_KERNEL);
2966 #endif /* not FC_TRANS_VER2 */
2968 goto out_release_regions;
/* Note: only sizeof(struct lpfc_hba) is zeroed; the trailing statistics
 * area (VER2) is presumably cleared elsewhere — confirm. */
2969 memset(phba, 0, sizeof (struct lpfc_hba));
2970 #ifdef FC_TRANS_VER2 /* fc transport w/ statistics and attrs */
2971 phba->link_stats = (void *)&phba[1];
2972 #endif /* FC_TRANS_VER2 */
2974 host = scsi_host_alloc(&driver_template, sizeof (unsigned long));
2976 printk (KERN_WARNING "%s: scsi_host_alloc failed.\n",
2979 goto out_kfree_phba;
2982 phba->fc_flag |= FC_LOADING;
2983 phba->pcidev = pdev;
2986 INIT_LIST_HEAD(&phba->ctrspbuflist);
2987 INIT_LIST_HEAD(&phba->rnidrspbuflist);
2988 INIT_LIST_HEAD(&phba->freebufList);
2990 /* Initialize timers used by driver */
2991 init_timer(&phba->fc_estabtmo);
2992 phba->fc_estabtmo.function = lpfc_establish_link_tmo;
2993 phba->fc_estabtmo.data = (unsigned long)phba;
2994 init_timer(&phba->fc_disctmo);
2995 phba->fc_disctmo.function = lpfc_disc_timeout;
2996 phba->fc_disctmo.data = (unsigned long)phba;
2997 init_timer(&phba->fc_scantmo);
2998 phba->fc_scantmo.function = lpfc_scan_timeout;
2999 phba->fc_scantmo.data = (unsigned long)phba;
3001 init_timer(&phba->fc_fdmitmo);
3002 phba->fc_fdmitmo.function = lpfc_fdmi_tmo;
3003 phba->fc_fdmitmo.data = (unsigned long)phba;
3004 init_timer(&phba->els_tmofunc);
3005 phba->els_tmofunc.function = lpfc_els_timeout_handler;
3006 phba->els_tmofunc.data = (unsigned long)phba;
3008 init_timer(&psli->mbox_tmo);
3009 psli->mbox_tmo.function = lpfc_mbox_timeout;
3010 psli->mbox_tmo.data = (unsigned long)phba;
3012 /* Assign an unused board number */
3013 phba->brd_no = lpfc_get_brd_no(phba);
3014 host->unique_id = phba->brd_no;
3017 * Get all the module params for configuring this host and then
3018 * establish the host parameters.
3020 lpfc_get_cfgparam(phba);
3022 host->max_id = LPFC_MAX_TARGET;
3023 host->max_lun = phba->cfg_max_luns;
3026 if(phba->cfg_scsi_hotplug) {
3027 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
3028 "%d:0264 HotPlug Support Enabled\n",
3032 /* Add adapter structure to list */
3033 list_add_tail(&phba->hba_list, &lpfc_hba_list);
3035 /* Initialize all internally managed lists. */
3036 INIT_LIST_HEAD(&phba->fc_nlpmap_list);
3037 INIT_LIST_HEAD(&phba->fc_nlpunmap_list);
3038 INIT_LIST_HEAD(&phba->fc_unused_list);
3039 INIT_LIST_HEAD(&phba->fc_plogi_list);
3040 INIT_LIST_HEAD(&phba->fc_adisc_list);
3041 INIT_LIST_HEAD(&phba->fc_reglogin_list);
3042 INIT_LIST_HEAD(&phba->fc_prli_list);
3043 INIT_LIST_HEAD(&phba->fc_npr_list);
3044 lpfc_consistent_bind_setup(phba);
3046 init_waitqueue_head(&phba->linkevtwq);
3047 init_waitqueue_head(&phba->rscnevtwq);
3048 init_waitqueue_head(&phba->ctevtwq);
3050 pci_set_master(pdev);
/* MWI is an optimization only; failure is logged and ignored. */
3051 retval = pci_set_mwi(pdev);
3053 dev_printk(KERN_WARNING, &pdev->dev,
3054 "Warning: pci_set_mwi returned %d\n", retval);
3056 /* Configure DMA attributes. */
/* Try 64-bit DMA first, fall back to 32-bit; fail probe if neither. */
3057 if (dma_set_mask(&phba->pcidev->dev, 0xffffffffffffffffULL) &&
3058 dma_set_mask(&phba->pcidev->dev, 0xffffffffULL))
3062 * Get the physical address of Bar0 and Bar2 and the number of bytes
3063 * required by each mapping.
3065 phba->pci_bar0_map = pci_resource_start(phba->pcidev, 0);
3066 bar0map_len = pci_resource_len(phba->pcidev, 0);
3068 phba->pci_bar2_map = pci_resource_start(phba->pcidev, 2);
3069 bar2map_len = pci_resource_len(phba->pcidev, 2);
3071 /* Map HBA SLIM and Control Registers to a kernel virtual address. */
/* NOTE(review): no ioremap NULL checks visible here — possibly on
 * elided lines; verify against the full source. */
3072 phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len);
3073 phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len);
3076 * Allocate memory for SLI-2 structures
3078 phba->slim2p = dma_alloc_coherent(&phba->pcidev->dev, SLI2_SLIM_SIZE,
3079 &phba->slim2p_mapping, GFP_KERNEL);
3084 lpfc_sli_setup(phba); /* Setup SLI Layer to run over lpfc HBAs */
3085 lpfc_sli_queue_setup(phba); /* Initialize the SLI Layer */
3087 error = lpfc_mem_alloc(phba);
3091 lpfc_set_bind_type(phba);
3093 /* Initialize HBA structure */
3094 phba->fc_edtov = FF_DEF_EDTOV;
3095 phba->fc_ratov = FF_DEF_RATOV;
3096 phba->fc_altov = FF_DEF_ALTOV;
3097 phba->fc_arbtov = FF_DEF_ARBTOV;
3099 INIT_LIST_HEAD(&phba->dpc_disc);
3100 init_completion(&phba->dpc_startup);
3101 init_completion(&phba->dpc_exiting);
3104 * Startup the kernel thread for this host adapter
3107 phba->dpc_pid = kernel_thread(lpfc_do_dpc, phba, 0);
3108 if (phba->dpc_pid < 0) {
3109 error = phba->dpc_pid;
/* Don't race the thread: wait until the DPC signals it is running. */
3112 wait_for_completion(&phba->dpc_startup);
3114 /* Call SLI to initialize the HBA. */
3115 error = lpfc_sli_hba_setup(phba);
3119 /* We can rely on a queue depth attribute only after SLI HBA setup */
/* Reserve 10 slots of headroom for internal (non-midlayer) commands —
 * presumably ELS/mailbox traffic; confirm. */
3120 host->can_queue = phba->cfg_hba_queue_depth - 10;
3123 * Starting with 2.4.0 kernel, Linux can support commands longer
3124 * than 12 bytes. However, scsi_register() always sets it to 12.
3125 * For it to be useful to the midlayer, we have to set it here.
3127 host->max_cmd_len = 16;
3130 * Queue depths per lun
3132 host->transportt = lpfc_transport_template;
3133 host->hostdata[0] = (unsigned long)phba;
3134 pci_set_drvdata(pdev, host);
3135 error = scsi_add_host(host, &pdev->dev);
3139 #ifdef FC_TRANS_VER2
3141 * set fixed host attributes
/* WWNs are stored big-endian in fc_nodename/fc_portname; convert to CPU
 * order for the transport class. */
3144 memcpy(&wwname, &phba->fc_nodename, sizeof(u64));
3145 fc_host_node_name(host) = be64_to_cpu(wwname);
3146 memcpy(&wwname, &phba->fc_portname, sizeof(u64));
3147 fc_host_port_name(host) = be64_to_cpu(wwname);
3148 fc_host_supported_classes(host) = FC_COS_CLASS3;
/* FC-4 bitmaps: word 2/7 set marks FCP support in both maps. */
3150 memset(fc_host_supported_fc4s(host), 0,
3151 sizeof(fc_host_supported_fc4s(host)));
3152 fc_host_supported_fc4s(host)[2] = 1;
3153 fc_host_supported_fc4s(host)[7] = 1;
3155 lpfc_get_hba_sym_node_name(phba, fc_host_symbolic_name(host));
/* Advertise link speeds by ASIC generation (JEDEC id of the BIU). */
3157 if (FC_JEDEC_ID(phba->vpd.rev.biuRev) == VIPER_JEDEC_ID)
3158 fc_host_supported_speeds(host) = FC_PORTSPEED_10GBIT;
3159 else if (FC_JEDEC_ID(phba->vpd.rev.biuRev) == HELIOS_JEDEC_ID)
3160 fc_host_supported_speeds(host) =
3161 (FC_PORTSPEED_1GBIT | FC_PORTSPEED_2GBIT |
3162 FC_PORTSPEED_4GBIT);
3163 else if ((FC_JEDEC_ID(phba->vpd.rev.biuRev) ==
3164 CENTAUR_2G_JEDEC_ID)
3165 || (FC_JEDEC_ID(phba->vpd.rev.biuRev) ==
3167 || (FC_JEDEC_ID(phba->vpd.rev.biuRev) ==
3169 fc_host_supported_speeds(host) =
3170 (FC_PORTSPEED_1GBIT | FC_PORTSPEED_2GBIT);
3172 fc_host_supported_speeds(host) = FC_PORTSPEED_1GBIT;
/* Max frame size comes from the BB receive size in the service params. */
3174 fc_host_maxframe_size(host) = be32_to_cpu(
3175 ((((uint32_t) phba->fc_sparam.cmn.bbRcvSizeMsb) << 8) |
3176 (uint32_t) phba->fc_sparam.cmn.bbRcvSizeLsb));
3178 /* This value is also unchanging */
3179 memset(fc_host_active_fc4s(host), 0,
3180 sizeof(fc_host_active_fc4s(host)));
3181 fc_host_active_fc4s(host)[2] = 1;
3182 fc_host_active_fc4s(host)[7] = 1;
3183 #endif /* FC_TRANS_VER2 */
/* NOTE(review): sysfs_create_bin_file return values are ignored
 * throughout this section; attribute creation failures go unnoticed. */
3186 sysfs_create_bin_file(&host->shost_classdev.kobj, &sysfs_ctpass_attr);
3187 sysfs_create_bin_file(&host->shost_classdev.kobj,
3188 &sysfs_sendrnid_attr);
3191 if (phba->sli.sliinit.sli_flag & LPFC_SLI2_ACTIVE)
3192 sysfs_slimem_attr.size = SLI2_SLIM_SIZE;
3194 sysfs_slimem_attr.size = SLI1_SLIM_SIZE;
3196 sysfs_create_bin_file(&host->shost_classdev.kobj, &sysfs_slimem_attr);
3198 sysfs_create_bin_file(&host->shost_classdev.kobj, &sysfs_ctlreg_attr);
3199 sysfs_create_bin_file(&host->shost_classdev.kobj, &sysfs_mbox_attr);
3200 scsi_scan_host(host);
3201 phba->fc_flag &= ~FC_LOADING;
/* ---- error unwind: labels below tear down in reverse order ---- */
3205 scsi_host_put(host);
3207 lpfc_sli_hba_down(phba);
3209 /* Stop any timers that were started during this attach. */
3210 spin_lock_irqsave(phba->host->host_lock, iflag);
3211 lpfc_stop_timer(phba);
3212 spin_unlock_irqrestore(phba->host->host_lock, iflag);
3214 /* Kill the kernel thread for this host */
3215 if (phba->dpc_pid >= 0) {
3218 kill_proc(phba->dpc_pid, SIGHUP, 1);
3219 wait_for_completion(&phba->dpc_exiting);
3222 free_irq(phba->pcidev->irq, phba);
3224 lpfc_mem_free(phba);
3226 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
3227 phba->slim2p, phba->slim2p_mapping);
3229 iounmap(phba->ctrl_regs_memmap_p);
3230 iounmap(phba->slim_memmap_p);
3232 list_del_init(&phba->hba_list);
3235 out_release_regions:
3236 pci_release_regions(pdev);
3238 pci_disable_device(pdev);
/*
 * lpfc_pci_remove_one - PCI remove entry point: tear down one HBA.
 *
 * Removes the sysfs binary files, forces any blocked targets through
 * their nodev handling so the midlayer unblocks, unregisters the SCSI
 * host, stops the DPC thread, brings down the SLI layer, releases the
 * IRQ, frees driver/SLI2 memory, unmaps the BARs, and releases the PCI
 * device.  Ordering mirrors the probe path in reverse.
 * NOTE(review): lines are elided throughout this excerpt (locals such
 * as 'i', braces, blank lines); error handling cannot be fully
 * confirmed from here.
 */
3243 static void __devexit
3244 lpfc_pci_remove_one(struct pci_dev *pdev)
3246 struct Scsi_Host *host = pci_get_drvdata(pdev);
3247 struct lpfc_hba *phba = (struct lpfc_hba *)host->hostdata[0];
3248 struct lpfc_target *targetp;
3250 unsigned long iflag;
3252 sysfs_remove_bin_file(&host->shost_classdev.kobj, &sysfs_mbox_attr);
3253 sysfs_remove_bin_file(&host->shost_classdev.kobj, &sysfs_ctlreg_attr);
3255 sysfs_remove_bin_file(&host->shost_classdev.kobj, &sysfs_slimem_attr);
3256 sysfs_remove_bin_file(&host->shost_classdev.kobj,
3257 &sysfs_sendrnid_attr);
/* NOTE(review): ctpass was created on shost_classdev.kobj in probe but
 * is removed here from shost_gendev.kobj — looks asymmetric; confirm
 * which kobject the attribute actually lives on. */
3258 sysfs_remove_bin_file(&host->shost_gendev.kobj, &sysfs_ctpass_attr);
3263 /* In case we are offline or link is down */
3264 /*scsi_unblock_requests(phba->host);*/
3266 phba->fc_flag |= FC_UNLOADING;
3268 spin_lock_irqsave(phba->host->host_lock, iflag);
3269 /* Since we are going to scsi_remove_host(), disassociate scsi_dev
3270 * from lpfc_target, and make sure its unblocked.
3272 for (i = 0; i < MAX_FCP_TARGET; i++) {
3273 targetp = phba->device_queue_hash[i];
3276 #if defined(FC_TRANS_VER1) || defined(FC_TRANS_265_BLKPATCH)
3277 if(targetp->pnode) {
3278 if(targetp->blocked) {
3279 /* If we are blocked, force a nodev_tmo */
3280 del_timer_sync(&targetp->pnode->nlp_tmofunc);
3281 lpfc_process_nodev_timeout(phba,
3285 /* If we are unblocked, just remove
3288 lpfc_target_remove(phba, targetp);
3292 #if defined(FC_TRANS_VER1)
3293 targetp->starget = NULL;
3296 spin_unlock_irqrestore(phba->host->host_lock, iflag);
3298 list_del(&phba->hba_list);
3299 scsi_remove_host(phba->host);
3301 /* detach the board */
3303 /* Kill the kernel thread for this host */
3304 if (phba->dpc_pid >= 0) {
3307 kill_proc(phba->dpc_pid, SIGHUP, 1);
3308 wait_for_completion(&phba->dpc_exiting);
3312 * Bring down the SLI Layer. This step disable all interrupts,
3313 * clears the rings, discards all mailbox commands, and resets
3316 lpfc_sli_hba_down(phba);
3318 /* Release the irq reservation */
3319 free_irq(phba->pcidev->irq, phba);
3321 spin_lock_irqsave(phba->host->host_lock, iflag);
3322 lpfc_cleanup(phba, 0);
3323 lpfc_stop_timer(phba);
3324 spin_unlock_irqrestore(phba->host->host_lock, iflag);
3325 lpfc_scsi_free(phba);
3327 lpfc_mem_free(phba);
3329 /* Free resources associated with SLI2 interface */
3330 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
3331 phba->slim2p, phba->slim2p_mapping);
3333 /* unmap adapter SLIM and Control Registers */
3334 iounmap(phba->ctrl_regs_memmap_p);
3335 iounmap(phba->slim_memmap_p);
3337 pci_release_regions(phba->pcidev);
3338 pci_disable_device(phba->pcidev);
3340 scsi_host_put(phba->host);
3343 pci_set_drvdata(pdev, NULL);
/*
 * lpfc_id_table - PCI IDs of every supported Emulex HBA ASIC.
 * Exported via MODULE_DEVICE_TABLE so hotplug/udev can autoload the
 * module when a matching device appears.
 * NOTE(review): the all-zero sentinel entry and closing brace are on
 * lines elided from this excerpt.
 */
3346 static struct pci_device_id lpfc_id_table[] = {
3347 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_VIPER,
3348 PCI_ANY_ID, PCI_ANY_ID, },
3349 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_THOR,
3350 PCI_ANY_ID, PCI_ANY_ID, },
3351 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PEGASUS,
3352 PCI_ANY_ID, PCI_ANY_ID, },
3353 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_CENTAUR,
3354 PCI_ANY_ID, PCI_ANY_ID, },
3355 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_DRAGONFLY,
3356 PCI_ANY_ID, PCI_ANY_ID, },
3357 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SUPERFLY,
3358 PCI_ANY_ID, PCI_ANY_ID, },
3359 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_RFLY,
3360 PCI_ANY_ID, PCI_ANY_ID, },
3361 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PFLY,
3362 PCI_ANY_ID, PCI_ANY_ID, },
3363 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS,
3364 PCI_ANY_ID, PCI_ANY_ID, },
3365 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BMID,
3366 PCI_ANY_ID, PCI_ANY_ID, },
3367 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BSMB,
3368 PCI_ANY_ID, PCI_ANY_ID, },
3369 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR,
3370 PCI_ANY_ID, PCI_ANY_ID, },
3371 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZMID,
3372 PCI_ANY_ID, PCI_ANY_ID, },
3373 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZSMB,
3374 PCI_ANY_ID, PCI_ANY_ID, },
3375 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_TFLY,
3376 PCI_ANY_ID, PCI_ANY_ID, },
3377 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP101,
3378 PCI_ANY_ID, PCI_ANY_ID, },
3381 MODULE_DEVICE_TABLE(pci, lpfc_id_table);
/*
 * lpfc_driver - PCI driver registration: binds lpfc_id_table to the
 * probe/remove entry points above.  Registered from lpfc_init.
 */
3384 static struct pci_driver lpfc_driver = {
3385 .name = LPFC_DRIVER_NAME,
3386 .id_table = lpfc_id_table,
3387 .probe = lpfc_pci_probe_one,
3388 .remove = __devexit_p(lpfc_pci_remove_one),
/*
 * Module init/exit (function headers elided in this excerpt).
 *
 * lpfc_init: announce the driver, attach the FC transport template,
 * then register the PCI driver.  lpfc_exit: unregister the PCI driver
 * and release the transport template — strict reverse of init.
 */
3396 printk(LPFC_MODULE_DESC "\n");
3399 lpfc_transport_template =
3400 fc_attach_transport(&lpfc_transport_functions);
3401 if (!lpfc_transport_template)
3403 rc = pci_module_init(&lpfc_driver);
/* --- lpfc_exit body --- */
3411 pci_unregister_driver(&lpfc_driver);
3412 fc_release_transport(lpfc_transport_template);
/* Module boilerplate: entry points, license (required for GPL-only
 * symbols such as the FC transport), description, author, version. */
3414 module_init(lpfc_init);
3415 module_exit(lpfc_exit);
3416 MODULE_LICENSE("GPL");
3417 MODULE_DESCRIPTION(LPFC_MODULE_DESC);
3418 MODULE_AUTHOR("Emulex Corporation - tech.support@emulex.com");
3419 MODULE_VERSION("0:" LPFC_DRIVER_VERSION);