/* ------------------------------------------------------------
 * ibmvscsi.c
 * (C) Copyright IBM Corporation 1994, 2004
 * Authors: Colin DeVilbiss (devilbis@us.ibm.com)
 *          Santiago Leon (santil@us.ibm.com)
 *          Dave Boutcher (sleddog@us.ibm.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307
 * USA
 *
 * ------------------------------------------------------------
 * Emulation of a SCSI host adapter for Virtual I/O devices
 *
 * This driver supports the SCSI adapter implemented by the IBM
 * Power5 firmware.  That SCSI adapter is not a physical adapter,
 * but allows Linux SCSI peripheral drivers to directly
 * access devices in another logical partition on the physical system.
 *
 * The virtual adapter(s) are present in the open firmware device
 * tree just like real adapters.
 *
 * One of the capabilities provided on these systems is the ability
 * to DMA between partitions.  The architecture states that for VSCSI,
 * the server side is allowed to DMA to and from the client.  The client
 * is never trusted to DMA to or from the server directly.
 *
 * Messages are sent between partitions on a "Command/Response Queue"
 * (CRQ), which is just a buffer of 16 byte entries in the receiver's
 * memory.  Senders cannot access the buffer directly, but send messages
 * by making a hypervisor call and passing in the 16 bytes.  The hypervisor
 * puts the message in the next 16 byte space in round-robin fashion,
 * turns on the high order bit of the message (the valid bit), and
 * generates an interrupt to the receiver (if interrupts are turned on.)
 * The receiver just turns off the valid bit when they have copied out
 * the message.
 *
 * The VSCSI client builds a SCSI Remote Protocol (SRP) Information Unit
 * (IU) (as defined in the T10 standard available at www.t10.org), gets
 * a DMA address for the message, and sends it to the server as the
 * payload of a CRQ message.  The server DMAs the SRP IU and processes it,
 * including doing any additional data transfers.  When it is done, it
 * DMAs the SRP response back to the same address as the request came from,
 * and sends a CRQ message back to inform the client that the request has
 * completed.
 *
 * Note that some of the underlying infrastructure is different between
 * machines conforming to the "RS/6000 Platform Architecture" (RPA) and
 * the older iSeries hypervisor models.  To support both, some low level
 * routines have been broken out into rpa_vscsi.c and iseries_vscsi.c.
 * The Makefile should pick one, not two, not zero, of these.
 *
 * TODO: This is currently pretty tied to the IBM i/pSeries hypervisor
 * interfaces.  It would be really nice to abstract this above an RDMA
 * layer.
 */
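
/* An illustrative view of the CRQ entry described above, as this driver
 * uses it (field names are those of struct viosrp_crq from ibmvscsi.h;
 * this is a sketch of the fields this file touches, not the
 * authoritative layout):
 *
 *   crq.valid       0x80 = payload, 0xC0 = initialization, 0xFF = closed
 *   crq.format      VIOSRP_SRP_FORMAT or VIOSRP_MAD_FORMAT
 *   crq.timeout     request timeout, in seconds
 *   crq.IU_length   length of the SRP IU for this request
 *   crq.IU_data_ptr DMA address of the IU; the server echoes this value
 *                   back, which lets ibmvscsi_handle_crq() find the
 *                   originating srp_event_struct
 *
 * The 16-byte entry itself travels to the hypervisor as two u64s; see
 * the cast in ibmvscsi_send_srp_event() below.
 */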

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/dma-mapping.h>
#include <asm/vio.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include "ibmvscsi.h"

/* The values below are somewhat arbitrary default values, but
 * OS/400 will use 3 buses (disks, CDs, tapes, I think.)
 * Note that there are 3 bits of channel value, 6 bits of id, and
 * 5 bits of LUN.
 */
static int max_id = 64;
static int max_channel = 3;
static int init_timeout = 5;
static int max_requests = 50;
static int max_sectors = 32 * 8; /* default max I/O 32 pages */

#define IBMVSCSI_VERSION "1.5.3"

MODULE_DESCRIPTION("IBM Virtual SCSI");
MODULE_AUTHOR("Dave Boutcher");
MODULE_LICENSE("GPL");
MODULE_VERSION(IBMVSCSI_VERSION);

module_param_named(max_id, max_id, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(max_id, "Largest ID value for each channel");
module_param_named(max_channel, max_channel, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(max_channel, "Largest channel value");
module_param_named(init_timeout, init_timeout, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(init_timeout, "Initialization timeout in seconds");
module_param_named(max_requests, max_requests, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(max_requests, "Maximum requests for this adapter");
module_param_named(max_sectors, max_sectors, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(max_sectors, "Maximum sectors per request for this adapter");
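
/* All of the above are writable module parameters; an illustrative
 * invocation (not from the original source) would be:
 *
 *   modprobe ibmvscsi max_requests=100 init_timeout=10
 *
 * Note that max_requests also sizes the event pool and CRQ at probe
 * time, so writing it through /sys/module/ibmvscsi/parameters/ after
 * load should only affect adapters probed afterwards.
 */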

/* ------------------------------------------------------------
 * Routines for the event pool and event structs
 */
/**
 * initialize_event_pool: - Allocates and initializes the event pool for a host
 * @pool:       event_pool to be initialized
 * @size:       Number of events in pool
 * @hostdata:   ibmvscsi_host_data who owns the event pool
 *
 * Returns zero on success.
 */
static int initialize_event_pool(struct event_pool *pool,
                                 int size, struct ibmvscsi_host_data *hostdata)
{
        int i;

        pool->size = size;
        pool->next = 0;
        pool->events = kmalloc(pool->size * sizeof(*pool->events), GFP_KERNEL);
        if (!pool->events)
                return -ENOMEM;
        memset(pool->events, 0x00, pool->size * sizeof(*pool->events));

        pool->iu_storage =
            dma_alloc_coherent(hostdata->dev,
                               pool->size * sizeof(*pool->iu_storage),
                               &pool->iu_token, 0);
        if (!pool->iu_storage) {
                kfree(pool->events);
                return -ENOMEM;
        }

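        /* Pre-build the constant part of each event's CRQ entry: mark it
         * as a payload message (valid = 0x80) and point IU_data_ptr at
         * this event's slot in the coherent IU storage, so only the
         * per-request fields need filling in at submit time.
         */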
        for (i = 0; i < pool->size; ++i) {
                struct srp_event_struct *evt = &pool->events[i];
                memset(&evt->crq, 0x00, sizeof(evt->crq));
                atomic_set(&evt->free, 1);
                evt->crq.valid = 0x80;
                evt->crq.IU_length = sizeof(*evt->xfer_iu);
                evt->crq.IU_data_ptr = pool->iu_token +
                        sizeof(*evt->xfer_iu) * i;
                evt->xfer_iu = pool->iu_storage + i;
                evt->hostdata = hostdata;
        }

        return 0;
}

/**
 * release_event_pool: - Frees memory of an event pool of a host
 * @pool:       event_pool to be released
 * @hostdata:   ibmvscsi_host_data who owns the event pool
 */
static void release_event_pool(struct event_pool *pool,
                               struct ibmvscsi_host_data *hostdata)
{
        int i, in_use = 0;
        for (i = 0; i < pool->size; ++i)
                if (atomic_read(&pool->events[i].free) != 1)
                        ++in_use;
        if (in_use)
                printk(KERN_WARNING
                       "ibmvscsi: releasing event pool with %d "
                       "events still in use?\n", in_use);
        kfree(pool->events);
        dma_free_coherent(hostdata->dev,
                          pool->size * sizeof(*pool->iu_storage),
                          pool->iu_storage, pool->iu_token);
}

/**
 * valid_event_struct: - Determines if event is valid.
 * @pool:       event_pool that contains the event
 * @evt:        srp_event_struct to be checked for validity
 *
 * Returns zero if event is invalid, one otherwise.
 */
static int valid_event_struct(struct event_pool *pool,
                                struct srp_event_struct *evt)
{
        int index = evt - pool->events;
        if (index < 0 || index >= pool->size)   /* outside of bounds */
                return 0;
        if (evt != pool->events + index)        /* unaligned */
                return 0;
        return 1;
}

/**
 * free_event_struct: - Changes status of event to "free"
 * @pool:       event_pool that contains the event
 * @evt:        srp_event_struct to be modified
 */
static void free_event_struct(struct event_pool *pool,
                                       struct srp_event_struct *evt)
{
        if (!valid_event_struct(pool, evt)) {
                printk(KERN_ERR
                       "ibmvscsi: Freeing invalid event_struct %p "
                       "(not in pool %p)\n", evt, pool->events);
                return;
        }
        if (atomic_inc_return(&evt->free) != 1) {
                printk(KERN_ERR
                       "ibmvscsi: Freeing event_struct %p "
                       "which is not in use!\n", evt);
                return;
        }
}

/**
 * get_event_struct: - Gets the next free event in pool
 * @pool:       event_pool that contains the events to be searched
 *
 * Returns the next event in "free" state, and NULL if none are free.
 * Note that no synchronization is done here, we assume the host_lock
 * will synchronize things.
 */
static struct srp_event_struct *get_event_struct(struct event_pool *pool)
{
        int i;
        int poolsize = pool->size;
        int offset = pool->next;

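        /* Round-robin scan starting just past the last slot handed out.
         * atomic_dec_if_positive() returns the decremented value, so a
         * result of zero means we took the free count from 1 to 0 and
         * now own that event.
         */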
        for (i = 0; i < poolsize; i++) {
                offset = (offset + 1) % poolsize;
                if (!atomic_dec_if_positive(&pool->events[offset].free)) {
                        pool->next = offset;
                        return &pool->events[offset];
                }
        }

        printk(KERN_ERR "ibmvscsi: found no event struct in pool!\n");
        return NULL;
}

/**
 * init_event_struct: Initialize fields in an event struct that are always
 *                    required.
 * @evt_struct: The event
 * @done:       Routine to call when the event is responded to
 * @format:     SRP or MAD format
 * @timeout:    timeout value set in the CRQ
 */
static void init_event_struct(struct srp_event_struct *evt_struct,
                              void (*done) (struct srp_event_struct *),
                              u8 format,
                              int timeout)
{
        evt_struct->cmnd = NULL;
        evt_struct->cmnd_done = NULL;
        evt_struct->crq.format = format;
        evt_struct->crq.timeout = timeout;
        evt_struct->done = done;
}

/* ------------------------------------------------------------
 * Routines for receiving SCSI responses from the hosting partition
 */

/**
 * set_srp_direction: Set the fields in the srp_cmd related to data
 *     direction and number of buffers, based on the direction in
 *     the scsi_cmnd and the number of buffers
 */
static void set_srp_direction(struct scsi_cmnd *cmd,
                              struct srp_cmd *srp_cmd,
                              int numbuf)
{
        if (numbuf == 0)
                return;

        if (numbuf == 1) {
                if (cmd->sc_data_direction == DMA_TO_DEVICE)
                        srp_cmd->data_out_format = SRP_DIRECT_BUFFER;
                else
                        srp_cmd->data_in_format = SRP_DIRECT_BUFFER;
        } else {
                if (cmd->sc_data_direction == DMA_TO_DEVICE) {
                        srp_cmd->data_out_format = SRP_INDIRECT_BUFFER;
                        srp_cmd->data_out_count = numbuf;
                } else {
                        srp_cmd->data_in_format = SRP_INDIRECT_BUFFER;
                        srp_cmd->data_in_count = numbuf;
                }
        }
}
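
/* A sketch of the two buffer descriptor shapes used below (the types
 * come from ibmvscsi.h; only the fields this file touches are shown):
 *
 *   SRP_DIRECT_BUFFER:   a single struct memory_descriptor in
 *                        srp_cmd->additional_data, holding
 *                        { virtual_address, memory_handle, length }
 *
 *   SRP_INDIRECT_BUFFER: a struct indirect_descriptor there instead:
 *                        a head memory_descriptor covering the list,
 *                        a total_length, and up to MAX_INDIRECT_BUFS
 *                        memory_descriptor entries in list[]
 */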

/**
 * unmap_cmd_data: - Unmap the data pointed to by the srp_cmd, based on the format
 * @cmd:        srp_cmd whose additional_data member will be unmapped
 * @dev:        device for which the memory is mapped
 */
static void unmap_cmd_data(struct srp_cmd *cmd, struct device *dev)
{
        int i;

        if ((cmd->data_out_format == SRP_NO_BUFFER) &&
            (cmd->data_in_format == SRP_NO_BUFFER))
                return;
        else if ((cmd->data_out_format == SRP_DIRECT_BUFFER) ||
                 (cmd->data_in_format == SRP_DIRECT_BUFFER)) {
                struct memory_descriptor *data =
                        (struct memory_descriptor *)cmd->additional_data;
                dma_unmap_single(dev, data->virtual_address, data->length,
                                 DMA_BIDIRECTIONAL);
        } else {
                struct indirect_descriptor *indirect =
                        (struct indirect_descriptor *)cmd->additional_data;
                int num_mapped = indirect->head.length /
                        sizeof(indirect->list[0]);
                for (i = 0; i < num_mapped; ++i) {
                        struct memory_descriptor *data = &indirect->list[i];
                        dma_unmap_single(dev,
                                         data->virtual_address,
                                         data->length, DMA_BIDIRECTIONAL);
                }
        }
}

/**
 * map_sg_data: - Maps DMA for a scatterlist and initializes descriptor fields
 * @cmd:        Scsi_Cmnd with the scatterlist
 * @srp_cmd:    srp_cmd that contains the memory descriptor
 * @dev:        device for which to map dma memory
 *
 * Called by map_data_for_srp_cmd() when building srp cmd from scsi cmd.
 * Returns 1 on success.
 */
static int map_sg_data(struct scsi_cmnd *cmd,
                       struct srp_cmd *srp_cmd, struct device *dev)
{
        int i, sg_mapped;
        u64 total_length = 0;
        struct scatterlist *sg = cmd->request_buffer;
        struct memory_descriptor *data =
            (struct memory_descriptor *)srp_cmd->additional_data;
        struct indirect_descriptor *indirect =
            (struct indirect_descriptor *)data;

        sg_mapped = dma_map_sg(dev, sg, cmd->use_sg, DMA_BIDIRECTIONAL);

        if (sg_mapped == 0)
                return 0;

        set_srp_direction(cmd, srp_cmd, sg_mapped);

        /* special case; we can use a single direct descriptor */
        if (sg_mapped == 1) {
                data->virtual_address = sg_dma_address(&sg[0]);
                data->length = sg_dma_len(&sg[0]);
                data->memory_handle = 0;
                return 1;
        }

        if (sg_mapped > MAX_INDIRECT_BUFS) {
                printk(KERN_ERR
                       "ibmvscsi: More than %d mapped sg entries, got %d\n",
                       MAX_INDIRECT_BUFS, sg_mapped);
                return 0;
        }

        indirect->head.virtual_address = 0;
        indirect->head.length = sg_mapped * sizeof(indirect->list[0]);
        indirect->head.memory_handle = 0;
        for (i = 0; i < sg_mapped; ++i) {
                struct memory_descriptor *descr = &indirect->list[i];
                struct scatterlist *sg_entry = &sg[i];
                descr->virtual_address = sg_dma_address(sg_entry);
                descr->length = sg_dma_len(sg_entry);
                descr->memory_handle = 0;
                total_length += sg_dma_len(sg_entry);
        }
        indirect->total_length = total_length;

        return 1;
}
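
/* The over-limit check above should not fire in practice: sg_tablesize
 * in the host template below is set to MAX_INDIRECT_BUFS, so the SCSI
 * mid-layer should never hand us a longer scatterlist.  The check is
 * purely defensive.
 */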

/**
 * map_single_data: - Maps memory and initializes memory descriptor fields
 * @cmd:        struct scsi_cmnd with the memory to be mapped
 * @srp_cmd:    srp_cmd that contains the memory descriptor
 * @dev:        device for which to map dma memory
 *
 * Called by map_data_for_srp_cmd() when building srp cmd from scsi cmd.
 * Returns 1 on success.
 */
static int map_single_data(struct scsi_cmnd *cmd,
                           struct srp_cmd *srp_cmd, struct device *dev)
{
        struct memory_descriptor *data =
            (struct memory_descriptor *)srp_cmd->additional_data;

        data->virtual_address =
                dma_map_single(dev, cmd->request_buffer,
                               cmd->request_bufflen,
                               DMA_BIDIRECTIONAL);
        if (dma_mapping_error(data->virtual_address)) {
                printk(KERN_ERR
                       "ibmvscsi: Unable to map request_buffer for command!\n");
                return 0;
        }
        data->length = cmd->request_bufflen;
        data->memory_handle = 0;

        set_srp_direction(cmd, srp_cmd, 1);

        return 1;
}

/**
 * map_data_for_srp_cmd: - Calls functions to map data for srp cmds
 * @cmd:        struct scsi_cmnd with the memory to be mapped
 * @srp_cmd:    srp_cmd that contains the memory descriptor
 * @dev:        dma device for which to map dma memory
 *
 * Called by ibmvscsi_queuecommand() when converting scsi cmds to srp cmds.
 * Returns 1 on success.
 */
static int map_data_for_srp_cmd(struct scsi_cmnd *cmd,
                                struct srp_cmd *srp_cmd, struct device *dev)
{
        switch (cmd->sc_data_direction) {
        case DMA_FROM_DEVICE:
        case DMA_TO_DEVICE:
                break;
        case DMA_NONE:
                return 1;
        case DMA_BIDIRECTIONAL:
                printk(KERN_ERR
                       "ibmvscsi: Can't map DMA_BIDIRECTIONAL to read/write\n");
                return 0;
        default:
                printk(KERN_ERR
                       "ibmvscsi: Unknown data direction 0x%02x; can't map!\n",
                       cmd->sc_data_direction);
                return 0;
        }

        if (!cmd->request_buffer)
                return 1;
        if (cmd->use_sg)
                return map_sg_data(cmd, srp_cmd, dev);
        return map_single_data(cmd, srp_cmd, dev);
}

/* ------------------------------------------------------------
 * Routines for sending and receiving SRPs
 */
/**
 * ibmvscsi_send_srp_event: - Transforms event to u64 array and calls send_crq()
 * @evt_struct: evt_struct to be sent
 * @hostdata:   ibmvscsi_host_data of host
 *
 * Returns the value returned from ibmvscsi_send_crq(). (Zero for success)
 * Note that this routine assumes that host_lock is held for synchronization.
 */
static int ibmvscsi_send_srp_event(struct srp_event_struct *evt_struct,
                                   struct ibmvscsi_host_data *hostdata)
{
        struct scsi_cmnd *cmnd = evt_struct->cmnd;
        u64 *crq_as_u64 = (u64 *) &evt_struct->crq;
        int rc;

        /* If we have exhausted our request limit, just fail this request.
         * Note that there are rare cases involving driver generated requests
         * (such as task management requests) where the mid layer may think we
         * can handle more requests (can_queue) when we actually can't
         */
        if ((evt_struct->crq.format == VIOSRP_SRP_FORMAT) &&
            (atomic_dec_if_positive(&hostdata->request_limit) < 0)) {
                /* See if the adapter is disabled */
                if (atomic_read(&hostdata->request_limit) < 0) {
                        if (cmnd)
                                cmnd->result = DID_ERROR << 16;
                        if (evt_struct->cmnd_done)
                                evt_struct->cmnd_done(cmnd);
                        unmap_cmd_data(&evt_struct->iu.srp.cmd,
                                       hostdata->dev);
                        free_event_struct(&hostdata->pool, evt_struct);
                        return 0;
                } else {
                        printk(KERN_WARNING
                               "ibmvscsi: Warning, request_limit exceeded\n");
                        unmap_cmd_data(&evt_struct->iu.srp.cmd,
                                       hostdata->dev);
                        free_event_struct(&hostdata->pool, evt_struct);
                        return SCSI_MLQUEUE_HOST_BUSY;
                }
        }

        /* Copy the IU into the transfer area */
        *evt_struct->xfer_iu = evt_struct->iu;
        evt_struct->xfer_iu->srp.generic.tag = (u64)evt_struct;

        /* Add this to the sent list.  We need to do this
         * before we actually send
         * in case it comes back REALLY fast
         */
        list_add_tail(&evt_struct->list, &hostdata->sent);

        if ((rc =
             ibmvscsi_send_crq(hostdata, crq_as_u64[0], crq_as_u64[1])) != 0) {
                list_del(&evt_struct->list);

                cmnd = evt_struct->cmnd;
                printk(KERN_ERR "ibmvscsi: failed to send event struct rc %d\n",
                       rc);
                unmap_cmd_data(&evt_struct->iu.srp.cmd, hostdata->dev);
                free_event_struct(&hostdata->pool, evt_struct);
                if (cmnd)
                        cmnd->result = DID_ERROR << 16;
                if (evt_struct->cmnd_done)
                        evt_struct->cmnd_done(cmnd);
        }

        return 0;
}
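
/* The tag written into the IU above is simply the kernel address of the
 * srp_event_struct.  The server is expected to echo it back, and
 * ibmvscsi_handle_crq() recovers the event from crq->IU_data_ptr and
 * checks it with valid_event_struct() before trusting it.
 */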

/**
 * handle_cmd_rsp: -  Handle responses from commands
 * @evt_struct: srp_event_struct to be handled
 *
 * Used as a callback when sending scsi cmds.
 * Gets called by ibmvscsi_handle_crq()
 */
static void handle_cmd_rsp(struct srp_event_struct *evt_struct)
{
        struct srp_rsp *rsp = &evt_struct->xfer_iu->srp.rsp;
        struct scsi_cmnd *cmnd = evt_struct->cmnd;

        if (cmnd) {
                cmnd->result = rsp->status;
                if (((cmnd->result >> 1) & 0x1f) == CHECK_CONDITION)
                        memcpy(cmnd->sense_buffer,
                               rsp->sense_and_response_data,
                               rsp->sense_data_list_length);
                unmap_cmd_data(&evt_struct->iu.srp.cmd,
                               evt_struct->hostdata->dev);

                if (rsp->doover)
                        cmnd->resid = rsp->data_out_residual_count;
                else if (rsp->diover)
                        cmnd->resid = rsp->data_in_residual_count;
        }

        if (evt_struct->cmnd_done)
                evt_struct->cmnd_done(cmnd);
}

/**
 * lun_from_dev: - Returns the lun of the scsi device
 * @dev:        struct scsi_device
 */
static inline u16 lun_from_dev(struct scsi_device *dev)
{
        return (0x2 << 14) | (dev->id << 8) | (dev->channel << 5) | dev->lun;
}
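
/* A sketch of the 16-bit LUN encoding built above, matching the
 * 6/3/5-bit split noted at the top of the file (the 0x2 in the top two
 * bits is the SAM logical unit addressing method):
 *
 *   bits 15-14: 0b10   bits 13-8: id   bits 7-5: channel   bits 4-0: lun
 */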

/**
 * ibmvscsi_queuecommand: - The queuecommand function of the scsi template
 * @cmnd:       struct scsi_cmnd to be executed
 * @done:       Callback function to be called when cmnd is completed
 */
static int ibmvscsi_queuecommand(struct scsi_cmnd *cmnd,
                                 void (*done) (struct scsi_cmnd *))
{
        struct srp_cmd *srp_cmd;
        struct srp_event_struct *evt_struct;
        struct ibmvscsi_host_data *hostdata =
                (struct ibmvscsi_host_data *)&cmnd->device->host->hostdata;
        u16 lun = lun_from_dev(cmnd->device);

        evt_struct = get_event_struct(&hostdata->pool);
        if (!evt_struct)
                return SCSI_MLQUEUE_HOST_BUSY;

        init_event_struct(evt_struct,
                          handle_cmd_rsp,
                          VIOSRP_SRP_FORMAT,
                          cmnd->timeout);

        evt_struct->cmnd = cmnd;
        evt_struct->cmnd_done = done;

        /* Set up the actual SRP IU */
        srp_cmd = &evt_struct->iu.srp.cmd;
        memset(srp_cmd, 0x00, sizeof(*srp_cmd));
        srp_cmd->type = SRP_CMD_TYPE;
        memcpy(srp_cmd->cdb, cmnd->cmnd, sizeof(cmnd->cmnd));
        srp_cmd->lun = ((u64) lun) << 48;

        if (!map_data_for_srp_cmd(cmnd, srp_cmd, hostdata->dev)) {
                printk(KERN_ERR "ibmvscsi: couldn't convert cmd to srp_cmd\n");
                free_event_struct(&hostdata->pool, evt_struct);
                return SCSI_MLQUEUE_HOST_BUSY;
        }

        /* Fix up dma address of the buffer itself */
        if ((srp_cmd->data_out_format == SRP_INDIRECT_BUFFER) ||
            (srp_cmd->data_in_format == SRP_INDIRECT_BUFFER)) {
                struct indirect_descriptor *indirect =
                    (struct indirect_descriptor *)srp_cmd->additional_data;
                indirect->head.virtual_address = evt_struct->crq.IU_data_ptr +
                    offsetof(struct srp_cmd, additional_data) +
                    offsetof(struct indirect_descriptor, list);
        }

        return ibmvscsi_send_srp_event(evt_struct, hostdata);
}

/* ------------------------------------------------------------
 * Routines for driver initialization
 */
/**
 * adapter_info_rsp: - Handle response to MAD adapter info request
 * @evt_struct: srp_event_struct with the response
 *
 * Used as a "done" callback when sending adapter_info. Gets called
 * by ibmvscsi_handle_crq()
 */
static void adapter_info_rsp(struct srp_event_struct *evt_struct)
{
        struct ibmvscsi_host_data *hostdata = evt_struct->hostdata;
        dma_unmap_single(hostdata->dev,
                         evt_struct->iu.mad.adapter_info.buffer,
                         evt_struct->iu.mad.adapter_info.common.length,
                         DMA_BIDIRECTIONAL);

        if (evt_struct->xfer_iu->mad.adapter_info.common.status) {
                printk(KERN_ERR "ibmvscsi: error %d getting adapter info\n",
                       evt_struct->xfer_iu->mad.adapter_info.common.status);
        } else {
                printk(KERN_INFO "ibmvscsi: host srp version: %s, "
                       "host partition %s (%d), OS %d, max io %u\n",
                       hostdata->madapter_info.srp_version,
                       hostdata->madapter_info.partition_name,
                       hostdata->madapter_info.partition_number,
                       hostdata->madapter_info.os_type,
                       hostdata->madapter_info.port_max_txu[0]);

                if (hostdata->madapter_info.port_max_txu[0])
                        hostdata->host->max_sectors =
                                hostdata->madapter_info.port_max_txu[0] >> 9;
        }
}

/**
 * send_mad_adapter_info: - Sends the mad adapter info request
 *      and stores the result so it can be retrieved with
 *      sysfs.  We COULD consider causing a failure if the
 *      returned SRP version doesn't match ours.
 * @hostdata:   ibmvscsi_host_data of host
 */
static void send_mad_adapter_info(struct ibmvscsi_host_data *hostdata)
{
        struct viosrp_adapter_info *req;
        struct srp_event_struct *evt_struct;

        memset(&hostdata->madapter_info, 0x00, sizeof(hostdata->madapter_info));

        evt_struct = get_event_struct(&hostdata->pool);
        if (!evt_struct) {
                printk(KERN_ERR "ibmvscsi: couldn't allocate an event "
                       "for ADAPTER_INFO_REQ!\n");
                return;
        }

        init_event_struct(evt_struct,
                          adapter_info_rsp,
                          VIOSRP_MAD_FORMAT,
                          init_timeout * HZ);

        req = &evt_struct->iu.mad.adapter_info;
        memset(req, 0x00, sizeof(*req));

        req->common.type = VIOSRP_ADAPTER_INFO_TYPE;
        req->common.length = sizeof(hostdata->madapter_info);
        req->buffer = dma_map_single(hostdata->dev,
                                     &hostdata->madapter_info,
                                     sizeof(hostdata->madapter_info),
                                     DMA_BIDIRECTIONAL);

        if (dma_mapping_error(req->buffer)) {
                printk(KERN_ERR
                       "ibmvscsi: Unable to map request_buffer "
                       "for adapter_info!\n");
                free_event_struct(&hostdata->pool, evt_struct);
                return;
        }

        if (ibmvscsi_send_srp_event(evt_struct, hostdata))
                printk(KERN_ERR "ibmvscsi: couldn't send ADAPTER_INFO_REQ!\n");
}

/**
 * login_rsp: - Handle response to SRP login request
 * @evt_struct: srp_event_struct with the response
 *
 * Used as a "done" callback when sending srp_login. Gets called
 * by ibmvscsi_handle_crq()
 */
static void login_rsp(struct srp_event_struct *evt_struct)
{
        struct ibmvscsi_host_data *hostdata = evt_struct->hostdata;
        switch (evt_struct->xfer_iu->srp.generic.type) {
        case SRP_LOGIN_RSP_TYPE:        /* it worked! */
                break;
        case SRP_LOGIN_REJ_TYPE:        /* refused! */
                printk(KERN_INFO "ibmvscsi: SRP_LOGIN_REQ rejected\n");
                /* Login failed.  */
                atomic_set(&hostdata->request_limit, -1);
                return;
        default:
                printk(KERN_ERR
                       "ibmvscsi: Invalid login response typecode 0x%02x!\n",
                       evt_struct->xfer_iu->srp.generic.type);
                /* Login failed.  */
                atomic_set(&hostdata->request_limit, -1);
                return;
        }

        printk(KERN_INFO "ibmvscsi: SRP_LOGIN succeeded\n");

        if (evt_struct->xfer_iu->srp.login_rsp.request_limit_delta >
            (max_requests - 2))
                evt_struct->xfer_iu->srp.login_rsp.request_limit_delta =
                    max_requests - 2;

        /* Now we know what the real request-limit is */
        atomic_set(&hostdata->request_limit,
                   evt_struct->xfer_iu->srp.login_rsp.request_limit_delta);

        hostdata->host->can_queue =
            evt_struct->xfer_iu->srp.login_rsp.request_limit_delta - 2;

        if (hostdata->host->can_queue < 1) {
                printk(KERN_ERR "ibmvscsi: Invalid request_limit_delta\n");
                return;
        }

        send_mad_adapter_info(hostdata);
        return;
}

/**
 * send_srp_login: - Sends the srp login
 * @hostdata:   ibmvscsi_host_data of host
 *
 * Returns zero if successful.
 */
static int send_srp_login(struct ibmvscsi_host_data *hostdata)
{
        int rc;
        unsigned long flags;
        struct srp_login_req *login;
        struct srp_event_struct *evt_struct = get_event_struct(&hostdata->pool);
        if (!evt_struct) {
                printk(KERN_ERR
                       "ibmvscsi: couldn't allocate an event for login req!\n");
                return FAILED;
        }

        init_event_struct(evt_struct,
                          login_rsp,
                          VIOSRP_SRP_FORMAT,
                          init_timeout * HZ);

        login = &evt_struct->iu.srp.login_req;
        login->type = SRP_LOGIN_REQ_TYPE;
        login->max_requested_initiator_to_target_iulen = sizeof(union srp_iu);
        login->required_buffer_formats = 0x0006;

        /* Start out with a request limit of 1, since this is negotiated in
         * the login request we are just sending
         */
        atomic_set(&hostdata->request_limit, 1);

        spin_lock_irqsave(hostdata->host->host_lock, flags);
        rc = ibmvscsi_send_srp_event(evt_struct, hostdata);
        spin_unlock_irqrestore(hostdata->host->host_lock, flags);
        return rc;
}

/**
 * sync_completion: Signal that a synchronous command has completed
 * Note that after returning from this call, the evt_struct is freed;
 * the caller waiting on this completion shouldn't touch the evt_struct
 * again.
 */
static void sync_completion(struct srp_event_struct *evt_struct)
{
        complete(&evt_struct->comp);
}

/**
 * ibmvscsi_eh_abort_handler: Abort a command...from scsi host template
 * send this over to the server and wait synchronously for the response
 */
static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd)
{
        struct ibmvscsi_host_data *hostdata =
            (struct ibmvscsi_host_data *)cmd->device->host->hostdata;
        struct srp_tsk_mgmt *tsk_mgmt;
        struct srp_event_struct *evt;
        struct srp_event_struct *tmp_evt, *found_evt;
        u16 lun = lun_from_dev(cmd->device);

        /* First, find this command in our sent list so we can figure
         * out the correct tag
         */
        found_evt = NULL;
        list_for_each_entry(tmp_evt, &hostdata->sent, list) {
                if (tmp_evt->cmnd == cmd) {
                        found_evt = tmp_evt;
                        break;
                }
        }

        if (!found_evt)
                return FAILED;

        evt = get_event_struct(&hostdata->pool);
        if (evt == NULL) {
                printk(KERN_ERR "ibmvscsi: failed to allocate abort event\n");
                return FAILED;
        }

        init_event_struct(evt,
                          sync_completion,
                          VIOSRP_SRP_FORMAT,
                          init_timeout * HZ);

        tsk_mgmt = &evt->iu.srp.tsk_mgmt;

        /* Set up an abort SRP command */
        memset(tsk_mgmt, 0x00, sizeof(*tsk_mgmt));
        tsk_mgmt->type = SRP_TSK_MGMT_TYPE;
        tsk_mgmt->lun = ((u64) lun) << 48;
        tsk_mgmt->task_mgmt_flags = 0x01;       /* ABORT TASK */
        tsk_mgmt->managed_task_tag = (u64) found_evt;

        printk(KERN_INFO "ibmvscsi: aborting command. lun 0x%lx, tag 0x%lx\n",
               tsk_mgmt->lun, tsk_mgmt->managed_task_tag);

        init_completion(&evt->comp);
        if (ibmvscsi_send_srp_event(evt, hostdata) != 0) {
                printk(KERN_ERR "ibmvscsi: failed to send abort() event\n");
                return FAILED;
        }

        spin_unlock_irq(hostdata->host->host_lock);
        wait_for_completion(&evt->comp);
        spin_lock_irq(hostdata->host->host_lock);

        /* Because we dropped the spinlock above, it's possible
         * the event is no longer in our list.  Make sure it didn't
         * complete while we were aborting
         */
        found_evt = NULL;
        list_for_each_entry(tmp_evt, &hostdata->sent, list) {
                if (tmp_evt->cmnd == cmd) {
                        found_evt = tmp_evt;
                        break;
                }
        }

        printk(KERN_INFO
               "ibmvscsi: successfully aborted task tag 0x%lx\n",
               tsk_mgmt->managed_task_tag);

        if (found_evt == NULL)
                return SUCCESS;

        cmd->result = (DID_ABORT << 16);
        list_del(&found_evt->list);
        unmap_cmd_data(&found_evt->iu.srp.cmd, found_evt->hostdata->dev);
        free_event_struct(&found_evt->hostdata->pool, found_evt);
        atomic_inc(&hostdata->request_limit);
        return SUCCESS;
}

/**
 * ibmvscsi_eh_device_reset_handler: Reset a single LUN...from scsi host
 * template send this over to the server and wait synchronously for the
 * response
 */
static int ibmvscsi_eh_device_reset_handler(struct scsi_cmnd *cmd)
{
        struct ibmvscsi_host_data *hostdata =
            (struct ibmvscsi_host_data *)cmd->device->host->hostdata;

        struct srp_tsk_mgmt *tsk_mgmt;
        struct srp_event_struct *evt;
        struct srp_event_struct *tmp_evt, *pos;
        u16 lun = lun_from_dev(cmd->device);

        evt = get_event_struct(&hostdata->pool);
        if (evt == NULL) {
                printk(KERN_ERR "ibmvscsi: failed to allocate reset event\n");
                return FAILED;
        }

        init_event_struct(evt,
                          sync_completion,
                          VIOSRP_SRP_FORMAT,
                          init_timeout * HZ);

        tsk_mgmt = &evt->iu.srp.tsk_mgmt;

        /* Set up a lun reset SRP command */
        memset(tsk_mgmt, 0x00, sizeof(*tsk_mgmt));
        tsk_mgmt->type = SRP_TSK_MGMT_TYPE;
        tsk_mgmt->lun = ((u64) lun) << 48;
        tsk_mgmt->task_mgmt_flags = 0x08;       /* LUN RESET */

        printk(KERN_INFO "ibmvscsi: resetting device. lun 0x%lx\n",
               tsk_mgmt->lun);

        init_completion(&evt->comp);
        if (ibmvscsi_send_srp_event(evt, hostdata) != 0) {
                printk(KERN_ERR "ibmvscsi: failed to send reset event\n");
                return FAILED;
        }

        spin_unlock_irq(hostdata->host->host_lock);
        wait_for_completion(&evt->comp);
        spin_lock_irq(hostdata->host->host_lock);

        /* We need to find all commands for this LUN that have not yet been
         * responded to, and fail them with DID_RESET
         */
        list_for_each_entry_safe(tmp_evt, pos, &hostdata->sent, list) {
                if ((tmp_evt->cmnd) && (tmp_evt->cmnd->device == cmd->device)) {
                        tmp_evt->cmnd->result = (DID_RESET << 16);
                        list_del(&tmp_evt->list);
                        unmap_cmd_data(&tmp_evt->iu.srp.cmd,
                                       tmp_evt->hostdata->dev);
                        free_event_struct(&tmp_evt->hostdata->pool,
                                          tmp_evt);
                        atomic_inc(&hostdata->request_limit);
                        if (tmp_evt->cmnd_done)
                                tmp_evt->cmnd_done(tmp_evt->cmnd);
                        else if (tmp_evt->done)
                                tmp_evt->done(tmp_evt);
                }
        }
        return SUCCESS;
}

/**
 * purge_requests: Our virtual adapter just shut down.  Purge any sent requests
 * @hostdata:    the adapter
 */
static void purge_requests(struct ibmvscsi_host_data *hostdata)
{
        struct srp_event_struct *tmp_evt, *pos;
        unsigned long flags;

        spin_lock_irqsave(hostdata->host->host_lock, flags);
        list_for_each_entry_safe(tmp_evt, pos, &hostdata->sent, list) {
                list_del(&tmp_evt->list);
                if (tmp_evt->cmnd) {
                        tmp_evt->cmnd->result = (DID_ERROR << 16);
                        unmap_cmd_data(&tmp_evt->iu.srp.cmd,
                                       tmp_evt->hostdata->dev);
                        if (tmp_evt->cmnd_done)
                                tmp_evt->cmnd_done(tmp_evt->cmnd);
                } else {
                        if (tmp_evt->done) {
                                tmp_evt->done(tmp_evt);
                        }
                }
                free_event_struct(&tmp_evt->hostdata->pool, tmp_evt);
        }
        spin_unlock_irqrestore(hostdata->host->host_lock, flags);
}

/**
 * ibmvscsi_handle_crq: - Handles and frees received events in the CRQ
 * @crq:        Command/Response queue
 * @hostdata:   ibmvscsi_host_data of host
 */
void ibmvscsi_handle_crq(struct viosrp_crq *crq,
                         struct ibmvscsi_host_data *hostdata)
{
        unsigned long flags;
        struct srp_event_struct *evt_struct =
            (struct srp_event_struct *)crq->IU_data_ptr;
        switch (crq->valid) {
        case 0xC0:              /* initialization */
                switch (crq->format) {
                case 0x01:      /* Initialization message */
                        printk(KERN_INFO "ibmvscsi: partner initialized\n");
                        /* Send back a response */
                        if (ibmvscsi_send_crq(hostdata,
                                              0xC002000000000000LL, 0) == 0) {
                                /* Now login */
                                send_srp_login(hostdata);
                        } else {
                                printk(KERN_ERR
                                       "ibmvscsi: Unable to send init rsp\n");
                        }

                        break;
                case 0x02:      /* Initialization response */
                        printk(KERN_INFO
                               "ibmvscsi: partner initialization complete\n");

                        /* Now login */
                        send_srp_login(hostdata);
                        break;
                default:
                        printk(KERN_ERR "ibmvscsi: unknown crq message type\n");
                }
                return;
        case 0xFF:              /* Hypervisor telling us the connection is closed */
                printk(KERN_INFO "ibmvscsi: Virtual adapter failed!\n");

                atomic_set(&hostdata->request_limit, -1);
                purge_requests(hostdata);
                ibmvscsi_reset_crq_queue(&hostdata->queue, hostdata);
                return;
        case 0x80:              /* real payload */
                break;
        default:
                printk(KERN_ERR
                       "ibmvscsi: got an invalid message type 0x%02x\n",
                       crq->valid);
                return;
        }

        /* The only kind of payload CRQs we should get are responses to
         * things we send. Make sure this response is to something we
         * actually sent
         */
        if (!valid_event_struct(&hostdata->pool, evt_struct)) {
                printk(KERN_ERR
                       "ibmvscsi: returned correlation_token 0x%p is invalid!\n",
                       (void *)crq->IU_data_ptr);
                return;
        }

        if (crq->format == VIOSRP_SRP_FORMAT)
                atomic_add(evt_struct->xfer_iu->srp.rsp.request_limit_delta,
                           &hostdata->request_limit);

        if (evt_struct->done)
                evt_struct->done(evt_struct);
        else
                printk(KERN_ERR
                       "ibmvscsi: returned done() is NULL; not running it!\n");

        /*
         * Lock the host_lock before messing with these structures, since we
         * are running in a task context
         */
        spin_lock_irqsave(evt_struct->hostdata->host->host_lock, flags);
        list_del(&evt_struct->list);
        free_event_struct(&evt_struct->hostdata->pool, evt_struct);
        spin_unlock_irqrestore(evt_struct->hostdata->host->host_lock, flags);
}

/**
 * ibmvscsi_do_host_config: Send the command to the server to get host
 * configuration data.  The data is opaque to us.
 */
static int ibmvscsi_do_host_config(struct ibmvscsi_host_data *hostdata,
                                   unsigned char *buffer, int length)
{
        struct viosrp_host_config *host_config;
        struct srp_event_struct *evt_struct;
        int rc;

        evt_struct = get_event_struct(&hostdata->pool);
        if (!evt_struct) {
                printk(KERN_ERR
                       "ibmvscsi: couldn't allocate event for HOST_CONFIG!\n");
                return -1;
        }

        init_event_struct(evt_struct,
                          sync_completion,
                          VIOSRP_MAD_FORMAT,
                          init_timeout * HZ);

        host_config = &evt_struct->iu.mad.host_config;

        /* Set up a HOST_CONFIG MAD command */
        memset(host_config, 0x00, sizeof(*host_config));
        host_config->common.type = VIOSRP_HOST_CONFIG_TYPE;
        host_config->common.length = length;
        host_config->buffer = dma_map_single(hostdata->dev, buffer, length,
                                             DMA_BIDIRECTIONAL);

        if (dma_mapping_error(host_config->buffer)) {
                printk(KERN_ERR
                       "ibmvscsi: dma_mapping error getting host config\n");
                free_event_struct(&hostdata->pool, evt_struct);
                return -1;
        }

        init_completion(&evt_struct->comp);
        rc = ibmvscsi_send_srp_event(evt_struct, hostdata);
        if (rc == 0) {
                wait_for_completion(&evt_struct->comp);
                dma_unmap_single(hostdata->dev, host_config->buffer,
                                 length, DMA_BIDIRECTIONAL);
        }

        return rc;
}

/* ------------------------------------------------------------
 * sysfs attributes
 */
static ssize_t show_host_srp_version(struct class_device *class_dev, char *buf)
{
        struct Scsi_Host *shost = class_to_shost(class_dev);
        struct ibmvscsi_host_data *hostdata =
            (struct ibmvscsi_host_data *)shost->hostdata;
        int len;

        len = snprintf(buf, PAGE_SIZE, "%s\n",
                       hostdata->madapter_info.srp_version);
        return len;
}

static struct class_device_attribute ibmvscsi_host_srp_version = {
        .attr = {
                 .name = "srp_version",
                 .mode = S_IRUGO,
                 },
        .show = show_host_srp_version,
};

static ssize_t show_host_partition_name(struct class_device *class_dev,
                                        char *buf)
{
        struct Scsi_Host *shost = class_to_shost(class_dev);
        struct ibmvscsi_host_data *hostdata =
            (struct ibmvscsi_host_data *)shost->hostdata;
        int len;

        len = snprintf(buf, PAGE_SIZE, "%s\n",
                       hostdata->madapter_info.partition_name);
        return len;
}

static struct class_device_attribute ibmvscsi_host_partition_name = {
        .attr = {
                 .name = "partition_name",
                 .mode = S_IRUGO,
                 },
        .show = show_host_partition_name,
};

static ssize_t show_host_partition_number(struct class_device *class_dev,
                                          char *buf)
{
        struct Scsi_Host *shost = class_to_shost(class_dev);
        struct ibmvscsi_host_data *hostdata =
            (struct ibmvscsi_host_data *)shost->hostdata;
        int len;

        len = snprintf(buf, PAGE_SIZE, "%d\n",
                       hostdata->madapter_info.partition_number);
        return len;
}

static struct class_device_attribute ibmvscsi_host_partition_number = {
        .attr = {
                 .name = "partition_number",
                 .mode = S_IRUGO,
                 },
        .show = show_host_partition_number,
};

static ssize_t show_host_mad_version(struct class_device *class_dev, char *buf)
{
        struct Scsi_Host *shost = class_to_shost(class_dev);
        struct ibmvscsi_host_data *hostdata =
            (struct ibmvscsi_host_data *)shost->hostdata;
        int len;

        len = snprintf(buf, PAGE_SIZE, "%d\n",
                       hostdata->madapter_info.mad_version);
        return len;
}

static struct class_device_attribute ibmvscsi_host_mad_version = {
        .attr = {
                 .name = "mad_version",
                 .mode = S_IRUGO,
                 },
        .show = show_host_mad_version,
};

static ssize_t show_host_os_type(struct class_device *class_dev, char *buf)
{
        struct Scsi_Host *shost = class_to_shost(class_dev);
        struct ibmvscsi_host_data *hostdata =
            (struct ibmvscsi_host_data *)shost->hostdata;
        int len;

        len = snprintf(buf, PAGE_SIZE, "%d\n", hostdata->madapter_info.os_type);
        return len;
}

static struct class_device_attribute ibmvscsi_host_os_type = {
        .attr = {
                 .name = "os_type",
                 .mode = S_IRUGO,
                 },
        .show = show_host_os_type,
};

static ssize_t show_host_config(struct class_device *class_dev, char *buf)
{
        struct Scsi_Host *shost = class_to_shost(class_dev);
        struct ibmvscsi_host_data *hostdata =
            (struct ibmvscsi_host_data *)shost->hostdata;

        /* returns null-terminated host config data */
        if (ibmvscsi_do_host_config(hostdata, buf, PAGE_SIZE) == 0)
                return strlen(buf);
        else
                return 0;
}

static struct class_device_attribute ibmvscsi_host_config = {
        .attr = {
                 .name = "config",
                 .mode = S_IRUGO,
                 },
        .show = show_host_config,
};

static struct class_device_attribute *ibmvscsi_attrs[] = {
        &ibmvscsi_host_srp_version,
        &ibmvscsi_host_partition_name,
        &ibmvscsi_host_partition_number,
        &ibmvscsi_host_mad_version,
        &ibmvscsi_host_os_type,
        &ibmvscsi_host_config,
        NULL
};
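
/* With the attribute names above, these show up as read-only files such
 * as /sys/class/scsi_host/hostN/srp_version (path shown for
 * illustration; the exact layout follows the SCSI class device
 * conventions of this kernel era).
 */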

/* ------------------------------------------------------------
 * SCSI driver registration
 */
static struct scsi_host_template driver_template = {
        .module = THIS_MODULE,
        .name = "IBM POWER Virtual SCSI Adapter " IBMVSCSI_VERSION,
        .proc_name = "ibmvscsi",
        .queuecommand = ibmvscsi_queuecommand,
        .eh_abort_handler = ibmvscsi_eh_abort_handler,
        .eh_device_reset_handler = ibmvscsi_eh_device_reset_handler,
        .cmd_per_lun = 16,
        .can_queue = 1,         /* Updated after SRP_LOGIN */
        .this_id = -1,
        .sg_tablesize = MAX_INDIRECT_BUFS,
        .use_clustering = ENABLE_CLUSTERING,
        .shost_attrs = ibmvscsi_attrs,
};

/**
 * ibmvscsi_probe: - Called by bus code for each adapter
 */
static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
{
        struct ibmvscsi_host_data *hostdata;
        struct Scsi_Host *host;
        struct device *dev = &vdev->dev;
        unsigned long wait_switch = 0;

        vdev->dev.driver_data = NULL;

        host = scsi_host_alloc(&driver_template, sizeof(*hostdata));
        if (!host) {
                printk(KERN_ERR "ibmvscsi: couldn't allocate host data\n");
                goto scsi_host_alloc_failed;
        }

        hostdata = (struct ibmvscsi_host_data *)host->hostdata;
        memset(hostdata, 0x00, sizeof(*hostdata));
        INIT_LIST_HEAD(&hostdata->sent);
        hostdata->host = host;
        hostdata->dev = dev;
        atomic_set(&hostdata->request_limit, -1);
        hostdata->host->max_sectors = max_sectors;

        if (ibmvscsi_init_crq_queue(&hostdata->queue, hostdata,
                                    max_requests) != 0) {
                printk(KERN_ERR "ibmvscsi: couldn't initialize crq\n");
                goto init_crq_failed;
        }
        if (initialize_event_pool(&hostdata->pool, max_requests, hostdata) != 0) {
                printk(KERN_ERR "ibmvscsi: couldn't initialize event pool\n");
                goto init_pool_failed;
        }

        host->max_lun = 8;
        host->max_id = max_id;
        host->max_channel = max_channel;

        if (scsi_add_host(hostdata->host, hostdata->dev))
                goto add_host_failed;

        /* Try to send an initialization message.  Note that this is allowed
         * to fail if the other end is not active.  In that case we don't
         * want to scan
         */
        if (ibmvscsi_send_crq(hostdata, 0xC001000000000000LL, 0) == 0) {
                /*
                 * Wait around max init_timeout secs for the adapter to finish
                 * initializing. When we are done initializing, we will have a
                 * valid request_limit.  We don't want Linux scanning before
                 * we are ready.
                 */
                for (wait_switch = jiffies + (init_timeout * HZ);
                     time_before(jiffies, wait_switch) &&
                     atomic_read(&hostdata->request_limit) < 2;) {

                        set_current_state(TASK_UNINTERRUPTIBLE);
                        schedule_timeout(HZ / 100);
                }

                /* if we now have a valid request_limit, initiate a scan */
                if (atomic_read(&hostdata->request_limit) > 0)
                        scsi_scan_host(host);
        }

        vdev->dev.driver_data = hostdata;
        return 0;

      add_host_failed:
        release_event_pool(&hostdata->pool, hostdata);
      init_pool_failed:
        ibmvscsi_release_crq_queue(&hostdata->queue, hostdata, max_requests);
      init_crq_failed:
        scsi_host_put(host);
      scsi_host_alloc_failed:
        return -1;
}

static int ibmvscsi_remove(struct vio_dev *vdev)
{
        struct ibmvscsi_host_data *hostdata = vdev->dev.driver_data;
        release_event_pool(&hostdata->pool, hostdata);
        ibmvscsi_release_crq_queue(&hostdata->queue, hostdata,
                                   max_requests);

        scsi_remove_host(hostdata->host);
        scsi_host_put(hostdata->host);

        return 0;
}

/**
 * ibmvscsi_device_table: Used by vio.c to match devices in the device tree we
 * support.
 */
static struct vio_device_id ibmvscsi_device_table[] __devinitdata = {
        {"vscsi", "IBM,v-scsi"},
        {0,}
};

MODULE_DEVICE_TABLE(vio, ibmvscsi_device_table);
static struct vio_driver ibmvscsi_driver = {
        .name = "ibmvscsi",
        .id_table = ibmvscsi_device_table,
        .probe = ibmvscsi_probe,
        .remove = ibmvscsi_remove
};

int __init ibmvscsi_module_init(void)
{
        return vio_register_driver(&ibmvscsi_driver);
}

void __exit ibmvscsi_module_exit(void)
{
        vio_unregister_driver(&ibmvscsi_driver);
}

module_init(ibmvscsi_module_init);
module_exit(ibmvscsi_module_exit);