/* vserver 1.9.5.x5 — [linux-2.6.git] / drivers / scsi / scsi_transport_spi.c */
1 /* 
2  *  Parallel SCSI (SPI) transport specific attributes exported to sysfs.
3  *
4  *  Copyright (c) 2003 Silicon Graphics, Inc.  All rights reserved.
5  *  Copyright (c) 2004, 2005 James Bottomley <James.Bottomley@SteelEye.com>
6  *
7  *  This program is free software; you can redistribute it and/or modify
8  *  it under the terms of the GNU General Public License as published by
9  *  the Free Software Foundation; either version 2 of the License, or
10  *  (at your option) any later version.
11  *
12  *  This program is distributed in the hope that it will be useful,
13  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
14  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15  *  GNU General Public License for more details.
16  *
17  *  You should have received a copy of the GNU General Public License
18  *  along with this program; if not, write to the Free Software
19  *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
20  */
21 #include <linux/module.h>
22 #include <linux/init.h>
23 #include <linux/smp_lock.h>
24 #include <linux/list.h>
25 #include <linux/spinlock.h>
26 #include <linux/mm.h>
27 #include <linux/workqueue.h>
28 #include <asm/scatterlist.h>
29 #include <asm/io.h>
30 #include <scsi/scsi.h>
31 #include "scsi_priv.h"
32 #include <scsi/scsi_device.h>
33 #include <scsi/scsi_host.h>
34 #include <scsi/scsi_request.h>
35 #include <scsi/scsi_eh.h>
36 #include <scsi/scsi_transport.h>
37 #include <scsi/scsi_transport_spi.h>
38
39 #define SPI_PRINTK(x, l, f, a...)       dev_printk(l, &(x)->dev, f , ##a)
40
41 #define SPI_NUM_ATTRS 10        /* increase this if you add attributes */
42 #define SPI_OTHER_ATTRS 1       /* Increase this if you add "always
43                                  * on" attributes */
44 #define SPI_HOST_ATTRS  1
45
46 #define SPI_MAX_ECHO_BUFFER_SIZE        4096
47
48 /* Private data accessors (keep these out of the header file) */
49 #define spi_dv_pending(x) (((struct spi_transport_attrs *)&(x)->starget_data)->dv_pending)
50 #define spi_dv_sem(x) (((struct spi_transport_attrs *)&(x)->starget_data)->dv_sem)
51
/* Per-template private state: pairs the generic transport template with
 * the low-level driver's SPI function hooks and the sysfs attribute
 * arrays that scsi_sysfs.c consumes. */
struct spi_internal {
        struct scsi_transport_template t;
        struct spi_function_template *f;        /* LLD-supplied get/set/show hooks */
        /* The actual attributes */
        struct class_device_attribute private_attrs[SPI_NUM_ATTRS];
        /* The array of null terminated pointers to attributes 
         * needed by scsi_sysfs.c */
        struct class_device_attribute *attrs[SPI_NUM_ATTRS + SPI_OTHER_ATTRS + 1];
        struct class_device_attribute private_host_attrs[SPI_HOST_ATTRS];
        struct class_device_attribute *host_attrs[SPI_HOST_ATTRS + 1];
};

/* Recover the wrapping spi_internal from its embedded template member */
#define to_spi_internal(tmpl)   container_of(tmpl, struct spi_internal, t)
65
/* PPR transfer-period factor -> period in ns, as display strings for
 * sysfs (several of these values are fractional, so they cannot be
 * printed as plain integers). */
static const char *const ppr_to_ns[] = {
        /* The PPR values 0-6 are reserved, fill them in when
         * the committee defines them */
        NULL,                   /* 0x00 */
        NULL,                   /* 0x01 */
        NULL,                   /* 0x02 */
        NULL,                   /* 0x03 */
        NULL,                   /* 0x04 */
        NULL,                   /* 0x05 */
        NULL,                   /* 0x06 */
        "3.125",                /* 0x07 */
        "6.25",                 /* 0x08 */
        "12.5",                 /* 0x09 */
        "25",                   /* 0x0a */
        "30.3",                 /* 0x0b */
        "50",                   /* 0x0c */
};
/* The PPR values at which you calculate the period in ns by multiplying
 * by 4 */
#define SPI_STATIC_PPR  0x0c
86
/* enum <-> sysfs string mapping for the host adapter's bus
 * signalling type; used by the show/store "signalling" attribute. */
static struct {
        enum spi_signal_type    value;
        char                    *name;
} signal_types[] = {
        { SPI_SIGNAL_UNKNOWN, "unknown" },
        { SPI_SIGNAL_SE, "SE" },
        { SPI_SIGNAL_LVD, "LVD" },
        { SPI_SIGNAL_HVD, "HVD" },
};
96
97 static inline const char *spi_signal_to_string(enum spi_signal_type type)
98 {
99         int i;
100
101         for (i = 0; i < sizeof(signal_types)/sizeof(signal_types[0]); i++) {
102                 if (type == signal_types[i].value)
103                         return signal_types[i].name;
104         }
105         return NULL;
106 }
107 static inline enum spi_signal_type spi_signal_to_value(const char *name)
108 {
109         int i, len;
110
111         for (i = 0; i < sizeof(signal_types)/sizeof(signal_types[0]); i++) {
112                 len =  strlen(signal_types[i].name);
113                 if (strncmp(name, signal_types[i].name, len) == 0 &&
114                     (name[len] == '\n' || name[len] == '\0'))
115                         return signal_types[i].value;
116         }
117         return SPI_SIGNAL_UNKNOWN;
118 }
119
/* Transport-class setup callback: a new host starts with its bus
 * signalling type unknown until the driver reports otherwise. */
static int spi_host_setup(struct device *dev)
{
        struct Scsi_Host *shost = dev_to_shost(dev);

        spi_signalling(shost) = SPI_SIGNAL_UNKNOWN;

        return 0;
}
128
/* "spi_host" transport class: spi_host_setup runs on setup; no
 * remove or configure callbacks are needed. */
static DECLARE_TRANSPORT_CLASS(spi_host_class,
                               "spi_host",
                               spi_host_setup,
                               NULL,
                               NULL);
134
135 static int spi_host_match(struct attribute_container *cont,
136                           struct device *dev)
137 {
138         struct Scsi_Host *shost;
139         struct spi_internal *i;
140
141         if (!scsi_is_host_device(dev))
142                 return 0;
143
144         shost = dev_to_shost(dev);
145         if (!shost->transportt  || shost->transportt->host_attrs.class
146             != &spi_host_class.class)
147                 return 0;
148
149         i = to_spi_internal(shost->transportt);
150         
151         return &i->t.host_attrs == cont;
152 }
153
/* Per-sdev configure callback: copy the transfer capabilities the
 * device reported in its INQUIRY data up into the target's SPI
 * capability fields (sync/wide/DT/IU/QAS support). */
static int spi_device_configure(struct device *dev)
{
        struct scsi_device *sdev = to_scsi_device(dev);
        struct scsi_target *starget = sdev->sdev_target;

        /* Populate the target capability fields with the values
         * gleaned from the device inquiry */

        spi_support_sync(starget) = scsi_device_sync(sdev);
        spi_support_wide(starget) = scsi_device_wide(sdev);
        spi_support_dt(starget) = scsi_device_dt(sdev);
        spi_support_dt_only(starget) = scsi_device_dt_only(sdev);
        spi_support_ius(starget) = scsi_device_ius(sdev);
        spi_support_qas(starget) = scsi_device_qas(sdev);

        return 0;
}
171
/* Target setup callback: start every target at narrow/async with all
 * transfer options off; real values arrive later via negotiation or
 * Domain Validation. */
static int spi_setup_transport_attrs(struct device *dev)
{
        struct scsi_target *starget = to_scsi_target(dev);

        spi_period(starget) = -1;       /* illegal value */
        spi_offset(starget) = 0;        /* async */
        spi_width(starget) = 0; /* narrow */
        spi_iu(starget) = 0;    /* no IU */
        spi_dt(starget) = 0;    /* ST */
        spi_qas(starget) = 0;
        spi_wr_flow(starget) = 0;
        spi_rd_strm(starget) = 0;
        spi_rti(starget) = 0;
        spi_pcomp_en(starget) = 0;
        spi_dv_pending(starget) = 0;    /* no DV scheduled yet */
        spi_initial_dv(starget) = 0;    /* no DV completed yet */
        init_MUTEX(&spi_dv_sem(starget));       /* serializes DV runs */

        return 0;
}
192
/* Generates show_spi_transport_<field>(): refreshes the cached value
 * through the driver's get_<field> hook (when present), then formats
 * the field from the target's spi_transport_attrs (at most 20 bytes). */
#define spi_transport_show_function(field, format_string)               \
                                                                        \
static ssize_t                                                          \
show_spi_transport_##field(struct class_device *cdev, char *buf)        \
{                                                                       \
        struct scsi_target *starget = transport_class_to_starget(cdev); \
        struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);    \
        struct spi_transport_attrs *tp;                                 \
        struct spi_internal *i = to_spi_internal(shost->transportt);    \
        tp = (struct spi_transport_attrs *)&starget->starget_data;      \
        if (i->f->get_##field)                                          \
                i->f->get_##field(starget);                             \
        return snprintf(buf, 20, format_string, tp->field);             \
}

/* Generates store_spi_transport_<field>(): parses an integer from the
 * sysfs write and passes it to the driver's set_<field> hook.  The
 * SETUP_ATTRIBUTE macro NULLs the store method when no setter exists,
 * so set_<field> is known valid here. */
#define spi_transport_store_function(field, format_string)              \
static ssize_t                                                          \
store_spi_transport_##field(struct class_device *cdev, const char *buf, \
                            size_t count)                               \
{                                                                       \
        int val;                                                        \
        struct scsi_target *starget = transport_class_to_starget(cdev); \
        struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);    \
        struct spi_internal *i = to_spi_internal(shost->transportt);    \
                                                                        \
        val = simple_strtoul(buf, NULL, 0);                             \
        i->f->set_##field(starget, val);                                \
        return count;                                                   \
}

/* One invocation emits the show function, the store function and the
 * class_device_attribute tying them together. */
#define spi_transport_rd_attr(field, format_string)                     \
        spi_transport_show_function(field, format_string)               \
        spi_transport_store_function(field, format_string)              \
static CLASS_DEVICE_ATTR(field, S_IRUGO | S_IWUSR,                      \
                         show_spi_transport_##field,                    \
                         store_spi_transport_##field);
229
/* The Parallel SCSI Transport Attributes: each line instantiates a
 * show function, a store function and the sysfs attribute for one
 * negotiated transfer parameter. */
spi_transport_rd_attr(offset, "%d\n");
spi_transport_rd_attr(width, "%d\n");
spi_transport_rd_attr(iu, "%d\n");
spi_transport_rd_attr(dt, "%d\n");
spi_transport_rd_attr(qas, "%d\n");
spi_transport_rd_attr(wr_flow, "%d\n");
spi_transport_rd_attr(rd_strm, "%d\n");
spi_transport_rd_attr(rti, "%d\n");
spi_transport_rd_attr(pcomp_en, "%d\n");
240
/* sysfs "revalidate" store method: any write triggers Domain
 * Validation on the target's first child scsi_device.  Write-only
 * attribute (no show method). */
static ssize_t
store_spi_revalidate(struct class_device *cdev, const char *buf, size_t count)
{
        struct scsi_target *starget = transport_class_to_starget(cdev);

        /* FIXME: we're relying on an awful lot of device internals
         * here.  We really need a function to get the first available
         * child */
        struct device *dev = container_of(starget->dev.children.next, struct device, node);
        struct scsi_device *sdev = to_scsi_device(dev);
        spi_dv_device(sdev);
        return count;
}
static CLASS_DEVICE_ATTR(revalidate, S_IWUSR, NULL, store_spi_revalidate);
255
/* Translate the period into ns according to the current spec
 * for SDTR/PPR messages */
static ssize_t show_spi_transport_period(struct class_device *cdev, char *buf)

{
        struct scsi_target *starget = transport_class_to_starget(cdev);
        struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
        struct spi_transport_attrs *tp;
        const char *str;
        struct spi_internal *i = to_spi_internal(shost->transportt);

        tp = (struct spi_transport_attrs *)&starget->starget_data;

        /* let the driver refresh its cached period first, if it can */
        if (i->f->get_period)
                i->f->get_period(starget);

        switch(tp->period) {

        /* table-driven factors, some fractional (e.g. "6.25" ns) */
        case 0x07 ... SPI_STATIC_PPR:
                str = ppr_to_ns[tp->period];
                if(!str)
                        str = "reserved";
                break;


        /* above the static table the period is simply factor * 4 ns */
        case (SPI_STATIC_PPR+1) ... 0xff:
                return sprintf(buf, "%d\n", tp->period * 4);

        default:
                str = "unknown";
        }
        return sprintf(buf, "%s\n", str);
}
289
290 static ssize_t
291 store_spi_transport_period(struct class_device *cdev, const char *buf,
292                             size_t count)
293 {
294         struct scsi_target *starget = transport_class_to_starget(cdev);
295         struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
296         struct spi_internal *i = to_spi_internal(shost->transportt);
297         int j, period = -1;
298
299         for (j = 0; j < SPI_STATIC_PPR; j++) {
300                 int len;
301
302                 if(ppr_to_ns[j] == NULL)
303                         continue;
304
305                 len = strlen(ppr_to_ns[j]);
306
307                 if(strncmp(ppr_to_ns[j], buf, len) != 0)
308                         continue;
309
310                 if(buf[len] != '\n')
311                         continue;
312                 
313                 period = j;
314                 break;
315         }
316
317         if (period == -1) {
318                 int val = simple_strtoul(buf, NULL, 0);
319
320
321                 /* Should probably check limits here, but this
322                  * gets reasonably close to OK for most things */
323                 period = val/4;
324         }
325
326         if (period > 0xff)
327                 period = 0xff;
328
329         i->f->set_period(starget, period);
330
331         return count;
332 }
333         
334 static CLASS_DEVICE_ATTR(period, S_IRUGO | S_IWUSR, 
335                          show_spi_transport_period,
336                          store_spi_transport_period);
337
338 static ssize_t show_spi_host_signalling(struct class_device *cdev, char *buf)
339 {
340         struct Scsi_Host *shost = transport_class_to_shost(cdev);
341         struct spi_internal *i = to_spi_internal(shost->transportt);
342
343         if (i->f->get_signalling)
344                 i->f->get_signalling(shost);
345
346         return sprintf(buf, "%s\n", spi_signal_to_string(spi_signalling(shost)));
347 }
348 static ssize_t store_spi_host_signalling(struct class_device *cdev,
349                                          const char *buf, size_t count)
350 {
351         struct Scsi_Host *shost = transport_class_to_shost(cdev);
352         struct spi_internal *i = to_spi_internal(shost->transportt);
353         enum spi_signal_type type = spi_signal_to_value(buf);
354
355         if (type != SPI_SIGNAL_UNKNOWN)
356                 i->f->set_signalling(shost, type);
357
358         return count;
359 }
360 static CLASS_DEVICE_ATTR(signalling, S_IRUGO | S_IWUSR,
361                          show_spi_host_signalling,
362                          store_spi_host_signalling);
363
/* Invoke the driver's set_<x> parameter hook, if provided.  Relies on
 * 'i' (struct spi_internal *) and 'sdev' being in scope at the call
 * site.  Wrapped in do { } while (0) so the expansion is a single
 * statement: the previous bare-if form silently mis-binds when used
 * as the body of an un-braced if/else (dangling-else hazard). */
#define DV_SET(x, y)                                            \
        do {                                                    \
                if (i->f->set_##x)                              \
                        i->f->set_##x(sdev->sdev_target, y);    \
        } while (0)
367
#define DV_LOOPS        3       /* compare iterations per DV test */
#define DV_TIMEOUT      (10*HZ) /* per-command timeout for DV requests */
#define DV_RETRIES      3       /* should only need at most 
                                 * two cc/ua clears */

/* Tri-state result of one Domain Validation compare pass */
enum spi_compare_returns {
        SPI_COMPARE_SUCCESS,
        SPI_COMPARE_FAILURE,
        SPI_COMPARE_SKIP_TEST,  /* device can't do this test; caller falls back */
};
378
379
/* This is for read/write Domain Validation:  If the device supports
 * an echo buffer, we do read/write tests to it.
 *
 * buffer..ptr holds the test pattern (len = ptr - buffer bytes); the
 * region starting at ptr is used as the read-back area.  Returns
 * SPI_COMPARE_SKIP_TEST when the device rejects WRITE BUFFER despite
 * having claimed echo-buffer support. */
static enum spi_compare_returns
spi_dv_device_echo_buffer(struct scsi_request *sreq, u8 *buffer,
                          u8 *ptr, const int retries)
{
        struct scsi_device *sdev = sreq->sr_device;
        int len = ptr - buffer;
        int j, k, r;
        unsigned int pattern = 0x0000ffff;

        /* WRITE BUFFER / READ BUFFER CDBs, mode 0x0a = echo buffer */
        const char spi_write_buffer[] = {
                WRITE_BUFFER, 0x0a, 0, 0, 0, 0, 0, len >> 8, len & 0xff, 0
        };
        const char spi_read_buffer[] = {
                READ_BUFFER, 0x0a, 0, 0, 0, 0, 0, len >> 8, len & 0xff, 0
        };

        /* set up the pattern buffer.  Doesn't matter if we spill
         * slightly beyond since that's where the read buffer is */
        for (j = 0; j < len; ) {

                /* fill the buffer with counting (test a) */
                for ( ; j < min(len, 32); j++)
                        buffer[j] = j;
                k = j;
                /* fill the buffer with alternating words of 0x0 and
                 * 0xffff (test b) */
                for ( ; j < min(len, k + 32); j += 2) {
                        u16 *word = (u16 *)&buffer[j];
                        
                        *word = (j & 0x02) ? 0x0000 : 0xffff;
                }
                k = j;
                /* fill with crosstalk (alternating 0x5555 0xaaa)
                 * (test c) */
                for ( ; j < min(len, k + 32); j += 2) {
                        u16 *word = (u16 *)&buffer[j];

                        *word = (j & 0x02) ? 0x5555 : 0xaaaa;
                }
                k = j;
                /* fill with shifting bits (test d) */
                for ( ; j < min(len, k + 32); j += 4) {
                        u32 *word = (unsigned int *)&buffer[j];
                        u32 roll = (pattern & 0x80000000) ? 1 : 0;
                        
                        *word = pattern;
                        pattern = (pattern << 1) | roll;
                }
                /* don't bother with random data (test e) */
        }

        for (r = 0; r < retries; r++) {
                sreq->sr_cmd_len = 0;   /* wait_req to fill in */
                sreq->sr_data_direction = DMA_TO_DEVICE;
                scsi_wait_req(sreq, spi_write_buffer, buffer, len,
                              DV_TIMEOUT, DV_RETRIES);
                if(sreq->sr_result || !scsi_device_online(sdev)) {
                        struct scsi_sense_hdr sshdr;

                        /* restore the quiesced state DV runs under */
                        scsi_device_set_state(sdev, SDEV_QUIESCE);
                        if (scsi_request_normalize_sense(sreq, &sshdr)
                            && sshdr.sense_key == ILLEGAL_REQUEST
                            /* INVALID FIELD IN CDB */
                            && sshdr.asc == 0x24 && sshdr.ascq == 0x00)
                                /* This would mean that the drive lied
                                 * to us about supporting an echo
                                 * buffer (unfortunately some Western
                                 * Digital drives do precisely this)
                                 */
                                return SPI_COMPARE_SKIP_TEST;


                        SPI_PRINTK(sdev->sdev_target, KERN_ERR, "Write Buffer failure %x\n", sreq->sr_result);
                        return SPI_COMPARE_FAILURE;
                }

                /* read back and verify the pattern survived the bus */
                memset(ptr, 0, len);
                sreq->sr_cmd_len = 0;   /* wait_req to fill in */
                sreq->sr_data_direction = DMA_FROM_DEVICE;
                scsi_wait_req(sreq, spi_read_buffer, ptr, len,
                              DV_TIMEOUT, DV_RETRIES);
                scsi_device_set_state(sdev, SDEV_QUIESCE);

                if (memcmp(buffer, ptr, len) != 0)
                        return SPI_COMPARE_FAILURE;
        }
        return SPI_COMPARE_SUCCESS;
}
470
/* This is for the simplest form of Domain Validation: a read test
 * on the inquiry data from the device.
 *
 * When ptr == buffer the first read captures the reference copy and
 * advances ptr past it; subsequent iterations read into ptr and
 * compare against that reference. */
static enum spi_compare_returns
spi_dv_device_compare_inquiry(struct scsi_request *sreq, u8 *buffer,
                              u8 *ptr, const int retries)
{
        int r;
        const int len = sreq->sr_device->inquiry_len;
        struct scsi_device *sdev = sreq->sr_device;
        const char spi_inquiry[] = {
                INQUIRY, 0, 0, 0, len, 0
        };

        for (r = 0; r < retries; r++) {
                sreq->sr_cmd_len = 0;   /* wait_req to fill in */
                sreq->sr_data_direction = DMA_FROM_DEVICE;

                memset(ptr, 0, len);

                scsi_wait_req(sreq, spi_inquiry, ptr, len,
                              DV_TIMEOUT, DV_RETRIES);
                
                if(sreq->sr_result || !scsi_device_online(sdev)) {
                        /* restore the quiesced state DV runs under */
                        scsi_device_set_state(sdev, SDEV_QUIESCE);
                        return SPI_COMPARE_FAILURE;
                }

                /* If we don't have the inquiry data already, the
                 * first read gets it */
                if (ptr == buffer) {
                        ptr += len;
                        --r;    /* reference read doesn't count as a loop */
                        continue;
                }

                if (memcmp(buffer, ptr, len) != 0)
                        /* failure */
                        return SPI_COMPARE_FAILURE;
        }
        return SPI_COMPARE_SUCCESS;
}
512
/* Run compare_fn repeatedly, lowering the transfer speed each time it
 * fails, until it passes (or is skipped) or no slower setting remains.
 * The period factor grows by 1 below 0x0d and by ~50% above it; on
 * total failure the target is dropped back to async (offset 0). */
static enum spi_compare_returns
spi_dv_retrain(struct scsi_request *sreq, u8 *buffer, u8 *ptr,
               enum spi_compare_returns 
               (*compare_fn)(struct scsi_request *, u8 *, u8 *, int))
{
        struct spi_internal *i = to_spi_internal(sreq->sr_host->transportt);
        struct scsi_device *sdev = sreq->sr_device;
        int period = 0, prevperiod = 0; 
        enum spi_compare_returns retval;


        for (;;) {
                int newperiod;
                retval = compare_fn(sreq, buffer, ptr, DV_LOOPS);

                if (retval == SPI_COMPARE_SUCCESS
                    || retval == SPI_COMPARE_SKIP_TEST)
                        break;

                /* OK, retrain, fallback */
                if (i->f->get_period)
                        i->f->get_period(sdev->sdev_target);
                /* never fall back faster than the driver negotiated */
                newperiod = spi_period(sdev->sdev_target);
                period = newperiod > period ? newperiod : period;
                if (period < 0x0d)
                        period++;
                else
                        period += period >> 1;

                /* period == prevperiod means the driver clamped our
                 * request, i.e. no slower setting exists */
                if (unlikely(period > 0xff || period == prevperiod)) {
                        /* Total failure; set to async and return */
                        SPI_PRINTK(sdev->sdev_target, KERN_ERR, "Domain Validation Failure, dropping back to Asynchronous\n");
                        DV_SET(offset, 0);
                        return SPI_COMPARE_FAILURE;
                }
                SPI_PRINTK(sdev->sdev_target, KERN_ERR, "Domain Validation detected failure, dropping back\n");
                DV_SET(period, period);
                prevperiod = period;
        }
        return retval;
}
554
/* Probe the device for an echo buffer.  Returns the echo buffer size
 * in bytes, or 0 when the device has none (or won't accept commands,
 * in which case the caller skips the write tests). */
static int
spi_dv_device_get_echo_buffer(struct scsi_request *sreq, u8 *buffer)
{
        int l;

        /* first off do a test unit ready.  This can error out 
         * because of reservations or some other reason.  If it
         * fails, the device won't let us write to the echo buffer
         * so just return failure */
        
        const char spi_test_unit_ready[] = {
                TEST_UNIT_READY, 0, 0, 0, 0, 0
        };

        /* READ BUFFER, mode 0x0b = echo buffer descriptor, 4 bytes */
        const char spi_read_buffer_descriptor[] = {
                READ_BUFFER, 0x0b, 0, 0, 0, 0, 0, 0, 4, 0
        };

        
        sreq->sr_cmd_len = 0;
        sreq->sr_data_direction = DMA_NONE;

        /* We send a set of three TURs to clear any outstanding 
         * unit attention conditions if they exist (Otherwise the
         * buffer tests won't be happy).  If the TUR still fails
         * (reservation conflict, device not ready, etc) just
         * skip the write tests */
        for (l = 0; ; l++) {
                scsi_wait_req(sreq, spi_test_unit_ready, NULL, 0,
                              DV_TIMEOUT, DV_RETRIES);

                if(sreq->sr_result) {
                        if(l >= 3)
                                return 0;
                } else {
                        /* TUR succeeded */
                        break;
                }
        }

        sreq->sr_cmd_len = 0;
        sreq->sr_data_direction = DMA_FROM_DEVICE;

        scsi_wait_req(sreq, spi_read_buffer_descriptor, buffer, 4,
                      DV_TIMEOUT, DV_RETRIES);

        if (sreq->sr_result)
                /* Device has no echo buffer */
                return 0;

        /* buffer capacity: low 13 bits of the returned descriptor */
        return buffer[3] + ((buffer[2] & 0x1f) << 8);
}
607
/* The core DV state machine: establish a narrow/async baseline, test
 * wide transfers, then push offset/period to maximum and retrain
 * downward using either the echo-buffer write test or, when no echo
 * buffer is usable, the inquiry read test. */
static void
spi_dv_device_internal(struct scsi_request *sreq, u8 *buffer)
{
        struct spi_internal *i = to_spi_internal(sreq->sr_host->transportt);
        struct scsi_device *sdev = sreq->sr_device;
        int len = sdev->inquiry_len;
        /* first set us up for narrow async */
        DV_SET(offset, 0);
        DV_SET(width, 0);
        
        if (spi_dv_device_compare_inquiry(sreq, buffer, buffer, DV_LOOPS)
            != SPI_COMPARE_SUCCESS) {
                SPI_PRINTK(sdev->sdev_target, KERN_ERR, "Domain Validation Initial Inquiry Failed\n");
                /* FIXME: should probably offline the device here? */
                return;
        }

        /* test width */
        if (i->f->set_width && sdev->wdtr) {
                i->f->set_width(sdev->sdev_target, 1);

                /* compare against the narrow baseline captured above */
                if (spi_dv_device_compare_inquiry(sreq, buffer,
                                                   buffer + len,
                                                   DV_LOOPS)
                    != SPI_COMPARE_SUCCESS) {
                        SPI_PRINTK(sdev->sdev_target, KERN_ERR, "Wide Transfers Fail\n");
                        i->f->set_width(sdev->sdev_target, 0);
                }
        }

        if (!i->f->set_period)
                return;

        /* device can't handle synchronous */
        if(!sdev->ppr && !sdev->sdtr)
                return;

        /* see if the device has an echo buffer.  If it does we can
         * do the SPI pattern write tests */

        len = 0;
        if (sdev->ppr)
                len = spi_dv_device_get_echo_buffer(sreq, buffer);

 retry:

        /* now set up to the maximum */
        DV_SET(offset, 255);
        DV_SET(period, 1);

        /* no echo buffer: fall back to the read-only inquiry test */
        if (len == 0) {
                SPI_PRINTK(sdev->sdev_target, KERN_INFO, "Domain Validation skipping write tests\n");
                spi_dv_retrain(sreq, buffer, buffer + len,
                               spi_dv_device_compare_inquiry);
                return;
        }

        if (len > SPI_MAX_ECHO_BUFFER_SIZE) {
                SPI_PRINTK(sdev->sdev_target, KERN_WARNING, "Echo buffer size %d is too big, trimming to %d\n", len, SPI_MAX_ECHO_BUFFER_SIZE);
                len = SPI_MAX_ECHO_BUFFER_SIZE;
        }

        if (spi_dv_retrain(sreq, buffer, buffer + len,
                           spi_dv_device_echo_buffer)
            == SPI_COMPARE_SKIP_TEST) {
                /* OK, the stupid drive can't do a write echo buffer
                 * test after all, fall back to the read tests */
                len = 0;
                goto retry;
        }
}
679
680
/**     spi_dv_device - Do Domain Validation on the device
 *      @sdev:          scsi device to validate
 *
 *      Performs the domain validation on the given device in the
 *      current execution thread.  Since DV operations may sleep,
 *      the current thread must have user context.  Also no SCSI
 *      related locks that would deadlock I/O issued by the DV may
 *      be held.
 */
void
spi_dv_device(struct scsi_device *sdev)
{
        struct scsi_request *sreq = scsi_allocate_request(sdev, GFP_KERNEL);
        struct scsi_target *starget = sdev->sdev_target;
        u8 *buffer;
        /* room for the write pattern plus the read-back area */
        const int len = SPI_MAX_ECHO_BUFFER_SIZE*2;

        if (unlikely(!sreq))
                return;

        if (unlikely(scsi_device_get(sdev)))
                goto out_free_req;

        buffer = kmalloc(len, GFP_KERNEL);

        if (unlikely(!buffer))
                goto out_put;

        memset(buffer, 0, len);

        /* We need to verify that the actual device will quiesce; the
         * later target quiesce is just a nice to have */
        if (unlikely(scsi_device_quiesce(sdev)))
                goto out_free;

        scsi_target_quiesce(starget);

        /* dv_sem serializes concurrent DV runs on this target */
        spi_dv_pending(starget) = 1;
        down(&spi_dv_sem(starget));

        SPI_PRINTK(starget, KERN_INFO, "Beginning Domain Validation\n");

        spi_dv_device_internal(sreq, buffer);

        SPI_PRINTK(starget, KERN_INFO, "Ending Domain Validation\n");

        up(&spi_dv_sem(starget));
        spi_dv_pending(starget) = 0;

        scsi_target_resume(starget);

        /* mark that DV has been performed at least once */
        spi_initial_dv(starget) = 1;

        /* cleanup labels release resources in reverse acquisition order */
 out_free:
        kfree(buffer);
 out_put:
        scsi_device_put(sdev);
 out_free_req:
        scsi_release_request(sreq);
}
EXPORT_SYMBOL(spi_dv_device);
742
/* Carries the target scsi_device to the workqueue callback used for
 * deferred Domain Validation (see spi_schedule_dv_device). */
struct work_queue_wrapper {
        struct work_struct      work;
        struct scsi_device      *sdev;
};
747
748 static void
749 spi_dv_device_work_wrapper(void *data)
750 {
751         struct work_queue_wrapper *wqw = (struct work_queue_wrapper *)data;
752         struct scsi_device *sdev = wqw->sdev;
753
754         kfree(wqw);
755         spi_dv_device(sdev);
756         spi_dv_pending(sdev->sdev_target) = 0;
757         scsi_device_put(sdev);
758 }
759
760
/**
 *      spi_schedule_dv_device - schedule domain validation to occur on the device
 *      @sdev:  The device to validate
 *
 *      Identical to spi_dv_device() above, except that the DV will be
 *      scheduled to occur in a workqueue later.  All memory allocations
 *      are atomic, so may be called from any context including those holding
 *      SCSI locks.
 */
void
spi_schedule_dv_device(struct scsi_device *sdev)
{
        struct work_queue_wrapper *wqw =
                kmalloc(sizeof(struct work_queue_wrapper), GFP_ATOMIC);

        if (unlikely(!wqw))
                return;

        /* NOTE(review): this check-then-set of dv_pending is not
         * atomic; it appears to rely on caller-context serialization
         * to avoid double-scheduling — confirm before relying on it */
        if (unlikely(spi_dv_pending(sdev->sdev_target))) {
                kfree(wqw);
                return;
        }
        /* Set pending early (dv_device doesn't check it, only sets it) */
        spi_dv_pending(sdev->sdev_target) = 1;
        if (unlikely(scsi_device_get(sdev))) {
                kfree(wqw);
                spi_dv_pending(sdev->sdev_target) = 0;
                return;
        }

        /* reference and wrapper are released by the work callback */
        INIT_WORK(&wqw->work, spi_dv_device_work_wrapper, wqw);
        wqw->sdev = sdev;

        schedule_work(&wqw->work);
}
EXPORT_SYMBOL(spi_schedule_dv_device);
797
/* Copy the template attribute for <field> into this instance's
 * private array, force it read-only when the driver supplies no
 * set_<field> hook, and only advance the caller-scope 'count' when
 * the driver can show the field at all.  Wrapped in do { } while (0)
 * so the multi-statement expansion (which previously ended in a bare
 * conditional count++) behaves as one statement in any context. */
#define SETUP_ATTRIBUTE(field)                                          \
        do {                                                            \
                i->private_attrs[count] = class_device_attr_##field;    \
                if (!i->f->set_##field) {                               \
                        i->private_attrs[count].attr.mode = S_IRUGO;    \
                        i->private_attrs[count].store = NULL;           \
                }                                                       \
                i->attrs[count] = &i->private_attrs[count];             \
                if (i->f->show_##field)                                 \
                        count++;                                        \
        } while (0)
807
/* Same as SETUP_ATTRIBUTE but for per-host attributes.  Note the
 * asymmetry: count is advanced unconditionally here, so every host
 * attribute occupies a slot even when the driver has no show method. */
#define SETUP_HOST_ATTRIBUTE(field)					\
	i->private_host_attrs[count] = class_device_attr_##field;	\
	if (!i->f->set_##field) {					\
		i->private_host_attrs[count].attr.mode = S_IRUGO;	\
		i->private_host_attrs[count].store = NULL;		\
	}								\
	i->host_attrs[count] = &i->private_host_attrs[count];		\
	count++
816
817 static int spi_device_match(struct attribute_container *cont,
818                             struct device *dev)
819 {
820         struct scsi_device *sdev;
821         struct Scsi_Host *shost;
822
823         if (!scsi_is_sdev_device(dev))
824                 return 0;
825
826         sdev = to_scsi_device(dev);
827         shost = sdev->host;
828         if (!shost->transportt  || shost->transportt->host_attrs.class
829             != &spi_host_class.class)
830                 return 0;
831         /* Note: this class has no device attributes, so it has
832          * no per-HBA allocation and thus we don't need to distinguish
833          * the attribute containers for the device */
834         return 1;
835 }
836
837 static int spi_target_match(struct attribute_container *cont,
838                             struct device *dev)
839 {
840         struct Scsi_Host *shost;
841         struct spi_internal *i;
842
843         if (!scsi_is_target_device(dev))
844                 return 0;
845
846         shost = dev_to_shost(dev->parent);
847         if (!shost->transportt  || shost->transportt->host_attrs.class
848             != &spi_host_class.class)
849                 return 0;
850
851         i = to_spi_internal(shost->transportt);
852         
853         return &i->t.target_attrs == cont;
854 }
855
/* Per-target transport class; spi_setup_transport_attrs initializes the
 * per-target attribute defaults when a target is created. */
static DECLARE_TRANSPORT_CLASS(spi_transport_class,
			       "spi_transport",
			       spi_setup_transport_attrs,
			       NULL,
			       NULL);

/* Anonymous (attribute-less) per-device class, used only to hook
 * spi_device_configure at sdev setup time. */
static DECLARE_ANON_TRANSPORT_CLASS(spi_device_class,
				    spi_device_match,
				    spi_device_configure);
865
866 struct scsi_transport_template *
867 spi_attach_transport(struct spi_function_template *ft)
868 {
869         struct spi_internal *i = kmalloc(sizeof(struct spi_internal),
870                                          GFP_KERNEL);
871         int count = 0;
872         if (unlikely(!i))
873                 return NULL;
874
875         memset(i, 0, sizeof(struct spi_internal));
876
877
878         i->t.target_attrs.class = &spi_transport_class.class;
879         i->t.target_attrs.attrs = &i->attrs[0];
880         i->t.target_attrs.match = spi_target_match;
881         attribute_container_register(&i->t.target_attrs);
882         i->t.target_size = sizeof(struct spi_transport_attrs);
883         i->t.host_attrs.class = &spi_host_class.class;
884         i->t.host_attrs.attrs = &i->host_attrs[0];
885         i->t.host_attrs.match = spi_host_match;
886         attribute_container_register(&i->t.host_attrs);
887         i->t.host_size = sizeof(struct spi_host_attrs);
888         i->f = ft;
889
890         SETUP_ATTRIBUTE(period);
891         SETUP_ATTRIBUTE(offset);
892         SETUP_ATTRIBUTE(width);
893         SETUP_ATTRIBUTE(iu);
894         SETUP_ATTRIBUTE(dt);
895         SETUP_ATTRIBUTE(qas);
896         SETUP_ATTRIBUTE(wr_flow);
897         SETUP_ATTRIBUTE(rd_strm);
898         SETUP_ATTRIBUTE(rti);
899         SETUP_ATTRIBUTE(pcomp_en);
900
901         /* if you add an attribute but forget to increase SPI_NUM_ATTRS
902          * this bug will trigger */
903         BUG_ON(count > SPI_NUM_ATTRS);
904
905         i->attrs[count++] = &class_device_attr_revalidate;
906
907         i->attrs[count] = NULL;
908
909         count = 0;
910         SETUP_HOST_ATTRIBUTE(signalling);
911
912         BUG_ON(count > SPI_HOST_ATTRS);
913
914         i->host_attrs[count] = NULL;
915
916         return &i->t;
917 }
918 EXPORT_SYMBOL(spi_attach_transport);
919
/* Tear down a template created by spi_attach_transport(): unregister
 * both attribute containers, then free the enclosing spi_internal.
 * Unregistration must precede the kfree since the containers are
 * embedded in the allocation being freed. */
void spi_release_transport(struct scsi_transport_template *t)
{
	struct spi_internal *i = to_spi_internal(t);

	attribute_container_unregister(&i->t.target_attrs);
	attribute_container_unregister(&i->t.host_attrs);

	kfree(i);
}
EXPORT_SYMBOL(spi_release_transport);
930
931 static __init int spi_transport_init(void)
932 {
933         int error = transport_class_register(&spi_transport_class);
934         if (error)
935                 return error;
936         error = anon_transport_class_register(&spi_device_class);
937         return transport_class_register(&spi_host_class);
938 }
939
/* Module exit: unregister the transport classes registered in
 * spi_transport_init(). */
static void __exit spi_transport_exit(void)
{
	transport_class_unregister(&spi_transport_class);
	anon_transport_class_unregister(&spi_device_class);
	transport_class_unregister(&spi_host_class);
}
946
/* Module metadata and entry/exit hooks */
MODULE_AUTHOR("Martin Hicks");
MODULE_DESCRIPTION("SPI Transport Attributes");
MODULE_LICENSE("GPL");

module_init(spi_transport_init);
module_exit(spi_transport_exit);