ftp://ftp.kernel.org/pub/linux/kernel/v2.6/linux-2.6.6.tar.bz2
[linux-2.6.git] / drivers / mtd / chips / cfi_cmdset_0020.c
1 /*
2  * Common Flash Interface support:
3  *   ST Advanced Architecture Command Set (ID 0x0020)
4  *
5  * (C) 2000 Red Hat. GPL'd
6  *
7  * 
8  * 10/10/2000   Nicolas Pitre <nico@cam.org>
9  *      - completely revamped method functions so they are aware and
10  *        independent of the flash geometry (buswidth, interleave, etc.)
11  *      - scalability vs code size is completely set at compile-time
12  *        (see include/linux/mtd/cfi.h for selection)
13  *      - optimized write buffer method
14  * 06/21/2002   Joern Engel <joern@wh.fh-wedel.de> and others
15  *      - modified Intel Command Set 0x0001 to support ST Advanced Architecture
16  *        (command set 0x0020)
17  *      - added a writev function
18  */
19
20 #include <linux/version.h>
21 #include <linux/module.h>
22 #include <linux/types.h>
23 #include <linux/kernel.h>
24 #include <linux/sched.h>
25 #include <linux/init.h>
26 #include <asm/io.h>
27 #include <asm/byteorder.h>
28
29 #include <linux/errno.h>
30 #include <linux/slab.h>
31 #include <linux/delay.h>
32 #include <linux/interrupt.h>
33 #include <linux/mtd/map.h>
34 #include <linux/mtd/cfi.h>
35 #include <linux/mtd/mtd.h>
36 #include <linux/mtd/compatmac.h>
37
38
39 static int cfi_staa_read(struct mtd_info *, loff_t, size_t, size_t *, u_char *);
40 static int cfi_staa_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
41 static int cfi_staa_writev(struct mtd_info *mtd, const struct iovec *vecs,
42                 unsigned long count, loff_t to, size_t *retlen);
43 static int cfi_staa_erase_varsize(struct mtd_info *, struct erase_info *);
44 static void cfi_staa_sync (struct mtd_info *);
45 static int cfi_staa_lock(struct mtd_info *mtd, loff_t ofs, size_t len);
46 static int cfi_staa_unlock(struct mtd_info *mtd, loff_t ofs, size_t len);
47 static int cfi_staa_suspend (struct mtd_info *);
48 static void cfi_staa_resume (struct mtd_info *);
49
50 static void cfi_staa_destroy(struct mtd_info *);
51
52 struct mtd_info *cfi_cmdset_0020(struct map_info *, int);
53
54 static struct mtd_info *cfi_staa_setup (struct map_info *);
55
/* Driver registration record for the ST Advanced Architecture command
 * set.  .probe is NULL: this chipdrv is never matched directly; it is
 * reached via cfi_cmdset_0020() after generic CFI probing.  .destroy
 * releases the private data allocated in cfi_cmdset_0020()/setup. */
static struct mtd_chip_driver cfi_staa_chipdrv = {
        .probe          = NULL, /* Not usable directly */
        .destroy        = cfi_staa_destroy,
        .name           = "cfi_cmdset_0020",
        .module         = THIS_MODULE
};
62
63 /* #define DEBUG_LOCK_BITS */
64 //#define DEBUG_CFI_FEATURES
65
#ifdef DEBUG_CFI_FEATURES
/* Debug-only dump of the CFI Primary Extended Query table: feature
 * bits, suspend capabilities, block status register mask and the
 * optimum programming voltages.  Output is unchanged from before. */
static void cfi_tell_features(struct cfi_pri_intelext *extp)
{
	int bit;

	printk("  Feature/Command Support: %4.4X\n", extp->FeatureSupport);
	printk("     - Chip Erase:         %s\n", (extp->FeatureSupport & (1 << 0)) ? "supported" : "unsupported");
	printk("     - Suspend Erase:      %s\n", (extp->FeatureSupport & (1 << 1)) ? "supported" : "unsupported");
	printk("     - Suspend Program:    %s\n", (extp->FeatureSupport & (1 << 2)) ? "supported" : "unsupported");
	printk("     - Legacy Lock/Unlock: %s\n", (extp->FeatureSupport & (1 << 3)) ? "supported" : "unsupported");
	printk("     - Queued Erase:       %s\n", (extp->FeatureSupport & (1 << 4)) ? "supported" : "unsupported");
	printk("     - Instant block lock: %s\n", (extp->FeatureSupport & (1 << 5)) ? "supported" : "unsupported");
	printk("     - Protection Bits:    %s\n", (extp->FeatureSupport & (1 << 6)) ? "supported" : "unsupported");
	printk("     - Page-mode read:     %s\n", (extp->FeatureSupport & (1 << 7)) ? "supported" : "unsupported");
	printk("     - Synchronous read:   %s\n", (extp->FeatureSupport & (1 << 8)) ? "supported" : "unsupported");
	/* Bits 0..8 are decoded above; report any other set bits raw. */
	for (bit = 9; bit < 32; bit++) {
		if (extp->FeatureSupport & (1 << bit))
			printk("     - Unknown Bit %X:      supported\n", bit);
	}

	printk("  Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
	printk("     - Program after Erase Suspend: %s\n", (extp->SuspendCmdSupport & (1 << 0)) ? "supported" : "unsupported");
	for (bit = 1; bit < 8; bit++) {
		if (extp->SuspendCmdSupport & (1 << bit))
			printk("     - Unknown Bit %X:               supported\n", bit);
	}

	printk("  Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
	printk("     - Lock Bit Active:      %s\n", (extp->BlkStatusRegMask & (1 << 0)) ? "yes" : "no");
	printk("     - Valid Bit Active:     %s\n", (extp->BlkStatusRegMask & (1 << 1)) ? "yes" : "no");
	for (bit = 2; bit < 16; bit++) {
		if (extp->BlkStatusRegMask & (1 << bit))
			printk("     - Unknown Bit %X Active: yes\n", bit);
	}

	/* Voltages are BCD-ish: integer part in the high byte, tenths in
	   the low nibble. */
	printk("  Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
	       extp->VccOptimal >> 8, extp->VccOptimal & 0xf);
	if (extp->VppOptimal)
		printk("  Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
		       extp->VppOptimal >> 8, extp->VppOptimal & 0xf);
}
#endif
107
108 /* This routine is made available to other mtd code via
109  * inter_module_register.  It must only be accessed through
110  * inter_module_get which will bump the use count of this module.  The
111  * addresses passed back in cfi are valid as long as the use count of
112  * this module is non-zero, i.e. between inter_module_get and
113  * inter_module_put.  Keith Owens <kaos@ocs.com.au> 29 Oct 2000.
114  */
struct mtd_info *cfi_cmdset_0020(struct map_info *map, int primary)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	/* All query commands are issued relative to the first chip. */
	__u32 base = cfi->chips[0].start;

	if (cfi->cfi_mode) {
		/* 
		 * It's a real CFI chip, not one for which the probe
		 * routine faked a CFI structure. So we read the feature
		 * table from it.
		 */
		/* 'primary' selects the Primary vs Alternate extended
		   query table address from the CFI ident block. */
		__u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
		struct cfi_pri_intelext *extp;
		/* Bytes of the query table are spaced by interleave *
		   device width on the bus. */
		int ofs_factor = cfi->interleave * cfi->device_type;

		printk(" ST Microelectronics Extended Query Table at 0x%4.4X\n", adr);
		if (!adr)
			return NULL;

		/* Switch it into Query Mode */
		cfi_send_gen_cmd(0x98, 0x55, base, map, cfi, cfi->device_type, NULL);

		extp = kmalloc(sizeof(*extp), GFP_KERNEL);
		if (!extp) {
			printk(KERN_ERR "Failed to allocate memory\n");
			return NULL;
		}
		
		/* Read in the Extended Query Table */
		for (i=0; i<sizeof(*extp); i++) {
			((unsigned char *)extp)[i] = 
				cfi_read_query(map, (base+((adr+i)*ofs_factor)));
		}
		
		/* Only extended-query versions 1.0 - 1.2 are understood. */
		if (extp->MajorVersion != '1' || 
		    (extp->MinorVersion < '0' || extp->MinorVersion > '2')) {
		    printk(KERN_WARNING "  Unknown staa Extended Query "
			   "version %c.%c.\n",  extp->MajorVersion,
			   extp->MinorVersion);
		    kfree(extp);
		    return NULL;
		}
		
		/* Do some byteswapping if necessary */
		extp->FeatureSupport = cfi32_to_cpu(extp->FeatureSupport);
		extp->BlkStatusRegMask = cfi32_to_cpu(extp->BlkStatusRegMask);
		
#ifdef DEBUG_CFI_FEATURES
		/* Tell the user about it in lots of lovely detail */
		cfi_tell_features(extp);
#endif	

		/* Install our own private info structure */
		/* Ownership of extp passes to cmdset_priv; it is freed on
		   the error paths of cfi_staa_setup() and on destroy. */
		cfi->cmdset_priv = extp;
	}	

	/* Seed per-chip timing estimates (microseconds); the buffer-write
	   poll loop adapts buffer_write_time at runtime. */
	for (i=0; i< cfi->numchips; i++) {
		cfi->chips[i].word_write_time = 128;
		cfi->chips[i].buffer_write_time = 128;
		cfi->chips[i].erase_time = 1024;
	}		

	/* Make sure it's in read mode */
	cfi_send_gen_cmd(0xff, 0x55, base, map, cfi, cfi->device_type, NULL);
	return cfi_staa_setup(map);
}
182
183 static struct mtd_info *cfi_staa_setup(struct map_info *map)
184 {
185         struct cfi_private *cfi = map->fldrv_priv;
186         struct mtd_info *mtd;
187         unsigned long offset = 0;
188         int i,j;
189         unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;
190
191         mtd = kmalloc(sizeof(*mtd), GFP_KERNEL);
192         //printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);
193
194         if (!mtd) {
195                 printk(KERN_ERR "Failed to allocate memory for MTD device\n");
196                 kfree(cfi->cmdset_priv);
197                 return NULL;
198         }
199
200         memset(mtd, 0, sizeof(*mtd));
201         mtd->priv = map;
202         mtd->type = MTD_NORFLASH;
203         mtd->size = devsize * cfi->numchips;
204
205         mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
206         mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info) 
207                         * mtd->numeraseregions, GFP_KERNEL);
208         if (!mtd->eraseregions) { 
209                 printk(KERN_ERR "Failed to allocate memory for MTD erase region info\n");
210                 kfree(cfi->cmdset_priv);
211                 kfree(mtd);
212                 return NULL;
213         }
214         
215         for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
216                 unsigned long ernum, ersize;
217                 ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
218                 ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;
219
220                 if (mtd->erasesize < ersize) {
221                         mtd->erasesize = ersize;
222                 }
223                 for (j=0; j<cfi->numchips; j++) {
224                         mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
225                         mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
226                         mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
227                 }
228                 offset += (ersize * ernum);
229                 }
230
231                 if (offset != devsize) {
232                         /* Argh */
233                         printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
234                         kfree(mtd->eraseregions);
235                         kfree(cfi->cmdset_priv);
236                         kfree(mtd);
237                         return NULL;
238                 }
239
240                 for (i=0; i<mtd->numeraseregions;i++){
241                         printk(KERN_DEBUG "%d: offset=0x%x,size=0x%x,blocks=%d\n",
242                                i,mtd->eraseregions[i].offset,
243                                mtd->eraseregions[i].erasesize,
244                                mtd->eraseregions[i].numblocks);
245                 }
246
247         /* Also select the correct geometry setup too */ 
248         mtd->erase = cfi_staa_erase_varsize;
249         mtd->read = cfi_staa_read;
250         mtd->write = cfi_staa_write_buffers;
251         mtd->writev = cfi_staa_writev;
252         mtd->sync = cfi_staa_sync;
253         mtd->lock = cfi_staa_lock;
254         mtd->unlock = cfi_staa_unlock;
255         mtd->suspend = cfi_staa_suspend;
256         mtd->resume = cfi_staa_resume;
257         mtd->flags = MTD_CAP_NORFLASH;
258         mtd->flags |= MTD_ECC; /* FIXME: Not all STMicro flashes have this */
259         mtd->eccsize = 8; /* FIXME: Should be 0 for STMicro flashes w/out ECC */
260         map->fldrv = &cfi_staa_chipdrv;
261         __module_get(THIS_MODULE);
262         mtd->name = map->name;
263         return mtd;
264 }
265
266
267 static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
268 {
269         __u32 status, status_OK;
270         unsigned long timeo;
271         DECLARE_WAITQUEUE(wait, current);
272         int suspended = 0;
273         unsigned long cmd_addr;
274         struct cfi_private *cfi = map->fldrv_priv;
275
276         adr += chip->start;
277
278         /* Ensure cmd read/writes are aligned. */ 
279         cmd_addr = adr & ~(CFIDEV_BUSWIDTH-1); 
280
281         /* Let's determine this according to the interleave only once */
282         status_OK = CMD(0x80);
283
284         timeo = jiffies + HZ;
285  retry:
286         spin_lock_bh(chip->mutex);
287
288         /* Check that the chip's ready to talk to us.
289          * If it's in FL_ERASING state, suspend it and make it talk now.
290          */
291         switch (chip->state) {
292         case FL_ERASING:
293                 if (!((struct cfi_pri_intelext *)cfi->cmdset_priv)->FeatureSupport & 2)
294                         goto sleep; /* We don't support erase suspend */
295                 
296                 cfi_write (map, CMD(0xb0), cmd_addr);
297                 /* If the flash has finished erasing, then 'erase suspend'
298                  * appears to make some (28F320) flash devices switch to
299                  * 'read' mode.  Make sure that we switch to 'read status'
300                  * mode so we get the right data. --rmk
301                  */
302                 cfi_write(map, CMD(0x70), cmd_addr);
303                 chip->oldstate = FL_ERASING;
304                 chip->state = FL_ERASE_SUSPENDING;
305                 //              printk("Erase suspending at 0x%lx\n", cmd_addr);
306                 for (;;) {
307                         status = cfi_read(map, cmd_addr);
308                         if ((status & status_OK) == status_OK)
309                                 break;
310                         
311                         if (time_after(jiffies, timeo)) {
312                                 /* Urgh */
313                                 cfi_write(map, CMD(0xd0), cmd_addr);
314                                 /* make sure we're in 'read status' mode */
315                                 cfi_write(map, CMD(0x70), cmd_addr);
316                                 chip->state = FL_ERASING;
317                                 spin_unlock_bh(chip->mutex);
318                                 printk(KERN_ERR "Chip not ready after erase "
319                                        "suspended: status = 0x%x\n", status);
320                                 return -EIO;
321                         }
322                         
323                         spin_unlock_bh(chip->mutex);
324                         cfi_udelay(1);
325                         spin_lock_bh(chip->mutex);
326                 }
327                 
328                 suspended = 1;
329                 cfi_write(map, CMD(0xff), cmd_addr);
330                 chip->state = FL_READY;
331                 break;
332         
333 #if 0
334         case FL_WRITING:
335                 /* Not quite yet */
336 #endif
337
338         case FL_READY:
339                 break;
340
341         case FL_CFI_QUERY:
342         case FL_JEDEC_QUERY:
343                 cfi_write(map, CMD(0x70), cmd_addr);
344                 chip->state = FL_STATUS;
345
346         case FL_STATUS:
347                 status = cfi_read(map, cmd_addr);
348                 if ((status & status_OK) == status_OK) {
349                         cfi_write(map, CMD(0xff), cmd_addr);
350                         chip->state = FL_READY;
351                         break;
352                 }
353                 
354                 /* Urgh. Chip not yet ready to talk to us. */
355                 if (time_after(jiffies, timeo)) {
356                         spin_unlock_bh(chip->mutex);
357                         printk(KERN_ERR "waiting for chip to be ready timed out in read. WSM status = %x\n", status);
358                         return -EIO;
359                 }
360
361                 /* Latency issues. Drop the lock, wait a while and retry */
362                 spin_unlock_bh(chip->mutex);
363                 cfi_udelay(1);
364                 goto retry;
365
366         default:
367         sleep:
368                 /* Stick ourselves on a wait queue to be woken when
369                    someone changes the status */
370                 set_current_state(TASK_UNINTERRUPTIBLE);
371                 add_wait_queue(&chip->wq, &wait);
372                 spin_unlock_bh(chip->mutex);
373                 schedule();
374                 remove_wait_queue(&chip->wq, &wait);
375                 timeo = jiffies + HZ;
376                 goto retry;
377         }
378
379         map_copy_from(map, buf, adr, len);
380
381         if (suspended) {
382                 chip->state = chip->oldstate;
383                 /* What if one interleaved chip has finished and the 
384                    other hasn't? The old code would leave the finished
385                    one in READY mode. That's bad, and caused -EROFS 
386                    errors to be returned from do_erase_oneblock because
387                    that's the only bit it checked for at the time.
388                    As the state machine appears to explicitly allow 
389                    sending the 0x70 (Read Status) command to an erasing
390                    chip and expecting it to be ignored, that's what we 
391                    do. */
392                 cfi_write(map, CMD(0xd0), cmd_addr);
393                 cfi_write(map, CMD(0x70), cmd_addr);            
394         }
395
396         wake_up(&chip->wq);
397         spin_unlock_bh(chip->mutex);
398         return 0;
399 }
400
401 static int cfi_staa_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
402 {
403         struct map_info *map = mtd->priv;
404         struct cfi_private *cfi = map->fldrv_priv;
405         unsigned long ofs;
406         int chipnum;
407         int ret = 0;
408
409         /* ofs: offset within the first chip that the first read should start */
410         chipnum = (from >> cfi->chipshift);
411         ofs = from - (chipnum <<  cfi->chipshift);
412
413         *retlen = 0;
414
415         while (len) {
416                 unsigned long thislen;
417
418                 if (chipnum >= cfi->numchips)
419                         break;
420
421                 if ((len + ofs -1) >> cfi->chipshift)
422                         thislen = (1<<cfi->chipshift) - ofs;
423                 else
424                         thislen = len;
425
426                 ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
427                 if (ret)
428                         break;
429
430                 *retlen += thislen;
431                 len -= thislen;
432                 buf += thislen;
433                 
434                 ofs = 0;
435                 chipnum++;
436         }
437         return ret;
438 }
439
440 static inline int do_write_buffer(struct map_info *map, struct flchip *chip, 
441                                   unsigned long adr, const u_char *buf, int len)
442 {
443         struct cfi_private *cfi = map->fldrv_priv;
444         __u32 status, status_OK;
445         unsigned long cmd_adr, timeo;
446         DECLARE_WAITQUEUE(wait, current);
447         int wbufsize, z;
448         
449         /* M58LW064A requires bus alignment for buffer wriets -- saw */
450         if (adr & (CFIDEV_BUSWIDTH-1))
451             return -EINVAL;
452
453         wbufsize = CFIDEV_INTERLEAVE << cfi->cfiq->MaxBufWriteSize;
454         adr += chip->start;
455         cmd_adr = adr & ~(wbufsize-1);
456         
457         /* Let's determine this according to the interleave only once */
458         status_OK = CMD(0x80);
459         
460         timeo = jiffies + HZ;
461  retry:
462
463 #ifdef DEBUG_CFI_FEATURES
464        printk("%s: chip->state[%d]\n", __FUNCTION__, chip->state);
465 #endif
466         spin_lock_bh(chip->mutex);
467  
468         /* Check that the chip's ready to talk to us.
469          * Later, we can actually think about interrupting it
470          * if it's in FL_ERASING state.
471          * Not just yet, though.
472          */
473         switch (chip->state) {
474         case FL_READY:
475                 break;
476                 
477         case FL_CFI_QUERY:
478         case FL_JEDEC_QUERY:
479                 cfi_write(map, CMD(0x70), cmd_adr);
480                 chip->state = FL_STATUS;
481 #ifdef DEBUG_CFI_FEATURES
482         printk("%s: 1 status[%x]\n", __FUNCTION__, cfi_read(map, cmd_adr));
483 #endif
484
485         case FL_STATUS:
486                 status = cfi_read(map, cmd_adr);
487                 if ((status & status_OK) == status_OK)
488                         break;
489                 /* Urgh. Chip not yet ready to talk to us. */
490                 if (time_after(jiffies, timeo)) {
491                         spin_unlock_bh(chip->mutex);
492                         printk(KERN_ERR "waiting for chip to be ready timed out in buffer write Xstatus = %x, status = %x\n",
493                                status, cfi_read(map, cmd_adr));
494                         return -EIO;
495                 }
496
497                 /* Latency issues. Drop the lock, wait a while and retry */
498                 spin_unlock_bh(chip->mutex);
499                 cfi_udelay(1);
500                 goto retry;
501
502         default:
503                 /* Stick ourselves on a wait queue to be woken when
504                    someone changes the status */
505                 set_current_state(TASK_UNINTERRUPTIBLE);
506                 add_wait_queue(&chip->wq, &wait);
507                 spin_unlock_bh(chip->mutex);
508                 schedule();
509                 remove_wait_queue(&chip->wq, &wait);
510                 timeo = jiffies + HZ;
511                 goto retry;
512         }
513
514         ENABLE_VPP(map);
515         cfi_write(map, CMD(0xe8), cmd_adr);
516         chip->state = FL_WRITING_TO_BUFFER;
517
518         z = 0;
519         for (;;) {
520                 status = cfi_read(map, cmd_adr);
521                 if ((status & status_OK) == status_OK)
522                         break;
523
524                 spin_unlock_bh(chip->mutex);
525                 cfi_udelay(1);
526                 spin_lock_bh(chip->mutex);
527
528                 if (++z > 100) {
529                         /* Argh. Not ready for write to buffer */
530                         DISABLE_VPP(map);
531                         cfi_write(map, CMD(0x70), cmd_adr);
532                         chip->state = FL_STATUS;
533                         spin_unlock_bh(chip->mutex);
534                         printk(KERN_ERR "Chip not ready for buffer write. Xstatus = %x\n", status);
535                         return -EIO;
536                 }
537         }
538
539         /* Write length of data to come */
540         cfi_write(map, CMD(len/CFIDEV_BUSWIDTH-1), cmd_adr );
541         
542         /* Write data */
543         for (z = 0; z < len; z += CFIDEV_BUSWIDTH) {
544                 if (cfi_buswidth_is_1()) {
545                         u8 *b = (u8 *)buf;
546
547                         map_write8 (map, *b++, adr+z);
548                         buf = (const u_char *)b;
549                 } else if (cfi_buswidth_is_2()) {
550                         u16 *b = (u16 *)buf;
551
552                         map_write16 (map, *b++, adr+z);
553                         buf = (const u_char *)b;
554                 } else if (cfi_buswidth_is_4()) {
555                         u32 *b = (u32 *)buf;
556
557                         map_write32 (map, *b++, adr+z);
558                         buf = (const u_char *)b;
559                 } else {
560                         DISABLE_VPP(map);
561                         return -EINVAL;
562                 }
563         }
564         /* GO GO GO */
565         cfi_write(map, CMD(0xd0), cmd_adr);
566         chip->state = FL_WRITING;
567
568         spin_unlock_bh(chip->mutex);
569         cfi_udelay(chip->buffer_write_time);
570         spin_lock_bh(chip->mutex);
571
572         timeo = jiffies + (HZ/2);
573         z = 0;
574         for (;;) {
575                 if (chip->state != FL_WRITING) {
576                         /* Someone's suspended the write. Sleep */
577                         set_current_state(TASK_UNINTERRUPTIBLE);
578                         add_wait_queue(&chip->wq, &wait);
579                         spin_unlock_bh(chip->mutex);
580                         schedule();
581                         remove_wait_queue(&chip->wq, &wait);
582                         timeo = jiffies + (HZ / 2); /* FIXME */
583                         spin_lock_bh(chip->mutex);
584                         continue;
585                 }
586
587                 status = cfi_read(map, cmd_adr);
588                 if ((status & status_OK) == status_OK)
589                         break;
590
591                 /* OK Still waiting */
592                 if (time_after(jiffies, timeo)) {
593                         /* clear status */
594                         cfi_write(map, CMD(0x50), cmd_adr);
595                         /* put back into read status register mode */
596                         cfi_write(map, CMD(0x70), adr);
597                         chip->state = FL_STATUS;
598                         DISABLE_VPP(map);
599                         spin_unlock_bh(chip->mutex);
600                         printk(KERN_ERR "waiting for chip to be ready timed out in bufwrite\n");
601                         return -EIO;
602                 }
603                 
604                 /* Latency issues. Drop the lock, wait a while and retry */
605                 spin_unlock_bh(chip->mutex);
606                 cfi_udelay(1);
607                 z++;
608                 spin_lock_bh(chip->mutex);
609         }
610         if (!z) {
611                 chip->buffer_write_time--;
612                 if (!chip->buffer_write_time)
613                         chip->buffer_write_time++;
614         }
615         if (z > 1) 
616                 chip->buffer_write_time++;
617         
618         /* Done and happy. */
619         DISABLE_VPP(map);
620         chip->state = FL_STATUS;
621
622         /* check for errors: 'lock bit', 'VPP', 'dead cell'/'unerased cell' or 'incorrect cmd' -- saw */
623         if ((status & CMD(0x02)) || (status & CMD(0x08)) ||
624             (status & CMD(0x10)) || (status & CMD(0x20))) {
625 #ifdef DEBUG_CFI_FEATURES
626             printk("%s: 2 status[%x]\n", __FUNCTION__, status);
627 #endif
628             /* clear status */
629             cfi_write(map, CMD(0x50), cmd_adr);
630             /* put back into read status register mode */
631             cfi_write(map, CMD(0x70), adr);
632             wake_up(&chip->wq);
633             spin_unlock_bh(chip->mutex);
634             return (status & CMD(0x02)) ? -EROFS : -EIO;
635         }
636         wake_up(&chip->wq);
637         spin_unlock_bh(chip->mutex);
638
639         return 0;
640 }
641
642 static int cfi_staa_write_buffers (struct mtd_info *mtd, loff_t to, 
643                                        size_t len, size_t *retlen, const u_char *buf)
644 {
645         struct map_info *map = mtd->priv;
646         struct cfi_private *cfi = map->fldrv_priv;
647         int wbufsize = CFIDEV_INTERLEAVE << cfi->cfiq->MaxBufWriteSize;
648         int ret = 0;
649         int chipnum;
650         unsigned long ofs;
651
652         *retlen = 0;
653         if (!len)
654                 return 0;
655
656         chipnum = to >> cfi->chipshift;
657         ofs = to  - (chipnum << cfi->chipshift);
658
659 #ifdef DEBUG_CFI_FEATURES
660         printk("%s: CFIDEV_BUSWIDTH[%x]\n", __FUNCTION__, CFIDEV_BUSWIDTH);
661         printk("%s: chipnum[%x] wbufsize[%x]\n", __FUNCTION__, chipnum, wbufsize);
662         printk("%s: ofs[%x] len[%x]\n", __FUNCTION__, ofs, len);
663 #endif
664         
665         /* Write buffer is worth it only if more than one word to write... */
666         while (len > 0) {
667                 /* We must not cross write block boundaries */
668                 int size = wbufsize - (ofs & (wbufsize-1));
669
670                 if (size > len)
671                     size = len;
672
673                 ret = do_write_buffer(map, &cfi->chips[chipnum], 
674                                       ofs, buf, size);
675                 if (ret)
676                         return ret;
677
678                 ofs += size;
679                 buf += size;
680                 (*retlen) += size;
681                 len -= size;
682
683                 if (ofs >> cfi->chipshift) {
684                         chipnum ++; 
685                         ofs = 0;
686                         if (chipnum == cfi->numchips)
687                                 return 0;
688                 }
689         }
690         
691         return 0;
692 }
693
694 /*
695  * Writev for ECC-Flashes is a little more complicated. We need to maintain
696  * a small buffer for this.
697  * XXX: If the buffer size is not a multiple of 2, this will break
698  */
699 #define ECCBUF_SIZE (mtd->eccsize)
700 #define ECCBUF_DIV(x) ((x) & ~(ECCBUF_SIZE - 1))
701 #define ECCBUF_MOD(x) ((x) &  (ECCBUF_SIZE - 1))
702 static int
703 cfi_staa_writev(struct mtd_info *mtd, const struct iovec *vecs,
704                 unsigned long count, loff_t to, size_t *retlen)
705 {
706         unsigned long i;
707         size_t   totlen = 0, thislen;
708         int      ret = 0;
709         size_t   buflen = 0;
710         static char *buffer;
711
712         if (!ECCBUF_SIZE) {
713                 /* We should fall back to a general writev implementation.
714                  * Until that is written, just break.
715                  */
716                 return -EIO;
717         }
718         buffer = kmalloc(ECCBUF_SIZE, GFP_KERNEL);
719         if (!buffer)
720                 return -ENOMEM;
721
722         for (i=0; i<count; i++) {
723                 size_t elem_len = vecs[i].iov_len;
724                 void *elem_base = vecs[i].iov_base;
725                 if (!elem_len) /* FIXME: Might be unnecessary. Check that */
726                         continue;
727                 if (buflen) { /* cut off head */
728                         if (buflen + elem_len < ECCBUF_SIZE) { /* just accumulate */
729                                 memcpy(buffer+buflen, elem_base, elem_len);
730                                 buflen += elem_len;
731                                 continue;
732                         }
733                         memcpy(buffer+buflen, elem_base, ECCBUF_SIZE-buflen);
734                         ret = mtd->write(mtd, to, ECCBUF_SIZE, &thislen, buffer);
735                         totlen += thislen;
736                         if (ret || thislen != ECCBUF_SIZE)
737                                 goto write_error;
738                         elem_len -= thislen-buflen;
739                         elem_base += thislen-buflen;
740                         to += ECCBUF_SIZE;
741                 }
742                 if (ECCBUF_DIV(elem_len)) { /* write clean aligned data */
743                         ret = mtd->write(mtd, to, ECCBUF_DIV(elem_len), &thislen, elem_base);
744                         totlen += thislen;
745                         if (ret || thislen != ECCBUF_DIV(elem_len))
746                                 goto write_error;
747                         to += thislen;
748                 }
749                 buflen = ECCBUF_MOD(elem_len); /* cut off tail */
750                 if (buflen) {
751                         memset(buffer, 0xff, ECCBUF_SIZE);
752                         memcpy(buffer, elem_base + thislen, buflen);
753                 }
754         }
755         if (buflen) { /* flush last page, even if not full */
756                 /* This is sometimes intended behaviour, really */
757                 ret = mtd->write(mtd, to, buflen, &thislen, buffer);
758                 totlen += thislen;
759                 if (ret || thislen != ECCBUF_SIZE)
760                         goto write_error;
761         }
762 write_error:
763         if (retlen)
764                 *retlen = totlen;
765         return ret;
766 }
767
768
769 static inline int do_erase_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr)
770 {
771         struct cfi_private *cfi = map->fldrv_priv;
772         __u32 status, status_OK;
773         unsigned long timeo;
774         int retries = 3;
775         DECLARE_WAITQUEUE(wait, current);
776         int ret = 0;
777
778         adr += chip->start;
779
780         /* Let's determine this according to the interleave only once */
781         status_OK = CMD(0x80);
782
783         timeo = jiffies + HZ;
784 retry:
785         spin_lock_bh(chip->mutex);
786
787         /* Check that the chip's ready to talk to us. */
788         switch (chip->state) {
789         case FL_CFI_QUERY:
790         case FL_JEDEC_QUERY:
791         case FL_READY:
792                 cfi_write(map, CMD(0x70), adr);
793                 chip->state = FL_STATUS;
794
795         case FL_STATUS:
796                 status = cfi_read(map, adr);
797                 if ((status & status_OK) == status_OK)
798                         break;
799                 
800                 /* Urgh. Chip not yet ready to talk to us. */
801                 if (time_after(jiffies, timeo)) {
802                         spin_unlock_bh(chip->mutex);
803                         printk(KERN_ERR "waiting for chip to be ready timed out in erase\n");
804                         return -EIO;
805                 }
806
807                 /* Latency issues. Drop the lock, wait a while and retry */
808                 spin_unlock_bh(chip->mutex);
809                 cfi_udelay(1);
810                 goto retry;
811
812         default:
813                 /* Stick ourselves on a wait queue to be woken when
814                    someone changes the status */
815                 set_current_state(TASK_UNINTERRUPTIBLE);
816                 add_wait_queue(&chip->wq, &wait);
817                 spin_unlock_bh(chip->mutex);
818                 schedule();
819                 remove_wait_queue(&chip->wq, &wait);
820                 timeo = jiffies + HZ;
821                 goto retry;
822         }
823
824         ENABLE_VPP(map);
825         /* Clear the status register first */
826         cfi_write(map, CMD(0x50), adr);
827
828         /* Now erase */
829         cfi_write(map, CMD(0x20), adr);
830         cfi_write(map, CMD(0xD0), adr);
831         chip->state = FL_ERASING;
832         
833         spin_unlock_bh(chip->mutex);
834         schedule_timeout(HZ);
835         spin_lock_bh(chip->mutex);
836
837         /* FIXME. Use a timer to check this, and return immediately. */
838         /* Once the state machine's known to be working I'll do that */
839
840         timeo = jiffies + (HZ*20);
841         for (;;) {
842                 if (chip->state != FL_ERASING) {
843                         /* Someone's suspended the erase. Sleep */
844                         set_current_state(TASK_UNINTERRUPTIBLE);
845                         add_wait_queue(&chip->wq, &wait);
846                         spin_unlock_bh(chip->mutex);
847                         schedule();
848                         remove_wait_queue(&chip->wq, &wait);
849                         timeo = jiffies + (HZ*20); /* FIXME */
850                         spin_lock_bh(chip->mutex);
851                         continue;
852                 }
853
854                 status = cfi_read(map, adr);
855                 if ((status & status_OK) == status_OK)
856                         break;
857                 
858                 /* OK Still waiting */
859                 if (time_after(jiffies, timeo)) {
860                         cfi_write(map, CMD(0x70), adr);
861                         chip->state = FL_STATUS;
862                         printk(KERN_ERR "waiting for erase to complete timed out. Xstatus = %x, status = %x.\n", status, cfi_read(map, adr));
863                         DISABLE_VPP(map);
864                         spin_unlock_bh(chip->mutex);
865                         return -EIO;
866                 }
867                 
868                 /* Latency issues. Drop the lock, wait a while and retry */
869                 spin_unlock_bh(chip->mutex);
870                 cfi_udelay(1);
871                 spin_lock_bh(chip->mutex);
872         }
873         
874         DISABLE_VPP(map);
875         ret = 0;
876
877         /* We've broken this before. It doesn't hurt to be safe */
878         cfi_write(map, CMD(0x70), adr);
879         chip->state = FL_STATUS;
880         status = cfi_read(map, adr);
881
882         /* check for lock bit */
883         if (status & CMD(0x3a)) {
884                 unsigned char chipstatus = status;
885                 if (status != CMD(status & 0xff)) {
886                         int i;
887                         for (i = 1; i<CFIDEV_INTERLEAVE; i++) {
888                                       chipstatus |= status >> (cfi->device_type * 8);
889                         }
890                         printk(KERN_WARNING "Status is not identical for all chips: 0x%x. Merging to give 0x%02x\n", status, chipstatus);
891                 }
892                 /* Reset the error bits */
893                 cfi_write(map, CMD(0x50), adr);
894                 cfi_write(map, CMD(0x70), adr);
895                 
896                 if ((chipstatus & 0x30) == 0x30) {
897                         printk(KERN_NOTICE "Chip reports improper command sequence: status 0x%x\n", status);
898                         ret = -EIO;
899                 } else if (chipstatus & 0x02) {
900                         /* Protection bit set */
901                         ret = -EROFS;
902                 } else if (chipstatus & 0x8) {
903                         /* Voltage */
904                         printk(KERN_WARNING "Chip reports voltage low on erase: status 0x%x\n", status);
905                         ret = -EIO;
906                 } else if (chipstatus & 0x20) {
907                         if (retries--) {
908                                 printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%x. Retrying...\n", adr, status);
909                                 timeo = jiffies + HZ;
910                                 chip->state = FL_STATUS;
911                                 spin_unlock_bh(chip->mutex);
912                                 goto retry;
913                         }
914                         printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%x\n", adr, status);
915                         ret = -EIO;
916                 }
917         }
918
919         wake_up(&chip->wq);
920         spin_unlock_bh(chip->mutex);
921         return ret;
922 }
923
/*
 * Erase instr->len bytes starting at instr->addr.  The device may
 * contain several erase regions with different block sizes, so both
 * ends of the requested range must be aligned to the erase size of
 * the region they fall in.  Calls do_erase_oneblock() for each block
 * and invokes the caller's completion callback on success.
 */
int cfi_staa_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
{	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long adr, len;
	int chipnum, ret = 0;
	int i, first;
	struct mtd_erase_region_info *regions = mtd->eraseregions;

	/* The requested range must lie entirely within the device. */
	if (instr->addr > mtd->size)
		return -EINVAL;

	if ((instr->len + instr->addr) > mtd->size)
		return -EINVAL;

	/* Check that both start and end of the requested erase are
	 * aligned with the erasesize at the appropriate addresses.
	 */

	i = 0;

	/* Skip all erase regions which are ended before the start of 
	   the requested erase. Actually, to save on the calculations,
	   we skip to the first erase region which starts after the
	   start of the requested erase, and then go back one.
	*/
	
	while (i < mtd->numeraseregions && instr->addr >= regions[i].offset)
	       i++;
	i--;

	/* OK, now i is pointing at the erase region in which this 
	   erase request starts. Check the start of the requested
	   erase range is aligned with the erase size which is in
	   effect here.
	*/

	if (instr->addr & (regions[i].erasesize-1))
		return -EINVAL;

	/* Remember the erase region we start on */
	first = i;

	/* Next, check that the end of the requested erase is aligned
	 * with the erase region at that address.
	 */

	while (i<mtd->numeraseregions && (instr->addr + instr->len) >= regions[i].offset)
		i++;

	/* As before, drop back one to point at the region in which
	   the address actually falls
	*/
	i--;
	
	if ((instr->addr + instr->len) & (regions[i].erasesize-1))
		return -EINVAL;

	/* Translate the start address into (chip index, offset-in-chip). */
	chipnum = instr->addr >> cfi->chipshift;
	adr = instr->addr - (chipnum << cfi->chipshift);
	len = instr->len;

	i=first;

	while(len) {
		ret = do_erase_oneblock(map, &cfi->chips[chipnum], adr);
		
		if (ret)
			return ret;

		adr += regions[i].erasesize;
		len -= regions[i].erasesize;

		/* Step to the next erase region once adr reaches the end of
		 * the current one (both reduced modulo the per-chip size,
		 * since adr is a chip-local offset). */
		if (adr % (1<< cfi->chipshift) == ((regions[i].offset + (regions[i].erasesize * regions[i].numblocks)) %( 1<< cfi->chipshift)))
			i++;

		/* Wrapped past the end of this chip: move to the next one. */
		if (adr >> cfi->chipshift) {
			adr = 0;
			chipnum++;
			
			if (chipnum >= cfi->numchips)
			break;
		}
	}
		
	instr->state = MTD_ERASE_DONE;
	if (instr->callback)
		instr->callback(instr);
	
	return 0;
}
1014
1015 static void cfi_staa_sync (struct mtd_info *mtd)
1016 {
1017         struct map_info *map = mtd->priv;
1018         struct cfi_private *cfi = map->fldrv_priv;
1019         int i;
1020         struct flchip *chip;
1021         int ret = 0;
1022         DECLARE_WAITQUEUE(wait, current);
1023
1024         for (i=0; !ret && i<cfi->numchips; i++) {
1025                 chip = &cfi->chips[i];
1026
1027         retry:
1028                 spin_lock_bh(chip->mutex);
1029
1030                 switch(chip->state) {
1031                 case FL_READY:
1032                 case FL_STATUS:
1033                 case FL_CFI_QUERY:
1034                 case FL_JEDEC_QUERY:
1035                         chip->oldstate = chip->state;
1036                         chip->state = FL_SYNCING;
1037                         /* No need to wake_up() on this state change - 
1038                          * as the whole point is that nobody can do anything
1039                          * with the chip now anyway.
1040                          */
1041                 case FL_SYNCING:
1042                         spin_unlock_bh(chip->mutex);
1043                         break;
1044
1045                 default:
1046                         /* Not an idle state */
1047                         add_wait_queue(&chip->wq, &wait);
1048                         
1049                         spin_unlock_bh(chip->mutex);
1050                         schedule();
1051                         remove_wait_queue(&chip->wq, &wait);
1052                         
1053                         goto retry;
1054                 }
1055         }
1056
1057         /* Unlock the chips again */
1058
1059         for (i--; i >=0; i--) {
1060                 chip = &cfi->chips[i];
1061
1062                 spin_lock_bh(chip->mutex);
1063                 
1064                 if (chip->state == FL_SYNCING) {
1065                         chip->state = chip->oldstate;
1066                         wake_up(&chip->wq);
1067                 }
1068                 spin_unlock_bh(chip->mutex);
1069         }
1070 }
1071
/*
 * Set the lock bit for the erase block at 'adr' (chip-relative offset)
 * and wait for the operation to finish.  Issues the Set Block Lock-Bit
 * command pair (0x60/0x01) and polls the status register for ready.
 * Sleeps while the chip is busy, so must run in process context.
 * Returns 0 on success, -EIO on timeout.
 */
static inline int do_lock_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;
	__u32 status, status_OK;
	unsigned long timeo = jiffies + HZ;
	DECLARE_WAITQUEUE(wait, current);

	adr += chip->start;

	/* Let's determine this according to the interleave only once */
	status_OK = CMD(0x80);

	timeo = jiffies + HZ;
retry:
	spin_lock_bh(chip->mutex);

	/* Check that the chip's ready to talk to us. */
	switch (chip->state) {
	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
	case FL_READY:
		cfi_write(map, CMD(0x70), adr);
		chip->state = FL_STATUS;
		/* fall through -- read back the status we just requested */

	case FL_STATUS:
		status = cfi_read(map, adr);
		if ((status & status_OK) == status_OK) 
			break;
		
		/* Urgh. Chip not yet ready to talk to us. */
		if (time_after(jiffies, timeo)) {
			spin_unlock_bh(chip->mutex);
			printk(KERN_ERR "waiting for chip to be ready timed out in lock\n");
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		spin_unlock_bh(chip->mutex);
		cfi_udelay(1);
		goto retry;

	default:
		/* Stick ourselves on a wait queue to be woken when
		   someone changes the status */
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		spin_unlock_bh(chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		timeo = jiffies + HZ;
		goto retry;
	}

	ENABLE_VPP(map);
	/* Set Block Lock-Bit command sequence */
	cfi_write(map, CMD(0x60), adr);
	cfi_write(map, CMD(0x01), adr);
	chip->state = FL_LOCKING;
	
	spin_unlock_bh(chip->mutex);
	schedule_timeout(HZ);
	spin_lock_bh(chip->mutex);

	/* FIXME. Use a timer to check this, and return immediately. */
	/* Once the state machine's known to be working I'll do that */

	timeo = jiffies + (HZ*2);
	for (;;) {

		status = cfi_read(map, adr);
		if ((status & status_OK) == status_OK)
			break;
		
		/* OK Still waiting */
		if (time_after(jiffies, timeo)) {
			cfi_write(map, CMD(0x70), adr);
			chip->state = FL_STATUS;
			printk(KERN_ERR "waiting for lock to complete timed out. Xstatus = %x, status = %x.\n", status, cfi_read(map, adr));
			DISABLE_VPP(map);
			spin_unlock_bh(chip->mutex);
			return -EIO;
		}
		
		/* Latency issues. Drop the lock, wait a while and retry */
		spin_unlock_bh(chip->mutex);
		cfi_udelay(1);
		spin_lock_bh(chip->mutex);
	}
	
	/* Done and happy. */
	chip->state = FL_STATUS;
	DISABLE_VPP(map);
	wake_up(&chip->wq);
	spin_unlock_bh(chip->mutex);
	return 0;
}
1167 static int cfi_staa_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
1168 {
1169         struct map_info *map = mtd->priv;
1170         struct cfi_private *cfi = map->fldrv_priv;
1171         unsigned long adr;
1172         int chipnum, ret = 0;
1173 #ifdef DEBUG_LOCK_BITS
1174         int ofs_factor = cfi->interleave * cfi->device_type;
1175 #endif
1176
1177         if (ofs & (mtd->erasesize - 1))
1178                 return -EINVAL;
1179
1180         if (len & (mtd->erasesize -1))
1181                 return -EINVAL;
1182
1183         if ((len + ofs) > mtd->size)
1184                 return -EINVAL;
1185
1186         chipnum = ofs >> cfi->chipshift;
1187         adr = ofs - (chipnum << cfi->chipshift);
1188
1189         while(len) {
1190
1191 #ifdef DEBUG_LOCK_BITS
1192                 cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
1193                 printk("before lock: block status register is %x\n",cfi_read_query(map, adr+(2*ofs_factor)));
1194                 cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);
1195 #endif
1196
1197                 ret = do_lock_oneblock(map, &cfi->chips[chipnum], adr);
1198
1199 #ifdef DEBUG_LOCK_BITS
1200                 cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
1201                 printk("after lock: block status register is %x\n",cfi_read_query(map, adr+(2*ofs_factor)));
1202                 cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);
1203 #endif  
1204                 
1205                 if (ret)
1206                         return ret;
1207
1208                 adr += mtd->erasesize;
1209                 len -= mtd->erasesize;
1210
1211                 if (adr >> cfi->chipshift) {
1212                         adr = 0;
1213                         chipnum++;
1214                         
1215                         if (chipnum >= cfi->numchips)
1216                         break;
1217                 }
1218         }
1219         return 0;
1220 }
/*
 * Clear the lock bit for the erase block at 'adr' (chip-relative
 * offset) and wait for the operation to finish.  Issues the Clear
 * Block Lock-Bits command pair (0x60/0xD0) and polls the status
 * register for ready.  Sleeps while the chip is busy, so must run in
 * process context.  Returns 0 on success, -EIO on timeout.
 */
static inline int do_unlock_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;
	__u32 status, status_OK;
	unsigned long timeo = jiffies + HZ;
	DECLARE_WAITQUEUE(wait, current);

	adr += chip->start;

	/* Let's determine this according to the interleave only once */
	status_OK = CMD(0x80);

	timeo = jiffies + HZ;
retry:
	spin_lock_bh(chip->mutex);

	/* Check that the chip's ready to talk to us. */
	switch (chip->state) {
	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
	case FL_READY:
		cfi_write(map, CMD(0x70), adr);
		chip->state = FL_STATUS;
		/* fall through -- read back the status we just requested */

	case FL_STATUS:
		status = cfi_read(map, adr);
		if ((status & status_OK) == status_OK)
			break;
		
		/* Urgh. Chip not yet ready to talk to us. */
		if (time_after(jiffies, timeo)) {
			spin_unlock_bh(chip->mutex);
			printk(KERN_ERR "waiting for chip to be ready timed out in unlock\n");
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		spin_unlock_bh(chip->mutex);
		cfi_udelay(1);
		goto retry;

	default:
		/* Stick ourselves on a wait queue to be woken when
		   someone changes the status */
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		spin_unlock_bh(chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		timeo = jiffies + HZ;
		goto retry;
	}

	ENABLE_VPP(map);
	/* Clear Block Lock-Bits command sequence */
	cfi_write(map, CMD(0x60), adr);
	cfi_write(map, CMD(0xD0), adr);
	chip->state = FL_UNLOCKING;
	
	spin_unlock_bh(chip->mutex);
	schedule_timeout(HZ);
	spin_lock_bh(chip->mutex);

	/* FIXME. Use a timer to check this, and return immediately. */
	/* Once the state machine's known to be working I'll do that */

	timeo = jiffies + (HZ*2);
	for (;;) {

		status = cfi_read(map, adr);
		if ((status & status_OK) == status_OK)
			break;
		
		/* OK Still waiting */
		if (time_after(jiffies, timeo)) {
			cfi_write(map, CMD(0x70), adr);
			chip->state = FL_STATUS;
			printk(KERN_ERR "waiting for unlock to complete timed out. Xstatus = %x, status = %x.\n", status, cfi_read(map, adr));
			DISABLE_VPP(map);
			spin_unlock_bh(chip->mutex);
			return -EIO;
		}
		
		/* Latency issues. Drop the lock, wait a while and retry */
		spin_unlock_bh(chip->mutex);
		cfi_udelay(1);
		spin_lock_bh(chip->mutex);
	}
	
	/* Done and happy. */
	chip->state = FL_STATUS;
	DISABLE_VPP(map);
	wake_up(&chip->wq);
	spin_unlock_bh(chip->mutex);
	return 0;
}
/*
 * Unlock the block containing 'ofs'.  Note the asymmetry with
 * cfi_staa_lock(): no alignment/range validation is performed, and
 * do_unlock_oneblock() is called only once -- 'len' is used solely by
 * the DEBUG_LOCK_BITS dump loop.  NOTE(review): presumably clearing
 * one lock bit is sufficient for the whole range on these parts --
 * confirm against the flash datasheet before "fixing" this to loop.
 */
static int cfi_staa_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long adr;
	int chipnum, ret = 0;
#ifdef DEBUG_LOCK_BITS
	int ofs_factor = cfi->interleave * cfi->device_type;
#endif

	/* Split the absolute offset into (chip index, offset in chip). */
	chipnum = ofs >> cfi->chipshift;
	adr = ofs - (chipnum << cfi->chipshift);

#ifdef DEBUG_LOCK_BITS
	{
		unsigned long temp_adr = adr;
		unsigned long temp_len = len;
		 
		cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
                while (temp_len) {
			printk("before unlock %x: block status register is %x\n",temp_adr,cfi_read_query(map, temp_adr+(2*ofs_factor)));
			temp_adr += mtd->erasesize;
			temp_len -= mtd->erasesize;
		}
		cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);
	}
#endif

	ret = do_unlock_oneblock(map, &cfi->chips[chipnum], adr);

#ifdef DEBUG_LOCK_BITS
	cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
	printk("after unlock: block status register is %x\n",cfi_read_query(map, adr+(2*ofs_factor)));
	cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);
#endif
	
	return ret;
}
1354
1355 static int cfi_staa_suspend(struct mtd_info *mtd)
1356 {
1357         struct map_info *map = mtd->priv;
1358         struct cfi_private *cfi = map->fldrv_priv;
1359         int i;
1360         struct flchip *chip;
1361         int ret = 0;
1362
1363         for (i=0; !ret && i<cfi->numchips; i++) {
1364                 chip = &cfi->chips[i];
1365
1366                 spin_lock_bh(chip->mutex);
1367
1368                 switch(chip->state) {
1369                 case FL_READY:
1370                 case FL_STATUS:
1371                 case FL_CFI_QUERY:
1372                 case FL_JEDEC_QUERY:
1373                         chip->oldstate = chip->state;
1374                         chip->state = FL_PM_SUSPENDED;
1375                         /* No need to wake_up() on this state change - 
1376                          * as the whole point is that nobody can do anything
1377                          * with the chip now anyway.
1378                          */
1379                 case FL_PM_SUSPENDED:
1380                         break;
1381
1382                 default:
1383                         ret = -EAGAIN;
1384                         break;
1385                 }
1386                 spin_unlock_bh(chip->mutex);
1387         }
1388
1389         /* Unlock the chips again */
1390
1391         if (ret) {
1392                 for (i--; i >=0; i--) {
1393                         chip = &cfi->chips[i];
1394                         
1395                         spin_lock_bh(chip->mutex);
1396                         
1397                         if (chip->state == FL_PM_SUSPENDED) {
1398                                 /* No need to force it into a known state here,
1399                                    because we're returning failure, and it didn't
1400                                    get power cycled */
1401                                 chip->state = chip->oldstate;
1402                                 wake_up(&chip->wq);
1403                         }
1404                         spin_unlock_bh(chip->mutex);
1405                 }
1406         } 
1407         
1408         return ret;
1409 }
1410
1411 static void cfi_staa_resume(struct mtd_info *mtd)
1412 {
1413         struct map_info *map = mtd->priv;
1414         struct cfi_private *cfi = map->fldrv_priv;
1415         int i;
1416         struct flchip *chip;
1417
1418         for (i=0; i<cfi->numchips; i++) {
1419         
1420                 chip = &cfi->chips[i];
1421
1422                 spin_lock_bh(chip->mutex);
1423                 
1424                 /* Go to known state. Chip may have been power cycled */
1425                 if (chip->state == FL_PM_SUSPENDED) {
1426                         cfi_write(map, CMD(0xFF), 0);
1427                         chip->state = FL_READY;
1428                         wake_up(&chip->wq);
1429                 }
1430
1431                 spin_unlock_bh(chip->mutex);
1432         }
1433 }
1434
1435 static void cfi_staa_destroy(struct mtd_info *mtd)
1436 {
1437         struct map_info *map = mtd->priv;
1438         struct cfi_private *cfi = map->fldrv_priv;
1439         kfree(cfi->cmdset_priv);
1440         kfree(cfi);
1441 }
1442
/* Kernels older than 2.2.18 (version code 0x20212) have no
 * module_init/module_exit; alias our entry points to the old-style
 * init_module/cleanup_module names instead. */
#if LINUX_VERSION_CODE < 0x20212 && defined(MODULE)
#define cfi_staa_init init_module
#define cfi_staa_exit cleanup_module
#endif

/* Name under which this command set registers in the inter_module table. */
static char im_name[]="cfi_cmdset_0020";
1449
/*
 * Module init: publish cfi_cmdset_0020 in the inter_module table under
 * the name "cfi_cmdset_0020" so other modules can look it up.
 */
int __init cfi_staa_init(void)
{
	inter_module_register(im_name, THIS_MODULE, &cfi_cmdset_0020);
	return 0;
}
1455
/* Module exit: remove the registration made in cfi_staa_init(). */
static void __exit cfi_staa_exit(void)
{
	inter_module_unregister(im_name);
}
1460
/* Module entry points and license declaration. */
module_init(cfi_staa_init);
module_exit(cfi_staa_exit);

MODULE_LICENSE("GPL");